@azure/search-documents 12.2.0 → 12.3.0-alpha.20251031.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (717)
  1. package/dist/browser/errorModels.d.ts +23 -17
  2. package/dist/browser/errorModels.d.ts.map +1 -1
  3. package/dist/browser/errorModels.js.map +1 -1
  4. package/dist/browser/generated/data/models/index.d.ts +593 -20
  5. package/dist/browser/generated/data/models/index.d.ts.map +1 -1
  6. package/dist/browser/generated/data/models/index.js +216 -6
  7. package/dist/browser/generated/data/models/index.js.map +1 -1
  8. package/dist/browser/generated/data/models/mappers.d.ts +18 -0
  9. package/dist/browser/generated/data/models/mappers.d.ts.map +1 -1
  10. package/dist/browser/generated/data/models/mappers.js +463 -4
  11. package/dist/browser/generated/data/models/mappers.js.map +1 -1
  12. package/dist/browser/generated/data/models/parameters.d.ts +6 -0
  13. package/dist/browser/generated/data/models/parameters.d.ts.map +1 -1
  14. package/dist/browser/generated/data/models/parameters.js +60 -0
  15. package/dist/browser/generated/data/models/parameters.js.map +1 -1
  16. package/dist/browser/generated/data/operations/documents.js +20 -3
  17. package/dist/browser/generated/data/operations/documents.js.map +1 -1
  18. package/dist/browser/generated/data/searchClient.d.ts +3 -3
  19. package/dist/browser/generated/data/searchClient.d.ts.map +1 -1
  20. package/dist/browser/generated/data/searchClient.js +1 -1
  21. package/dist/browser/generated/data/searchClient.js.map +1 -1
  22. package/dist/browser/generated/knowledgeBase/index.d.ts +4 -0
  23. package/dist/browser/generated/knowledgeBase/index.d.ts.map +1 -0
  24. package/dist/browser/generated/knowledgeBase/index.js +11 -0
  25. package/dist/browser/generated/knowledgeBase/index.js.map +1 -0
  26. package/dist/browser/generated/knowledgeBase/models/index.d.ts +561 -0
  27. package/dist/browser/generated/knowledgeBase/models/index.d.ts.map +1 -0
  28. package/dist/browser/generated/knowledgeBase/models/index.js +62 -0
  29. package/dist/browser/generated/knowledgeBase/models/index.js.map +1 -0
  30. package/dist/browser/generated/knowledgeBase/models/mappers.d.ts +88 -0
  31. package/dist/browser/generated/knowledgeBase/models/mappers.d.ts.map +1 -0
  32. package/dist/browser/generated/knowledgeBase/models/mappers.js +1280 -0
  33. package/dist/browser/generated/knowledgeBase/models/mappers.js.map +1 -0
  34. package/dist/browser/generated/knowledgeBase/models/parameters.d.ts +9 -0
  35. package/dist/browser/generated/knowledgeBase/models/parameters.d.ts.map +1 -0
  36. package/dist/browser/generated/knowledgeBase/models/parameters.js +75 -0
  37. package/dist/browser/generated/knowledgeBase/models/parameters.js.map +1 -0
  38. package/dist/browser/generated/knowledgeBase/operations/index.d.ts +2 -0
  39. package/dist/browser/generated/knowledgeBase/operations/index.d.ts.map +1 -0
  40. package/dist/browser/generated/knowledgeBase/operations/index.js +9 -0
  41. package/dist/browser/generated/knowledgeBase/operations/index.js.map +1 -0
  42. package/dist/browser/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts +19 -0
  43. package/dist/browser/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts.map +1 -0
  44. package/dist/browser/generated/knowledgeBase/operations/knowledgeRetrieval.js +57 -0
  45. package/dist/browser/generated/knowledgeBase/operations/knowledgeRetrieval.js.map +1 -0
  46. package/dist/browser/generated/knowledgeBase/operationsInterfaces/index.d.ts +2 -0
  47. package/dist/browser/generated/knowledgeBase/operationsInterfaces/index.d.ts.map +1 -0
  48. package/dist/browser/generated/knowledgeBase/operationsInterfaces/index.js +9 -0
  49. package/dist/browser/generated/knowledgeBase/operationsInterfaces/index.js.map +1 -0
  50. package/dist/browser/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts +11 -0
  51. package/dist/browser/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts.map +1 -0
  52. package/dist/browser/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js +9 -0
  53. package/dist/browser/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js.map +1 -0
  54. package/dist/browser/generated/knowledgeBase/searchClient.d.ts +21 -0
  55. package/dist/browser/generated/knowledgeBase/searchClient.d.ts.map +1 -0
  56. package/dist/browser/generated/knowledgeBase/searchClient.js +88 -0
  57. package/dist/browser/generated/knowledgeBase/searchClient.js.map +1 -0
  58. package/dist/browser/generated/service/models/index.d.ts +1403 -131
  59. package/dist/browser/generated/service/models/index.d.ts.map +1 -1
  60. package/dist/browser/generated/service/models/index.js +274 -24
  61. package/dist/browser/generated/service/models/index.js.map +1 -1
  62. package/dist/browser/generated/service/models/mappers.d.ts +104 -17
  63. package/dist/browser/generated/service/models/mappers.d.ts.map +1 -1
  64. package/dist/browser/generated/service/models/mappers.js +3716 -1601
  65. package/dist/browser/generated/service/models/mappers.js.map +1 -1
  66. package/dist/browser/generated/service/models/parameters.d.ts +14 -2
  67. package/dist/browser/generated/service/models/parameters.d.ts.map +1 -1
  68. package/dist/browser/generated/service/models/parameters.js +89 -7
  69. package/dist/browser/generated/service/models/parameters.js.map +1 -1
  70. package/dist/browser/generated/service/operations/aliases.d.ts +43 -1
  71. package/dist/browser/generated/service/operations/aliases.d.ts.map +1 -1
  72. package/dist/browser/generated/service/operations/aliases.js +160 -200
  73. package/dist/browser/generated/service/operations/aliases.js.map +1 -1
  74. package/dist/browser/generated/service/operations/dataSources.js +4 -1
  75. package/dist/browser/generated/service/operations/dataSources.js.map +1 -1
  76. package/dist/browser/generated/service/operations/index.d.ts +3 -0
  77. package/dist/browser/generated/service/operations/index.d.ts.map +1 -1
  78. package/dist/browser/generated/service/operations/index.js +3 -0
  79. package/dist/browser/generated/service/operations/index.js.map +1 -1
  80. package/dist/browser/generated/service/operations/indexers.d.ts +14 -1
  81. package/dist/browser/generated/service/operations/indexers.d.ts.map +1 -1
  82. package/dist/browser/generated/service/operations/indexers.js +54 -1
  83. package/dist/browser/generated/service/operations/indexers.js.map +1 -1
  84. package/dist/browser/generated/service/operations/knowledgeBases.d.ts +43 -0
  85. package/dist/browser/generated/service/operations/knowledgeBases.d.ts.map +1 -0
  86. package/dist/browser/generated/service/operations/knowledgeBases.js +160 -0
  87. package/dist/browser/generated/service/operations/knowledgeBases.js.map +1 -0
  88. package/dist/browser/generated/service/operations/knowledgeSources.d.ts +49 -0
  89. package/dist/browser/generated/service/operations/knowledgeSources.d.ts.map +1 -0
  90. package/dist/browser/generated/service/operations/knowledgeSources.js +184 -0
  91. package/dist/browser/generated/service/operations/knowledgeSources.js.map +1 -0
  92. package/dist/browser/generated/service/operations/skillsets.d.ts +8 -1
  93. package/dist/browser/generated/service/operations/skillsets.d.ts.map +1 -1
  94. package/dist/browser/generated/service/operations/skillsets.js +30 -1
  95. package/dist/browser/generated/service/operations/skillsets.js.map +1 -1
  96. package/dist/browser/generated/service/operationsInterfaces/aliases.d.ts +35 -1
  97. package/dist/browser/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
  98. package/dist/browser/generated/service/operationsInterfaces/aliases.js +7 -66
  99. package/dist/browser/generated/service/operationsInterfaces/aliases.js.map +1 -1
  100. package/dist/browser/generated/service/operationsInterfaces/index.d.ts +3 -0
  101. package/dist/browser/generated/service/operationsInterfaces/index.d.ts.map +1 -1
  102. package/dist/browser/generated/service/operationsInterfaces/index.js +3 -0
  103. package/dist/browser/generated/service/operationsInterfaces/index.js.map +1 -1
  104. package/dist/browser/generated/service/operationsInterfaces/indexers.d.ts +14 -1
  105. package/dist/browser/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
  106. package/dist/browser/generated/service/operationsInterfaces/indexers.js.map +1 -1
  107. package/dist/browser/generated/service/operationsInterfaces/knowledgeBases.d.ts +35 -0
  108. package/dist/browser/generated/service/operationsInterfaces/knowledgeBases.d.ts.map +1 -0
  109. package/dist/browser/generated/service/operationsInterfaces/knowledgeBases.js +9 -0
  110. package/dist/browser/generated/service/operationsInterfaces/knowledgeBases.js.map +1 -0
  111. package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.d.ts +41 -0
  112. package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +1 -0
  113. package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.js +9 -0
  114. package/dist/browser/generated/service/operationsInterfaces/knowledgeSources.js.map +1 -0
  115. package/dist/browser/generated/service/operationsInterfaces/skillsets.d.ts +8 -1
  116. package/dist/browser/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
  117. package/dist/browser/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  118. package/dist/browser/generated/service/searchServiceClient.d.ts +12 -4
  119. package/dist/browser/generated/service/searchServiceClient.d.ts.map +1 -1
  120. package/dist/browser/generated/service/searchServiceClient.js +31 -2
  121. package/dist/browser/generated/service/searchServiceClient.js.map +1 -1
  122. package/dist/browser/index.d.ts +7 -4
  123. package/dist/browser/index.d.ts.map +1 -1
  124. package/dist/browser/index.js +5 -3
  125. package/dist/browser/index.js.map +1 -1
  126. package/dist/browser/indexDocumentsBatch.d.ts +6 -5
  127. package/dist/browser/indexDocumentsBatch.d.ts.map +1 -1
  128. package/dist/browser/indexDocumentsBatch.js +5 -4
  129. package/dist/browser/indexDocumentsBatch.js.map +1 -1
  130. package/dist/browser/indexModels.d.ts +236 -30
  131. package/dist/browser/indexModels.d.ts.map +1 -1
  132. package/dist/browser/indexModels.js.map +1 -1
  133. package/dist/browser/knowledgeBaseModels.d.ts +44 -0
  134. package/dist/browser/knowledgeBaseModels.d.ts.map +1 -0
  135. package/dist/browser/knowledgeBaseModels.js +4 -0
  136. package/dist/browser/knowledgeBaseModels.js.map +1 -0
  137. package/dist/browser/knowledgeRetrievalClient.d.ts +68 -0
  138. package/dist/browser/knowledgeRetrievalClient.d.ts.map +1 -0
  139. package/dist/browser/knowledgeRetrievalClient.js +108 -0
  140. package/dist/browser/knowledgeRetrievalClient.js.map +1 -0
  141. package/dist/browser/odata.d.ts +5 -2
  142. package/dist/browser/odata.d.ts.map +1 -1
  143. package/dist/browser/odata.js +5 -2
  144. package/dist/browser/odata.js.map +1 -1
  145. package/dist/browser/odataMetadataPolicy.d.ts +1 -1
  146. package/dist/browser/odataMetadataPolicy.d.ts.map +1 -1
  147. package/dist/browser/odataMetadataPolicy.js.map +1 -1
  148. package/dist/browser/searchApiKeyCredentialPolicy.d.ts +4 -4
  149. package/dist/browser/searchApiKeyCredentialPolicy.d.ts.map +1 -1
  150. package/dist/browser/searchApiKeyCredentialPolicy.js +2 -2
  151. package/dist/browser/searchApiKeyCredentialPolicy.js.map +1 -1
  152. package/dist/browser/searchClient.d.ts +64 -53
  153. package/dist/browser/searchClient.d.ts.map +1 -1
  154. package/dist/browser/searchClient.js +113 -53
  155. package/dist/browser/searchClient.js.map +1 -1
  156. package/dist/browser/searchIndexClient.d.ts +157 -18
  157. package/dist/browser/searchIndexClient.d.ts.map +1 -1
  158. package/dist/browser/searchIndexClient.js +482 -17
  159. package/dist/browser/searchIndexClient.js.map +1 -1
  160. package/dist/browser/searchIndexerClient.d.ts +26 -11
  161. package/dist/browser/searchIndexerClient.d.ts.map +1 -1
  162. package/dist/browser/searchIndexerClient.js +62 -7
  163. package/dist/browser/searchIndexerClient.js.map +1 -1
  164. package/dist/browser/searchIndexingBufferedSender.d.ts +3 -3
  165. package/dist/browser/searchIndexingBufferedSender.d.ts.map +1 -1
  166. package/dist/browser/searchIndexingBufferedSender.js +1 -1
  167. package/dist/browser/searchIndexingBufferedSender.js.map +1 -1
  168. package/dist/browser/serviceModels.d.ts +1228 -220
  169. package/dist/browser/serviceModels.d.ts.map +1 -1
  170. package/dist/browser/serviceModels.js +8 -7
  171. package/dist/browser/serviceModels.js.map +1 -1
  172. package/dist/browser/serviceUtils.d.ts +14 -6
  173. package/dist/browser/serviceUtils.d.ts.map +1 -1
  174. package/dist/browser/serviceUtils.js +301 -45
  175. package/dist/browser/serviceUtils.js.map +1 -1
  176. package/dist/browser/synonymMapHelper-browser.d.mts.map +1 -1
  177. package/dist/browser/synonymMapHelper-browser.mjs.map +1 -1
  178. package/dist/browser/synonymMapHelper.d.ts +1 -1
  179. package/dist/commonjs/errorModels.d.ts +23 -17
  180. package/dist/commonjs/errorModels.d.ts.map +1 -1
  181. package/dist/commonjs/errorModels.js.map +1 -1
  182. package/dist/commonjs/generated/data/models/index.d.ts +593 -20
  183. package/dist/commonjs/generated/data/models/index.d.ts.map +1 -1
  184. package/dist/commonjs/generated/data/models/index.js +217 -7
  185. package/dist/commonjs/generated/data/models/index.js.map +1 -1
  186. package/dist/commonjs/generated/data/models/mappers.d.ts +18 -0
  187. package/dist/commonjs/generated/data/models/mappers.d.ts.map +1 -1
  188. package/dist/commonjs/generated/data/models/mappers.js +464 -5
  189. package/dist/commonjs/generated/data/models/mappers.js.map +1 -1
  190. package/dist/commonjs/generated/data/models/parameters.d.ts +6 -0
  191. package/dist/commonjs/generated/data/models/parameters.d.ts.map +1 -1
  192. package/dist/commonjs/generated/data/models/parameters.js +62 -2
  193. package/dist/commonjs/generated/data/models/parameters.js.map +1 -1
  194. package/dist/commonjs/generated/data/operations/documents.js +20 -3
  195. package/dist/commonjs/generated/data/operations/documents.js.map +1 -1
  196. package/dist/commonjs/generated/data/searchClient.d.ts +3 -3
  197. package/dist/commonjs/generated/data/searchClient.d.ts.map +1 -1
  198. package/dist/commonjs/generated/data/searchClient.js +1 -1
  199. package/dist/commonjs/generated/data/searchClient.js.map +1 -1
  200. package/dist/commonjs/generated/knowledgeBase/index.d.ts +4 -0
  201. package/dist/commonjs/generated/knowledgeBase/index.d.ts.map +1 -0
  202. package/dist/commonjs/generated/knowledgeBase/index.js +16 -0
  203. package/dist/commonjs/generated/knowledgeBase/index.js.map +1 -0
  204. package/dist/commonjs/generated/knowledgeBase/models/index.d.ts +561 -0
  205. package/dist/commonjs/generated/knowledgeBase/models/index.d.ts.map +1 -0
  206. package/dist/commonjs/generated/knowledgeBase/models/index.js +65 -0
  207. package/dist/commonjs/generated/knowledgeBase/models/index.js.map +1 -0
  208. package/dist/commonjs/generated/knowledgeBase/models/mappers.d.ts +88 -0
  209. package/dist/commonjs/generated/knowledgeBase/models/mappers.d.ts.map +1 -0
  210. package/dist/commonjs/generated/knowledgeBase/models/mappers.js +1284 -0
  211. package/dist/commonjs/generated/knowledgeBase/models/mappers.js.map +1 -0
  212. package/dist/commonjs/generated/knowledgeBase/models/parameters.d.ts +9 -0
  213. package/dist/commonjs/generated/knowledgeBase/models/parameters.d.ts.map +1 -0
  214. package/dist/commonjs/generated/knowledgeBase/models/parameters.js +78 -0
  215. package/dist/commonjs/generated/knowledgeBase/models/parameters.js.map +1 -0
  216. package/dist/commonjs/generated/knowledgeBase/operations/index.d.ts +2 -0
  217. package/dist/commonjs/generated/knowledgeBase/operations/index.d.ts.map +1 -0
  218. package/dist/commonjs/generated/knowledgeBase/operations/index.js +12 -0
  219. package/dist/commonjs/generated/knowledgeBase/operations/index.js.map +1 -0
  220. package/dist/commonjs/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts +19 -0
  221. package/dist/commonjs/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts.map +1 -0
  222. package/dist/commonjs/generated/knowledgeBase/operations/knowledgeRetrieval.js +62 -0
  223. package/dist/commonjs/generated/knowledgeBase/operations/knowledgeRetrieval.js.map +1 -0
  224. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/index.d.ts +2 -0
  225. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/index.d.ts.map +1 -0
  226. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/index.js +12 -0
  227. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/index.js.map +1 -0
  228. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts +11 -0
  229. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts.map +1 -0
  230. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js +10 -0
  231. package/dist/commonjs/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js.map +1 -0
  232. package/dist/commonjs/generated/knowledgeBase/searchClient.d.ts +21 -0
  233. package/dist/commonjs/generated/knowledgeBase/searchClient.d.ts.map +1 -0
  234. package/dist/commonjs/generated/knowledgeBase/searchClient.js +93 -0
  235. package/dist/commonjs/generated/knowledgeBase/searchClient.js.map +1 -0
  236. package/dist/commonjs/generated/service/models/index.d.ts +1403 -131
  237. package/dist/commonjs/generated/service/models/index.d.ts.map +1 -1
  238. package/dist/commonjs/generated/service/models/index.js +276 -25
  239. package/dist/commonjs/generated/service/models/index.js.map +1 -1
  240. package/dist/commonjs/generated/service/models/mappers.d.ts +104 -17
  241. package/dist/commonjs/generated/service/models/mappers.d.ts.map +1 -1
  242. package/dist/commonjs/generated/service/models/mappers.js +3721 -1605
  243. package/dist/commonjs/generated/service/models/mappers.js.map +1 -1
  244. package/dist/commonjs/generated/service/models/parameters.d.ts +14 -2
  245. package/dist/commonjs/generated/service/models/parameters.d.ts.map +1 -1
  246. package/dist/commonjs/generated/service/models/parameters.js +89 -7
  247. package/dist/commonjs/generated/service/models/parameters.js.map +1 -1
  248. package/dist/commonjs/generated/service/operations/aliases.d.ts +43 -1
  249. package/dist/commonjs/generated/service/operations/aliases.d.ts.map +1 -1
  250. package/dist/commonjs/generated/service/operations/aliases.js +163 -199
  251. package/dist/commonjs/generated/service/operations/aliases.js.map +1 -1
  252. package/dist/commonjs/generated/service/operations/dataSources.js +4 -1
  253. package/dist/commonjs/generated/service/operations/dataSources.js.map +1 -1
  254. package/dist/commonjs/generated/service/operations/index.d.ts +3 -0
  255. package/dist/commonjs/generated/service/operations/index.d.ts.map +1 -1
  256. package/dist/commonjs/generated/service/operations/index.js +3 -0
  257. package/dist/commonjs/generated/service/operations/index.js.map +1 -1
  258. package/dist/commonjs/generated/service/operations/indexers.d.ts +14 -1
  259. package/dist/commonjs/generated/service/operations/indexers.d.ts.map +1 -1
  260. package/dist/commonjs/generated/service/operations/indexers.js +54 -1
  261. package/dist/commonjs/generated/service/operations/indexers.js.map +1 -1
  262. package/dist/commonjs/generated/service/operations/knowledgeBases.d.ts +43 -0
  263. package/dist/commonjs/generated/service/operations/knowledgeBases.d.ts.map +1 -0
  264. package/dist/commonjs/generated/service/operations/knowledgeBases.js +165 -0
  265. package/dist/commonjs/generated/service/operations/knowledgeBases.js.map +1 -0
  266. package/dist/commonjs/generated/service/operations/knowledgeSources.d.ts +49 -0
  267. package/dist/commonjs/generated/service/operations/knowledgeSources.d.ts.map +1 -0
  268. package/dist/commonjs/generated/service/operations/knowledgeSources.js +189 -0
  269. package/dist/commonjs/generated/service/operations/knowledgeSources.js.map +1 -0
  270. package/dist/commonjs/generated/service/operations/skillsets.d.ts +8 -1
  271. package/dist/commonjs/generated/service/operations/skillsets.d.ts.map +1 -1
  272. package/dist/commonjs/generated/service/operations/skillsets.js +30 -1
  273. package/dist/commonjs/generated/service/operations/skillsets.js.map +1 -1
  274. package/dist/commonjs/generated/service/operationsInterfaces/aliases.d.ts +35 -1
  275. package/dist/commonjs/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
  276. package/dist/commonjs/generated/service/operationsInterfaces/aliases.js +7 -66
  277. package/dist/commonjs/generated/service/operationsInterfaces/aliases.js.map +1 -1
  278. package/dist/commonjs/generated/service/operationsInterfaces/index.d.ts +3 -0
  279. package/dist/commonjs/generated/service/operationsInterfaces/index.d.ts.map +1 -1
  280. package/dist/commonjs/generated/service/operationsInterfaces/index.js +3 -0
  281. package/dist/commonjs/generated/service/operationsInterfaces/index.js.map +1 -1
  282. package/dist/commonjs/generated/service/operationsInterfaces/indexers.d.ts +14 -1
  283. package/dist/commonjs/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
  284. package/dist/commonjs/generated/service/operationsInterfaces/indexers.js.map +1 -1
  285. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeBases.d.ts +35 -0
  286. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeBases.d.ts.map +1 -0
  287. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeBases.js +10 -0
  288. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeBases.js.map +1 -0
  289. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.d.ts +41 -0
  290. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +1 -0
  291. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.js +10 -0
  292. package/dist/commonjs/generated/service/operationsInterfaces/knowledgeSources.js.map +1 -0
  293. package/dist/commonjs/generated/service/operationsInterfaces/skillsets.d.ts +8 -1
  294. package/dist/commonjs/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
  295. package/dist/commonjs/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  296. package/dist/commonjs/generated/service/searchServiceClient.d.ts +12 -4
  297. package/dist/commonjs/generated/service/searchServiceClient.d.ts.map +1 -1
  298. package/dist/commonjs/generated/service/searchServiceClient.js +30 -1
  299. package/dist/commonjs/generated/service/searchServiceClient.js.map +1 -1
  300. package/dist/commonjs/index.d.ts +7 -4
  301. package/dist/commonjs/index.d.ts.map +1 -1
  302. package/dist/commonjs/index.js +71 -44
  303. package/dist/commonjs/index.js.map +1 -1
  304. package/dist/commonjs/indexDocumentsBatch.d.ts +6 -5
  305. package/dist/commonjs/indexDocumentsBatch.d.ts.map +1 -1
  306. package/dist/commonjs/indexDocumentsBatch.js +5 -4
  307. package/dist/commonjs/indexDocumentsBatch.js.map +1 -1
  308. package/dist/commonjs/indexModels.d.ts +236 -30
  309. package/dist/commonjs/indexModels.d.ts.map +1 -1
  310. package/dist/commonjs/indexModels.js.map +1 -1
  311. package/dist/commonjs/knowledgeBaseModels.d.ts +44 -0
  312. package/dist/commonjs/knowledgeBaseModels.d.ts.map +1 -0
  313. package/dist/commonjs/knowledgeBaseModels.js +5 -0
  314. package/dist/commonjs/knowledgeBaseModels.js.map +1 -0
  315. package/dist/commonjs/knowledgeRetrievalClient.d.ts +68 -0
  316. package/dist/commonjs/knowledgeRetrievalClient.d.ts.map +1 -0
  317. package/dist/commonjs/knowledgeRetrievalClient.js +113 -0
  318. package/dist/commonjs/knowledgeRetrievalClient.js.map +1 -0
  319. package/dist/commonjs/odata.d.ts +5 -2
  320. package/dist/commonjs/odata.d.ts.map +1 -1
  321. package/dist/commonjs/odata.js +5 -2
  322. package/dist/commonjs/odata.js.map +1 -1
  323. package/dist/commonjs/odataMetadataPolicy.d.ts +1 -1
  324. package/dist/commonjs/odataMetadataPolicy.d.ts.map +1 -1
  325. package/dist/commonjs/odataMetadataPolicy.js.map +1 -1
  326. package/dist/commonjs/searchApiKeyCredentialPolicy.d.ts +4 -4
  327. package/dist/commonjs/searchApiKeyCredentialPolicy.d.ts.map +1 -1
  328. package/dist/commonjs/searchApiKeyCredentialPolicy.js +2 -2
  329. package/dist/commonjs/searchApiKeyCredentialPolicy.js.map +1 -1
  330. package/dist/commonjs/searchClient.d.ts +64 -53
  331. package/dist/commonjs/searchClient.d.ts.map +1 -1
  332. package/dist/commonjs/searchClient.js +113 -53
  333. package/dist/commonjs/searchClient.js.map +1 -1
  334. package/dist/commonjs/searchIndexClient.d.ts +157 -18
  335. package/dist/commonjs/searchIndexClient.d.ts.map +1 -1
  336. package/dist/commonjs/searchIndexClient.js +482 -17
  337. package/dist/commonjs/searchIndexClient.js.map +1 -1
  338. package/dist/commonjs/searchIndexerClient.d.ts +26 -11
  339. package/dist/commonjs/searchIndexerClient.d.ts.map +1 -1
  340. package/dist/commonjs/searchIndexerClient.js +62 -7
  341. package/dist/commonjs/searchIndexerClient.js.map +1 -1
  342. package/dist/commonjs/searchIndexingBufferedSender.d.ts +3 -3
  343. package/dist/commonjs/searchIndexingBufferedSender.d.ts.map +1 -1
  344. package/dist/commonjs/searchIndexingBufferedSender.js +2 -2
  345. package/dist/commonjs/searchIndexingBufferedSender.js.map +1 -1
  346. package/dist/commonjs/serviceModels.d.ts +1228 -220
  347. package/dist/commonjs/serviceModels.d.ts.map +1 -1
  348. package/dist/commonjs/serviceModels.js +8 -7
  349. package/dist/commonjs/serviceModels.js.map +1 -1
  350. package/dist/commonjs/serviceUtils.d.ts +14 -6
  351. package/dist/commonjs/serviceUtils.d.ts.map +1 -1
  352. package/dist/commonjs/serviceUtils.js +306 -44
  353. package/dist/commonjs/serviceUtils.js.map +1 -1
  354. package/dist/commonjs/synonymMapHelper.d.ts +1 -1
  355. package/dist/commonjs/synonymMapHelper.d.ts.map +1 -1
  356. package/dist/commonjs/synonymMapHelper.js +3 -3
  357. package/dist/commonjs/synonymMapHelper.js.map +1 -1
  358. package/dist/commonjs/tsdoc-metadata.json +1 -1
  359. package/dist/esm/errorModels.d.ts +23 -17
  360. package/dist/esm/errorModels.d.ts.map +1 -1
  361. package/dist/esm/errorModels.js.map +1 -1
  362. package/dist/esm/generated/data/models/index.d.ts +593 -20
  363. package/dist/esm/generated/data/models/index.d.ts.map +1 -1
  364. package/dist/esm/generated/data/models/index.js +216 -6
  365. package/dist/esm/generated/data/models/index.js.map +1 -1
  366. package/dist/esm/generated/data/models/mappers.d.ts +18 -0
  367. package/dist/esm/generated/data/models/mappers.d.ts.map +1 -1
  368. package/dist/esm/generated/data/models/mappers.js +463 -4
  369. package/dist/esm/generated/data/models/mappers.js.map +1 -1
  370. package/dist/esm/generated/data/models/parameters.d.ts +6 -0
  371. package/dist/esm/generated/data/models/parameters.d.ts.map +1 -1
  372. package/dist/esm/generated/data/models/parameters.js +60 -0
  373. package/dist/esm/generated/data/models/parameters.js.map +1 -1
  374. package/dist/esm/generated/data/operations/documents.js +20 -3
  375. package/dist/esm/generated/data/operations/documents.js.map +1 -1
  376. package/dist/esm/generated/data/searchClient.d.ts +3 -3
  377. package/dist/esm/generated/data/searchClient.d.ts.map +1 -1
  378. package/dist/esm/generated/data/searchClient.js +1 -1
  379. package/dist/esm/generated/data/searchClient.js.map +1 -1
  380. package/dist/esm/generated/knowledgeBase/index.d.ts +4 -0
  381. package/dist/esm/generated/knowledgeBase/index.d.ts.map +1 -0
  382. package/dist/esm/generated/knowledgeBase/index.js +11 -0
  383. package/dist/esm/generated/knowledgeBase/index.js.map +1 -0
  384. package/dist/esm/generated/knowledgeBase/models/index.d.ts +561 -0
  385. package/dist/esm/generated/knowledgeBase/models/index.d.ts.map +1 -0
  386. package/dist/esm/generated/knowledgeBase/models/index.js +62 -0
  387. package/dist/esm/generated/knowledgeBase/models/index.js.map +1 -0
  388. package/dist/esm/generated/knowledgeBase/models/mappers.d.ts +88 -0
  389. package/dist/esm/generated/knowledgeBase/models/mappers.d.ts.map +1 -0
  390. package/dist/esm/generated/knowledgeBase/models/mappers.js +1280 -0
  391. package/dist/esm/generated/knowledgeBase/models/mappers.js.map +1 -0
  392. package/dist/esm/generated/knowledgeBase/models/parameters.d.ts +9 -0
  393. package/dist/esm/generated/knowledgeBase/models/parameters.d.ts.map +1 -0
  394. package/dist/esm/generated/knowledgeBase/models/parameters.js +75 -0
  395. package/dist/esm/generated/knowledgeBase/models/parameters.js.map +1 -0
  396. package/dist/esm/generated/knowledgeBase/operations/index.d.ts +2 -0
  397. package/dist/esm/generated/knowledgeBase/operations/index.d.ts.map +1 -0
  398. package/dist/esm/generated/knowledgeBase/operations/index.js +9 -0
  399. package/dist/esm/generated/knowledgeBase/operations/index.js.map +1 -0
  400. package/dist/esm/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts +19 -0
  401. package/dist/esm/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts.map +1 -0
  402. package/dist/esm/generated/knowledgeBase/operations/knowledgeRetrieval.js +57 -0
  403. package/dist/esm/generated/knowledgeBase/operations/knowledgeRetrieval.js.map +1 -0
  404. package/dist/esm/generated/knowledgeBase/operationsInterfaces/index.d.ts +2 -0
  405. package/dist/esm/generated/knowledgeBase/operationsInterfaces/index.d.ts.map +1 -0
  406. package/dist/esm/generated/knowledgeBase/operationsInterfaces/index.js +9 -0
  407. package/dist/esm/generated/knowledgeBase/operationsInterfaces/index.js.map +1 -0
  408. package/dist/esm/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts +11 -0
  409. package/dist/esm/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts.map +1 -0
  410. package/dist/esm/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js +9 -0
  411. package/dist/esm/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js.map +1 -0
  412. package/dist/esm/generated/knowledgeBase/searchClient.d.ts +21 -0
  413. package/dist/esm/generated/knowledgeBase/searchClient.d.ts.map +1 -0
  414. package/dist/esm/generated/knowledgeBase/searchClient.js +88 -0
  415. package/dist/esm/generated/knowledgeBase/searchClient.js.map +1 -0
  416. package/dist/esm/generated/service/models/index.d.ts +1403 -131
  417. package/dist/esm/generated/service/models/index.d.ts.map +1 -1
  418. package/dist/esm/generated/service/models/index.js +274 -24
  419. package/dist/esm/generated/service/models/index.js.map +1 -1
  420. package/dist/esm/generated/service/models/mappers.d.ts +104 -17
  421. package/dist/esm/generated/service/models/mappers.d.ts.map +1 -1
  422. package/dist/esm/generated/service/models/mappers.js +3716 -1601
  423. package/dist/esm/generated/service/models/mappers.js.map +1 -1
  424. package/dist/esm/generated/service/models/parameters.d.ts +14 -2
  425. package/dist/esm/generated/service/models/parameters.d.ts.map +1 -1
  426. package/dist/esm/generated/service/models/parameters.js +89 -7
  427. package/dist/esm/generated/service/models/parameters.js.map +1 -1
  428. package/dist/esm/generated/service/operations/aliases.d.ts +43 -1
  429. package/dist/esm/generated/service/operations/aliases.d.ts.map +1 -1
  430. package/dist/esm/generated/service/operations/aliases.js +160 -200
  431. package/dist/esm/generated/service/operations/aliases.js.map +1 -1
  432. package/dist/esm/generated/service/operations/dataSources.js +4 -1
  433. package/dist/esm/generated/service/operations/dataSources.js.map +1 -1
  434. package/dist/esm/generated/service/operations/index.d.ts +3 -0
  435. package/dist/esm/generated/service/operations/index.d.ts.map +1 -1
  436. package/dist/esm/generated/service/operations/index.js +3 -0
  437. package/dist/esm/generated/service/operations/index.js.map +1 -1
  438. package/dist/esm/generated/service/operations/indexers.d.ts +14 -1
  439. package/dist/esm/generated/service/operations/indexers.d.ts.map +1 -1
  440. package/dist/esm/generated/service/operations/indexers.js +54 -1
  441. package/dist/esm/generated/service/operations/indexers.js.map +1 -1
  442. package/dist/esm/generated/service/operations/knowledgeBases.d.ts +43 -0
  443. package/dist/esm/generated/service/operations/knowledgeBases.d.ts.map +1 -0
  444. package/dist/esm/generated/service/operations/knowledgeBases.js +160 -0
  445. package/dist/esm/generated/service/operations/knowledgeBases.js.map +1 -0
  446. package/dist/esm/generated/service/operations/knowledgeSources.d.ts +49 -0
  447. package/dist/esm/generated/service/operations/knowledgeSources.d.ts.map +1 -0
  448. package/dist/esm/generated/service/operations/knowledgeSources.js +184 -0
  449. package/dist/esm/generated/service/operations/knowledgeSources.js.map +1 -0
  450. package/dist/esm/generated/service/operations/skillsets.d.ts +8 -1
  451. package/dist/esm/generated/service/operations/skillsets.d.ts.map +1 -1
  452. package/dist/esm/generated/service/operations/skillsets.js +30 -1
  453. package/dist/esm/generated/service/operations/skillsets.js.map +1 -1
  454. package/dist/esm/generated/service/operationsInterfaces/aliases.d.ts +35 -1
  455. package/dist/esm/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
  456. package/dist/esm/generated/service/operationsInterfaces/aliases.js +7 -66
  457. package/dist/esm/generated/service/operationsInterfaces/aliases.js.map +1 -1
  458. package/dist/esm/generated/service/operationsInterfaces/index.d.ts +3 -0
  459. package/dist/esm/generated/service/operationsInterfaces/index.d.ts.map +1 -1
  460. package/dist/esm/generated/service/operationsInterfaces/index.js +3 -0
  461. package/dist/esm/generated/service/operationsInterfaces/index.js.map +1 -1
  462. package/dist/esm/generated/service/operationsInterfaces/indexers.d.ts +14 -1
  463. package/dist/esm/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
  464. package/dist/esm/generated/service/operationsInterfaces/indexers.js.map +1 -1
  465. package/dist/esm/generated/service/operationsInterfaces/knowledgeBases.d.ts +35 -0
  466. package/dist/esm/generated/service/operationsInterfaces/knowledgeBases.d.ts.map +1 -0
  467. package/dist/esm/generated/service/operationsInterfaces/knowledgeBases.js +9 -0
  468. package/dist/esm/generated/service/operationsInterfaces/knowledgeBases.js.map +1 -0
  469. package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.d.ts +41 -0
  470. package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +1 -0
  471. package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.js +9 -0
  472. package/dist/esm/generated/service/operationsInterfaces/knowledgeSources.js.map +1 -0
  473. package/dist/esm/generated/service/operationsInterfaces/skillsets.d.ts +8 -1
  474. package/dist/esm/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
  475. package/dist/esm/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  476. package/dist/esm/generated/service/searchServiceClient.d.ts +12 -4
  477. package/dist/esm/generated/service/searchServiceClient.d.ts.map +1 -1
  478. package/dist/esm/generated/service/searchServiceClient.js +31 -2
  479. package/dist/esm/generated/service/searchServiceClient.js.map +1 -1
  480. package/dist/esm/index.d.ts +7 -4
  481. package/dist/esm/index.d.ts.map +1 -1
  482. package/dist/esm/index.js +5 -3
  483. package/dist/esm/index.js.map +1 -1
  484. package/dist/esm/indexDocumentsBatch.d.ts +6 -5
  485. package/dist/esm/indexDocumentsBatch.d.ts.map +1 -1
  486. package/dist/esm/indexDocumentsBatch.js +5 -4
  487. package/dist/esm/indexDocumentsBatch.js.map +1 -1
  488. package/dist/esm/indexModels.d.ts +236 -30
  489. package/dist/esm/indexModels.d.ts.map +1 -1
  490. package/dist/esm/indexModels.js.map +1 -1
  491. package/dist/esm/knowledgeBaseModels.d.ts +44 -0
  492. package/dist/esm/knowledgeBaseModels.d.ts.map +1 -0
  493. package/dist/esm/knowledgeBaseModels.js +4 -0
  494. package/dist/esm/knowledgeBaseModels.js.map +1 -0
  495. package/dist/esm/knowledgeRetrievalClient.d.ts +68 -0
  496. package/dist/esm/knowledgeRetrievalClient.d.ts.map +1 -0
  497. package/dist/esm/knowledgeRetrievalClient.js +108 -0
  498. package/dist/esm/knowledgeRetrievalClient.js.map +1 -0
  499. package/dist/esm/odata.d.ts +5 -2
  500. package/dist/esm/odata.d.ts.map +1 -1
  501. package/dist/esm/odata.js +5 -2
  502. package/dist/esm/odata.js.map +1 -1
  503. package/dist/esm/odataMetadataPolicy.d.ts +1 -1
  504. package/dist/esm/odataMetadataPolicy.d.ts.map +1 -1
  505. package/dist/esm/odataMetadataPolicy.js.map +1 -1
  506. package/dist/esm/searchApiKeyCredentialPolicy.d.ts +4 -4
  507. package/dist/esm/searchApiKeyCredentialPolicy.d.ts.map +1 -1
  508. package/dist/esm/searchApiKeyCredentialPolicy.js +2 -2
  509. package/dist/esm/searchApiKeyCredentialPolicy.js.map +1 -1
  510. package/dist/esm/searchClient.d.ts +64 -53
  511. package/dist/esm/searchClient.d.ts.map +1 -1
  512. package/dist/esm/searchClient.js +113 -53
  513. package/dist/esm/searchClient.js.map +1 -1
  514. package/dist/esm/searchIndexClient.d.ts +157 -18
  515. package/dist/esm/searchIndexClient.d.ts.map +1 -1
  516. package/dist/esm/searchIndexClient.js +482 -17
  517. package/dist/esm/searchIndexClient.js.map +1 -1
  518. package/dist/esm/searchIndexerClient.d.ts +26 -11
  519. package/dist/esm/searchIndexerClient.d.ts.map +1 -1
  520. package/dist/esm/searchIndexerClient.js +62 -7
  521. package/dist/esm/searchIndexerClient.js.map +1 -1
  522. package/dist/esm/searchIndexingBufferedSender.d.ts +3 -3
  523. package/dist/esm/searchIndexingBufferedSender.d.ts.map +1 -1
  524. package/dist/esm/searchIndexingBufferedSender.js +1 -1
  525. package/dist/esm/searchIndexingBufferedSender.js.map +1 -1
  526. package/dist/esm/serviceModels.d.ts +1228 -220
  527. package/dist/esm/serviceModels.d.ts.map +1 -1
  528. package/dist/esm/serviceModels.js +8 -7
  529. package/dist/esm/serviceModels.js.map +1 -1
  530. package/dist/esm/serviceUtils.d.ts +14 -6
  531. package/dist/esm/serviceUtils.d.ts.map +1 -1
  532. package/dist/esm/serviceUtils.js +301 -45
  533. package/dist/esm/serviceUtils.js.map +1 -1
  534. package/dist/esm/synonymMapHelper.d.ts +1 -1
  535. package/dist/esm/synonymMapHelper.d.ts.map +1 -1
  536. package/dist/esm/synonymMapHelper.js +2 -2
  537. package/dist/esm/synonymMapHelper.js.map +1 -1
  538. package/dist/react-native/errorModels.d.ts +23 -17
  539. package/dist/react-native/errorModels.d.ts.map +1 -1
  540. package/dist/react-native/errorModels.js.map +1 -1
  541. package/dist/react-native/generated/data/models/index.d.ts +593 -20
  542. package/dist/react-native/generated/data/models/index.d.ts.map +1 -1
  543. package/dist/react-native/generated/data/models/index.js +216 -6
  544. package/dist/react-native/generated/data/models/index.js.map +1 -1
  545. package/dist/react-native/generated/data/models/mappers.d.ts +18 -0
  546. package/dist/react-native/generated/data/models/mappers.d.ts.map +1 -1
  547. package/dist/react-native/generated/data/models/mappers.js +463 -4
  548. package/dist/react-native/generated/data/models/mappers.js.map +1 -1
  549. package/dist/react-native/generated/data/models/parameters.d.ts +6 -0
  550. package/dist/react-native/generated/data/models/parameters.d.ts.map +1 -1
  551. package/dist/react-native/generated/data/models/parameters.js +60 -0
  552. package/dist/react-native/generated/data/models/parameters.js.map +1 -1
  553. package/dist/react-native/generated/data/operations/documents.js +20 -3
  554. package/dist/react-native/generated/data/operations/documents.js.map +1 -1
  555. package/dist/react-native/generated/data/searchClient.d.ts +3 -3
  556. package/dist/react-native/generated/data/searchClient.d.ts.map +1 -1
  557. package/dist/react-native/generated/data/searchClient.js +1 -1
  558. package/dist/react-native/generated/data/searchClient.js.map +1 -1
  559. package/dist/react-native/generated/knowledgeBase/index.d.ts +4 -0
  560. package/dist/react-native/generated/knowledgeBase/index.d.ts.map +1 -0
  561. package/dist/react-native/generated/knowledgeBase/index.js +11 -0
  562. package/dist/react-native/generated/knowledgeBase/index.js.map +1 -0
  563. package/dist/react-native/generated/knowledgeBase/models/index.d.ts +561 -0
  564. package/dist/react-native/generated/knowledgeBase/models/index.d.ts.map +1 -0
  565. package/dist/react-native/generated/knowledgeBase/models/index.js +62 -0
  566. package/dist/react-native/generated/knowledgeBase/models/index.js.map +1 -0
  567. package/dist/react-native/generated/knowledgeBase/models/mappers.d.ts +88 -0
  568. package/dist/react-native/generated/knowledgeBase/models/mappers.d.ts.map +1 -0
  569. package/dist/react-native/generated/knowledgeBase/models/mappers.js +1280 -0
  570. package/dist/react-native/generated/knowledgeBase/models/mappers.js.map +1 -0
  571. package/dist/react-native/generated/knowledgeBase/models/parameters.d.ts +9 -0
  572. package/dist/react-native/generated/knowledgeBase/models/parameters.d.ts.map +1 -0
  573. package/dist/react-native/generated/knowledgeBase/models/parameters.js +75 -0
  574. package/dist/react-native/generated/knowledgeBase/models/parameters.js.map +1 -0
  575. package/dist/react-native/generated/knowledgeBase/operations/index.d.ts +2 -0
  576. package/dist/react-native/generated/knowledgeBase/operations/index.d.ts.map +1 -0
  577. package/dist/react-native/generated/knowledgeBase/operations/index.js +9 -0
  578. package/dist/react-native/generated/knowledgeBase/operations/index.js.map +1 -0
  579. package/dist/react-native/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts +19 -0
  580. package/dist/react-native/generated/knowledgeBase/operations/knowledgeRetrieval.d.ts.map +1 -0
  581. package/dist/react-native/generated/knowledgeBase/operations/knowledgeRetrieval.js +57 -0
  582. package/dist/react-native/generated/knowledgeBase/operations/knowledgeRetrieval.js.map +1 -0
  583. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/index.d.ts +2 -0
  584. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/index.d.ts.map +1 -0
  585. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/index.js +9 -0
  586. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/index.js.map +1 -0
  587. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts +11 -0
  588. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.d.ts.map +1 -0
  589. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js +9 -0
  590. package/dist/react-native/generated/knowledgeBase/operationsInterfaces/knowledgeRetrieval.js.map +1 -0
  591. package/dist/react-native/generated/knowledgeBase/searchClient.d.ts +21 -0
  592. package/dist/react-native/generated/knowledgeBase/searchClient.d.ts.map +1 -0
  593. package/dist/react-native/generated/knowledgeBase/searchClient.js +88 -0
  594. package/dist/react-native/generated/knowledgeBase/searchClient.js.map +1 -0
  595. package/dist/react-native/generated/service/models/index.d.ts +1403 -131
  596. package/dist/react-native/generated/service/models/index.d.ts.map +1 -1
  597. package/dist/react-native/generated/service/models/index.js +274 -24
  598. package/dist/react-native/generated/service/models/index.js.map +1 -1
  599. package/dist/react-native/generated/service/models/mappers.d.ts +104 -17
  600. package/dist/react-native/generated/service/models/mappers.d.ts.map +1 -1
  601. package/dist/react-native/generated/service/models/mappers.js +3716 -1601
  602. package/dist/react-native/generated/service/models/mappers.js.map +1 -1
  603. package/dist/react-native/generated/service/models/parameters.d.ts +14 -2
  604. package/dist/react-native/generated/service/models/parameters.d.ts.map +1 -1
  605. package/dist/react-native/generated/service/models/parameters.js +89 -7
  606. package/dist/react-native/generated/service/models/parameters.js.map +1 -1
  607. package/dist/react-native/generated/service/operations/aliases.d.ts +43 -1
  608. package/dist/react-native/generated/service/operations/aliases.d.ts.map +1 -1
  609. package/dist/react-native/generated/service/operations/aliases.js +160 -200
  610. package/dist/react-native/generated/service/operations/aliases.js.map +1 -1
  611. package/dist/react-native/generated/service/operations/dataSources.js +4 -1
  612. package/dist/react-native/generated/service/operations/dataSources.js.map +1 -1
  613. package/dist/react-native/generated/service/operations/index.d.ts +3 -0
  614. package/dist/react-native/generated/service/operations/index.d.ts.map +1 -1
  615. package/dist/react-native/generated/service/operations/index.js +3 -0
  616. package/dist/react-native/generated/service/operations/index.js.map +1 -1
  617. package/dist/react-native/generated/service/operations/indexers.d.ts +14 -1
  618. package/dist/react-native/generated/service/operations/indexers.d.ts.map +1 -1
  619. package/dist/react-native/generated/service/operations/indexers.js +54 -1
  620. package/dist/react-native/generated/service/operations/indexers.js.map +1 -1
  621. package/dist/react-native/generated/service/operations/knowledgeBases.d.ts +43 -0
  622. package/dist/react-native/generated/service/operations/knowledgeBases.d.ts.map +1 -0
  623. package/dist/react-native/generated/service/operations/knowledgeBases.js +160 -0
  624. package/dist/react-native/generated/service/operations/knowledgeBases.js.map +1 -0
  625. package/dist/react-native/generated/service/operations/knowledgeSources.d.ts +49 -0
  626. package/dist/react-native/generated/service/operations/knowledgeSources.d.ts.map +1 -0
  627. package/dist/react-native/generated/service/operations/knowledgeSources.js +184 -0
  628. package/dist/react-native/generated/service/operations/knowledgeSources.js.map +1 -0
  629. package/dist/react-native/generated/service/operations/skillsets.d.ts +8 -1
  630. package/dist/react-native/generated/service/operations/skillsets.d.ts.map +1 -1
  631. package/dist/react-native/generated/service/operations/skillsets.js +30 -1
  632. package/dist/react-native/generated/service/operations/skillsets.js.map +1 -1
  633. package/dist/react-native/generated/service/operationsInterfaces/aliases.d.ts +35 -1
  634. package/dist/react-native/generated/service/operationsInterfaces/aliases.d.ts.map +1 -1
  635. package/dist/react-native/generated/service/operationsInterfaces/aliases.js +7 -66
  636. package/dist/react-native/generated/service/operationsInterfaces/aliases.js.map +1 -1
  637. package/dist/react-native/generated/service/operationsInterfaces/index.d.ts +3 -0
  638. package/dist/react-native/generated/service/operationsInterfaces/index.d.ts.map +1 -1
  639. package/dist/react-native/generated/service/operationsInterfaces/index.js +3 -0
  640. package/dist/react-native/generated/service/operationsInterfaces/index.js.map +1 -1
  641. package/dist/react-native/generated/service/operationsInterfaces/indexers.d.ts +14 -1
  642. package/dist/react-native/generated/service/operationsInterfaces/indexers.d.ts.map +1 -1
  643. package/dist/react-native/generated/service/operationsInterfaces/indexers.js.map +1 -1
  644. package/dist/react-native/generated/service/operationsInterfaces/knowledgeBases.d.ts +35 -0
  645. package/dist/react-native/generated/service/operationsInterfaces/knowledgeBases.d.ts.map +1 -0
  646. package/dist/react-native/generated/service/operationsInterfaces/knowledgeBases.js +9 -0
  647. package/dist/react-native/generated/service/operationsInterfaces/knowledgeBases.js.map +1 -0
  648. package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.d.ts +41 -0
  649. package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.d.ts.map +1 -0
  650. package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.js +9 -0
  651. package/dist/react-native/generated/service/operationsInterfaces/knowledgeSources.js.map +1 -0
  652. package/dist/react-native/generated/service/operationsInterfaces/skillsets.d.ts +8 -1
  653. package/dist/react-native/generated/service/operationsInterfaces/skillsets.d.ts.map +1 -1
  654. package/dist/react-native/generated/service/operationsInterfaces/skillsets.js.map +1 -1
  655. package/dist/react-native/generated/service/searchServiceClient.d.ts +12 -4
  656. package/dist/react-native/generated/service/searchServiceClient.d.ts.map +1 -1
  657. package/dist/react-native/generated/service/searchServiceClient.js +31 -2
  658. package/dist/react-native/generated/service/searchServiceClient.js.map +1 -1
  659. package/dist/react-native/index.d.ts +7 -4
  660. package/dist/react-native/index.d.ts.map +1 -1
  661. package/dist/react-native/index.js +5 -3
  662. package/dist/react-native/index.js.map +1 -1
  663. package/dist/react-native/indexDocumentsBatch.d.ts +6 -5
  664. package/dist/react-native/indexDocumentsBatch.d.ts.map +1 -1
  665. package/dist/react-native/indexDocumentsBatch.js +5 -4
  666. package/dist/react-native/indexDocumentsBatch.js.map +1 -1
  667. package/dist/react-native/indexModels.d.ts +236 -30
  668. package/dist/react-native/indexModels.d.ts.map +1 -1
  669. package/dist/react-native/indexModels.js.map +1 -1
  670. package/dist/react-native/knowledgeBaseModels.d.ts +44 -0
  671. package/dist/react-native/knowledgeBaseModels.d.ts.map +1 -0
  672. package/dist/react-native/knowledgeBaseModels.js +4 -0
  673. package/dist/react-native/knowledgeBaseModels.js.map +1 -0
  674. package/dist/react-native/knowledgeRetrievalClient.d.ts +68 -0
  675. package/dist/react-native/knowledgeRetrievalClient.d.ts.map +1 -0
  676. package/dist/react-native/knowledgeRetrievalClient.js +108 -0
  677. package/dist/react-native/knowledgeRetrievalClient.js.map +1 -0
  678. package/dist/react-native/odata.d.ts +5 -2
  679. package/dist/react-native/odata.d.ts.map +1 -1
  680. package/dist/react-native/odata.js +5 -2
  681. package/dist/react-native/odata.js.map +1 -1
  682. package/dist/react-native/odataMetadataPolicy.d.ts +1 -1
  683. package/dist/react-native/odataMetadataPolicy.d.ts.map +1 -1
  684. package/dist/react-native/odataMetadataPolicy.js.map +1 -1
  685. package/dist/react-native/searchApiKeyCredentialPolicy.d.ts +4 -4
  686. package/dist/react-native/searchApiKeyCredentialPolicy.d.ts.map +1 -1
  687. package/dist/react-native/searchApiKeyCredentialPolicy.js +2 -2
  688. package/dist/react-native/searchApiKeyCredentialPolicy.js.map +1 -1
  689. package/dist/react-native/searchClient.d.ts +64 -53
  690. package/dist/react-native/searchClient.d.ts.map +1 -1
  691. package/dist/react-native/searchClient.js +113 -53
  692. package/dist/react-native/searchClient.js.map +1 -1
  693. package/dist/react-native/searchIndexClient.d.ts +157 -18
  694. package/dist/react-native/searchIndexClient.d.ts.map +1 -1
  695. package/dist/react-native/searchIndexClient.js +482 -17
  696. package/dist/react-native/searchIndexClient.js.map +1 -1
  697. package/dist/react-native/searchIndexerClient.d.ts +26 -11
  698. package/dist/react-native/searchIndexerClient.d.ts.map +1 -1
  699. package/dist/react-native/searchIndexerClient.js +62 -7
  700. package/dist/react-native/searchIndexerClient.js.map +1 -1
  701. package/dist/react-native/searchIndexingBufferedSender.d.ts +3 -3
  702. package/dist/react-native/searchIndexingBufferedSender.d.ts.map +1 -1
  703. package/dist/react-native/searchIndexingBufferedSender.js +1 -1
  704. package/dist/react-native/searchIndexingBufferedSender.js.map +1 -1
  705. package/dist/react-native/serviceModels.d.ts +1228 -220
  706. package/dist/react-native/serviceModels.d.ts.map +1 -1
  707. package/dist/react-native/serviceModels.js +8 -7
  708. package/dist/react-native/serviceModels.js.map +1 -1
  709. package/dist/react-native/serviceUtils.d.ts +14 -6
  710. package/dist/react-native/serviceUtils.d.ts.map +1 -1
  711. package/dist/react-native/serviceUtils.js +301 -45
  712. package/dist/react-native/serviceUtils.js.map +1 -1
  713. package/dist/react-native/synonymMapHelper.d.ts +1 -1
  714. package/dist/react-native/synonymMapHelper.d.ts.map +1 -1
  715. package/dist/react-native/synonymMapHelper.js +2 -2
  716. package/dist/react-native/synonymMapHelper.js.map +1 -1
  717. package/package.json +13 -9
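
The new file names above suggest the headline change in this alpha: a knowledge-retrieval surface (knowledgeRetrievalClient.*, knowledgeBaseModels.*, the generated knowledgeBase/ tree, and the knowledgeBases/knowledgeSources service operations), added identically across all four dist flavors (browser, commonjs, esm, react-native), alongside sizable growth in serviceModels, indexModels, and searchIndexClient. The sketch below is speculative, not documented API: the export name KnowledgeRetrievalClient is inferred from the new dist/*/knowledgeRetrievalClient.d.ts files and its constructor shape is a guess by analogy with the package's other clients, so it is looked up dynamically rather than imported statically.

import * as search from "@azure/search-documents";

// "KnowledgeRetrievalClient" and its (endpoint, credential) constructor shape are
// assumptions inferred from the new file names; verify against the published typings.
const Ctor = (search as unknown as Record<string, unknown>)["KnowledgeRetrievalClient"] as
  | (new (endpoint: string, credential: unknown) => object)
  | undefined;

if (typeof Ctor === "function") {
  // AzureKeyCredential is a long-standing re-export of this package.
  const client = new Ctor(
    "https://<service>.search.windows.net",
    new search.AzureKeyCredential("<api-key>"),
  );
  console.log("constructed:", client.constructor.name);
} else {
  console.log("no KnowledgeRetrievalClient export under that name in this build");
}

This only checks whether the surface implied by the file list is actually exported from the package root; the real method names and signatures should be taken from the generated .d.ts files listed above.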
@@ -1 +1 @@
[truncated source-map diff: the single-line machine-generated "mappings" payload of dist/*/generated/service/models/index.js.map (mapping the compiled index.js back to src/generated/service/models/index.ts) is cut off mid-line and omitted here]
AAkB;IAClB,kCAAS,CAAA;IACT,wBAAwB;IACxB,kCAAS,CAAA;IACT,YAAY;IACZ,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,eAAe;IACf,kCAAS,CAAA;IACT,4BAA4B;IAC5B,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,mBAAmB;IACnB,oCAAW,CAAA;IACX,qBAAqB;IACrB,oCAAW,CAAA;IACX,0BAA0B;IAC1B,oCAAW,CAAA;IACX,sBAAsB;IACtB,oCAAW,CAAA;IACX,6BAA6B;IAC7B,6CAAoB,CAAA;IACpB,0BAA0B;IAC1B,oCAAW,CAAA;IACX,gBAAgB;IAChB,oCAAW,CAAA;IACX,wBAAwB;IACxB,2CAAkB,CAAA;IAClB,qBAAqB;IACrB,2CAAkB,CAAA;IAClB,cAAc;IACd,oCAAW,CAAA;IACX,YAAY;IACZ,oCAAW,CAAA;IACX,cAAc;IACd,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,YAAY;IACZ,oCAAW,CAAA;IACX,aAAa;IACb,oCAAW,CAAA;IACX,eAAe;IACf,oCAAW,CAAA;IACX,uBAAuB;IACvB,oCAAW,CAAA;IACX,uBAAuB;IACvB,2CAAkB,CAAA;IAClB,sBAAsB;IACtB,2CAAkB,CAAA;IAClB,0BAA0B;IAC1B,oCAAW,CAAA;IACX,wBAAwB;IACxB,kCAAS,CAAA;IACT,aAAa;IACb,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,iBAAiB;IACjB,kCAAS,CAAA;IACT,oBAAoB;IACpB,oCAAW,CAAA;IACX,gBAAgB;IAChB,oCAAW,CAAA;IACX,oBAAoB;IACpB,kCAAS,CAAA;IACT,iCAAiC;IACjC,oCAAW,CAAA;IACX,oBAAoB;IACpB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,yBAAyB;IACzB,oCAAW,CAAA;IACX,WAAW;IACX,kCAAS,CAAA;IACT,YAAY;IACZ,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,2BAA2B;IAC3B,kCAAS,CAAA;IACT,6BAA6B;IAC7B,6CAAoB,CAAA;IACpB,0BAA0B;IAC1B,6CAAoB,CAAA;IACpB,iBAAiB;IACjB,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,aAAa;IACb,oCAAW,CAAA;IACX,YAAY;IACZ,oCAAW,CAAA;IACX,4BAA4B;IAC5B,oCAAW,CAAA;IACX,gBAAgB;IAChB,kCAAS,CAAA;IACT,gBAAgB;IAChB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,iBAAiB;IACjB,kCAAS,CAAA;IACT,uBAAuB;IACvB,kCAAS,CAAA;IACT,gBAAgB;IAChB,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,yBAAyB;IACzB,oCAAW,CAAA;IACX,qBAAqB;IACrB,kCAAS,CAAA;IACT,4BAA4B;IAC5B,kCAAS,CAAA;IACT,2BAA2B;IAC3B,oCAAW,CAAA;IACX,YAAY;IACZ,oCAAW,CAAA;IACX,sBAAsB;IACtB,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,yBAAyB;IACzB,2CAAkB,CAAA;IAClB,sBAAsB;IACtB,2CAAkB,CAAA;IAClB,0BAA0B;IAC1B,oCAAW,CAAA;IACX,4BAA4B;IAC5B,oCAAW,CAAA;IACX,iBAAiB;IACjB,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,gBAAgB;IAChB,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,oBAAoB;IACpB,oCAAW,CAAA;IACX,cAAc;IACd,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,uBAAuB;IACvB,kCAAS,CAAA;IACT,oBAAoB;IACpB,kCAAS,CAAA;IACT,YAAY;IACZ,oCAAW,CAAA;IACX,cAAc;IACd,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,YAAY;IACZ,oCAAW,CAAA;IACX,oBAAoB;IACpB,oCAAW,CAAA;IACX,WAAW;IACX,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,qBAAqB;IACrB,2CAAkB,CAAA;IAClB,uBAAuB;IACvB,2CAAkB,CAAA;IAClB,oBAAoB;IACpB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,mBAAmB;IACnB,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,WAAW;IACX,kCAAS,CAAA;IACT,oBAAoB;IACpB,oCAAW,CAAA;AACb,CAAC,EArVW,qBAAqB,KAArB,qBAAqB,QAqVhC;AAoLD,sEAAsE;AACtE,MAAM,CAAN,IAAY,kBASX;AATD,WAAY,kBAAkB;IAC5B,uDAAuD;IACvD,qCAAe,CAAA;IACf,iEAAiE;IACjE,uDAAiC,CAAA;IACjC,kEAAkE;IAClE,2CAAqB,CAAA;IACrB,mFAAmF;IACnF,uEAAiD,CAAA;AACnD,CAAC,EATW,kBAAkB,KAAlB,kBAAkB,QAS7B;AAcD,mFAAmF;AACnF,MAAM,CAAN,IAAY,+BAyGX;AAzGD,WAAY,+BAA+B;IACzC,aAAa;IACb,4CAAS,CAAA;IACT,kBAAkB;IAClB,4CAAS,CAAA;IACT,gBAAgB;IAChB,4CAAS,CAAA;IACT,oBAAoB;IACpB,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,eAAe;IACf,4CAAS,CAAA;IACT,aAAa;IA
Cb,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,eAAe;IACf,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,eAAe;IACf,4CAAS,CAAA;IACT,gBAAgB;IAChB,4CAAS,CAAA;IACT,iBAAiB;IACjB,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,eAAe;IACf,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,iBAAiB;IACjB,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,iBAAiB;IACjB,4CAAS,CAAA;IACT,qBAAqB;IACrB,4CAAS,CAAA;IACT,yBAAyB;IACzB,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,WAAW;IACX,8CAAW,CAAA;IACX,wBAAwB;IACxB,iDAAc,CAAA;IACd,0BAA0B;IAC1B,4CAAS,CAAA;IACT,0BAA0B;IAC1B,iDAAc,CAAA;IACd,eAAe;IACf,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,gBAAgB;IAChB,4CAAS,CAAA;IACT,4BAA4B;IAC5B,qDAAkB,CAAA;IAClB,yBAAyB;IACzB,qDAAkB,CAAA;IAClB,cAAc;IACd,4CAAS,CAAA;IACT,WAAW;IACX,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,gBAAgB;IAChB,4CAAS,CAAA;IACT,iBAAiB;IACjB,4CAAS,CAAA;IACT,yBAAyB;IACzB,4CAAS,CAAA;IACT,yBAAyB;IACzB,qDAAkB,CAAA;IAClB,0BAA0B;IAC1B,qDAAkB,CAAA;AACpB,CAAC,EAzGW,+BAA+B,KAA/B,+BAA+B,QAyG1C;AA8DD,sEAAsE;AACtE,MAAM,CAAN,IAAY,kBAeX;AAfD,WAAY,kBAAkB;IAC5B,mDAAmD;IACnD,qCAAe,CAAA;IACf,uDAAuD;IACvD,uCAAiB,CAAA;IACjB,kBAAkB;IAClB,+CAAyB,CAAA;IACzB,mBAAmB;IACnB,iDAA2B,CAAA;IAC3B,kDAAkD;IAClD,qCAAe,CAAA;IACf,6CAA6C;IAC7C,yCAAmB,CAAA;IACnB,YAAY;IACZ,mCAAa,CAAA;AACf,CAAC,EAfW,kBAAkB,KAAlB,kBAAkB,QAe7B;AAiBD,oEAAoE;AACpE,MAAM,CAAN,IAAY,gBAKX;AALD,WAAY,gBAAgB;IAC1B,yCAAyC;IACzC,+CAA2B,CAAA;IAC3B,uCAAuC;IACvC,2CAAuB,CAAA;AACzB,CAAC,EALW,gBAAgB,KAAhB,gBAAgB,QAK3B;AAYD,uEAAuE;AACvE,MAAM,CAAN,IAAY,mBAeX;AAfD,WAAY,mBAAmB;IAC7B,+CAA+C;IAC/C,4CAAqB,CAAA;IACrB,2CAA2C;IAC3C,oDAA6B,CAAA;IAC7B,oCAAoC;IACpC,wCAAiB,CAAA;IACjB,sCAAsC;IACtC,4CAAqB,CAAA;IACrB,2CAA2C;IAC3C,4CAAqB,CAAA;IACrB,iCAAiC;IACjC,kCAAW,CAAA;IACX,4CAA4C;IAC5C,sCAAe,CAAA;AACjB,CAAC,EAfW,mBAAmB,KAAnB,mBAAmB,QAe9B;AAiBD,uFAAuF;AACvF,MAAM,CAAN,IAAY,mCA+CX;AA/CD,WAAY,mCAAmC;IAC7C,aAAa;IACb,gDAAS,CAAA;IACT,YAAY;IACZ,gDAAS,CAAA;IACT,yBAAyB;IACzB,yDAAkB,CAAA;IAClB,0BAA0B;IAC1B,yDAAkB,CAAA;IAClB,aAAa;IACb,gDAAS,CAAA;IACT,YAAY;IACZ,gDAAS,CAAA;IACT,cAAc;IACd,gDAAS,CAAA;IACT,cAAc;IACd,gDAAS,CAAA;IACT,aAAa;IACb,gDAAS,CAAA;IACT,aAAa;IACb,gDAAS,CAAA;IACT,YAAY;IACZ,gDAAS,CAAA;IACT,gBAAgB;IAChB,gDAAS,CAAA;IACT,cAAc;IACd,gDAAS,CAAA;IACT,eAAe;IACf,gDAAS,CAAA;IACT,aAAa;IACb,gDAAS,CAAA;IACT,0BAA0B;IAC1B,gDAAS,CAAA;IACT,aAAa;IACb,gDAAS,CAAA;IACT,4BAA4B;IAC5B,qDAAc,CAAA;IACd,0BAA0B;IAC1B,qDAAc,CAAA;IACd,cAAc;IACd,gDAAS,CAAA;IACT,cAAc;IACd,gDAAS,CAAA;IACT,cAAc;IACd,gDAAS,CAAA;IACT,cAAc;IACd,gDAAS,CAAA;AACX,CAAC,EA/CW,mCAAmC,KAAnC,mCAAmC,QA+C9C;AAiCD,+EAA+E;AAC/E,MAAM,CAAN,IAAY,2BA+BX;AA/BD,WAAY,2BAA2B;IACrC,aAAa;IACb,wCAAS,CAAA;IACT,YAAY;IACZ,wCAAS,CAAA;IACT,cAAc;IACd,wCAAS,CAAA;IACT,cAAc;IACd,wCAAS,CAAA;IACT,aAAa;IACb,wCAAS,CAAA;IACT,aAAa;IACb,wCAAS,CAAA;IACT,YAAY;IACZ,wCAAS,CAAA;IACT,cAAc;IACd,wCAAS,CAAA;IACT,0BAA0B;IAC1B,wCAAS,CAAA;IACT,aAAa;IACb,wCAAS,CAAA;IACT,4BAA4B;IAC5B,6CAAc,CAAA;IACd,cAAc;IACd,wCAAS,CAAA;IACT,cAAc;IACd,wCAAS,CAAA;IACT,cAAc;IACd,wCAAS,CAAA;IACT,cAAc;IACd,wCAAS,CAAA;AACX,CAAC,EA/BW,2BAA2B,KAA3B,2BAA2B,QA+BtC;AAyBD,qFAAqF;AACrF,MAAM,CAAN,IAAY,iCAKX;AALD,WAAY,iCAAiC;IAC3C,wEAAwE;IACxE,kDAAa,CAAA;IACb,4QAA4Q;IAC5Q,wDAAmB,CAAA;AACrB,CAAC,EALW,iCAAiC,KAAjC,iCAAiC,QAK5C;AAYD,2EAA2E;AAC3E,MAAM,CAAN,IAAY,uBAmEX;AAnED,WAAY,uBAAuB;IACjC,cAAc;IACd,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,YAAY;IACZ,oCAAS,CAAA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,eAAe;IACf,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CA
AA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,YAAY;IACZ,oCAAS,CAAA;IACT,eAAe;IACf,oCAAS,CAAA;IACT,gBAAgB;IAChB,oCAAS,CAAA;IACT,iBAAiB;IACjB,oCAAS,CAAA;IACT,gBAAgB;IAChB,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,eAAe;IACf,oCAAS,CAAA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,gBAAgB;IAChB,oCAAS,CAAA;IACT,YAAY;IACZ,oCAAS,CAAA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,4BAA4B;IAC5B,oCAAS,CAAA;IACT,0BAA0B;IAC1B,yCAAc,CAAA;IACd,cAAc;IACd,oCAAS,CAAA;IACT,aAAa;IACb,oCAAS,CAAA;IACT,gBAAgB;IAChB,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,cAAc;IACd,oCAAS,CAAA;IACT,WAAW;IACX,oCAAS,CAAA;IACT,2BAA2B;IAC3B,oCAAS,CAAA;AACX,CAAC,EAnEW,uBAAuB,KAAvB,uBAAuB,QAmElC;AA2CD,sEAAsE;AACtE,MAAM,CAAN,IAAY,kBAKX;AALD,WAAY,kBAAkB;IAC5B,4CAA4C;IAC5C,qCAAe,CAAA;IACf,gDAAgD;IAChD,6CAAuB,CAAA;AACzB,CAAC,EALW,kBAAkB,KAAlB,kBAAkB,QAK7B;AAYD,wFAAwF;AACxF,MAAM,CAAN,IAAY,oCAmBX;AAnBD,WAAY,oCAAoC;IAC9C,aAAa;IACb,iDAAS,CAAA;IACT,aAAa;IACb,iDAAS,CAAA;IACT,cAAc;IACd,iDAAS,CAAA;IACT,cAAc;IACd,iDAAS,CAAA;IACT,cAAc;IACd,iDAAS,CAAA;IACT,aAAa;IACb,iDAAS,CAAA;IACT,cAAc;IACd,iDAAS,CAAA;IACT,aAAa;IACb,iDAAS,CAAA;IACT,iBAAiB;IACjB,iDAAS,CAAA;AACX,CAAC,EAnBW,oCAAoC,KAApC,oCAAoC,QAmB/C;AAmBD,qFAAqF;AACrF,MAAM,CAAN,IAAY,iCAiJX;AAjJD,WAAY,iCAAiC;IAC3C,gBAAgB;IAChB,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,sBAAsB;IACtB,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,8BAA8B;IAC9B,gDAAW,CAAA;IACX,cAAc;IACd,8CAAS,CAAA;IACT,yBAAyB;IACzB,uDAAkB,CAAA;IAClB,0BAA0B;IAC1B,uDAAkB,CAAA;IAClB,eAAe;IACf,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,eAAe;IACf,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,eAAe;IACf,gDAAW,CAAA;IACX,cAAc;IACd,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,qBAAqB;IACrB,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,gBAAgB;IAChB,gDAAW,CAAA;IACX,gBAAgB;IAChB,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,iBAAiB;IACjB,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,eAAe;IACf,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,cAAc;IACd,gDAAW,CAAA;IACX,6BAA6B;IAC7B,yDAAoB,CAAA;IACpB,+BAA+B;IAC/B,yDAAoB,CAAA;IACpB,aAAa;IACb,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,iBAAiB;IACjB,8CAAS,CAAA;IACT,eAAe;IACf,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,iBAAiB;IACjB,8CAAS,CAAA;IACT,0BAA0B;IAC1B,mDAAc,CAAA;IACd,4BAA4B;IAC5B,mDAAc,CAAA;IACd,sBAAsB;IACtB,gDAAW,CAAA;IACX,eAAe;IACf,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,yBAAyB;IACzB,uDAAkB,CAAA;IAClB,sBAAsB;IACtB,uDAAkB,CAAA;IAClB,aAAa;IACb,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,eAAe;IACf,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,WAAW;IACX,8CAAS,CAAA;IACT,aAAa;IACb,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,WAAW;IACX,8CAAS,CAAA;IACT,iBAAiB;IACjB,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,mBAAmB;IACnB,gDAAW,CAAA;IACX,YAAY;IACZ,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;IACT,YAAY;IACZ,8CAAS,CAAA;IACT,gBAAgB;IAChB,8CAAS,CAAA;IACT,cAAc;IACd,8CAAS,CAAA;AACX,CAAC,EAjJW,iCAAiC,KAAjC,iCAAiC,QAiJ5C;AAkFD,oGAAoG;AACpG,MAAM,CAAN,IAAY,gDAKX;AALD,WAAY,gDAAgD;IAC1D,gDAAgD;IAChD,iEAAa,CAAA;IACb,oDAAoD;IACpD,yEAAqB,CAAA;AACvB,CAAC,EALW,gDAAgD,KAAhD,gDAAgD,QAK3D;AAYD,kGAAkG;AAClG,MAAM,CAAN,IAAY,8CAGX;AAHD,WAAY,8CAA8C;IACxD,+DAA+D;IAC/D,yEAAuB,CAAA;AACzB,CAAC,EAHW,8CAA8C,KAA9C,8CAA8C,QAGzD;AAWD,2G
AA2G;AAC3G,MAAM,CAAN,IAAY,uDAaX;AAbD,WAAY,uDAAuD;IACjE,sBAAsB;IACtB,oEAAS,CAAA;IACT,sBAAsB;IACtB,oEAAS,CAAA;IACT,sBAAsB;IACtB,oEAAS,CAAA;IACT,sBAAsB;IACtB,oEAAS,CAAA;IACT,sBAAsB;IACtB,oEAAS,CAAA;IACT,sBAAsB;IACtB,oEAAS,CAAA;AACX,CAAC,EAbW,uDAAuD,KAAvD,uDAAuD,QAalE;AAgBD,yGAAyG;AACzG,MAAM,CAAN,IAAY,qDAKX;AALD,WAAY,qDAAqD;IAC/D,wEAAwE;IACxE,0EAAiB,CAAA;IACjB,4EAA4E;IAC5E,8FAAqC,CAAA;AACvC,CAAC,EALW,qDAAqD,KAArD,qDAAqD,QAKhE;AAYD,oGAAoG;AACpG,MAAM,CAAN,IAAY,gDAGX;AAHD,WAAY,gDAAgD;IAC1D,qCAAqC;IACrC,6EAAyB,CAAA;AAC3B,CAAC,EAHW,gDAAgD,KAAhD,gDAAgD,QAG3D;AAWD,6EAA6E;AAC7E,MAAM,CAAN,IAAY,yBA2BX;AA3BD,WAAY,yBAAyB;IACnC,8NAA8N;IAC9N,gDAAmB,CAAA;IACnB,wMAAwM;IACxM,oDAAuB,CAAA;IACvB,0KAA0K;IAC1K,mDAAsB,CAAA;IACtB,4JAA4J;IAC5J,8CAAiB,CAAA;IACjB,+LAA+L;IAC/L,oDAAuB,CAAA;IACvB,kDAAkD;IAClD,wFAA2D,CAAA;IAC3D,wFAAwF;IACxF,yGAA4E,CAAA;IAC5E,sLAAsL;IACtL,4CAAe,CAAA;IACf,2KAA2K;IAC3K,gEAAmC,CAAA;IACnC,4MAA4M;IAC5M,gDAAmB,CAAA;IACnB,qOAAqO;IACrO,qDAAwB,CAAA;IACxB,kLAAkL;IAClL,0DAA6B,CAAA;IAC7B,+JAA+J;IAC/J,sDAAyB,CAAA;AAC3B,CAAC,EA3BW,yBAAyB,KAAzB,yBAAyB,QA2BpC;AAuBD,mEAAmE;AACnE,MAAM,CAAN,IAAY,eAiBX;AAjBD,WAAY,eAAe;IACzB,qCAAqC;IACrC,uCAAoB,CAAA;IACpB,yCAAyC;IACzC,uDAAoC,CAAA;IACpC,sDAAsD;IACtD,wCAAqB,CAAA;IACrB,2BAA2B;IAC3B,oCAAiB,CAAA;IACjB,8CAA8C;IAC9C,sCAAmB,CAAA;IACnB,8BAA8B;IAC9B,0CAAuB,CAAA;IACvB,0CAA0C;IAC1C,+CAA4B,CAAA;IAC5B,+BAA+B;IAC/B,2CAAwB,CAAA;AAC1B,CAAC,EAjBW,eAAe,KAAf,eAAe,QAiB1B","sourcesContent":["/*\n * Copyright (c) Microsoft Corporation.\n * Licensed under the MIT License.\n *\n * Code generated by Microsoft (R) AutoRest Code Generator.\n * Changes may cause incorrect behavior and will be lost if the code is regenerated.\n */\n\nimport * as coreClient from \"@azure/core-client\";\nimport * as coreHttpCompat from \"@azure/core-http-compat\";\n\nexport type DataChangeDetectionPolicyUnion =\n | DataChangeDetectionPolicy\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\nexport type DataDeletionDetectionPolicyUnion =\n | DataDeletionDetectionPolicy\n | SoftDeleteColumnDeletionDetectionPolicy;\nexport type SearchIndexerSkillUnion =\n | SearchIndexerSkill\n | ConditionalSkill\n | KeyPhraseExtractionSkill\n | OcrSkill\n | ImageAnalysisSkill\n | LanguageDetectionSkill\n | ShaperSkill\n | MergeSkill\n | EntityRecognitionSkill\n | SentimentSkill\n | SentimentSkillV3\n | EntityLinkingSkill\n | EntityRecognitionSkillV3\n | PIIDetectionSkill\n | SplitSkill\n | CustomEntityLookupSkill\n | TextTranslationSkill\n | DocumentExtractionSkill\n | DocumentIntelligenceLayoutSkill\n | WebApiSkill\n | AzureOpenAIEmbeddingSkill;\nexport type CognitiveServicesAccountUnion =\n | CognitiveServicesAccount\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey;\nexport type ScoringFunctionUnion =\n | ScoringFunction\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\nexport type LexicalAnalyzerUnion =\n | LexicalAnalyzer\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\nexport type LexicalTokenizerUnion =\n | LexicalTokenizer\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | KeywordTokenizerV2\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizerV2\n | PatternTokenizer\n | LuceneStandardTokenizer\n | LuceneStandardTokenizerV2\n | UaxUrlEmailTokenizer;\nexport type TokenFilterUnion =\n | TokenFilter\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | 
DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | EdgeNGramTokenFilterV2\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | NGramTokenFilterV2\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\nexport type CharFilterUnion =\n | CharFilter\n | MappingCharFilter\n | PatternReplaceCharFilter;\nexport type BaseLexicalNormalizerUnion =\n | BaseLexicalNormalizer\n | CustomLexicalNormalizer;\nexport type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity;\nexport type VectorSearchAlgorithmConfigurationUnion =\n | VectorSearchAlgorithmConfiguration\n | HnswAlgorithmConfiguration\n | ExhaustiveKnnAlgorithmConfiguration;\nexport type VectorSearchVectorizerUnion =\n | VectorSearchVectorizer\n | AzureOpenAIVectorizer\n | WebApiVectorizer;\nexport type VectorSearchCompressionUnion =\n | VectorSearchCompression\n | ScalarQuantizationCompression\n | BinaryQuantizationCompression;\nexport type SearchIndexerDataIdentityUnion =\n | SearchIndexerDataIdentity\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\n\n/** Represents a datasource definition, which can be used to configure an indexer. */\nexport interface SearchIndexerDataSource {\n /** The name of the datasource. */\n name: string;\n /** The description of the datasource. */\n description?: string;\n /** The type of the datasource. */\n type: SearchIndexerDataSourceType;\n /** Credentials for the datasource. */\n credentials: DataSourceCredentials;\n /** The data container for the datasource. */\n container: SearchIndexerDataContainer;\n /** The data change detection policy for the datasource. */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion;\n /** The data deletion detection policy for the datasource. */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion;\n /** The ETag of the data source. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition. Once you have encrypted your data source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/** Represents credentials that can be used to connect to a datasource. */\nexport interface DataSourceCredentials {\n /** The connection string for the datasource. For Azure SQL, Azure Blob, ADLS Gen 2 and Azure Table, this would be the connection string or resource ID if using managed identity. For CosmosDB this would be a formatted connection string specifying ApiKind or resource ID for managed identity. For Onelake files, connection string would be either the workspace guid or workspace FQDN; Onelake only supports managed identity connections. 
Set to `<unchanged>` (with brackets) if you don't want the connection string updated. Set to `<redacted>` if you want to remove the connection string value from the datasource. */\n connectionString?: string;\n}\n\n/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */\nexport interface SearchIndexerDataContainer {\n /** The name of the table or view (for Azure SQL datasource), collection (for CosmosDB datasource), container (for Azure Blob and ADLS Gen 2 datasources), Azure Table (for Azure Table datasource), or lakehouse (for Onelake datasource) that will be indexed. */\n name: string;\n /** A query that is applied to this data container. For CosmosDB datasource query can flatten and filter data. For Azure Blob and ADLS Gen 2 query can filter by folders. For Azure Table query can filter by row data. For Onelake query can filter by folder or shortcut. Not supported by Azure SQL datasources. */\n query?: string;\n}\n\n/** Base type for data change detection policies. */\nexport interface DataChangeDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy\"\n | \"#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy\";\n}\n\n/** Base type for data deletion detection policies. */\nexport interface DataDeletionDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy\";\n}\n\n/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */\nexport interface SearchResourceEncryptionKey {\n /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */\n keyName: string;\n /** The version of your Azure Key Vault key to be used to encrypt your data at rest. */\n keyVersion: string;\n /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be `https://my-keyvault-name.vault.azure.net`. */\n vaultUri: string;\n /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. */\n accessCredentials?: AzureActiveDirectoryApplicationCredentials;\n}\n\n/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. */\nexport interface AzureActiveDirectoryApplicationCredentials {\n /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */\n applicationId: string;\n /** The authentication key of the specified AAD application. */\n applicationSecret?: string;\n}\n\n/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */\nexport interface ErrorResponse {\n /** The error object. */\n error?: ErrorDetail;\n}\n\n/** The error detail. */\nexport interface ErrorDetail {\n /**\n * The error code.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly code?: string;\n /**\n * The error message.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly message?: string;\n /**\n * The error target.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly target?: string;\n /**\n * The error details.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly details?: ErrorDetail[];\n /**\n * The error additional info.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly additionalInfo?: ErrorAdditionalInfo[];\n}\n\n/** The resource management error additional info. */\nexport interface ErrorAdditionalInfo {\n /**\n * The additional info type.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly type?: string;\n /**\n * The additional info.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly info?: Record<string, unknown>;\n}\n\n/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */\nexport interface ListDataSourcesResult {\n /**\n * The datasources in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly dataSources: SearchIndexerDataSource[];\n}\n\n/** Represents an indexer. */\nexport interface SearchIndexer {\n /** The name of the indexer. */\n name: string;\n /** The description of the indexer. */\n description?: string;\n /** The name of the datasource from which this indexer reads data. */\n dataSourceName: string;\n /** The name of the skillset executing with this indexer. */\n skillsetName?: string;\n /** The name of the index to which this indexer writes data. */\n targetIndexName: string;\n /** The schedule for this indexer. */\n schedule?: IndexingSchedule;\n /** Parameters for indexer execution. */\n parameters?: IndexingParameters;\n /** Defines mappings between fields in the data source and corresponding target fields in the index. */\n fieldMappings?: FieldMapping[];\n /** Output field mappings are applied after enrichment and immediately before indexing. */\n outputFieldMappings?: FieldMapping[];\n /** A value indicating whether the indexer is disabled. Default is false. */\n isDisabled?: boolean;\n /** The ETag of the indexer. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/** Represents a schedule for indexer execution. */\nexport interface IndexingSchedule {\n /** The interval of time between indexer executions. 
*/\n interval: string;\n /** The time when an indexer should start running. */\n startTime?: Date;\n}\n\n/** Represents parameters for indexer execution. */\nexport interface IndexingParameters {\n /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */\n batchSize?: number;\n /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */\n maxFailedItems?: number;\n /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */\n maxFailedItemsPerBatch?: number;\n /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\n configuration?: IndexingParametersConfiguration;\n}\n\n/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface IndexingParametersConfiguration {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Represents the parsing mode for indexing from an Azure blob data source. */\n parsingMode?: BlobIndexerParsingMode;\n /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude \".png, .mp4\" to skip over those files during indexing. */\n excludedFileNameExtensions?: string;\n /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files \".docx, .pptx, .msg\" to specifically include those file types. */\n indexedFileNameExtensions?: string;\n /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */\n failOnUnsupportedContentType?: boolean;\n /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */\n failOnUnprocessableDocument?: boolean;\n /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */\n indexStorageMetadataOnlyForOversizedDocuments?: boolean;\n /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */\n delimitedTextHeaders?: string;\n /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, \"|\"). */\n delimitedTextDelimiter?: string;\n /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */\n firstLineContainsHeaders?: boolean;\n /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */\n documentRoot?: string;\n /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when \"imageAction\" is set to a value other than \"none\". 
This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */\n dataToExtract?: BlobIndexerDataToExtract;\n /** Determines how to process embedded images and image files in Azure blob storage. Setting the \"imageAction\" configuration to any value other than \"none\" requires that a skillset also be attached to that indexer. */\n imageAction?: BlobIndexerImageAction;\n /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */\n allowSkillsetToReadFileData?: boolean;\n /** Determines algorithm for text extraction from PDF files in Azure blob storage. */\n pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;\n /** Specifies the environment in which the indexer should execute. */\n executionEnvironment?: IndexerExecutionEnvironment;\n /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format \"hh:mm:ss\". */\n queryTimeout?: string;\n}\n\n/** Defines a mapping between a field in a data source and a target field in an index. */\nexport interface FieldMapping {\n /** The name of the field in the data source. */\n sourceFieldName: string;\n /** The name of the target field in the index. Same as the source field name by default. */\n targetFieldName?: string;\n /** A function to apply to each source field value before indexing. */\n mappingFunction?: FieldMappingFunction;\n}\n\n/** Represents a function that transforms a value from a data source before indexing. */\nexport interface FieldMappingFunction {\n /** The name of the field mapping function. */\n name: string;\n /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */\n parameters?: { [propertyName: string]: any };\n}\n\n/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */\nexport interface ListIndexersResult {\n /**\n * The indexers in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly indexers: SearchIndexer[];\n}\n\n/** Represents the current status and execution history of an indexer. */\nexport interface SearchIndexerStatus {\n /**\n * The name of the indexer.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly name: string;\n /**\n * Overall indexer status.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly status: IndexerStatus;\n /**\n * The result of the most recent or an in-progress indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly lastResult?: IndexerExecutionResult;\n /**\n * History of the recent indexer executions, sorted in reverse chronological order.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly executionHistory: IndexerExecutionResult[];\n /**\n * The execution limits for the indexer.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly limits: SearchIndexerLimits;\n}\n\n/** Represents the result of an individual indexer execution. 
*/\nexport interface IndexerExecutionResult {\n /**\n * The outcome of this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly status: IndexerExecutionStatus;\n /**\n * The error message indicating the top-level error, if any.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly errorMessage?: string;\n /**\n * The start time of this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly startTime?: Date;\n /**\n * The end time of this indexer execution, if the execution has already completed.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly endTime?: Date;\n /**\n * The item-level indexing errors.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly errors: SearchIndexerError[];\n /**\n * The item-level indexing warnings.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly warnings: SearchIndexerWarning[];\n /**\n * The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly itemCount: number;\n /**\n * The number of items that failed to be indexed during this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly failedItemCount: number;\n /**\n * Change tracking state with which an indexer execution started.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly initialTrackingState?: string;\n /**\n * Change tracking state with which an indexer execution finished.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly finalTrackingState?: string;\n}\n\n/** Represents an item- or document-level indexing error. */\nexport interface SearchIndexerError {\n /**\n * The key of the item for which indexing failed.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly key?: string;\n /**\n * The message describing the error that occurred while processing the item.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly errorMessage: string;\n /**\n * The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly statusCode: number;\n /**\n * The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly name?: string;\n /**\n * Additional, verbose details about the error to assist in debugging the indexer. This may not be always available.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly details?: string;\n /**\n * A link to a troubleshooting guide for these classes of errors. This may not be always available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentationLink?: string;\n}\n\n/** Represents an item-level warning. */\nexport interface SearchIndexerWarning {\n /**\n * The key of the item which generated a warning.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly key?: string;\n /**\n * The message describing the warning that occurred while processing the item.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly message: string;\n /**\n * The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not be always available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly name?: string;\n /**\n * Additional, verbose details about the warning to assist in debugging the indexer. This may not be always available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly details?: string;\n /**\n * A link to a troubleshooting guide for these classes of warnings. This may not be always available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentationLink?: string;\n}\n\nexport interface SearchIndexerLimits {\n /**\n * The maximum duration that the indexer is permitted to run for one execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly maxRunTime?: string;\n /**\n * The maximum size of a document, in bytes, which will be considered valid for indexing.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly maxDocumentExtractionSize?: number;\n /**\n * The maximum number of characters that will be extracted from a document picked up for indexing.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly maxDocumentContentCharactersToExtract?: number;\n}\n\n/** A list of skills. */\nexport interface SearchIndexerSkillset {\n /** The name of the skillset. */\n name: string;\n /** The description of the skillset. */\n description?: string;\n /** A list of skills in the skillset. */\n skills: SearchIndexerSkillUnion[];\n /** Details about the Azure AI service to be used when running skills. */\n cognitiveServicesAccount?: CognitiveServicesAccountUnion;\n /** Definition of additional projections to Azure blob, table, or files, of enriched data. */\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /** Definition of additional projections to secondary search index(es). */\n indexProjection?: SearchIndexerIndexProjection;\n /** The ETag of the skillset. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition. Once you have encrypted your skillset definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. 
You can change this property as needed if you want to rotate your encryption key; Your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/** Base type for skills. */\nexport interface SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Skills.Util.ConditionalSkill\"\n | \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\"\n | \"#Microsoft.Skills.Vision.OcrSkill\"\n | \"#Microsoft.Skills.Vision.ImageAnalysisSkill\"\n | \"#Microsoft.Skills.Text.LanguageDetectionSkill\"\n | \"#Microsoft.Skills.Util.ShaperSkill\"\n | \"#Microsoft.Skills.Text.MergeSkill\"\n | \"#Microsoft.Skills.Text.EntityRecognitionSkill\"\n | \"#Microsoft.Skills.Text.SentimentSkill\"\n | \"#Microsoft.Skills.Text.V3.SentimentSkill\"\n | \"#Microsoft.Skills.Text.V3.EntityLinkingSkill\"\n | \"#Microsoft.Skills.Text.V3.EntityRecognitionSkill\"\n | \"#Microsoft.Skills.Text.PIIDetectionSkill\"\n | \"#Microsoft.Skills.Text.SplitSkill\"\n | \"#Microsoft.Skills.Text.CustomEntityLookupSkill\"\n | \"#Microsoft.Skills.Text.TranslationSkill\"\n | \"#Microsoft.Skills.Util.DocumentExtractionSkill\"\n | \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\"\n | \"#Microsoft.Skills.Custom.WebApiSkill\"\n | \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\";\n /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */\n name?: string;\n /** The description of the skill which describes the inputs, outputs, and usage of the skill. */\n description?: string;\n /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. */\n context?: string;\n /** Inputs of the skills could be a column in the source data set, or the output of an upstream skill. */\n inputs: InputFieldMappingEntry[];\n /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. */\n outputs: OutputFieldMappingEntry[];\n}\n\n/** Input field mapping for a skill. */\nexport interface InputFieldMappingEntry {\n /** The name of the input. */\n name: string;\n /** The source of the input. */\n source?: string;\n /** The source context used for selecting recursive inputs. */\n sourceContext?: string;\n /** The recursive inputs used when creating a complex type. */\n inputs?: InputFieldMappingEntry[];\n}\n\n/** Output field mapping for a skill. */\nexport interface OutputFieldMappingEntry {\n /** The name of the output defined by the skill. */\n name: string;\n /** The target name of the output. It is optional and default to name. */\n targetName?: string;\n}\n\n/** Base type for describing any Azure AI service resource attached to a skillset. */\nexport interface CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.DefaultCognitiveServices\"\n | \"#Microsoft.Azure.Search.CognitiveServicesByKey\";\n /** Description of the Azure AI service resource attached to a skillset. 
*/\n description?: string;\n}\n\n/** Definition of additional projections to azure blob, table, or files, of enriched data. */\nexport interface SearchIndexerKnowledgeStore {\n /** The connection string to the storage account projections will be stored in. */\n storageConnectionString: string;\n /** A list of additional projections to perform during indexing. */\n projections: SearchIndexerKnowledgeStoreProjection[];\n}\n\n/** Container object for various projection selectors. */\nexport interface SearchIndexerKnowledgeStoreProjection {\n /** Projections to Azure Table storage. */\n tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[];\n /** Projections to Azure Blob storage. */\n objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[];\n /** Projections to Azure File storage. */\n files?: SearchIndexerKnowledgeStoreFileProjectionSelector[];\n}\n\n/** Abstract class to share properties between concrete selectors. */\nexport interface SearchIndexerKnowledgeStoreProjectionSelector {\n /** Name of reference key to different projection. */\n referenceKeyName?: string;\n /** Name of generated key to store projection under. */\n generatedKeyName?: string;\n /** Source data to project. */\n source?: string;\n /** Source context for complex projections. */\n sourceContext?: string;\n /** Nested inputs for complex projections. */\n inputs?: InputFieldMappingEntry[];\n}\n\n/** Definition of additional projections to secondary search indexes. */\nexport interface SearchIndexerIndexProjection {\n /** A list of projections to be performed to secondary search indexes. */\n selectors: SearchIndexerIndexProjectionSelector[];\n /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\n parameters?: SearchIndexerIndexProjectionParameters;\n}\n\n/** Description for what data to store in the designated search index. */\nexport interface SearchIndexerIndexProjectionSelector {\n /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */\n targetIndexName: string;\n /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */\n parentKeyFieldName: string;\n /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */\n sourceContext: string;\n /** Mappings for the projection, or which source should be mapped to which field in the target index. */\n mappings: InputFieldMappingEntry[];\n}\n\n/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface SearchIndexerIndexProjectionParameters {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Defines behavior of the index projections in relation to the rest of the indexer. */\n projectionMode?: IndexProjectionMode;\n}\n\n/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */\nexport interface ListSkillsetsResult {\n /**\n * The skillsets defined in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly skillsets: SearchIndexerSkillset[];\n}\n\n/** Represents a synonym map definition. 
*/\nexport interface SynonymMap {\n /** The name of the synonym map. */\n name: string;\n /** The format of the synonym map. Only the 'solr' format is currently supported. */\n format: \"solr\";\n /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */\n synonyms: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n /** The ETag of the synonym map. */\n etag?: string;\n}\n\n/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */\nexport interface ListSynonymMapsResult {\n /**\n * The synonym maps in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly synonymMaps: SynonymMap[];\n}\n\n/** Represents a search index definition, which describes the fields and search behavior of an index. */\nexport interface SearchIndex {\n /** The name of the index. */\n name: string;\n /** The description of the index. */\n description?: string;\n /** The fields of the index. */\n fields: SearchField[];\n /** The scoring profiles for the index. */\n scoringProfiles?: ScoringProfile[];\n /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */\n defaultScoringProfile?: string;\n /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */\n corsOptions?: CorsOptions;\n /** The suggesters for the index. */\n suggesters?: Suggester[];\n /** The analyzers for the index. */\n analyzers?: LexicalAnalyzerUnion[];\n /** The tokenizers for the index. */\n tokenizers?: LexicalTokenizerUnion[];\n /** The token filters for the index. */\n tokenFilters?: TokenFilterUnion[];\n /** The character filters for the index. */\n charFilters?: CharFilterUnion[];\n /** The normalizers for the index. */\n normalizers?: BaseLexicalNormalizerUnion[];\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. 
The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */\n similarity?: SimilarityUnion;\n /** Defines parameters for a search index that influence semantic capabilities. */\n semanticSearch?: SemanticSearch;\n /** Contains configuration options related to vector search. */\n vectorSearch?: VectorSearch;\n /** The ETag of the index. */\n etag?: string;\n}\n\n/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */\nexport interface SearchField {\n /** The name of the field, which must be unique within the fields collection of the index or parent field. */\n name: string;\n /** The data type of the field. */\n type: SearchFieldDataType;\n /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. Default is false for simple fields and null for complex fields. */\n key?: boolean;\n /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields, false for vector fields, and null for complex fields. */\n retrievable?: boolean;\n /** An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage overhead. This can only be set during index creation and only for vector fields. This property cannot be changed for existing fields or set as false for new fields. If this property is set as false, the property 'retrievable' must also be set to false. This property must be true or unset for key fields, for new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */\n stored?: boolean;\n /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */\n searchable?: boolean;\n /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. 
Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */\n filterable?: boolean;\n /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */\n sortable?: boolean;\n /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */\n facetable?: boolean;\n /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */\n analyzer?: LexicalAnalyzerName;\n /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */\n searchAnalyzer?: LexicalAnalyzerName;\n /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */\n indexAnalyzer?: LexicalAnalyzerName;\n /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. */\n normalizer?: LexicalNormalizerName;\n /** The dimensionality of the vector field. */\n vectorSearchDimensions?: number;\n /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. 
*/\n vectorSearchProfileName?: string;\n /** The encoding format to interpret the field contents. */\n vectorEncodingFormat?: VectorEncodingFormat;\n /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */\n synonymMaps?: string[];\n /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. */\n fields?: SearchField[];\n}\n\n/** Defines parameters for a search index that influence scoring in search queries. */\nexport interface ScoringProfile {\n /** The name of the scoring profile. */\n name: string;\n /** Parameters that boost scoring based on text matches in certain index fields. */\n textWeights?: TextWeights;\n /** The collection of functions that influence the scoring of documents. */\n functions?: ScoringFunctionUnion[];\n /** A value indicating how the results of individual scoring functions should be combined. Defaults to \"Sum\". Ignored if there are no scoring functions. */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/** Defines weights on index fields for which matches should boost scoring in search queries. */\nexport interface TextWeights {\n /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */\n weights: { [propertyName: string]: number };\n}\n\n/** Base type for functions that can modify document scores during ranking. */\nexport interface ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"distance\" | \"freshness\" | \"magnitude\" | \"tag\";\n /** The name of the field used as input to the scoring function. */\n fieldName: string;\n /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */\n boost: number;\n /** A value indicating how boosting will be interpolated across document scores; defaults to \"Linear\". */\n interpolation?: ScoringFunctionInterpolation;\n}\n\n/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */\nexport interface CorsOptions {\n /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */\n allowedOrigins: string[];\n /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. */\n maxAgeInSeconds?: number;\n}\n\n/** Defines how the Suggest API should apply to a group of fields in the index. */\nexport interface Suggester {\n /** The name of the suggester. */\n name: string;\n /** A value indicating the capabilities of the suggester. */\n searchMode: \"analyzingInfixMatching\";\n /** The list of field names to which the suggester applies. Each field must be searchable. */\n sourceFields: string[];\n}\n\n
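/**
 * @example Illustrative sketch (hypothetical field and profile names; assumes only the SearchField, ScoringProfile, and TextWeights shapes defined above): a small fields collection plus a scoring profile that doubles the weight of text matches in `description`.
 * ```ts
 * const fields: SearchField[] = [
 *   { name: "hotelId", type: "Edm.String", key: true, filterable: true },
 *   { name: "description", type: "Edm.String", searchable: true, analyzer: "en.lucene" },
 * ];
 * const profile: ScoringProfile = {
 *   name: "boost-description",
 *   textWeights: { weights: { description: 2 } },
 *   functionAggregation: "sum", // how individual function scores combine
 * };
 * ```
 */
/** Base type for analyzers. 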
*/\nexport interface LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.CustomAnalyzer\"\n | \"#Microsoft.Azure.Search.PatternAnalyzer\"\n | \"#Microsoft.Azure.Search.StandardAnalyzer\"\n | \"#Microsoft.Azure.Search.StopAnalyzer\";\n /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for tokenizers. */\nexport interface LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.ClassicTokenizer\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenizer\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\"\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.MicrosoftLanguageTokenizer\"\n | \"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer\"\n | \"#Microsoft.Azure.Search.NGramTokenizer\"\n | \"#Microsoft.Azure.Search.PathHierarchyTokenizerV2\"\n | \"#Microsoft.Azure.Search.PatternTokenizer\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\"\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.UaxUrlEmailTokenizer\";\n /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for token filters. */\nexport interface TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.AsciiFoldingTokenFilter\"\n | \"#Microsoft.Azure.Search.CjkBigramTokenFilter\"\n | \"#Microsoft.Azure.Search.CommonGramTokenFilter\"\n | \"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.ElisionTokenFilter\"\n | \"#Microsoft.Azure.Search.KeepTokenFilter\"\n | \"#Microsoft.Azure.Search.KeywordMarkerTokenFilter\"\n | \"#Microsoft.Azure.Search.LengthTokenFilter\"\n | \"#Microsoft.Azure.Search.LimitTokenFilter\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\"\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.PatternCaptureTokenFilter\"\n | \"#Microsoft.Azure.Search.PatternReplaceTokenFilter\"\n | \"#Microsoft.Azure.Search.PhoneticTokenFilter\"\n | \"#Microsoft.Azure.Search.ShingleTokenFilter\"\n | \"#Microsoft.Azure.Search.SnowballTokenFilter\"\n | \"#Microsoft.Azure.Search.StemmerTokenFilter\"\n | \"#Microsoft.Azure.Search.StemmerOverrideTokenFilter\"\n | \"#Microsoft.Azure.Search.StopwordsTokenFilter\"\n | \"#Microsoft.Azure.Search.SynonymTokenFilter\"\n | \"#Microsoft.Azure.Search.TruncateTokenFilter\"\n | \"#Microsoft.Azure.Search.UniqueTokenFilter\"\n | \"#Microsoft.Azure.Search.WordDelimiterTokenFilter\";\n /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for character filters. 
*/\nexport interface CharFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.MappingCharFilter\"\n | \"#Microsoft.Azure.Search.PatternReplaceCharFilter\";\n /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for normalizers. */\nexport interface BaseLexicalNormalizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CustomNormalizer\";\n /** The name of the normalizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */\n name: string;\n}\n\n/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */\nexport interface Similarity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.ClassicSimilarity\"\n | \"#Microsoft.Azure.Search.BM25Similarity\";\n}\n\n/** Defines parameters for a search index that influence semantic capabilities. */\nexport interface SemanticSearch {\n /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */\n defaultConfigurationName?: string;\n /** The semantic configurations for the index. */\n configurations?: SemanticConfiguration[];\n}\n\n/** Defines a specific configuration to be used in the context of semantic capabilities. */\nexport interface SemanticConfiguration {\n /** The name of the semantic configuration. */\n name: string;\n /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub-properties (titleField, contentFields, and keywordsFields) needs to be set. */\n prioritizedFields: SemanticPrioritizedFields;\n /** Specifies the score type to be used for the sort order of the search results. */\n rankingOrder?: RankingOrder;\n}\n\n
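/**
 * @example Illustrative sketch (hypothetical configuration and field names; assumes only the SemanticSearch and SemanticConfiguration shapes above plus the SemanticPrioritizedFields shape defined next): registering a default semantic configuration.
 * ```ts
 * const semanticSearch: SemanticSearch = {
 *   defaultConfigurationName: "default-semantic-config",
 *   configurations: [
 *     {
 *       name: "default-semantic-config",
 *       prioritizedFields: {
 *         titleField: { name: "hotelName" },
 *         contentFields: [{ name: "description" }],
 *         keywordsFields: [{ name: "tags" }],
 *       },
 *     },
 *   ],
 * };
 * ```
 */
/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */\nexport interface SemanticPrioritizedFields {\n /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */\n titleField?: SemanticField;\n /** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */\n contentFields?: SemanticField[];\n /** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. 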
Fields with lower priority may get truncated if the content is long. */\n keywordsFields?: SemanticField[];\n}\n\n/** A field that is used as part of the semantic configuration. */\nexport interface SemanticField {\n name: string;\n}\n\n/** Contains configuration options related to vector search. */\nexport interface VectorSearch {\n /** Defines combinations of configurations to use with vector search. */\n profiles?: VectorSearchProfile[];\n /** Contains configuration options specific to the algorithm used during indexing or querying. */\n algorithms?: VectorSearchAlgorithmConfigurationUnion[];\n /** Contains configuration options on how to vectorize text vector queries. */\n vectorizers?: VectorSearchVectorizerUnion[];\n /** Contains configuration options specific to the compression method used during indexing or querying. */\n compressions?: VectorSearchCompressionUnion[];\n}\n\n/** Defines a combination of configurations to use with vector search. */\nexport interface VectorSearchProfile {\n /** The name to associate with this particular vector search profile. */\n name: string;\n /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */\n algorithmConfigurationName: string;\n /** The name of the vectorization being configured for use with vector search. */\n vectorizerName?: string;\n /** The name of the compression method configuration that specifies the compression method and optional parameters. */\n compressionName?: string;\n}\n\n/** Contains configuration options specific to the algorithm used during indexing or querying. */\nexport interface VectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"hnsw\" | \"exhaustiveKnn\";\n /** The name to associate with this particular configuration. */\n name: string;\n}\n\n/** Specifies the vectorization method to be used during query time. */\nexport interface VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\" | \"customWebApi\";\n /** The name to associate with this particular vectorization method. */\n vectorizerName: string;\n}\n\n
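/**
 * @example Illustrative sketch (hypothetical names and a placeholder resource URL; the `parameters` members on the concrete algorithm/vectorizer union types do not appear in this excerpt, so treat their shapes as assumptions): a profile that pairs an HNSW algorithm with an Azure OpenAI vectorizer.
 * ```ts
 * const vectorSearch: VectorSearch = {
 *   algorithms: [
 *     { name: "hnsw-1", kind: "hnsw", parameters: { m: 4, efConstruction: 400, metric: "cosine" } },
 *   ],
 *   vectorizers: [
 *     {
 *       vectorizerName: "openai-1",
 *       kind: "azureOpenAI",
 *       parameters: {
 *         resourceUrl: "https://<resource>.openai.azure.com",
 *         deploymentId: "embedding-deployment",
 *         modelName: "text-embedding-3-large",
 *       },
 *     },
 *   ],
 *   profiles: [
 *     { name: "vector-profile-1", algorithmConfigurationName: "hnsw-1", vectorizerName: "openai-1" },
 *   ],
 * };
 * ```
 */
/** Contains configuration options specific to the compression method used during indexing or querying. */\nexport interface VectorSearchCompression {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"scalarQuantization\" | \"binaryQuantization\";\n /** The name to associate with this particular configuration. */\n compressionName: string;\n /** Contains the options for rescoring. */\n rescoringOptions?: RescoringOptions;\n /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should only be used for embeddings trained with Matryoshka Representation Learning (MRL), such as the OpenAI text-embedding-3-large and text-embedding-3-small models. The default value is null, which means no truncation. */\n truncationDimension?: number;\n /** If set to true, once the ordered set of results calculated using compressed vectors is obtained, the results will be reranked by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */\n rerankWithOriginalVectors?: boolean;\n /** Default oversampling factor. 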
Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */\n defaultOversampling?: number;\n}\n\n/** Contains the options for rescoring. */\nexport interface RescoringOptions {\n /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */\n enableRescoring?: boolean;\n /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */\n defaultOversampling?: number;\n /** Controls the storage method for original vectors. This setting is immutable. */\n rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod;\n}\n\n/** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */\nexport interface ListIndexesResult {\n /**\n * The indexes in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly indexes: SearchIndex[];\n}\n\n/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */\nexport interface GetIndexStatisticsResult {\n /**\n * The number of documents in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly storageSize: number;\n /**\n * The amount of memory in bytes consumed by vectors in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly vectorIndexSize: number;\n}\n\n/** Specifies some text and analysis components used to break that text into tokens. */\nexport interface AnalyzeRequest {\n /** The text to break into tokens. */\n text: string;\n /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownAnalyzerNames is an enum containing known values. */\n analyzer?: string;\n /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */\n tokenizer?: string;\n /** The name of the normalizer to use to normalize the given text. */\n normalizer?: LexicalNormalizerName;\n /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */\n tokenFilters?: string[];\n /** An optional list of character filters to use when breaking the given text. 
This parameter can only be set when using the tokenizer parameter. */\n charFilters?: string[];\n}\n\n/** The result of testing an analyzer on text. */\nexport interface AnalyzeResult {\n /** The list of tokens returned by the analyzer specified in the request. */\n tokens: AnalyzedTokenInfo[];\n}\n\n/** Information about a token returned by an analyzer. */\nexport interface AnalyzedTokenInfo {\n /**\n * The token returned by the analyzer.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly token: string;\n /**\n * The index of the first character of the token in the input text.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly startOffset: number;\n /**\n * The index of the last character of the token in the input text.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly endOffset: number;\n /**\n * The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly position: number;\n}\n\n/** Response from a get service statistics request. If successful, it includes service level counters and limits. */\nexport interface ServiceStatistics {\n /** Service level resource counters. */\n counters: ServiceCounters;\n /** Service level general limits. */\n limits: ServiceLimits;\n}\n\n/** Represents service-level resource counters and quotas. */\nexport interface ServiceCounters {\n /** Total number of documents across all indexes in the service. */\n documentCounter: ResourceCounter;\n /** Total number of indexes. */\n indexCounter: ResourceCounter;\n /** Total number of indexers. */\n indexerCounter: ResourceCounter;\n /** Total number of data sources. */\n dataSourceCounter: ResourceCounter;\n /** Total size of used storage in bytes. */\n storageSizeCounter: ResourceCounter;\n /** Total number of synonym maps. */\n synonymMapCounter: ResourceCounter;\n /** Total number of skillsets. */\n skillsetCounter: ResourceCounter;\n /** Total memory consumption of all vector indexes within the service, in bytes. */\n vectorIndexSizeCounter: ResourceCounter;\n}\n\n/** Represents a resource's usage and quota. */\nexport interface ResourceCounter {\n /** The resource usage amount. */\n usage: number;\n /** The resource amount quota. */\n quota?: number;\n}\n\n/** Represents various service level limits. */\nexport interface ServiceLimits {\n /** The maximum allowed fields per index. */\n maxFieldsPerIndex?: number;\n /** The maximum depth to which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */\n maxFieldNestingDepthPerIndex?: number;\n /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */\n maxComplexCollectionFieldsPerIndex?: number;\n /** The maximum number of objects in complex collections allowed per document. */\n maxComplexObjectsInCollectionsPerDocument?: number;\n /** The maximum amount of storage in bytes allowed per index. */\n maxStoragePerIndexInBytes?: number;\n}\n\n
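/**
 * @example Illustrative sketch for the AnalyzeRequest type defined above ("standard.lucene", "whitespace", and "lowercase" are known analyzer/tokenizer/filter names; the sample text is arbitrary). The analyzer and tokenizer parameters are mutually exclusive, so each request sets exactly one.
 * ```ts
 * const withAnalyzer: AnalyzeRequest = {
 *   text: "The quick brown fox",
 *   analyzer: "standard.lucene",
 * };
 * const withTokenizer: AnalyzeRequest = {
 *   text: "The quick brown fox",
 *   tokenizer: "whitespace",
 *   tokenFilters: ["lowercase"], // only allowed together with tokenizer
 * };
 * ```
 */
/** Contains the parameters specific to the HNSW algorithm. 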
*/\nexport interface HnswParameters {\n /** The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time. */\n m?: number;\n /** The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns. */\n efConstruction?: number;\n /** The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns. */\n efSearch?: number;\n /** The similarity metric to use for vector comparisons. */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/** Contains the parameters specific to the exhaustive KNN algorithm. */\nexport interface ExhaustiveKnnParameters {\n /** The similarity metric to use for vector comparisons. */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/** Contains the parameters specific to Scalar Quantization. */\nexport interface ScalarQuantizationParameters {\n /** The quantized data type of compressed vector values. */\n quantizedDataType?: VectorSearchCompressionTarget;\n}\n\n/** Specifies the parameters for connecting to the Azure OpenAI resource. */\nexport interface AzureOpenAIParameters {\n /** The resource URI of the Azure OpenAI resource. */\n resourceUrl?: string;\n /** ID of the Azure OpenAI model deployment on the designated resource. */\n deploymentId?: string;\n /** API key of the designated Azure OpenAI resource. */\n apiKey?: string;\n /** The user-assigned managed identity used for outbound connections. */\n authIdentity?: SearchIndexerDataIdentityUnion;\n /** The name of the embedding model that is deployed at the provided deploymentId path. */\n modelName?: AzureOpenAIModelName;\n}\n\n/** Abstract base type for data identities. */\nexport interface SearchIndexerDataIdentity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.DataNoneIdentity\"\n | \"#Microsoft.Azure.Search.DataUserAssignedIdentity\";\n}\n\n
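/**
 * @example Illustrative sketch (placeholder resource URL and deployment name): keyless AzureOpenAIParameters. Omitting both apiKey and authIdentity is the keyless pattern, which, as an assumption here (the excerpt does not spell out the fallback for this type), falls back to the search service's system-assigned managed identity for the outbound connection.
 * ```ts
 * const openAiParams: AzureOpenAIParameters = {
 *   resourceUrl: "https://<resource>.openai.azure.com",
 *   deploymentId: "embedding-deployment",
 *   modelName: "text-embedding-ada-002",
 *   // No apiKey and no authIdentity: connect with the service's managed identity.
 * };
 * ```
 */
/** Specifies the properties for connecting to a user-defined vectorizer. */\nexport interface WebApiParameters {\n /** The URI of the Web API providing the vectorizer. */\n uri?: string;\n /** The headers required to make the HTTP request. */\n httpHeaders?: { [propertyName: string]: string };\n /** The method for the HTTP request. */\n httpMethod?: string;\n /** The desired timeout for the request. Default is 30 seconds. */\n timeout?: string;\n /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */\n authResourceId?: string;\n /** The user-assigned managed identity used for outbound connections. 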
If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n authIdentity?: SearchIndexerDataIdentityUnion;\n}\n\n/** Provides parameter values to a distance scoring function. */\nexport interface DistanceScoringParameters {\n /** The name of the parameter passed in search queries to specify the reference location. */\n referencePointParameter: string;\n /** The distance in kilometers from the reference location where the boosting range ends. */\n boostingDistance: number;\n}\n\n/** Provides parameter values to a freshness scoring function. */\nexport interface FreshnessScoringParameters {\n /** The expiration period after which boosting will stop for a particular document. */\n boostingDuration: string;\n}\n\n/** Provides parameter values to a magnitude scoring function. */\nexport interface MagnitudeScoringParameters {\n /** The field value at which boosting starts. */\n boostingRangeStart: number;\n /** The field value at which boosting ends. */\n boostingRangeEnd: number;\n /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */\n shouldBoostBeyondRangeByConstant?: boolean;\n}\n\n/** Provides parameter values to a tag scoring function. */\nexport interface TagScoringParameters {\n /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */\n tagsParameter: string;\n}\n\n/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface SearchIndexerKnowledgeStoreParameters {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Whether or not projections should synthesize a generated key name if one isn't already present. */\n synthesizeGeneratedKeyName?: boolean;\n}\n\n/** An object that contains information about the matches that were found, and related metadata. */\nexport interface CustomEntity {\n /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the \"normalized\" form of the text being found. */\n name: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n description?: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n type?: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n subtype?: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n id?: string;\n /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of \"Microsoft\" could be: microsoft, microSoft, MICROSOFT. */\n caseSensitive?: boolean;\n /** Defaults to false. 
Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */\n accentSensitive?: boolean;\n /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. For instance, if the edit distance is set to 3, \"Windows10\" would still match \"Windows\", \"Windows10\" and \"Windows 7\". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */\n fuzzyEditDistance?: number;\n /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases' caseSensitive values. */\n defaultCaseSensitive?: boolean;\n /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases' accentSensitive values. */\n defaultAccentSensitive?: boolean;\n /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases' fuzzyEditDistance values. */\n defaultFuzzyEditDistance?: number;\n /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */\n aliases?: CustomEntityAlias[];\n}\n\n/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */\nexport interface CustomEntityAlias {\n /** The text of the alias. */\n text: string;\n /** Determines whether the alias is case sensitive. */\n caseSensitive?: boolean;\n /** Determines whether the alias is accent sensitive. */\n accentSensitive?: boolean;\n /** Determines the fuzzy edit distance of the alias. */\n fuzzyEditDistance?: number;\n}\n\n/** Controls the cardinality for chunking the content. */\nexport interface DocumentIntelligenceLayoutSkillChunkingProperties {\n /** The unit of the chunk. */\n unit?: DocumentIntelligenceLayoutSkillChunkingUnit;\n /** The maximum chunk length in characters. Default is 500. */\n maximumLength?: number;\n /** The length of overlap provided between two text chunks. Default is 0. */\n overlapLength?: number;\n}\n\n/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */\nexport interface HighWaterMarkChangeDetectionPolicy\n extends DataChangeDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy\";\n /** The name of the high water mark column. */\n highWaterMarkColumnName: string;\n}\n\n/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */\nexport interface SqlIntegratedChangeTrackingPolicy\n extends DataChangeDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy\";\n}\n\n/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. 
*/\nexport interface SoftDeleteColumnDeletionDetectionPolicy\n extends DataDeletionDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy\";\n /** The name of the column to use for soft-deletion detection. */\n softDeleteColumnName?: string;\n /** The marker value that identifies an item as deleted. */\n softDeleteMarkerValue?: string;\n}\n\n/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */\nexport interface ConditionalSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.ConditionalSkill\";\n}\n\n/** A skill that uses text analytics for key phrase extraction. */\nexport interface KeyPhraseExtractionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: KeyPhraseExtractionSkillLanguage;\n /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */\n maxKeyPhraseCount?: number;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** A skill that extracts text from image files. */\nexport interface OcrSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.OcrSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: OcrSkillLanguage;\n /** A value indicating whether to turn orientation detection on or not. Default is false. */\n shouldDetectOrientation?: boolean;\n /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is \"space\". */\n lineEnding?: OcrLineEnding;\n}\n\n/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */\nexport interface ImageAnalysisSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.ImageAnalysisSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: ImageAnalysisSkillLanguage;\n /** A list of visual features. */\n visualFeatures?: VisualFeature[];\n /** A string indicating which domain-specific details to return. */\n details?: ImageDetail[];\n}\n\n/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. */\nexport interface LanguageDetectionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.LanguageDetectionSkill\";\n /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. 
*/\n defaultCountryHint?: string;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */\nexport interface ShaperSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.ShaperSkill\";\n}\n\n/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */\nexport interface MergeSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.MergeSkill\";\n /** The tag that indicates the start of the merged text. By default, the tag is an empty space. */\n insertPreTag?: string;\n /** The tag that indicates the end of the merged text. By default, the tag is an empty space. */\n insertPostTag?: string;\n}\n\n/**\n * This skill is deprecated. Use the V3.EntityRecognitionSkill instead.\n *\n * @deprecated\n */\nexport interface EntityRecognitionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.EntityRecognitionSkill\";\n /** A list of entity categories that should be extracted. */\n categories?: EntityCategory[];\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: EntityRecognitionSkillLanguage;\n /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */\n includeTypelessEntities?: boolean;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n}\n\n/**\n * This skill is deprecated. Use the V3.SentimentSkill instead.\n *\n * @deprecated\n */\nexport interface SentimentSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.SentimentSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: SentimentSkillLanguage;\n}\n\n/** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as \"negative\", \"neutral\" and \"positive\") based on the highest confidence score found by the service at the sentence and document level. */\nexport interface SentimentSkillV3 extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.V3.SentimentSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. 
*/\n includeOpinionMining?: boolean;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** Using the Text Analytics API, extracts linked entities from text. */\nexport interface EntityLinkingSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.V3.EntityLinkingSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** Using the Text Analytics API, extracts entities of different types from text. */\nexport interface EntityRecognitionSkillV3 extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.V3.EntityRecognitionSkill\";\n /** A list of entity categories that should be extracted. */\n categories?: string[];\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */\nexport interface PIIDetectionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.PIIDetectionSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */\n maskingMode?: PIIDetectionSkillMaskingMode;\n /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */\n maskingCharacter?: string;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n /** A list of PII entity categories that should be extracted and masked. 
*/\n categories?: string[];\n /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */\n domain?: string;\n}\n\n/** A skill to split a string into chunks of text. */\nexport interface SplitSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.SplitSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: SplitSkillLanguage;\n /** A value indicating which split mode to perform. */\n textSplitMode?: TextSplitMode;\n /** The desired maximum page length. Default is 10000. */\n maxPageLength?: number;\n /** Only applicable when textSplitMode is set to 'pages'. If specified, the (n+1)th chunk will start with this number of characters/tokens from the end of the nth chunk. */\n pageOverlapLength?: number;\n /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */\n maximumPagesToTake?: number;\n}\n\n/** A skill that looks for text from a custom, user-defined list of words and phrases. */\nexport interface CustomEntityLookupSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.CustomEntityLookupSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: CustomEntityLookupSkillLanguage;\n /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */\n entitiesDefinitionUri?: string;\n /** The inline CustomEntity definition. */\n inlineEntitiesDefinition?: CustomEntity[];\n /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */\n globalDefaultCaseSensitive?: boolean;\n /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */\n globalDefaultAccentSensitive?: boolean;\n /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */\n globalDefaultFuzzyEditDistance?: number;\n}\n\n
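/**
 * @example Illustrative sketch (hypothetical names; `context`, `inputs`, and `outputs` come from the SearchIndexerSkill base type, which is defined elsewhere in this file): a CustomEntityLookupSkill with one inline entity and a fuzzy alias.
 * ```ts
 * const lookupSkill: CustomEntityLookupSkill = {
 *   odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill",
 *   context: "/document",
 *   inputs: [{ name: "text", source: "/document/content" }],
 *   outputs: [{ name: "entities", targetName: "matchedEntities" }],
 *   inlineEntitiesDefinition: [
 *     {
 *       name: "Contoso",
 *       fuzzyEditDistance: 1, // tolerate one divergent character per match
 *       aliases: [{ text: "Contoso Ltd" }],
 *     },
 *   ],
 * };
 * ```
 */
/** A skill to translate text from one language to another. */\nexport interface TextTranslationSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.TranslationSkill\";\n /** The language code to translate documents into for documents that don't specify the to language explicitly. */\n defaultToLanguageCode: TextTranslationSkillLanguage;\n /** The language code to translate documents from for documents that don't specify the from language explicitly. */\n defaultFromLanguageCode?: TextTranslationSkillLanguage;\n /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter is provided, and the automatic language detection is unsuccessful. Default is `en`. 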
*/\n suggestedFrom?: TextTranslationSkillLanguage;\n}\n\n/** A skill that extracts content from a file within the enrichment pipeline. */\nexport interface DocumentExtractionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.DocumentExtractionSkill\";\n /** The parsingMode for the skill. Will be set to 'default' if not defined. */\n parsingMode?: string;\n /** The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. */\n dataToExtract?: string;\n /** A dictionary of configurations for the skill. */\n configuration?: { [propertyName: string]: any };\n}\n\n/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */\nexport interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\";\n /** Controls the cardinality of the output format. Default is 'markdown'. */\n outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;\n /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */\n outputMode?: DocumentIntelligenceLayoutSkillOutputMode;\n /** The depth of headers in the markdown output. Default is h6. */\n markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;\n /** Controls the cardinality of the content extracted from the document by the skill. */\n extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];\n /** Controls the cardinality for chunking the content. */\n chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;\n}\n\n/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */\nexport interface WebApiSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Custom.WebApiSkill\";\n /** The URL for the Web API. */\n uri: string;\n /** The headers required to make the HTTP request. */\n httpHeaders?: { [propertyName: string]: string };\n /** The method for the HTTP request. */\n httpMethod?: string;\n /** The desired timeout for the request. Default is 30 seconds. */\n timeout?: string;\n /** The desired batch size, which indicates the number of documents. */\n batchSize?: number;\n /** If set, the number of parallel calls that can be made to the Web API. */\n degreeOfParallelism?: number;\n /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */\n authResourceId?: string;\n /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. 
*/\n authIdentity?: SearchIndexerDataIdentityUnion;\n}\n\n/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */\nexport interface AzureOpenAIEmbeddingSkill\n extends SearchIndexerSkill,\n AzureOpenAIParameters {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\";\n /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */\n dimensions?: number;\n}\n\n/** An empty object that represents the default Azure AI service resource for a skillset. */\nexport interface DefaultCognitiveServicesAccount\n extends CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DefaultCognitiveServices\";\n}\n\n/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */\nexport interface CognitiveServicesAccountKey extends CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CognitiveServicesByKey\";\n /** The key used to provision the Azure AI service resource attached to a skillset. */\n key: string;\n}\n\n/** Description for what data to store in Azure Tables. */\nexport interface SearchIndexerKnowledgeStoreTableProjectionSelector\n extends SearchIndexerKnowledgeStoreProjectionSelector {\n /** Name of the Azure table to store projected data in. */\n tableName: string;\n}\n\n/** Abstract class to share properties between concrete selectors. */\nexport interface SearchIndexerKnowledgeStoreBlobProjectionSelector\n extends SearchIndexerKnowledgeStoreProjectionSelector {\n /** Blob container to store projections in. */\n storageContainer: string;\n}\n\n/** Defines a function that boosts scores based on distance from a geographic location. */\nexport interface DistanceScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"distance\";\n /** Parameter values for the distance scoring function. */\n parameters: DistanceScoringParameters;\n}\n\n/** Defines a function that boosts scores based on the value of a date-time field. */\nexport interface FreshnessScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"freshness\";\n /** Parameter values for the freshness scoring function. */\n parameters: FreshnessScoringParameters;\n}\n\n/** Defines a function that boosts scores based on the magnitude of a numeric field. */\nexport interface MagnitudeScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"magnitude\";\n /** Parameter values for the magnitude scoring function. */\n parameters: MagnitudeScoringParameters;\n}\n\n/** Defines a function that boosts scores of documents with string values matching a given list of tags. */\nexport interface TagScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"tag\";\n /** Parameter values for the tag scoring function. */\n parameters: TagScoringParameters;\n}\n\n
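/**
 * @example Illustrative sketch (hypothetical field and parameter names): a ScoringProfile combining the concrete function types above, with a freshness boost that decays over a year and a tag boost driven by a query-time parameter.
 * ```ts
 * const profile: ScoringProfile = {
 *   name: "fresh-and-tagged",
 *   functions: [
 *     {
 *       type: "freshness",
 *       fieldName: "lastRenovationDate",
 *       boost: 2,
 *       interpolation: "quadratic",
 *       parameters: { boostingDuration: "P365D" }, // ISO 8601 duration: one year
 *     },
 *     {
 *       type: "tag",
 *       fieldName: "tags",
 *       boost: 1.5,
 *       parameters: { tagsParameter: "preferredTags" },
 *     },
 *   ],
 *   functionAggregation: "sum",
 * };
 * ```
 */
/** Allows you to take control over the process of converting text into indexable/searchable tokens. 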
It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. */\nexport interface CustomAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /** The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. KnownTokenizerNames is an enum containing known values. */\n tokenizerName: string;\n /** A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */\n tokenFilters?: string[];\n /** A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */\n charFilters?: string[];\n}\n\n/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */\nexport interface PatternAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /** A value indicating whether terms should be lower-cased. Default is true. */\n lowerCaseTerms?: boolean;\n /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */\n pattern?: string;\n /** Regular expression flags. */\n flags?: string;\n /** A list of stopwords. */\n stopwords?: string[];\n}\n\n/** Standard Apache Lucene analyzer, composed of the standard tokenizer, lowercase filter and stop filter. */\nexport interface LuceneStandardAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StandardAnalyzer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n /** A list of stopwords. */\n stopwords?: string[];\n}\n\n/** Divides text at non-letters; applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */\nexport interface StopAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StopAnalyzer\";\n /** A list of stopwords. */\n stopwords?: string[];\n}\n\n/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */\nexport interface ClassicTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ClassicTokenizer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n
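/**
 * @example Illustrative sketch ("folded_keyword" is a hypothetical name; "keyword_v2", "lowercase", and "asciifolding" are known tokenizer/token filter names): a CustomAnalyzer that emits the entire input as a single lowercased, ASCII-folded token.
 * ```ts
 * const analyzer: CustomAnalyzer = {
 *   odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
 *   name: "folded_keyword",
 *   tokenizerName: "keyword_v2",
 *   tokenFilters: ["lowercase", "asciifolding"], // applied in listed order
 * };
 * ```
 */
/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. 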
*/\nexport interface EdgeNGramTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.EdgeNGramTokenizer\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n /** Character classes to keep in the tokens. */\n tokenChars?: TokenCharacterKind[];\n}\n\n/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */\nexport interface KeywordTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /** The read buffer size in bytes. Default is 256. */\n bufferSize?: number;\n}\n\n/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */\nexport interface KeywordTokenizerV2 extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeywordTokenizerV2\";\n /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Divides text using language-specific rules. */\nexport interface MicrosoftLanguageTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.MicrosoftLanguageTokenizer\";\n /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */\n maxTokenLength?: number;\n /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */\n isSearchTokenizer?: boolean;\n /** The language to use. The default is English. */\n language?: MicrosoftTokenizerLanguage;\n}\n\n/** Divides text using language-specific rules and reduces words to their base forms. */\nexport interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer\";\n /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */\n maxTokenLength?: number;\n /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */\n isSearchTokenizer?: boolean;\n /** The language to use. The default is English. */\n language?: MicrosoftStemmingTokenizerLanguage;\n}\n\n/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. 
*/\nexport interface NGramTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NGramTokenizer\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n /** Character classes to keep in the tokens. */\n tokenChars?: TokenCharacterKind[];\n}\n\n/** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. */\nexport interface PathHierarchyTokenizerV2 extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PathHierarchyTokenizerV2\";\n /** The delimiter character to use. Default is \"/\". */\n delimiter?: string;\n /** A value that, if set, replaces the delimiter character. Default is \"/\". */\n replacement?: string;\n /** The maximum token length. Default and maximum is 300. */\n maxTokenLength?: number;\n /** A value indicating whether to generate tokens in reverse order. Default is false. */\n reverseTokenOrder?: boolean;\n /** The number of initial tokens to skip. Default is 0. */\n numberOfTokensToSkip?: number;\n}\n\n/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */\nexport interface PatternTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */\n pattern?: string;\n /** Regular expression flags. */\n flags?: string;\n /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */\n group?: number;\n}\n\n/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */\nexport interface LuceneStandardTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StandardTokenizer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. */\n maxTokenLength?: number;\n}\n\n/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */\nexport interface LuceneStandardTokenizerV2 extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StandardTokenizerV2\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. */\nexport interface UaxUrlEmailTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.UaxUrlEmailTokenizer\";\n /** The maximum token length. Default is 255. 
Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */\nexport interface AsciiFoldingTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.AsciiFoldingTokenFilter\";\n /** A value indicating whether the original token will be kept. Default is false. */\n preserveOriginal?: boolean;\n}\n\n/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */\nexport interface CjkBigramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CjkBigramTokenFilter\";\n /** The scripts to ignore. */\n ignoreScripts?: CjkBigramTokenFilterScripts[];\n /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */\n outputUnigrams?: boolean;\n}\n\n/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */\nexport interface CommonGramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CommonGramTokenFilter\";\n /** The set of common words. */\n commonWords: string[];\n /** A value indicating whether common words matching will be case insensitive. Default is false. */\n ignoreCase?: boolean;\n /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */\n useQueryMode?: boolean;\n}\n\n/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */\nexport interface DictionaryDecompounderTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter\";\n /** The list of words to match against. */\n wordList: string[];\n /** The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. */\n minWordSize?: number;\n /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */\n minSubwordSize?: number;\n /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */\n maxSubwordSize?: number;\n /** A value indicating whether to add only the longest matching subword to the output. Default is false. */\n onlyLongestMatch?: boolean;\n}\n\n/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. 
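
A sketch of the dictionary decompounder above, with a toy word list; the filter name is illustrative and the word list is not a shipped dictionary:

import type { DictionaryDecompounderTokenFilter } from "@azure/search-documents";

// Splits Germanic compounds against the supplied dictionary,
// e.g. "Donaudampfschiff" -> "Donau", "Dampf", "Schiff".
const decompounder: DictionaryDecompounderTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter",
  name: "german-decompounder", // assumed: inherited from TokenFilter
  wordList: ["Donau", "Dampf", "Schiff"],
  minWordSize: 5, // inputs shorter than this pass through unchanged
};
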
*/\nexport interface EdgeNGramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. */\n maxGram?: number;\n /** Specifies which side of the input the n-gram should be generated from. Default is \"front\". */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */\nexport interface EdgeNGramTokenFilterV2 extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n /** Specifies which side of the input the n-gram should be generated from. Default is \"front\". */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/** Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). This token filter is implemented using Apache Lucene. */\nexport interface ElisionTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ElisionTokenFilter\";\n /** The set of articles to remove. */\n articles?: string[];\n}\n\n/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */\nexport interface KeepTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeepTokenFilter\";\n /** The list of words to keep. */\n keepWords: string[];\n /** A value indicating whether to lower case all words first. Default is false. */\n lowerCaseKeepWords?: boolean;\n}\n\n/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */\nexport interface KeywordMarkerTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeywordMarkerTokenFilter\";\n /** A list of words to mark as keywords. */\n keywords: string[];\n /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */\n ignoreCase?: boolean;\n}\n\n/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */\nexport interface LengthTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.LengthTokenFilter\";\n /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */\n minLength?: number;\n /** The maximum length in characters. Default and maximum is 300. */\n maxLength?: number;\n}\n\n/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. 
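
Two of the simpler filters above, sketched with illustrative names (the articles list is a minimal French example, not a complete one):

import type { ElisionTokenFilter, LengthTokenFilter } from "@azure/search-documents";

// Strips French elisions, so "l'avion" becomes "avion".
const frenchElision: ElisionTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter",
  name: "french-elision",
  articles: ["l", "d", "j"],
};

// Drops tokens shorter than 2 or longer than 30 characters
// (minLength must stay below maxLength).
const lengthFilter: LengthTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.LengthTokenFilter",
  name: "length-2-to-30",
  minLength: 2,
  maxLength: 30,
};
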
*/\nexport interface LimitTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.LimitTokenFilter\";\n /** The maximum number of tokens to produce. Default is 1. */\n maxTokenCount?: number;\n /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */\n consumeAllTokens?: boolean;\n}\n\n/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */\nexport interface NGramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. */\n maxGram?: number;\n}\n\n/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */\nexport interface NGramTokenFilterV2 extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NGramTokenFilterV2\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n}\n\n/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */\nexport interface PatternCaptureTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternCaptureTokenFilter\";\n /** A list of patterns to match against each token. */\n patterns: string[];\n /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */\n preserveOriginal?: boolean;\n}\n\n/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text \"aa bb aa bb\", pattern \"(aa)\\s+(bb)\", and replacement \"$1#$2\", the result would be \"aa#bb aa#bb\". This token filter is implemented using Apache Lucene. */\nexport interface PatternReplaceTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternReplaceTokenFilter\";\n /** A regular expression pattern. */\n pattern: string;\n /** The replacement text. */\n replacement: string;\n}\n\n/** Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. */\nexport interface PhoneticTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PhoneticTokenFilter\";\n /** The phonetic encoder to use. Default is \"metaphone\". */\n encoder?: PhoneticEncoder;\n /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */\n replaceOriginalTokens?: boolean;\n}\n\n/** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. 
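
A sketch of the phonetic filter above; the filter name is illustrative, and "doubleMetaphone" is one of the PhoneticEncoder values:

import type { PhoneticTokenFilter } from "@azure/search-documents";

// Adds Double Metaphone codes as synonyms so "smith" also matches "smyth";
// replaceOriginalTokens is set to false to keep the original terms.
const phoneticFilter: PhoneticTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter",
  name: "phonetic-matching",
  encoder: "doubleMetaphone",
  replaceOriginalTokens: false,
};
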
*/\nexport interface ShingleTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ShingleTokenFilter\";\n /** The maximum shingle size. Default and minimum value is 2. */\n maxShingleSize?: number;\n /** The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. */\n minShingleSize?: number;\n /** A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. Default is true. */\n outputUnigrams?: boolean;\n /** A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. */\n outputUnigramsIfNoShingles?: boolean;\n /** The string to use when joining adjacent tokens to form a shingle. Default is a single space (\" \"). */\n tokenSeparator?: string;\n /** The string to insert for each position at which there is no token. Default is an underscore (\"_\"). */\n filterToken?: string;\n}\n\n/** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */\nexport interface SnowballTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SnowballTokenFilter\";\n /** The language to use. */\n language: SnowballTokenFilterLanguage;\n}\n\n/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */\nexport interface StemmerTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StemmerTokenFilter\";\n /** The language to use. */\n language: StemmerTokenFilterLanguage;\n}\n\n/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */\nexport interface StemmerOverrideTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StemmerOverrideTokenFilter\";\n /** A list of stemming rules in the following format: \"word => stem\", for example: \"ran => run\". */\n rules: string[];\n}\n\n/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */\nexport interface StopwordsTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StopwordsTokenFilter\";\n /** The list of stopwords. This property and the stopwords list property cannot both be set. */\n stopwords?: string[];\n /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */\n stopwordsList?: StopwordsList;\n /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */\n ignoreCase?: boolean;\n /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */\n removeTrailingStopWords?: boolean;\n}\n\n/** Matches single or multi-word synonyms in a token stream. 
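
A sketch of the stopwords filter above using a predefined list; per the property docs, stopwords and stopwordsList are mutually exclusive, so only one is set:

import type { StopwordsTokenFilter } from "@azure/search-documents";

// Removes English stopwords, matching case-insensitively.
const englishStopwords: StopwordsTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter",
  name: "english-stopwords", // assumed: inherited from TokenFilter
  stopwordsList: "english",
  ignoreCase: true,
};
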
This token filter is implemented using Apache Lucene. */\nexport interface SynonymTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SynonymTokenFilter\";\n /** A list of synonyms in one of the following two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - a comma-separated list of equivalent words. Set the expand option to change how this list is interpreted. */\n synonyms: string[];\n /** A value indicating whether to case-fold input for matching. Default is false. */\n ignoreCase?: boolean;\n /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, the following list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */\n expand?: boolean;\n}\n\n/** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */\nexport interface TruncateTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.TruncateTokenFilter\";\n /** The length at which terms will be truncated. Default and maximum is 300. */\n length?: number;\n}\n\n/** Filters out tokens with the same text as the previous token. This token filter is implemented using Apache Lucene. */\nexport interface UniqueTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.UniqueTokenFilter\";\n /** A value indicating whether to remove duplicates only at the same position. Default is false. */\n onlyOnSamePosition?: boolean;\n}\n\n/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */\nexport interface WordDelimiterTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.WordDelimiterTokenFilter\";\n /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true. */\n generateWordParts?: boolean;\n /** A value indicating whether to generate number subwords. Default is true. */\n generateNumberParts?: boolean;\n /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, \"Azure-Search\" becomes \"AzureSearch\". Default is false. */\n catenateWords?: boolean;\n /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, \"1-2\" becomes \"12\". Default is false. */\n catenateNumbers?: boolean;\n /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, \"Azure-Search-1\" becomes \"AzureSearch1\". Default is false. 
*/\n catenateAll?: boolean;\n /** A value indicating whether to split words on caseChange. For example, if this is set to true, \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true. */\n splitOnCaseChange?: boolean;\n /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */\n preserveOriginal?: boolean;\n /** A value indicating whether to split on numbers. For example, if this is set to true, \"Azure1Search\" becomes \"Azure\" \"1\" \"Search\". Default is true. */\n splitOnNumerics?: boolean;\n /** A value indicating whether to remove trailing \"'s\" for each subword. Default is true. */\n stemEnglishPossessive?: boolean;\n /** A list of tokens to protect from being delimited. */\n protectedWords?: string[];\n}\n\n/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */\nexport interface MappingCharFilter extends CharFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.MappingCharFilter\";\n /** A list of mappings of the following format: \"a=>b\" (all occurrences of the character \"a\" will be replaced with character \"b\"). */\n mappings: string[];\n}\n\n/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text \"aa bb aa bb\", pattern \"(aa)\\s+(bb)\", and replacement \"$1#$2\", the result would be \"aa#bb aa#bb\". This character filter is implemented using Apache Lucene. */\nexport interface PatternReplaceCharFilter extends CharFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternReplaceCharFilter\";\n /** A regular expression pattern. */\n pattern: string;\n /** The replacement text. */\n replacement: string;\n}\n\n/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */\nexport interface CustomLexicalNormalizer extends BaseLexicalNormalizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CustomNormalizer\";\n /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */\n tokenFilters?: TokenFilterName[];\n /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */\n charFilters?: CharFilterName[];\n}\n\n/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. 
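
Sketches of the synonym filter (the second of the two formats documented above) and a mapping character filter; the names and the mapping itself are illustrative:

import type { SynonymTokenFilter, MappingCharFilter } from "@azure/search-documents";

// Equivalent-word list; with expand left at its default of true, every term
// maps to every other term in the list.
const usaSynonyms: SynonymTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter",
  name: "usa-synonyms",
  synonyms: ["USA, United States, United States of America"],
  ignoreCase: true,
};

// Greedy "a=>b" mapping applied before tokenization: hyphens become spaces.
const dashToSpace: MappingCharFilter = {
  odatatype: "#Microsoft.Azure.Search.MappingCharFilter",
  name: "dash-to-space",
  mappings: ["-=> "],
};
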
*/\nexport interface ClassicSimilarity extends Similarity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ClassicSimilarity\";\n}\n\n/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */\nexport interface BM25Similarity extends Similarity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.BM25Similarity\";\n /** This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */\n k1?: number;\n /** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */\n b?: number;\n}\n\n/** Contains configuration options specific to the HNSW approximate nearest neighbors algorithm used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search speed and accuracy. */\nexport interface HnswAlgorithmConfiguration\n extends VectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"hnsw\";\n /** Contains the parameters specific to HNSW algorithm. */\n parameters?: HnswParameters;\n}\n\n/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */\nexport interface ExhaustiveKnnAlgorithmConfiguration\n extends VectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"exhaustiveKnn\";\n /** Contains the parameters specific to exhaustive KNN algorithm. */\n parameters?: ExhaustiveKnnParameters;\n}\n\n/** Specifies the Azure OpenAI resource used to vectorize a query string. */\nexport interface AzureOpenAIVectorizer extends VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n /** Contains the parameters specific to Azure OpenAI embedding vectorization. */\n parameters?: AzureOpenAIParameters;\n}\n\n/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */\nexport interface WebApiVectorizer extends VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"customWebApi\";\n /** Specifies the properties of the user-defined vectorizer. */\n parameters?: WebApiParameters;\n}\n\n/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */\nexport interface ScalarQuantizationCompression extends VectorSearchCompression {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"scalarQuantization\";\n /** Contains the parameters specific to Scalar Quantization. 
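
How the relevance and vector-search shapes above fit together, as a hedged sketch; the configuration name is illustrative, name is assumed to come from the VectorSearchAlgorithmConfiguration base type, and the parameter fields (m, efConstruction, efSearch, metric) are assumed from HnswParameters:

import type { BM25Similarity, HnswAlgorithmConfiguration } from "@azure/search-documents";

// Flatter term-frequency saturation (k1) and lighter length normalization (b)
// than the documented defaults of 1.2 and 0.75.
const similarity: BM25Similarity = {
  odatatype: "#Microsoft.Azure.Search.BM25Similarity",
  k1: 1.0,
  b: 0.5,
};

// HNSW tuned toward recall; cosine is one of the supported metrics.
const hnswConfig: HnswAlgorithmConfiguration = {
  kind: "hnsw",
  name: "hnsw-cosine",
  parameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
};
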
*/\n parameters?: ScalarQuantizationParameters;\n}\n\n/** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */\nexport interface BinaryQuantizationCompression extends VectorSearchCompression {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"binaryQuantization\";\n}\n\n/** Clears the identity property of a datasource. */\nexport interface SearchIndexerDataNoneIdentity\n extends SearchIndexerDataIdentity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DataNoneIdentity\";\n}\n\n/** Specifies the identity for a datasource to use. */\nexport interface SearchIndexerDataUserAssignedIdentity\n extends SearchIndexerDataIdentity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DataUserAssignedIdentity\";\n /** The fully qualified Azure resource Id of a user-assigned managed identity, typically in the form \"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId\", that should have been assigned to the search service. */\n resourceId: string;\n}\n\n/** Projection definition for what data to store in Azure Blob. */\nexport interface SearchIndexerKnowledgeStoreObjectProjectionSelector\n extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}\n\n/** Projection definition for what data to store in Azure Files. */\nexport interface SearchIndexerKnowledgeStoreFileProjectionSelector\n extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}\n\n/** Known values of {@link ApiVersion20250901} that the service accepts. */\nexport enum KnownApiVersion20250901 {\n /** Api Version '2025-09-01' */\n TwoThousandTwentyFive0901 = \"2025-09-01\",\n}\n\n/**\n * Defines values for ApiVersion20250901. \\\n * {@link KnownApiVersion20250901} can be used interchangeably with ApiVersion20250901,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **2025-09-01**: Api Version '2025-09-01'\n */\nexport type ApiVersion20250901 = string;\n\n/** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */\nexport enum KnownSearchIndexerDataSourceType {\n /** Definition of an Azure SQL datasource whose credentials can either be a standard SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. */\n AzureSql = \"azuresql\",\n /** Definition of a CosmosDB datasource whose credentials can either be a formatted connection string containing details for AccountEndpoint, AccountKey, and Database for a key-based connection, or details for ResourceID and ApiKind for a keyless connection. The container property refers to the cosmosdb collection to be indexed and the optional query property refers to a SQL query on the collection. */\n CosmosDb = \"cosmosdb\",\n /** Definition of an Azure Blob datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. 
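
The Known* enums in this file follow the SDK's extensible-enum pattern: the enum lists values the service knows about, but any string the service accepts can be passed. A sketch, assuming the enum and the identity model are re-exported from the package root; the resourceId below is a placeholder:

import { KnownSearchIndexerDataSourceType } from "@azure/search-documents";
import type { SearchIndexerDataUserAssignedIdentity } from "@azure/search-documents";

const dataSourceType: string = KnownSearchIndexerDataSourceType.AzureBlob; // "azureblob"

// User-assigned managed identity for the datasource connection.
const identity: SearchIndexerDataUserAssignedIdentity = {
  odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity",
  resourceId:
    "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/rg" +
    "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/search-mi",
};
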
*/\n AzureBlob = \"azureblob\",\n /** Definition of an Azure Table datasource whose credentials can either be a table connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property can be used to filter rows. */\n AzureTable = \"azuretable\",\n /** Definition of an Azure SQL datasource whose credentials can either be a standard ADO.NET formatted SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. */\n MySql = \"mysql\",\n /** Definition of an Azure ADLS Gen 2 datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. */\n AdlsGen2 = \"adlsgen2\",\n /** Definition of an Microsoft Fabric Onelake datasource whose credentials can either be the Fabric workspace GUID or a workspace FQDN. The container property refers to the lakehouse GUID and the optional query property refers to folders or shortcuts in the lakehouse. */\n OneLake = \"onelake\",\n}\n\n/**\n * Defines values for SearchIndexerDataSourceType. \\\n * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **azuresql**: Definition of an Azure SQL datasource whose credentials can either be a standard SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. \\\n * **cosmosdb**: Definition of an CosmosDB datasource whose credentials can either be a formatted connection string containing details for AccountEndpoint, AccountKey, and Database for a key based connection or details for ResourceID and ApiKind for keyless connection. The container property refers to cosmosdb collection to be indexed and the optional query property refers to a SQL query on the collection. \\\n * **azureblob**: Definition of an Azure Blob datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. \\\n * **azuretable**: Definition of an Azure Table datasource whose credentials can either be a table connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property can be used to filter rows. \\\n * **mysql**: Definition of an Azure SQL datasource whose credentials can either be a standard ADO.NET formatted SQL connection string or the ResourceId of the SQL resource. The container property refers to the table or view to be indexed. Query parameter is not supported for this datasource. \\\n * **adlsgen2**: Definition of an Azure ADLS Gen 2 datasource whose credentials can either be a storage connection string or the ResourceId of the storage account. The container property refers to the blob container to be indexed and the optional query property refers to a specific sub-folder in the container. 
\\\n * **onelake**: Definition of a Microsoft Fabric OneLake datasource whose credentials can either be the Fabric workspace GUID or a workspace FQDN. The container property refers to the lakehouse GUID and the optional query property refers to folders or shortcuts in the lakehouse.\n */\nexport type SearchIndexerDataSourceType = string;\n\n/** Known values of {@link BlobIndexerParsingMode} that the service accepts. */\nexport enum KnownBlobIndexerParsingMode {\n /** Set to default for normal file processing. */\n Default = \"default\",\n /** Set to text to improve indexing performance on plain text files in blob storage. */\n Text = \"text\",\n /** Set to delimitedText when blobs are plain CSV files. */\n DelimitedText = \"delimitedText\",\n /** Set to json to extract structured content from JSON files. */\n Json = \"json\",\n /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */\n JsonArray = \"jsonArray\",\n /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */\n JsonLines = \"jsonLines\",\n}\n\n/**\n * Defines values for BlobIndexerParsingMode. \\\n * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **default**: Set to default for normal file processing. \\\n * **text**: Set to text to improve indexing performance on plain text files in blob storage. \\\n * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \\\n * **json**: Set to json to extract structured content from JSON files. \\\n * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. \\\n * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents.\n */\nexport type BlobIndexerParsingMode = string;\n\n/** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */\nexport enum KnownBlobIndexerDataToExtract {\n /** Indexes just the standard blob properties and user-specified metadata. */\n StorageMetadata = \"storageMetadata\",\n /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */\n AllMetadata = \"allMetadata\",\n /** Extracts all metadata and textual content from each blob. */\n ContentAndMetadata = \"contentAndMetadata\",\n}\n\n/**\n * Defines values for BlobIndexerDataToExtract. \\\n * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \\\n * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \\\n * **contentAndMetadata**: Extracts all metadata and textual content from each blob.\n */\nexport type BlobIndexerDataToExtract = string;\n\n/** Known values of {@link BlobIndexerImageAction} that the service accepts. */\nexport enum KnownBlobIndexerImageAction {\n /** Ignores embedded images or image files in the data set. This is the default. 
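
Because the extensible enum types above reduce to strings, plain literals type-check against the aliases; a sketch pairing a parsing mode with a data-to-extract setting for a container of JSON-lines blobs (variable names are illustrative):

import type { BlobIndexerParsingMode, BlobIndexerDataToExtract } from "@azure/search-documents";

// One search document per JSON-lines entry, pulling text and metadata.
const parsingMode: BlobIndexerParsingMode = "jsonLines";
const dataToExtract: BlobIndexerDataToExtract = "contentAndMetadata";
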
*/\n None = \"none\",\n /** Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field. This action requires that \"dataToExtract\" is set to \"contentAndMetadata\". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */\n GenerateNormalizedImages = \"generateNormalizedImages\",\n /** Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if \"generateNormalizedImages\" was set. */\n GenerateNormalizedImagePerPage = \"generateNormalizedImagePerPage\",\n}\n\n/**\n * Defines values for BlobIndexerImageAction. \\\n * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **none**: Ignores embedded images or image files in the data set. This is the default. \\\n * **generateNormalizedImages**: Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field. This action requires that \"dataToExtract\" is set to \"contentAndMetadata\". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \\\n * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if \"generateNormalizedImages\" was set.\n */\nexport type BlobIndexerImageAction = string;\n\n/** Known values of {@link BlobIndexerPDFTextRotationAlgorithm} that the service accepts. */\nexport enum KnownBlobIndexerPDFTextRotationAlgorithm {\n /** Leverages normal text extraction. This is the default. */\n None = \"none\",\n /** May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */\n DetectAngles = \"detectAngles\",\n}\n\n/**\n * Defines values for BlobIndexerPDFTextRotationAlgorithm. \\\n * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **none**: Leverages normal text extraction. This is the default. \\\n * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. 
This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.\n */\nexport type BlobIndexerPDFTextRotationAlgorithm = string;\n\n/** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */\nexport enum KnownIndexerExecutionEnvironment {\n /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */\n Standard = \"standard\",\n /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */\n Private = \"private\",\n}\n\n/**\n * Defines values for IndexerExecutionEnvironment. \\\n * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **standard**: Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \\\n * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.\n */\nexport type IndexerExecutionEnvironment = string;\n\n/** Known values of {@link IndexProjectionMode} that the service accepts. */\nexport enum KnownIndexProjectionMode {\n /** The source document will be skipped from writing into the indexer's target index. */\n SkipIndexingParentDocuments = \"skipIndexingParentDocuments\",\n /** The source document will be written into the indexer's target index. This is the default pattern. */\n IncludeIndexingParentDocuments = \"includeIndexingParentDocuments\",\n}\n\n/**\n * Defines values for IndexProjectionMode. \\\n * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \\\n * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.\n */\nexport type IndexProjectionMode = string;\n\n/** Known values of {@link SearchFieldDataType} that the service accepts. */\nexport enum KnownSearchFieldDataType {\n /** Indicates that a field contains a string. */\n String = \"Edm.String\",\n /** Indicates that a field contains a 32-bit signed integer. */\n Int32 = \"Edm.Int32\",\n /** Indicates that a field contains a 64-bit signed integer. */\n Int64 = \"Edm.Int64\",\n /** Indicates that a field contains an IEEE double-precision floating point number. */\n Double = \"Edm.Double\",\n /** Indicates that a field contains a Boolean value (true or false). */\n Boolean = \"Edm.Boolean\",\n /** Indicates that a field contains a date\\/time value, including timezone information. */\n DateTimeOffset = \"Edm.DateTimeOffset\",\n /** Indicates that a field contains a geo-location in terms of longitude and latitude. 
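
A short sketch of the field data types above; note that Edm.Single, Edm.Half, Edm.Int16, Edm.SByte, and Edm.Byte are only valid inside Collection(...), and Collection(Edm.Single) is the usual type for vector fields:

import type { SearchFieldDataType } from "@azure/search-documents";

const keyFieldType: SearchFieldDataType = "Edm.String";
const ratingFieldType: SearchFieldDataType = "Edm.Double";
const vectorFieldType: SearchFieldDataType = "Collection(Edm.Single)";
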
*/\n GeographyPoint = \"Edm.GeographyPoint\",\n /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */\n Complex = \"Edm.ComplexType\",\n /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). */\n Single = \"Edm.Single\",\n /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */\n Half = \"Edm.Half\",\n /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */\n Int16 = \"Edm.Int16\",\n /** Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */\n SByte = \"Edm.SByte\",\n /** Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */\n Byte = \"Edm.Byte\",\n}\n\n/**\n * Defines values for SearchFieldDataType. \\\n * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **Edm.String**: Indicates that a field contains a string. \\\n * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \\\n * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \\\n * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. \\\n * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \\\n * **Edm.DateTimeOffset**: Indicates that a field contains a date\/time value, including timezone information. \\\n * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \\\n * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \\\n * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). \\\n * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). \\\n * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). \\\n * **Edm.SByte**: Indicates that a field contains an 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). \\\n * **Edm.Byte**: Indicates that a field contains an 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte).\n */\nexport type SearchFieldDataType = string;\n\n/** Known values of {@link LexicalAnalyzerName} that the service accepts. */\nexport enum KnownLexicalAnalyzerName {\n /** Microsoft analyzer for Arabic. */\n ArMicrosoft = \"ar.microsoft\",\n /** Lucene analyzer for Arabic. */\n ArLucene = \"ar.lucene\",\n /** Lucene analyzer for Armenian. */\n HyLucene = \"hy.lucene\",\n /** Microsoft analyzer for Bangla. */\n BnMicrosoft = \"bn.microsoft\",\n /** Lucene analyzer for Basque. */\n EuLucene = \"eu.lucene\",\n /** Microsoft analyzer for Bulgarian. */\n BgMicrosoft = \"bg.microsoft\",\n /** Lucene analyzer for Bulgarian. */\n BgLucene = \"bg.lucene\",\n /** Microsoft analyzer for Catalan. */\n CaMicrosoft = \"ca.microsoft\",\n /** Lucene analyzer for Catalan. 
*/\n CaLucene = \"ca.lucene\",\n /** Microsoft analyzer for Chinese (Simplified). */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /** Lucene analyzer for Chinese (Simplified). */\n ZhHansLucene = \"zh-Hans.lucene\",\n /** Microsoft analyzer for Chinese (Traditional). */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /** Lucene analyzer for Chinese (Traditional). */\n ZhHantLucene = \"zh-Hant.lucene\",\n /** Microsoft analyzer for Croatian. */\n HrMicrosoft = \"hr.microsoft\",\n /** Microsoft analyzer for Czech. */\n CsMicrosoft = \"cs.microsoft\",\n /** Lucene analyzer for Czech. */\n CsLucene = \"cs.lucene\",\n /** Microsoft analyzer for Danish. */\n DaMicrosoft = \"da.microsoft\",\n /** Lucene analyzer for Danish. */\n DaLucene = \"da.lucene\",\n /** Microsoft analyzer for Dutch. */\n NlMicrosoft = \"nl.microsoft\",\n /** Lucene analyzer for Dutch. */\n NlLucene = \"nl.lucene\",\n /** Microsoft analyzer for English. */\n EnMicrosoft = \"en.microsoft\",\n /** Lucene analyzer for English. */\n EnLucene = \"en.lucene\",\n /** Microsoft analyzer for Estonian. */\n EtMicrosoft = \"et.microsoft\",\n /** Microsoft analyzer for Finnish. */\n FiMicrosoft = \"fi.microsoft\",\n /** Lucene analyzer for Finnish. */\n FiLucene = \"fi.lucene\",\n /** Microsoft analyzer for French. */\n FrMicrosoft = \"fr.microsoft\",\n /** Lucene analyzer for French. */\n FrLucene = \"fr.lucene\",\n /** Lucene analyzer for Galician. */\n GlLucene = \"gl.lucene\",\n /** Microsoft analyzer for German. */\n DeMicrosoft = \"de.microsoft\",\n /** Lucene analyzer for German. */\n DeLucene = \"de.lucene\",\n /** Microsoft analyzer for Greek. */\n ElMicrosoft = \"el.microsoft\",\n /** Lucene analyzer for Greek. */\n ElLucene = \"el.lucene\",\n /** Microsoft analyzer for Gujarati. */\n GuMicrosoft = \"gu.microsoft\",\n /** Microsoft analyzer for Hebrew. */\n HeMicrosoft = \"he.microsoft\",\n /** Microsoft analyzer for Hindi. */\n HiMicrosoft = \"hi.microsoft\",\n /** Lucene analyzer for Hindi. */\n HiLucene = \"hi.lucene\",\n /** Microsoft analyzer for Hungarian. */\n HuMicrosoft = \"hu.microsoft\",\n /** Lucene analyzer for Hungarian. */\n HuLucene = \"hu.lucene\",\n /** Microsoft analyzer for Icelandic. */\n IsMicrosoft = \"is.microsoft\",\n /** Microsoft analyzer for Indonesian (Bahasa). */\n IdMicrosoft = \"id.microsoft\",\n /** Lucene analyzer for Indonesian. */\n IdLucene = \"id.lucene\",\n /** Lucene analyzer for Irish. */\n GaLucene = \"ga.lucene\",\n /** Microsoft analyzer for Italian. */\n ItMicrosoft = \"it.microsoft\",\n /** Lucene analyzer for Italian. */\n ItLucene = \"it.lucene\",\n /** Microsoft analyzer for Japanese. */\n JaMicrosoft = \"ja.microsoft\",\n /** Lucene analyzer for Japanese. */\n JaLucene = \"ja.lucene\",\n /** Microsoft analyzer for Kannada. */\n KnMicrosoft = \"kn.microsoft\",\n /** Microsoft analyzer for Korean. */\n KoMicrosoft = \"ko.microsoft\",\n /** Lucene analyzer for Korean. */\n KoLucene = \"ko.lucene\",\n /** Microsoft analyzer for Latvian. */\n LvMicrosoft = \"lv.microsoft\",\n /** Lucene analyzer for Latvian. */\n LvLucene = \"lv.lucene\",\n /** Microsoft analyzer for Lithuanian. */\n LtMicrosoft = \"lt.microsoft\",\n /** Microsoft analyzer for Malayalam. */\n MlMicrosoft = \"ml.microsoft\",\n /** Microsoft analyzer for Malay (Latin). */\n MsMicrosoft = \"ms.microsoft\",\n /** Microsoft analyzer for Marathi. */\n MrMicrosoft = \"mr.microsoft\",\n /** Microsoft analyzer for Norwegian (Bokmål). */\n NbMicrosoft = \"nb.microsoft\",\n /** Lucene analyzer for Norwegian. 
*/\n NoLucene = \"no.lucene\",\n /** Lucene analyzer for Persian. */\n FaLucene = \"fa.lucene\",\n /** Microsoft analyzer for Polish. */\n PlMicrosoft = \"pl.microsoft\",\n /** Lucene analyzer for Polish. */\n PlLucene = \"pl.lucene\",\n /** Microsoft analyzer for Portuguese (Brazil). */\n PtBrMicrosoft = \"pt-BR.microsoft\",\n /** Lucene analyzer for Portuguese (Brazil). */\n PtBrLucene = \"pt-BR.lucene\",\n /** Microsoft analyzer for Portuguese (Portugal). */\n PtPtMicrosoft = \"pt-PT.microsoft\",\n /** Lucene analyzer for Portuguese (Portugal). */\n PtPtLucene = \"pt-PT.lucene\",\n /** Microsoft analyzer for Punjabi. */\n PaMicrosoft = \"pa.microsoft\",\n /** Microsoft analyzer for Romanian. */\n RoMicrosoft = \"ro.microsoft\",\n /** Lucene analyzer for Romanian. */\n RoLucene = \"ro.lucene\",\n /** Microsoft analyzer for Russian. */\n RuMicrosoft = \"ru.microsoft\",\n /** Lucene analyzer for Russian. */\n RuLucene = \"ru.lucene\",\n /** Microsoft analyzer for Serbian (Cyrillic). */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /** Microsoft analyzer for Serbian (Latin). */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /** Microsoft analyzer for Slovak. */\n SkMicrosoft = \"sk.microsoft\",\n /** Microsoft analyzer for Slovenian. */\n SlMicrosoft = \"sl.microsoft\",\n /** Microsoft analyzer for Spanish. */\n EsMicrosoft = \"es.microsoft\",\n /** Lucene analyzer for Spanish. */\n EsLucene = \"es.lucene\",\n /** Microsoft analyzer for Swedish. */\n SvMicrosoft = \"sv.microsoft\",\n /** Lucene analyzer for Swedish. */\n SvLucene = \"sv.lucene\",\n /** Microsoft analyzer for Tamil. */\n TaMicrosoft = \"ta.microsoft\",\n /** Microsoft analyzer for Telugu. */\n TeMicrosoft = \"te.microsoft\",\n /** Microsoft analyzer for Thai. */\n ThMicrosoft = \"th.microsoft\",\n /** Lucene analyzer for Thai. */\n ThLucene = \"th.lucene\",\n /** Microsoft analyzer for Turkish. */\n TrMicrosoft = \"tr.microsoft\",\n /** Lucene analyzer for Turkish. */\n TrLucene = \"tr.lucene\",\n /** Microsoft analyzer for Ukrainian. */\n UkMicrosoft = \"uk.microsoft\",\n /** Microsoft analyzer for Urdu. */\n UrMicrosoft = \"ur.microsoft\",\n /** Microsoft analyzer for Vietnamese. */\n ViMicrosoft = \"vi.microsoft\",\n /** Standard Lucene analyzer. */\n StandardLucene = \"standard.lucene\",\n /** Standard ASCII Folding Lucene analyzer. See https:\\//learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#Analyzers */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordAnalyzer.html */\n Keyword = \"keyword\",\n /** Flexibly separates text into terms via a regular expression pattern. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/PatternAnalyzer.html */\n Pattern = \"pattern\",\n /** Divides text at non-letters and converts them to lower case. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/SimpleAnalyzer.html */\n Simple = \"simple\",\n /** Divides text at non-letters; Applies the lowercase and stopword token filters. 
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopAnalyzer.html */\n Stop = \"stop\",\n /** An analyzer that uses the whitespace tokenizer. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceAnalyzer.html */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Defines values for LexicalAnalyzerName. \\\n * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar.microsoft**: Microsoft analyzer for Arabic. \\\n * **ar.lucene**: Lucene analyzer for Arabic. \\\n * **hy.lucene**: Lucene analyzer for Armenian. \\\n * **bn.microsoft**: Microsoft analyzer for Bangla. \\\n * **eu.lucene**: Lucene analyzer for Basque. \\\n * **bg.microsoft**: Microsoft analyzer for Bulgarian. \\\n * **bg.lucene**: Lucene analyzer for Bulgarian. \\\n * **ca.microsoft**: Microsoft analyzer for Catalan. \\\n * **ca.lucene**: Lucene analyzer for Catalan. \\\n * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \\\n * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \\\n * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \\\n * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \\\n * **hr.microsoft**: Microsoft analyzer for Croatian. \\\n * **cs.microsoft**: Microsoft analyzer for Czech. \\\n * **cs.lucene**: Lucene analyzer for Czech. \\\n * **da.microsoft**: Microsoft analyzer for Danish. \\\n * **da.lucene**: Lucene analyzer for Danish. \\\n * **nl.microsoft**: Microsoft analyzer for Dutch. \\\n * **nl.lucene**: Lucene analyzer for Dutch. \\\n * **en.microsoft**: Microsoft analyzer for English. \\\n * **en.lucene**: Lucene analyzer for English. \\\n * **et.microsoft**: Microsoft analyzer for Estonian. \\\n * **fi.microsoft**: Microsoft analyzer for Finnish. \\\n * **fi.lucene**: Lucene analyzer for Finnish. \\\n * **fr.microsoft**: Microsoft analyzer for French. \\\n * **fr.lucene**: Lucene analyzer for French. \\\n * **gl.lucene**: Lucene analyzer for Galician. \\\n * **de.microsoft**: Microsoft analyzer for German. \\\n * **de.lucene**: Lucene analyzer for German. \\\n * **el.microsoft**: Microsoft analyzer for Greek. \\\n * **el.lucene**: Lucene analyzer for Greek. \\\n * **gu.microsoft**: Microsoft analyzer for Gujarati. \\\n * **he.microsoft**: Microsoft analyzer for Hebrew. \\\n * **hi.microsoft**: Microsoft analyzer for Hindi. \\\n * **hi.lucene**: Lucene analyzer for Hindi. \\\n * **hu.microsoft**: Microsoft analyzer for Hungarian. \\\n * **hu.lucene**: Lucene analyzer for Hungarian. \\\n * **is.microsoft**: Microsoft analyzer for Icelandic. \\\n * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \\\n * **id.lucene**: Lucene analyzer for Indonesian. \\\n * **ga.lucene**: Lucene analyzer for Irish. \\\n * **it.microsoft**: Microsoft analyzer for Italian. \\\n * **it.lucene**: Lucene analyzer for Italian. \\\n * **ja.microsoft**: Microsoft analyzer for Japanese. \\\n * **ja.lucene**: Lucene analyzer for Japanese. \\\n * **kn.microsoft**: Microsoft analyzer for Kannada. \\\n * **ko.microsoft**: Microsoft analyzer for Korean. \\\n * **ko.lucene**: Lucene analyzer for Korean. \\\n * **lv.microsoft**: Microsoft analyzer for Latvian. \\\n * **lv.lucene**: Lucene analyzer for Latvian. \\\n * **lt.microsoft**: Microsoft analyzer for Lithuanian. 
\\\n * **ml.microsoft**: Microsoft analyzer for Malayalam. \\\n * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \\\n * **mr.microsoft**: Microsoft analyzer for Marathi. \\\n * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \\\n * **no.lucene**: Lucene analyzer for Norwegian. \\\n * **fa.lucene**: Lucene analyzer for Persian. \\\n * **pl.microsoft**: Microsoft analyzer for Polish. \\\n * **pl.lucene**: Lucene analyzer for Polish. \\\n * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \\\n * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \\\n * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \\\n * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \\\n * **pa.microsoft**: Microsoft analyzer for Punjabi. \\\n * **ro.microsoft**: Microsoft analyzer for Romanian. \\\n * **ro.lucene**: Lucene analyzer for Romanian. \\\n * **ru.microsoft**: Microsoft analyzer for Russian. \\\n * **ru.lucene**: Lucene analyzer for Russian. \\\n * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \\\n * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \\\n * **sk.microsoft**: Microsoft analyzer for Slovak. \\\n * **sl.microsoft**: Microsoft analyzer for Slovenian. \\\n * **es.microsoft**: Microsoft analyzer for Spanish. \\\n * **es.lucene**: Lucene analyzer for Spanish. \\\n * **sv.microsoft**: Microsoft analyzer for Swedish. \\\n * **sv.lucene**: Lucene analyzer for Swedish. \\\n * **ta.microsoft**: Microsoft analyzer for Tamil. \\\n * **te.microsoft**: Microsoft analyzer for Telugu. \\\n * **th.microsoft**: Microsoft analyzer for Thai. \\\n * **th.lucene**: Lucene analyzer for Thai. \\\n * **tr.microsoft**: Microsoft analyzer for Turkish. \\\n * **tr.lucene**: Lucene analyzer for Turkish. \\\n * **uk.microsoft**: Microsoft analyzer for Ukrainian. \\\n * **ur.microsoft**: Microsoft analyzer for Urdu. \\\n * **vi.microsoft**: Microsoft analyzer for Vietnamese. \\\n * **standard.lucene**: Standard Lucene analyzer. \\\n * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\\/\\/learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#Analyzers \\\n * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordAnalyzer.html \\\n * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/PatternAnalyzer.html \\\n * **simple**: Divides text at non-letters and converts them to lower case. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/SimpleAnalyzer.html \\\n * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopAnalyzer.html \\\n * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceAnalyzer.html\n */\nexport type LexicalAnalyzerName = string;\n\n/** Known values of {@link LexicalNormalizerName} that the service accepts. 
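Because these analyzer and normalizer names are plain strings at runtime, they drop straight into field definitions. A minimal sketch, assuming a hypothetical index, endpoint, and admin key (note that normalizers are a preview capability of the service):

```ts
import { AzureKeyCredential, SearchIndexClient } from "@azure/search-documents";
import type { SearchIndex } from "@azure/search-documents";

const client = new SearchIndexClient(
  "https://<service>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<admin-key>"),  // placeholder key
);

const index: SearchIndex = {
  name: "hotels-sample", // hypothetical index name
  fields: [
    { type: "Edm.String", name: "id", key: true },
    // Full-text field: "en.lucene" is KnownLexicalAnalyzerName.EnLucene.
    { type: "Edm.String", name: "description", searchable: true, analyzerName: "en.lucene" },
    // Filterable field: "lowercase" is KnownLexicalNormalizerName.Lowercase.
    { type: "Edm.String", name: "category", filterable: true, normalizerName: "lowercase" },
  ],
};

await client.createIndex(index);
```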
*/\nexport enum KnownLexicalNormalizerName {\n /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html */\n AsciiFolding = \"asciifolding\",\n /** Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html */\n Elision = \"elision\",\n /** Normalizes token text to lowercase. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html */\n Lowercase = \"lowercase\",\n /** Standard normalizer, which consists of lowercase and asciifolding. */\n Standard = \"standard\",\n /** Normalizes token text to uppercase. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html */\n Uppercase = \"uppercase\",\n}\n\n/**\n * Defines values for LexicalNormalizerName. \\\n * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html \\\n * **elision**: Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html \\\n * **lowercase**: Normalizes token text to lowercase. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html \\\n * **standard**: Standard normalizer, which consists of lowercase and asciifolding. \\\n * **uppercase**: Normalizes token text to uppercase. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html\n */\nexport type LexicalNormalizerName = string;\n\n/** Known values of {@link VectorEncodingFormat} that the service accepts. */\nexport enum KnownVectorEncodingFormat {\n /** Encoding format representing bits packed into a wider data type. */\n PackedBit = \"packedBit\",\n}\n\n/**\n * Defines values for VectorEncodingFormat. 
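`packedBit` pairs with fields whose elements carry packed bits, for example `Collection(Edm.Byte)`. A loose sketch of such a field; the property names follow the JS SDK's vector-field conventions and should be treated as assumptions:

```ts
// Sketch only: field and profile names are hypothetical, and
// vectorEncodingFormat mirrors the REST "vectorEncoding" property.
const packedVectorField = {
  name: "contentEmbeddingPacked",
  type: "Collection(Edm.Byte)",      // packed binary vectors use byte collections
  searchable: true,
  vectorSearchDimensions: 1024,      // logical (unpacked) dimension count
  vectorSearchProfileName: "binary-profile",
  vectorEncodingFormat: "packedBit", // KnownVectorEncodingFormat.PackedBit
};
```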
\\\n * {@link KnownVectorEncodingFormat} can be used interchangeably with VectorEncodingFormat,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **packedBit**: Encoding format representing bits packed into a wider data type.\n */\nexport type VectorEncodingFormat = string;\n\n/** Known values of {@link RankingOrder} that the service accepts. */\nexport enum KnownRankingOrder {\n /** Sets sort order as BoostedRerankerScore */\n BoostedRerankerScore = \"BoostedRerankerScore\",\n /** Sets sort order as RerankerScore */\n RerankerScore = \"RerankerScore\",\n}\n\n/**\n * Defines values for RankingOrder. \\\n * {@link KnownRankingOrder} can be used interchangeably with RankingOrder,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **BoostedRerankerScore**: Sets sort order as BoostedRerankerScore \\\n * **RerankerScore**: Sets sort order as RerankerScore\n */\nexport type RankingOrder = string;\n\n/** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */\nexport enum KnownVectorSearchAlgorithmKind {\n /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */\n Hnsw = \"hnsw\",\n /** Exhaustive KNN algorithm which will perform brute-force search. */\n ExhaustiveKnn = \"exhaustiveKnn\",\n}\n\n/**\n * Defines values for VectorSearchAlgorithmKind. \\\n * {@link KnownVectorSearchAlgorithmKind} can be used interchangeably with VectorSearchAlgorithmKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **hnsw**: HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. \\\n * **exhaustiveKnn**: Exhaustive KNN algorithm which will perform brute-force search.\n */\nexport type VectorSearchAlgorithmKind = string;\n\n/** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */\nexport enum KnownVectorSearchVectorizerKind {\n /** Generate embeddings using an Azure OpenAI resource at query time. */\n AzureOpenAI = \"azureOpenAI\",\n /** Generate embeddings using a custom web endpoint at query time. */\n CustomWebApi = \"customWebApi\",\n}\n\n/**\n * Defines values for VectorSearchVectorizerKind. \\\n * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \\\n * **customWebApi**: Generate embeddings using a custom web endpoint at query time.\n */\nexport type VectorSearchVectorizerKind = string;\n\n/** Known values of {@link VectorSearchCompressionKind} that the service accepts. */\nexport enum KnownVectorSearchCompressionKind {\n /** Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */\n ScalarQuantization = \"scalarQuantization\",\n /** Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. 
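Both compression kinds live under a `vectorSearch.compressions` entry together with the rescoring choices described next. A REST-shaped sketch (configuration names are hypothetical; the TypeScript wrappers may spell these properties slightly differently):

```ts
const compressions = [
  {
    name: "sq-config",          // hypothetical
    kind: "scalarQuantization", // KnownVectorSearchCompressionKind.ScalarQuantization
    scalarQuantizationParameters: { quantizedDataType: "int8" }, // int8 target defined later in this file
    rescoringOptions: {
      enableRescoring: true,
      defaultOversampling: 4,
      rescoreStorageMethod: "preserveOriginals", // keep full-precision vectors for rescoring
    },
  },
  {
    name: "bq-config",          // hypothetical
    kind: "binaryQuantization", // KnownVectorSearchCompressionKind.BinaryQuantization
    // discardOriginals maximizes storage savings but rules out rescoring/oversampling
    rescoringOptions: { rescoreStorageMethod: "discardOriginals" },
  },
];
```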
*/\n BinaryQuantization = \"binaryQuantization\",\n}\n\n/**\n * Defines values for VectorSearchCompressionKind. \\\n * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vector values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \\\n * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vector values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size.\n */\nexport type VectorSearchCompressionKind = string;\n\n/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */\nexport enum KnownVectorSearchCompressionRescoreStorageMethod {\n /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */\n PreserveOriginals = \"preserveOriginals\",\n /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */\n DiscardOriginals = \"discardOriginals\",\n}\n\n/**\n * Defines values for VectorSearchCompressionRescoreStorageMethod. \\\n * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \\\n * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality.\n */\nexport type VectorSearchCompressionRescoreStorageMethod = string;\n\n/** Known values of {@link TokenFilterName} that the service accepts. */\nexport enum KnownTokenFilterName {\n /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ar\\/ArabicNormalizationFilter.html */\n ArabicNormalization = \"arabic_normalization\",\n /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/tr\\/ApostropheFilter.html */\n Apostrophe = \"apostrophe\",\n /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. 
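These token filter names compose into custom analyzers. A small sketch using the package's CustomAnalyzer shape (the analyzer name is hypothetical; "standard_v2" is the standard Lucene tokenizer):

```ts
import type { CustomAnalyzer } from "@azure/search-documents";

// Folds diacritics to ASCII and lowercases terms before indexing.
const foldingAnalyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "folding-analyzer",
  tokenizerName: "standard_v2",
  tokenFilters: ["asciifolding", "lowercase"], // KnownTokenFilterName values
  charFilters: ["html_strip"],                 // KnownCharFilterName.HtmlStrip
};
```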
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html */\n AsciiFolding = \"asciifolding\",\n /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKBigramFilter.html */\n CjkBigram = \"cjk_bigram\",\n /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKWidthFilter.html */\n CjkWidth = \"cjk_width\",\n /** Removes English possessives, and dots from acronyms. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicFilter.html */\n Classic = \"classic\",\n /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/commongrams\\/CommonGramsFilter.html */\n CommonGram = \"common_grams\",\n /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenFilter.html */\n EdgeNGram = \"edgeNGram_v2\",\n /** Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html */\n Elision = \"elision\",\n /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/de\\/GermanNormalizationFilter.html */\n GermanNormalization = \"german_normalization\",\n /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/hi\\/HindiNormalizationFilter.html */\n HindiNormalization = \"hindi_normalization\",\n /** Normalizes the Unicode representation of text in Indian languages. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/in\\/IndicNormalizationFilter.html */\n IndicNormalization = \"indic_normalization\",\n /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/KeywordRepeatFilter.html */\n KeywordRepeat = \"keyword_repeat\",\n /** A high-performance kstem filter for English. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/en\\/KStemFilter.html */\n KStem = \"kstem\",\n /** Removes words that are too long or too short. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LengthFilter.html */\n Length = \"length\",\n /** Limits the number of tokens while indexing. 
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LimitTokenCountFilter.html */\n Limit = \"limit\",\n /** Normalizes token text to lower case. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html */\n Lowercase = \"lowercase\",\n /** Generates n-grams of the given size(s). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenFilter.html */\n NGram = \"nGram_v2\",\n /** Applies normalization for Persian. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/fa\\/PersianNormalizationFilter.html */\n PersianNormalization = \"persian_normalization\",\n /** Create tokens for phonetic matches. See https:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-phonetic\\/org\\/apache\\/lucene\\/analysis\\/phonetic\\/package-tree.html */\n Phonetic = \"phonetic\",\n /** Uses the Porter stemming algorithm to transform the token stream. See http:\\//tartarus.org\\/~martin\\/PorterStemmer */\n PorterStem = \"porter_stem\",\n /** Reverses the token string. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/reverse\\/ReverseStringFilter.html */\n Reverse = \"reverse\",\n /** Normalizes use of the interchangeable Scandinavian characters. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianNormalizationFilter.html */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianFoldingFilter.html */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /** Creates combinations of tokens as a single token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/shingle\\/ShingleFilter.html */\n Shingle = \"shingle\",\n /** A filter that stems words using a Snowball-generated stemmer. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/snowball\\/SnowballFilter.html */\n Snowball = \"snowball\",\n /** Normalizes the Unicode representation of Sorani text. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ckb\\/SoraniNormalizationFilter.html */\n SoraniNormalization = \"sorani_normalization\",\n /** Language specific stemming filter. See https:\\//learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#TokenFilters */\n Stemmer = \"stemmer\",\n /** Removes stop words from a token stream. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopFilter.html */\n Stopwords = \"stopwords\",\n /** Trims leading and trailing whitespace from tokens. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TrimFilter.html */\n Trim = \"trim\",\n /** Truncates the terms to a specific length. 
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TruncateTokenFilter.html */\n Truncate = \"truncate\",\n /** Filters out tokens with same text as the previous token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/RemoveDuplicatesTokenFilter.html */\n Unique = \"unique\",\n /** Normalizes token text to upper case. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html */\n Uppercase = \"uppercase\",\n /** Splits words into subwords and performs optional transformations on subword groups. */\n WordDelimiter = \"word_delimiter\",\n}\n\n/**\n * Defines values for TokenFilterName. \\\n * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ar\\/ArabicNormalizationFilter.html \\\n * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/tr\\/ApostropheFilter.html \\\n * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html \\\n * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKBigramFilter.html \\\n * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKWidthFilter.html \\\n * **classic**: Removes English possessives, and dots from acronyms. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicFilter.html \\\n * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/commongrams\\/CommonGramsFilter.html \\\n * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenFilter.html \\\n * **elision**: Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html \\\n * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/de\\/GermanNormalizationFilter.html \\\n * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/hi\\/HindiNormalizationFilter.html \\\n * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/in\\/IndicNormalizationFilter.html \\\n * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/KeywordRepeatFilter.html \\\n * **kstem**: A high-performance kstem filter for English. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/en\\/KStemFilter.html \\\n * **length**: Removes words that are too long or too short. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LengthFilter.html \\\n * **limit**: Limits the number of tokens while indexing. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LimitTokenCountFilter.html \\\n * **lowercase**: Normalizes token text to lower case. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html \\\n * **nGram_v2**: Generates n-grams of the given size(s). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenFilter.html \\\n * **persian_normalization**: Applies normalization for Persian. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/fa\\/PersianNormalizationFilter.html \\\n * **phonetic**: Create tokens for phonetic matches. See https:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-phonetic\\/org\\/apache\\/lucene\\/analysis\\/phonetic\\/package-tree.html \\\n * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\\/\\/tartarus.org\\/~martin\\/PorterStemmer \\\n * **reverse**: Reverses the token string. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/reverse\\/ReverseStringFilter.html \\\n * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianNormalizationFilter.html \\\n * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianFoldingFilter.html \\\n * **shingle**: Creates combinations of tokens as a single token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/shingle\\/ShingleFilter.html \\\n * **snowball**: A filter that stems words using a Snowball-generated stemmer. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/snowball\\/SnowballFilter.html \\\n * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ckb\\/SoraniNormalizationFilter.html \\\n * **stemmer**: Language specific stemming filter. See https:\\/\\/learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#TokenFilters \\\n * **stopwords**: Removes stop words from a token stream. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopFilter.html \\\n * **trim**: Trims leading and trailing whitespace from tokens. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TrimFilter.html \\\n * **truncate**: Truncates the terms to a specific length. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TruncateTokenFilter.html \\\n * **unique**: Filters out tokens with same text as the previous token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/RemoveDuplicatesTokenFilter.html \\\n * **uppercase**: Normalizes token text to upper case. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html \\\n * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups.\n */\nexport type TokenFilterName = string;\n\n/** Known values of {@link CharFilterName} that the service accepts. */\nexport enum KnownCharFilterName {\n /** A character filter that attempts to strip out HTML constructs. See https:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/charfilter\\/HTMLStripCharFilter.html */\n HtmlStrip = \"html_strip\",\n}\n\n/**\n * Defines values for CharFilterName. \\\n * {@link KnownCharFilterName} can be used interchangeably with CharFilterName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/charfilter\\/HTMLStripCharFilter.html\n */\nexport type CharFilterName = string;\n\n/** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */\nexport enum KnownVectorSearchAlgorithmMetric {\n /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */\n Cosine = \"cosine\",\n /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */\n Euclidean = \"euclidean\",\n /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */\n DotProduct = \"dotProduct\",\n /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */\n Hamming = \"hamming\",\n}\n\n/**\n * Defines values for VectorSearchAlgorithmMetric. 
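Each metric pairs with an algorithm entry in the index's vectorSearch section; hamming is reserved for packed binary fields. A loose sketch (names are hypothetical, and exact property spellings differ between the REST API and the TypeScript models, so treat them as assumptions):

```ts
const vectorSearch = {
  algorithms: [
    // Approximate search: HNSW with cosine similarity.
    { name: "hnsw-cosine", kind: "hnsw", parameters: { metric: "cosine", m: 4, efConstruction: 400 } },
    // Brute-force search over packed binary vectors.
    { name: "knn-hamming", kind: "exhaustiveKnn", parameters: { metric: "hamming" } },
  ],
  vectorizers: [
    {
      name: "aoai-vectorizer", // hypothetical
      kind: "azureOpenAI",     // KnownVectorSearchVectorizerKind.AzureOpenAI
      azureOpenAIParameters: { modelName: "text-embedding-3-small" }, // an AzureOpenAIModelName value listed below
    },
  ],
  profiles: [{ name: "default-profile", algorithmConfigurationName: "hnsw-cosine", vectorizerName: "aoai-vectorizer" }],
};
```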
\\\n * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \\\n * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \\\n * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \\\n * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity.\n */\nexport type VectorSearchAlgorithmMetric = string;\n\n/** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */\nexport enum KnownVectorSearchCompressionTarget {\n /** Int8 */\n Int8 = \"int8\",\n}\n\n/**\n * Defines values for VectorSearchCompressionTarget. \\\n * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **int8**\n */\nexport type VectorSearchCompressionTarget = string;\n\n/** Known values of {@link AzureOpenAIModelName} that the service accepts. */\nexport enum KnownAzureOpenAIModelName {\n /** TextEmbeddingAda002 */\n TextEmbeddingAda002 = \"text-embedding-ada-002\",\n /** TextEmbedding3Large */\n TextEmbedding3Large = \"text-embedding-3-large\",\n /** TextEmbedding3Small */\n TextEmbedding3Small = \"text-embedding-3-small\",\n}\n\n/**\n * Defines values for AzureOpenAIModelName. \\\n * {@link KnownAzureOpenAIModelName} can be used interchangeably with AzureOpenAIModelName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **text-embedding-ada-002** \\\n * **text-embedding-3-large** \\\n * **text-embedding-3-small**\n */\nexport type AzureOpenAIModelName = string;\n\n/** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */\nexport enum KnownKeyPhraseExtractionSkillLanguage {\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Korean */\n Ko = \"ko\",\n /** Norwegian (Bokmaal) */\n No = \"no\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Portuguese (Brazil) */\n PtBR = \"pt-BR\",\n /** Russian */\n Ru = \"ru\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n}\n\n/**\n * Defines values for KeyPhraseExtractionSkillLanguage. 
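The skill language codes are plain strings as well. A minimal end-to-end sketch that creates a skillset containing a key phrase skill (endpoint, key, and skillset name are placeholders):

```ts
import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<admin-key>"),  // placeholder key
);

await indexerClient.createSkillset({
  name: "keyphrase-skillset", // hypothetical
  skills: [
    {
      odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill",
      defaultLanguageCode: "en", // KnownKeyPhraseExtractionSkillLanguage.En
      inputs: [{ name: "text", source: "/document/content" }],
      outputs: [{ name: "keyPhrases", targetName: "keyPhrases" }],
    },
  ],
});
```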
\\\n * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **ko**: Korean \\\n * **no**: Norwegian (Bokmaal) \\\n * **pl**: Polish \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **pt-BR**: Portuguese (Brazil) \\\n * **ru**: Russian \\\n * **es**: Spanish \\\n * **sv**: Swedish\n */\nexport type KeyPhraseExtractionSkillLanguage = string;\n\n/** Known values of {@link OcrSkillLanguage} that the service accepts. */\nexport enum KnownOcrSkillLanguage {\n /** Afrikaans */\n Af = \"af\",\n /** Albanian */\n Sq = \"sq\",\n /** Angika (Devanagiri) */\n Anp = \"anp\",\n /** Arabic */\n Ar = \"ar\",\n /** Asturian */\n Ast = \"ast\",\n /** Awadhi-Hindi (Devanagiri) */\n Awa = \"awa\",\n /** Azerbaijani (Latin) */\n Az = \"az\",\n /** Bagheli */\n Bfy = \"bfy\",\n /** Basque */\n Eu = \"eu\",\n /** Belarusian (Cyrillic and Latin) */\n Be = \"be\",\n /** Belarusian (Cyrillic) */\n BeCyrl = \"be-cyrl\",\n /** Belarusian (Latin) */\n BeLatn = \"be-latn\",\n /** Bhojpuri-Hindi (Devanagiri) */\n Bho = \"bho\",\n /** Bislama */\n Bi = \"bi\",\n /** Bodo (Devanagiri) */\n Brx = \"brx\",\n /** Bosnian Latin */\n Bs = \"bs\",\n /** Brajbha */\n Bra = \"bra\",\n /** Breton */\n Br = \"br\",\n /** Bulgarian */\n Bg = \"bg\",\n /** Bundeli */\n Bns = \"bns\",\n /** Buryat (Cyrillic) */\n Bua = \"bua\",\n /** Catalan */\n Ca = \"ca\",\n /** Cebuano */\n Ceb = \"ceb\",\n /** Chamling */\n Rab = \"rab\",\n /** Chamorro */\n Ch = \"ch\",\n /** Chhattisgarhi (Devanagiri) */\n Hne = \"hne\",\n /** Chinese Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese Traditional */\n ZhHant = \"zh-Hant\",\n /** Cornish */\n Kw = \"kw\",\n /** Corsican */\n Co = \"co\",\n /** Crimean Tatar (Latin) */\n Crh = \"crh\",\n /** Croatian */\n Hr = \"hr\",\n /** Czech */\n Cs = \"cs\",\n /** Danish */\n Da = \"da\",\n /** Dari */\n Prs = \"prs\",\n /** Dhimal (Devanagiri) */\n Dhi = \"dhi\",\n /** Dogri (Devanagiri) */\n Doi = \"doi\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Erzya (Cyrillic) */\n Myv = \"myv\",\n /** Estonian */\n Et = \"et\",\n /** Faroese */\n Fo = \"fo\",\n /** Fijian */\n Fj = \"fj\",\n /** Filipino */\n Fil = \"fil\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Frulian */\n Fur = \"fur\",\n /** Gagauz (Latin) */\n Gag = \"gag\",\n /** Galician */\n Gl = \"gl\",\n /** German */\n De = \"de\",\n /** Gilbertese */\n Gil = \"gil\",\n /** Gondi (Devanagiri) */\n Gon = \"gon\",\n /** Greek */\n El = \"el\",\n /** Greenlandic */\n Kl = \"kl\",\n /** Gurung (Devanagiri) */\n Gvr = \"gvr\",\n /** Haitian Creole */\n Ht = \"ht\",\n /** Halbi (Devanagiri) */\n Hlb = \"hlb\",\n /** Hani */\n Hni = \"hni\",\n /** Haryanvi */\n Bgc = \"bgc\",\n /** Hawaiian */\n Haw = \"haw\",\n /** Hindi */\n Hi = \"hi\",\n /** Hmong Daw (Latin) */\n Mww = \"mww\",\n /** Ho (Devanagiri) */\n Hoc = \"hoc\",\n /** Hungarian */\n Hu = \"hu\",\n /** Icelandic */\n Is = \"is\",\n /** Inari Sami */\n Smn = \"smn\",\n /** Indonesian */\n Id = \"id\",\n /** Interlingua */\n Ia = \"ia\",\n /** Inuktitut (Latin) */\n Iu = \"iu\",\n /** Irish */\n Ga = \"ga\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Jaunsari (Devanagiri) 
*/\n Jns = \"Jns\",\n /** Javanese */\n Jv = \"jv\",\n /** Kabuverdianu */\n Kea = \"kea\",\n /** Kachin (Latin) */\n Kac = \"kac\",\n /** Kangri (Devanagiri) */\n Xnr = \"xnr\",\n /** Karachay-Balkar */\n Krc = \"krc\",\n /** Kara-Kalpak (Cyrillic) */\n KaaCyrl = \"kaa-cyrl\",\n /** Kara-Kalpak (Latin) */\n Kaa = \"kaa\",\n /** Kashubian */\n Csb = \"csb\",\n /** Kazakh (Cyrillic) */\n KkCyrl = \"kk-cyrl\",\n /** Kazakh (Latin) */\n KkLatn = \"kk-latn\",\n /** Khaling */\n Klr = \"klr\",\n /** Khasi */\n Kha = \"kha\",\n /** K'iche' */\n Quc = \"quc\",\n /** Korean */\n Ko = \"ko\",\n /** Korku */\n Kfq = \"kfq\",\n /** Koryak */\n Kpy = \"kpy\",\n /** Kosraean */\n Kos = \"kos\",\n /** Kumyk (Cyrillic) */\n Kum = \"kum\",\n /** Kurdish (Arabic) */\n KuArab = \"ku-arab\",\n /** Kurdish (Latin) */\n KuLatn = \"ku-latn\",\n /** Kurukh (Devanagiri) */\n Kru = \"kru\",\n /** Kyrgyz (Cyrillic) */\n Ky = \"ky\",\n /** Lakota */\n Lkt = \"lkt\",\n /** Latin */\n La = \"la\",\n /** Lithuanian */\n Lt = \"lt\",\n /** Lower Sorbian */\n Dsb = \"dsb\",\n /** Lule Sami */\n Smj = \"smj\",\n /** Luxembourgish */\n Lb = \"lb\",\n /** Mahasu Pahari (Devanagiri) */\n Bfz = \"bfz\",\n /** Malay (Latin) */\n Ms = \"ms\",\n /** Maltese */\n Mt = \"mt\",\n /** Malto (Devanagiri) */\n Kmj = \"kmj\",\n /** Manx */\n Gv = \"gv\",\n /** Maori */\n Mi = \"mi\",\n /** Marathi */\n Mr = \"mr\",\n /** Mongolian (Cyrillic) */\n Mn = \"mn\",\n /** Montenegrin (Cyrillic) */\n CnrCyrl = \"cnr-cyrl\",\n /** Montenegrin (Latin) */\n CnrLatn = \"cnr-latn\",\n /** Neapolitan */\n Nap = \"nap\",\n /** Nepali */\n Ne = \"ne\",\n /** Niuean */\n Niu = \"niu\",\n /** Nogay */\n Nog = \"nog\",\n /** Northern Sami (Latin) */\n Sme = \"sme\",\n /** Norwegian */\n Nb = \"nb\",\n /** Norwegian */\n No = \"no\",\n /** Occitan */\n Oc = \"oc\",\n /** Ossetic */\n Os = \"os\",\n /** Pashto */\n Ps = \"ps\",\n /** Persian */\n Fa = \"fa\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese */\n Pt = \"pt\",\n /** Punjabi (Arabic) */\n Pa = \"pa\",\n /** Ripuarian */\n Ksh = \"ksh\",\n /** Romanian */\n Ro = \"ro\",\n /** Romansh */\n Rm = \"rm\",\n /** Russian */\n Ru = \"ru\",\n /** Sadri (Devanagiri) */\n Sck = \"sck\",\n /** Samoan (Latin) */\n Sm = \"sm\",\n /** Sanskrit (Devanagiri) */\n Sa = \"sa\",\n /** Santali (Devanagiri) */\n Sat = \"sat\",\n /** Scots */\n Sco = \"sco\",\n /** Scottish Gaelic */\n Gd = \"gd\",\n /** Serbian (Latin) */\n Sr = \"sr\",\n /** Serbian (Cyrillic) */\n SrCyrl = \"sr-Cyrl\",\n /** Serbian (Latin) */\n SrLatn = \"sr-Latn\",\n /** Sherpa (Devanagiri) */\n Xsr = \"xsr\",\n /** Sirmauri (Devanagiri) */\n Srx = \"srx\",\n /** Skolt Sami */\n Sms = \"sms\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Somali (Arabic) */\n So = \"so\",\n /** Southern Sami */\n Sma = \"sma\",\n /** Spanish */\n Es = \"es\",\n /** Swahili (Latin) */\n Sw = \"sw\",\n /** Swedish */\n Sv = \"sv\",\n /** Tajik (Cyrillic) */\n Tg = \"tg\",\n /** Tatar (Latin) */\n Tt = \"tt\",\n /** Tetum */\n Tet = \"tet\",\n /** Thangmi */\n Thf = \"thf\",\n /** Tongan */\n To = \"to\",\n /** Turkish */\n Tr = \"tr\",\n /** Turkmen (Latin) */\n Tk = \"tk\",\n /** Tuvan */\n Tyv = \"tyv\",\n /** Upper Sorbian */\n Hsb = \"hsb\",\n /** Urdu */\n Ur = \"ur\",\n /** Uyghur (Arabic) */\n Ug = \"ug\",\n /** Uzbek (Arabic) */\n UzArab = \"uz-arab\",\n /** Uzbek (Cyrillic) */\n UzCyrl = \"uz-cyrl\",\n /** Uzbek (Latin) */\n Uz = \"uz\",\n /** Volapük */\n Vo = \"vo\",\n /** Walser */\n Wae = \"wae\",\n /** Welsh */\n Cy = 
\"cy\",\n /** Western Frisian */\n Fy = \"fy\",\n /** Yucatec Maya */\n Yua = \"yua\",\n /** Zhuang */\n Za = \"za\",\n /** Zulu */\n Zu = \"zu\",\n /** Unknown (All) */\n Unk = \"unk\",\n}\n\n/**\n * Defines values for OcrSkillLanguage. \\\n * {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **af**: Afrikaans \\\n * **sq**: Albanian \\\n * **anp**: Angika (Devanagiri) \\\n * **ar**: Arabic \\\n * **ast**: Asturian \\\n * **awa**: Awadhi-Hindi (Devanagiri) \\\n * **az**: Azerbaijani (Latin) \\\n * **bfy**: Bagheli \\\n * **eu**: Basque \\\n * **be**: Belarusian (Cyrillic and Latin) \\\n * **be-cyrl**: Belarusian (Cyrillic) \\\n * **be-latn**: Belarusian (Latin) \\\n * **bho**: Bhojpuri-Hindi (Devanagiri) \\\n * **bi**: Bislama \\\n * **brx**: Bodo (Devanagiri) \\\n * **bs**: Bosnian Latin \\\n * **bra**: Brajbha \\\n * **br**: Breton \\\n * **bg**: Bulgarian \\\n * **bns**: Bundeli \\\n * **bua**: Buryat (Cyrillic) \\\n * **ca**: Catalan \\\n * **ceb**: Cebuano \\\n * **rab**: Chamling \\\n * **ch**: Chamorro \\\n * **hne**: Chhattisgarhi (Devanagiri) \\\n * **zh-Hans**: Chinese Simplified \\\n * **zh-Hant**: Chinese Traditional \\\n * **kw**: Cornish \\\n * **co**: Corsican \\\n * **crh**: Crimean Tatar (Latin) \\\n * **hr**: Croatian \\\n * **cs**: Czech \\\n * **da**: Danish \\\n * **prs**: Dari \\\n * **dhi**: Dhimal (Devanagiri) \\\n * **doi**: Dogri (Devanagiri) \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **myv**: Erzya (Cyrillic) \\\n * **et**: Estonian \\\n * **fo**: Faroese \\\n * **fj**: Fijian \\\n * **fil**: Filipino \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **fur**: Frulian \\\n * **gag**: Gagauz (Latin) \\\n * **gl**: Galician \\\n * **de**: German \\\n * **gil**: Gilbertese \\\n * **gon**: Gondi (Devanagiri) \\\n * **el**: Greek \\\n * **kl**: Greenlandic \\\n * **gvr**: Gurung (Devanagiri) \\\n * **ht**: Haitian Creole \\\n * **hlb**: Halbi (Devanagiri) \\\n * **hni**: Hani \\\n * **bgc**: Haryanvi \\\n * **haw**: Hawaiian \\\n * **hi**: Hindi \\\n * **mww**: Hmong Daw (Latin) \\\n * **hoc**: Ho (Devanagiri) \\\n * **hu**: Hungarian \\\n * **is**: Icelandic \\\n * **smn**: Inari Sami \\\n * **id**: Indonesian \\\n * **ia**: Interlingua \\\n * **iu**: Inuktitut (Latin) \\\n * **ga**: Irish \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **Jns**: Jaunsari (Devanagiri) \\\n * **jv**: Javanese \\\n * **kea**: Kabuverdianu \\\n * **kac**: Kachin (Latin) \\\n * **xnr**: Kangri (Devanagiri) \\\n * **krc**: Karachay-Balkar \\\n * **kaa-cyrl**: Kara-Kalpak (Cyrillic) \\\n * **kaa**: Kara-Kalpak (Latin) \\\n * **csb**: Kashubian \\\n * **kk-cyrl**: Kazakh (Cyrillic) \\\n * **kk-latn**: Kazakh (Latin) \\\n * **klr**: Khaling \\\n * **kha**: Khasi \\\n * **quc**: K'iche' \\\n * **ko**: Korean \\\n * **kfq**: Korku \\\n * **kpy**: Koryak \\\n * **kos**: Kosraean \\\n * **kum**: Kumyk (Cyrillic) \\\n * **ku-arab**: Kurdish (Arabic) \\\n * **ku-latn**: Kurdish (Latin) \\\n * **kru**: Kurukh (Devanagiri) \\\n * **ky**: Kyrgyz (Cyrillic) \\\n * **lkt**: Lakota \\\n * **la**: Latin \\\n * **lt**: Lithuanian \\\n * **dsb**: Lower Sorbian \\\n * **smj**: Lule Sami \\\n * **lb**: Luxembourgish \\\n * **bfz**: Mahasu Pahari (Devanagiri) \\\n * **ms**: Malay (Latin) \\\n * **mt**: Maltese \\\n * **kmj**: Malto (Devanagiri) \\\n * **gv**: Manx \\\n * **mi**: Maori \\\n * **mr**: Marathi \\\n * **mn**: Mongolian (Cyrillic) \\\n * 
**cnr-cyrl**: Montenegrin (Cyrillic) \\\n * **cnr-latn**: Montenegrin (Latin) \\\n * **nap**: Neapolitan \\\n * **ne**: Nepali \\\n * **niu**: Niuean \\\n * **nog**: Nogay \\\n * **sme**: Northern Sami (Latin) \\\n * **nb**: Norwegian \\\n * **no**: Norwegian \\\n * **oc**: Occitan \\\n * **os**: Ossetic \\\n * **ps**: Pashto \\\n * **fa**: Persian \\\n * **pl**: Polish \\\n * **pt**: Portuguese \\\n * **pa**: Punjabi (Arabic) \\\n * **ksh**: Ripuarian \\\n * **ro**: Romanian \\\n * **rm**: Romansh \\\n * **ru**: Russian \\\n * **sck**: Sadri (Devanagiri) \\\n * **sm**: Samoan (Latin) \\\n * **sa**: Sanskrit (Devanagiri) \\\n * **sat**: Santali (Devanagiri) \\\n * **sco**: Scots \\\n * **gd**: Scottish Gaelic \\\n * **sr**: Serbian (Latin) \\\n * **sr-Cyrl**: Serbian (Cyrillic) \\\n * **sr-Latn**: Serbian (Latin) \\\n * **xsr**: Sherpa (Devanagiri) \\\n * **srx**: Sirmauri (Devanagiri) \\\n * **sms**: Skolt Sami \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **so**: Somali (Arabic) \\\n * **sma**: Southern Sami \\\n * **es**: Spanish \\\n * **sw**: Swahili (Latin) \\\n * **sv**: Swedish \\\n * **tg**: Tajik (Cyrillic) \\\n * **tt**: Tatar (Latin) \\\n * **tet**: Tetum \\\n * **thf**: Thangmi \\\n * **to**: Tongan \\\n * **tr**: Turkish \\\n * **tk**: Turkmen (Latin) \\\n * **tyv**: Tuvan \\\n * **hsb**: Upper Sorbian \\\n * **ur**: Urdu \\\n * **ug**: Uyghur (Arabic) \\\n * **uz-arab**: Uzbek (Arabic) \\\n * **uz-cyrl**: Uzbek (Cyrillic) \\\n * **uz**: Uzbek (Latin) \\\n * **vo**: Volapük \\\n * **wae**: Walser \\\n * **cy**: Welsh \\\n * **fy**: Western Frisian \\\n * **yua**: Yucatec Maya \\\n * **za**: Zhuang \\\n * **zu**: Zulu \\\n * **unk**: Unknown (All)\n */\nexport type OcrSkillLanguage = string;\n\n/** Known values of {@link OcrLineEnding} that the service accepts. */\nexport enum KnownOcrLineEnding {\n /** Lines are separated by a single space character. */\n Space = \"space\",\n /** Lines are separated by a carriage return ('\\r') character. */\n CarriageReturn = \"carriageReturn\",\n /** Lines are separated by a single line feed ('\\n') character. */\n LineFeed = \"lineFeed\",\n /** Lines are separated by a carriage return and a line feed ('\\r\\n') character. */\n CarriageReturnLineFeed = \"carriageReturnLineFeed\",\n}\n\n/**\n * Defines values for OcrLineEnding. \\\n * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **space**: Lines are separated by a single space character. \\\n * **carriageReturn**: Lines are separated by a carriage return ('\\r') character. \\\n * **lineFeed**: Lines are separated by a single line feed ('\\n') character. \\\n * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\\r\\n') character.\n */\nexport type OcrLineEnding = string;\n\n/** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. 
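An OCR skill combines one of the language codes above with a line-ending choice. A sketch, with hypothetical output names and the common enrichment paths:

```ts
const ocrSkill = {
  odatatype: "#Microsoft.Skills.Vision.OcrSkill" as const,
  defaultLanguageCode: "en", // KnownOcrSkillLanguage.En; "unk" lets the service auto-detect
  lineEnding: "lineFeed",    // KnownOcrLineEnding.LineFeed: recognized lines joined with '\n'
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [{ name: "text", targetName: "ocrText" }],
};
```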
*/\nexport enum KnownImageAnalysisSkillLanguage {\n /** Arabic */\n Ar = \"ar\",\n /** Azerbaijani */\n Az = \"az\",\n /** Bulgarian */\n Bg = \"bg\",\n /** Bosnian Latin */\n Bs = \"bs\",\n /** Catalan */\n Ca = \"ca\",\n /** Czech */\n Cs = \"cs\",\n /** Welsh */\n Cy = \"cy\",\n /** Danish */\n Da = \"da\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** English */\n En = \"en\",\n /** Spanish */\n Es = \"es\",\n /** Estonian */\n Et = \"et\",\n /** Basque */\n Eu = \"eu\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Irish */\n Ga = \"ga\",\n /** Galician */\n Gl = \"gl\",\n /** Hebrew */\n He = \"he\",\n /** Hindi */\n Hi = \"hi\",\n /** Croatian */\n Hr = \"hr\",\n /** Hungarian */\n Hu = \"hu\",\n /** Indonesian */\n Id = \"id\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Kazakh */\n Kk = \"kk\",\n /** Korean */\n Ko = \"ko\",\n /** Lithuanian */\n Lt = \"lt\",\n /** Latvian */\n Lv = \"lv\",\n /** Macedonian */\n Mk = \"mk\",\n /** Malay Malaysia */\n Ms = \"ms\",\n /** Norwegian (Bokmal) */\n Nb = \"nb\",\n /** Dutch */\n Nl = \"nl\",\n /** Polish */\n Pl = \"pl\",\n /** Dari */\n Prs = \"prs\",\n /** Portuguese-Brazil */\n PtBR = \"pt-BR\",\n /** Portuguese-Portugal */\n Pt = \"pt\",\n /** Portuguese-Portugal */\n PtPT = \"pt-PT\",\n /** Romanian */\n Ro = \"ro\",\n /** Russian */\n Ru = \"ru\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Serbian - Cyrillic RS */\n SrCyrl = \"sr-Cyrl\",\n /** Serbian - Latin RS */\n SrLatn = \"sr-Latn\",\n /** Swedish */\n Sv = \"sv\",\n /** Thai */\n Th = \"th\",\n /** Turkish */\n Tr = \"tr\",\n /** Ukrainian */\n Uk = \"uk\",\n /** Vietnamese */\n Vi = \"vi\",\n /** Chinese Simplified */\n Zh = \"zh\",\n /** Chinese Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese Traditional */\n ZhHant = \"zh-Hant\",\n}\n\n/**\n * Defines values for ImageAnalysisSkillLanguage. 
\\\n * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar**: Arabic \\\n * **az**: Azerbaijani \\\n * **bg**: Bulgarian \\\n * **bs**: Bosnian Latin \\\n * **ca**: Catalan \\\n * **cs**: Czech \\\n * **cy**: Welsh \\\n * **da**: Danish \\\n * **de**: German \\\n * **el**: Greek \\\n * **en**: English \\\n * **es**: Spanish \\\n * **et**: Estonian \\\n * **eu**: Basque \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **ga**: Irish \\\n * **gl**: Galician \\\n * **he**: Hebrew \\\n * **hi**: Hindi \\\n * **hr**: Croatian \\\n * **hu**: Hungarian \\\n * **id**: Indonesian \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **kk**: Kazakh \\\n * **ko**: Korean \\\n * **lt**: Lithuanian \\\n * **lv**: Latvian \\\n * **mk**: Macedonian \\\n * **ms**: Malay Malaysia \\\n * **nb**: Norwegian (Bokmal) \\\n * **nl**: Dutch \\\n * **pl**: Polish \\\n * **prs**: Dari \\\n * **pt-BR**: Portuguese-Brazil \\\n * **pt**: Portuguese-Portugal \\\n * **pt-PT**: Portuguese-Portugal \\\n * **ro**: Romanian \\\n * **ru**: Russian \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **sr-Cyrl**: Serbian - Cyrillic RS \\\n * **sr-Latn**: Serbian - Latin RS \\\n * **sv**: Swedish \\\n * **th**: Thai \\\n * **tr**: Turkish \\\n * **uk**: Ukrainian \\\n * **vi**: Vietnamese \\\n * **zh**: Chinese Simplified \\\n * **zh-Hans**: Chinese Simplified \\\n * **zh-Hant**: Chinese Traditional\n */\nexport type ImageAnalysisSkillLanguage = string;\n\n/** Known values of {@link VisualFeature} that the service accepts. */\nexport enum KnownVisualFeature {\n /** Visual features recognized as adult persons. */\n Adult = \"adult\",\n /** Visual features recognized as commercial brands. */\n Brands = \"brands\",\n /** Categories. */\n Categories = \"categories\",\n /** Description. */\n Description = \"description\",\n /** Visual features recognized as people faces. */\n Faces = \"faces\",\n /** Visual features recognized as objects. */\n Objects = \"objects\",\n /** Tags. */\n Tags = \"tags\",\n}\n\n/**\n * Defines values for VisualFeature. \\\n * {@link KnownVisualFeature} can be used interchangeably with VisualFeature,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **adult**: Visual features recognized as adult persons. \\\n * **brands**: Visual features recognized as commercial brands. \\\n * **categories**: Categories. \\\n * **description**: Description. \\\n * **faces**: Visual features recognized as people faces. \\\n * **objects**: Visual features recognized as objects. \\\n * **tags**: Tags.\n */\nexport type VisualFeature = string;\n\n/** Known values of {@link ImageDetail} that the service accepts. */\nexport enum KnownImageDetail {\n /** Details recognized as celebrities. */\n Celebrities = \"celebrities\",\n /** Details recognized as landmarks. */\n Landmarks = \"landmarks\",\n}\n\n/**\n * Defines values for ImageDetail. \\\n * {@link KnownImageDetail} can be used interchangeably with ImageDetail,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **celebrities**: Details recognized as celebrities. \\\n * **landmarks**: Details recognized as landmarks.\n */\nexport type ImageDetail = string;\n\n/** Known values of {@link EntityCategory} that the service accepts. 
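The image analysis language codes combine with the visual features and details above. A sketch of one skill entry (the output target name is hypothetical):

```ts
const imageAnalysisSkill = {
  odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill" as const,
  defaultLanguageCode: "en",               // KnownImageAnalysisSkillLanguage.En
  visualFeatures: ["tags", "description"], // KnownVisualFeature values
  details: ["landmarks"],                  // KnownImageDetail.Landmarks
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [{ name: "tags", targetName: "imageTags" }],
};
```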
*/\nexport enum KnownEntityCategory {\n /** Entities describing a physical location. */\n Location = \"location\",\n /** Entities describing an organization. */\n Organization = \"organization\",\n /** Entities describing a person. */\n Person = \"person\",\n /** Entities describing a quantity. */\n Quantity = \"quantity\",\n /** Entities describing a date and time. */\n Datetime = \"datetime\",\n /** Entities describing a URL. */\n Url = \"url\",\n /** Entities describing an email address. */\n Email = \"email\",\n}\n\n/**\n * Defines values for EntityCategory. \\\n * {@link KnownEntityCategory} can be used interchangeably with EntityCategory,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **location**: Entities describing a physical location. \\\n * **organization**: Entities describing an organization. \\\n * **person**: Entities describing a person. \\\n * **quantity**: Entities describing a quantity. \\\n * **datetime**: Entities describing a date and time. \\\n * **url**: Entities describing a URL. \\\n * **email**: Entities describing an email address.\n */\nexport type EntityCategory = string;\n\n/** Known values of {@link EntityRecognitionSkillLanguage} that the service accepts. */\nexport enum KnownEntityRecognitionSkillLanguage {\n /** Arabic */\n Ar = \"ar\",\n /** Czech */\n Cs = \"cs\",\n /** Chinese-Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese-Traditional */\n ZhHant = \"zh-Hant\",\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** Hungarian */\n Hu = \"hu\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Korean */\n Ko = \"ko\",\n /** Norwegian (Bokmaal) */\n No = \"no\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Portuguese (Brazil) */\n PtBR = \"pt-BR\",\n /** Russian */\n Ru = \"ru\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n /** Turkish */\n Tr = \"tr\",\n}\n\n/**\n * Defines values for EntityRecognitionSkillLanguage. \\\n * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar**: Arabic \\\n * **cs**: Czech \\\n * **zh-Hans**: Chinese-Simplified \\\n * **zh-Hant**: Chinese-Traditional \\\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **el**: Greek \\\n * **hu**: Hungarian \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **ko**: Korean \\\n * **no**: Norwegian (Bokmaal) \\\n * **pl**: Polish \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **pt-BR**: Portuguese (Brazil) \\\n * **ru**: Russian \\\n * **es**: Spanish \\\n * **sv**: Swedish \\\n * **tr**: Turkish\n */\nexport type EntityRecognitionSkillLanguage = string;\n\n/** Known values of {@link SentimentSkillLanguage} that the service accepts. 
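The categories above narrow what the original entity recognition skill returns. A sketch restricted to organizations and locations:

```ts
const entityRecognitionSkill = {
  odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill" as const,
  defaultLanguageCode: "en",                // KnownEntityRecognitionSkillLanguage.En
  categories: ["organization", "location"], // KnownEntityCategory values
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "organizations" }, { name: "locations" }],
};
```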
*/\nexport enum KnownSentimentSkillLanguage {\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** Italian */\n It = \"it\",\n /** Norwegian (Bokmaal) */\n No = \"no\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Russian */\n Ru = \"ru\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n /** Turkish */\n Tr = \"tr\",\n}\n\n/**\n * Defines values for SentimentSkillLanguage. \\\n * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **el**: Greek \\\n * **it**: Italian \\\n * **no**: Norwegian (Bokmaal) \\\n * **pl**: Polish \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **ru**: Russian \\\n * **es**: Spanish \\\n * **sv**: Swedish \\\n * **tr**: Turkish\n */\nexport type SentimentSkillLanguage = string;\n\n/** Known values of {@link PIIDetectionSkillMaskingMode} that the service accepts. */\nexport enum KnownPIIDetectionSkillMaskingMode {\n /** No masking occurs and the maskedText output will not be returned. */\n None = \"none\",\n /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. */\n Replace = \"replace\",\n}\n\n/**\n * Defines values for PIIDetectionSkillMaskingMode. \\\n * {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **none**: No masking occurs and the maskedText output will not be returned. \\\n * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText.\n */\nexport type PIIDetectionSkillMaskingMode = string;\n\n/** Known values of {@link SplitSkillLanguage} that the service accepts. 
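Since "replace" repeats the masking character across each detected span, offsets in maskedText stay aligned with the input text. A sketch:

```ts
const piiDetectionSkill = {
  odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill" as const,
  defaultLanguageCode: "en",
  maskingMode: "replace", // KnownPIIDetectionSkillMaskingMode.Replace
  maskingCharacter: "*",  // repeated over each detected entity
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "maskedText" }, { name: "piiEntities" }],
};
```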
*/\nexport enum KnownSplitSkillLanguage {\n /** Amharic */\n Am = \"am\",\n /** Bosnian */\n Bs = \"bs\",\n /** Czech */\n Cs = \"cs\",\n /** Danish */\n Da = \"da\",\n /** German */\n De = \"de\",\n /** English */\n En = \"en\",\n /** Spanish */\n Es = \"es\",\n /** Estonian */\n Et = \"et\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Hebrew */\n He = \"he\",\n /** Hindi */\n Hi = \"hi\",\n /** Croatian */\n Hr = \"hr\",\n /** Hungarian */\n Hu = \"hu\",\n /** Indonesian */\n Id = \"id\",\n /** Icelandic */\n Is = \"is\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Korean */\n Ko = \"ko\",\n /** Latvian */\n Lv = \"lv\",\n /** Norwegian */\n Nb = \"nb\",\n /** Dutch */\n Nl = \"nl\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n Pt = \"pt\",\n /** Portuguese (Brazil) */\n PtBr = \"pt-br\",\n /** Russian */\n Ru = \"ru\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Serbian */\n Sr = \"sr\",\n /** Swedish */\n Sv = \"sv\",\n /** Turkish */\n Tr = \"tr\",\n /** Urdu */\n Ur = \"ur\",\n /** Chinese (Simplified) */\n Zh = \"zh\",\n}\n\n/**\n * Defines values for SplitSkillLanguage. \\\n * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **am**: Amharic \\\n * **bs**: Bosnian \\\n * **cs**: Czech \\\n * **da**: Danish \\\n * **de**: German \\\n * **en**: English \\\n * **es**: Spanish \\\n * **et**: Estonian \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **he**: Hebrew \\\n * **hi**: Hindi \\\n * **hr**: Croatian \\\n * **hu**: Hungarian \\\n * **id**: Indonesian \\\n * **is**: Icelandic \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **ko**: Korean \\\n * **lv**: Latvian \\\n * **nb**: Norwegian \\\n * **nl**: Dutch \\\n * **pl**: Polish \\\n * **pt**: Portuguese (Portugal) \\\n * **pt-br**: Portuguese (Brazil) \\\n * **ru**: Russian \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **sr**: Serbian \\\n * **sv**: Swedish \\\n * **tr**: Turkish \\\n * **ur**: Urdu \\\n * **zh**: Chinese (Simplified)\n */\nexport type SplitSkillLanguage = string;\n\n/** Known values of {@link TextSplitMode} that the service accepts. */\nexport enum KnownTextSplitMode {\n /** Split the text into individual pages. */\n Pages = \"pages\",\n /** Split the text into individual sentences. */\n Sentences = \"sentences\",\n}\n\n/**\n * Defines values for TextSplitMode. \\\n * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **pages**: Split the text into individual pages. \\\n * **sentences**: Split the text into individual sentences.\n */\nexport type TextSplitMode = string;\n\n/** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */\nexport enum KnownCustomEntityLookupSkillLanguage {\n /** Danish */\n Da = \"da\",\n /** German */\n De = \"de\",\n /** English */\n En = \"en\",\n /** Spanish */\n Es = \"es\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Italian */\n It = \"it\",\n /** Korean */\n Ko = \"ko\",\n /** Portuguese */\n Pt = \"pt\",\n}\n\n/**\n * Defines values for CustomEntityLookupSkillLanguage. 
\\\n * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **da**: Danish \\\n * **de**: German \\\n * **en**: English \\\n * **es**: Spanish \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **it**: Italian \\\n * **ko**: Korean \\\n * **pt**: Portuguese\n */\nexport type CustomEntityLookupSkillLanguage = string;\n\n/** Known values of {@link TextTranslationSkillLanguage} that the service accepts. */\nexport enum KnownTextTranslationSkillLanguage {\n /** Afrikaans */\n Af = \"af\",\n /** Arabic */\n Ar = \"ar\",\n /** Bangla */\n Bn = \"bn\",\n /** Bosnian (Latin) */\n Bs = \"bs\",\n /** Bulgarian */\n Bg = \"bg\",\n /** Cantonese (Traditional) */\n Yue = \"yue\",\n /** Catalan */\n Ca = \"ca\",\n /** Chinese Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese Traditional */\n ZhHant = \"zh-Hant\",\n /** Croatian */\n Hr = \"hr\",\n /** Czech */\n Cs = \"cs\",\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Estonian */\n Et = \"et\",\n /** Fijian */\n Fj = \"fj\",\n /** Filipino */\n Fil = \"fil\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** Haitian Creole */\n Ht = \"ht\",\n /** Hebrew */\n He = \"he\",\n /** Hindi */\n Hi = \"hi\",\n /** Hmong Daw */\n Mww = \"mww\",\n /** Hungarian */\n Hu = \"hu\",\n /** Icelandic */\n Is = \"is\",\n /** Indonesian */\n Id = \"id\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Kiswahili */\n Sw = \"sw\",\n /** Klingon */\n Tlh = \"tlh\",\n /** Klingon (Latin script) */\n TlhLatn = \"tlh-Latn\",\n /** Klingon (Klingon script) */\n TlhPiqd = \"tlh-Piqd\",\n /** Korean */\n Ko = \"ko\",\n /** Latvian */\n Lv = \"lv\",\n /** Lithuanian */\n Lt = \"lt\",\n /** Malagasy */\n Mg = \"mg\",\n /** Malay */\n Ms = \"ms\",\n /** Maltese */\n Mt = \"mt\",\n /** Norwegian */\n Nb = \"nb\",\n /** Persian */\n Fa = \"fa\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese */\n Pt = \"pt\",\n /** Portuguese (Brazil) */\n PtBr = \"pt-br\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Queretaro Otomi */\n Otq = \"otq\",\n /** Romanian */\n Ro = \"ro\",\n /** Russian */\n Ru = \"ru\",\n /** Samoan */\n Sm = \"sm\",\n /** Serbian (Cyrillic) */\n SrCyrl = \"sr-Cyrl\",\n /** Serbian (Latin) */\n SrLatn = \"sr-Latn\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n /** Tahitian */\n Ty = \"ty\",\n /** Tamil */\n Ta = \"ta\",\n /** Telugu */\n Te = \"te\",\n /** Thai */\n Th = \"th\",\n /** Tongan */\n To = \"to\",\n /** Turkish */\n Tr = \"tr\",\n /** Ukrainian */\n Uk = \"uk\",\n /** Urdu */\n Ur = \"ur\",\n /** Vietnamese */\n Vi = \"vi\",\n /** Welsh */\n Cy = \"cy\",\n /** Yucatec Maya */\n Yua = \"yua\",\n /** Irish */\n Ga = \"ga\",\n /** Kannada */\n Kn = \"kn\",\n /** Maori */\n Mi = \"mi\",\n /** Malayalam */\n Ml = \"ml\",\n /** Punjabi */\n Pa = \"pa\",\n}\n\n/**\n * Defines values for TextTranslationSkillLanguage. 
\\\n * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **af**: Afrikaans \\\n * **ar**: Arabic \\\n * **bn**: Bangla \\\n * **bs**: Bosnian (Latin) \\\n * **bg**: Bulgarian \\\n * **yue**: Cantonese (Traditional) \\\n * **ca**: Catalan \\\n * **zh-Hans**: Chinese Simplified \\\n * **zh-Hant**: Chinese Traditional \\\n * **hr**: Croatian \\\n * **cs**: Czech \\\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **et**: Estonian \\\n * **fj**: Fijian \\\n * **fil**: Filipino \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **el**: Greek \\\n * **ht**: Haitian Creole \\\n * **he**: Hebrew \\\n * **hi**: Hindi \\\n * **mww**: Hmong Daw \\\n * **hu**: Hungarian \\\n * **is**: Icelandic \\\n * **id**: Indonesian \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **sw**: Kiswahili \\\n * **tlh**: Klingon \\\n * **tlh-Latn**: Klingon (Latin script) \\\n * **tlh-Piqd**: Klingon (Klingon script) \\\n * **ko**: Korean \\\n * **lv**: Latvian \\\n * **lt**: Lithuanian \\\n * **mg**: Malagasy \\\n * **ms**: Malay \\\n * **mt**: Maltese \\\n * **nb**: Norwegian \\\n * **fa**: Persian \\\n * **pl**: Polish \\\n * **pt**: Portuguese \\\n * **pt-br**: Portuguese (Brazil) \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **otq**: Queretaro Otomi \\\n * **ro**: Romanian \\\n * **ru**: Russian \\\n * **sm**: Samoan \\\n * **sr-Cyrl**: Serbian (Cyrillic) \\\n * **sr-Latn**: Serbian (Latin) \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **es**: Spanish \\\n * **sv**: Swedish \\\n * **ty**: Tahitian \\\n * **ta**: Tamil \\\n * **te**: Telugu \\\n * **th**: Thai \\\n * **to**: Tongan \\\n * **tr**: Turkish \\\n * **uk**: Ukrainian \\\n * **ur**: Urdu \\\n * **vi**: Vietnamese \\\n * **cy**: Welsh \\\n * **yua**: Yucatec Maya \\\n * **ga**: Irish \\\n * **kn**: Kannada \\\n * **mi**: Maori \\\n * **ml**: Malayalam \\\n * **pa**: Punjabi\n */\nexport type TextTranslationSkillLanguage = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillOutputFormat} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillOutputFormat {\n /** Specify the format of the output as text. */\n Text = \"text\",\n /** Specify the format of the output as markdown. */\n Markdown = \"markdown\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillOutputFormat. \\\n * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **text**: Specify the format of the output as text. \\\n * **markdown**: Specify the format of the output as markdown.\n */\nexport type DocumentIntelligenceLayoutSkillOutputFormat = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillOutputMode {\n /** Specify that the output should be parsed as 'oneToMany'. */\n OneToMany = \"oneToMany\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillOutputMode. 
\\\n * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **oneToMany**: Specify that the output should be parsed as 'oneToMany'.\n */\nexport type DocumentIntelligenceLayoutSkillOutputMode = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth {\n /** Header level 1. */\n H1 = \"h1\",\n /** Header level 2. */\n H2 = \"h2\",\n /** Header level 3. */\n H3 = \"h3\",\n /** Header level 4. */\n H4 = \"h4\",\n /** Header level 5. */\n H5 = \"h5\",\n /** Header level 6. */\n H6 = \"h6\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \\\n * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **h1**: Header level 1. \\\n * **h2**: Header level 2. \\\n * **h3**: Header level 3. \\\n * **h4**: Header level 4. \\\n * **h5**: Header level 5. \\\n * **h6**: Header level 6.\n */\nexport type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillExtractionOptions} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillExtractionOptions {\n /** Specify that image content should be extracted from the document. */\n Images = \"images\",\n /** Specify that location metadata should be extracted from the document. */\n LocationMetadata = \"locationMetadata\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillExtractionOptions. \\\n * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **images**: Specify that image content should be extracted from the document. \\\n * **locationMetadata**: Specify that location metadata should be extracted from the document.\n */\nexport type DocumentIntelligenceLayoutSkillExtractionOptions = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillChunkingUnit} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {\n /** Specifies chunk by characters. */\n Characters = \"characters\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillChunkingUnit. \\\n * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **characters**: Specifies chunk by characters.\n */\nexport type DocumentIntelligenceLayoutSkillChunkingUnit = string;\n\n/** Known values of {@link LexicalTokenizerName} that the service accepts. */\nexport enum KnownLexicalTokenizerName {\n /** Grammar-based tokenizer that is suitable for processing most European-language documents. 
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicTokenizer.html */\n Classic = \"classic\",\n /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenizer.html */\n EdgeNGram = \"edgeNGram\",\n /** Emits the entire input as a single token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordTokenizer.html */\n Keyword = \"keyword_v2\",\n /** Divides text at non-letters. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LetterTokenizer.html */\n Letter = \"letter\",\n /** Divides text at non-letters and converts them to lower case. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseTokenizer.html */\n Lowercase = \"lowercase\",\n /** Divides text using language-specific rules. */\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /** Divides text using language-specific rules and reduces words to their base forms. */\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /** Tokenizes the input into n-grams of the given size(s). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenizer.html */\n NGram = \"nGram\",\n /** Tokenizer for path-like hierarchies. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/path\\/PathHierarchyTokenizer.html */\n PathHierarchy = \"path_hierarchy_v2\",\n /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/pattern\\/PatternTokenizer.html */\n Pattern = \"pattern\",\n /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/StandardTokenizer.html */\n Standard = \"standard_v2\",\n /** Tokenizes urls and emails as one token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/UAX29URLEmailTokenizer.html */\n UaxUrlEmail = \"uax_url_email\",\n /** Divides text at whitespace. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceTokenizer.html */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Defines values for LexicalTokenizerName. \\\n * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicTokenizer.html \\\n * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenizer.html \\\n * **keyword_v2**: Emits the entire input as a single token. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordTokenizer.html \\\n * **letter**: Divides text at non-letters. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LetterTokenizer.html \\\n * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseTokenizer.html \\\n * **microsoft_language_tokenizer**: Divides text using language-specific rules. \\\n * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \\\n * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenizer.html \\\n * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/path\\/PathHierarchyTokenizer.html \\\n * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/pattern\\/PatternTokenizer.html \\\n * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/StandardTokenizer.html \\\n * **uax_url_email**: Tokenizes urls and emails as one token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/UAX29URLEmailTokenizer.html \\\n * **whitespace**: Divides text at whitespace. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceTokenizer.html\n */\nexport type LexicalTokenizerName = string;\n\n/** Known values of {@link RegexFlags} that the service accepts. */\nexport enum KnownRegexFlags {\n /** Enables canonical equivalence. */\n CanonEq = \"CANON_EQ\",\n /** Enables case-insensitive matching. */\n CaseInsensitive = \"CASE_INSENSITIVE\",\n /** Permits whitespace and comments in the pattern. */\n Comments = \"COMMENTS\",\n /** Enables dotall mode. */\n DotAll = \"DOTALL\",\n /** Enables literal parsing of the pattern. */\n Literal = \"LITERAL\",\n /** Enables multiline mode. */\n Multiline = \"MULTILINE\",\n /** Enables Unicode-aware case folding. */\n UnicodeCase = \"UNICODE_CASE\",\n /** Enables Unix lines mode. */\n UnixLines = \"UNIX_LINES\",\n}\n\n/**\n * Defines values for RegexFlags. \\\n * {@link KnownRegexFlags} can be used interchangeably with RegexFlags,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **CANON_EQ**: Enables canonical equivalence. \\\n * **CASE_INSENSITIVE**: Enables case-insensitive matching. \\\n * **COMMENTS**: Permits whitespace and comments in the pattern. \\\n * **DOTALL**: Enables dotall mode. \\\n * **LITERAL**: Enables literal parsing of the pattern. \\\n * **MULTILINE**: Enables multiline mode. \\\n * **UNICODE_CASE**: Enables Unicode-aware case folding. \\\n * **UNIX_LINES**: Enables Unix lines mode.\n */\nexport type RegexFlags = string;\n/** Defines values for IndexerStatus. 
*/\nexport type IndexerStatus = \"unknown\" | \"error\" | \"running\";\n/** Defines values for IndexerExecutionStatus. */\nexport type IndexerExecutionStatus =\n | \"transientFailure\"\n | \"success\"\n | \"inProgress\"\n | \"reset\";\n/** Defines values for ScoringFunctionInterpolation. */\nexport type ScoringFunctionInterpolation =\n | \"linear\"\n | \"constant\"\n | \"quadratic\"\n | \"logarithmic\";\n/** Defines values for ScoringFunctionAggregation. */\nexport type ScoringFunctionAggregation =\n | \"sum\"\n | \"average\"\n | \"minimum\"\n | \"maximum\"\n | \"firstMatching\";\n/** Defines values for TokenCharacterKind. */\nexport type TokenCharacterKind =\n | \"letter\"\n | \"digit\"\n | \"whitespace\"\n | \"punctuation\"\n | \"symbol\";\n/** Defines values for MicrosoftTokenizerLanguage. */\nexport type MicrosoftTokenizerLanguage =\n | \"bangla\"\n | \"bulgarian\"\n | \"catalan\"\n | \"chineseSimplified\"\n | \"chineseTraditional\"\n | \"croatian\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"french\"\n | \"german\"\n | \"greek\"\n | \"gujarati\"\n | \"hindi\"\n | \"icelandic\"\n | \"indonesian\"\n | \"italian\"\n | \"japanese\"\n | \"kannada\"\n | \"korean\"\n | \"malay\"\n | \"malayalam\"\n | \"marathi\"\n | \"norwegianBokmaal\"\n | \"polish\"\n | \"portuguese\"\n | \"portugueseBrazilian\"\n | \"punjabi\"\n | \"romanian\"\n | \"russian\"\n | \"serbianCyrillic\"\n | \"serbianLatin\"\n | \"slovenian\"\n | \"spanish\"\n | \"swedish\"\n | \"tamil\"\n | \"telugu\"\n | \"thai\"\n | \"ukrainian\"\n | \"urdu\"\n | \"vietnamese\";\n/** Defines values for MicrosoftStemmingTokenizerLanguage. */\nexport type MicrosoftStemmingTokenizerLanguage =\n | \"arabic\"\n | \"bangla\"\n | \"bulgarian\"\n | \"catalan\"\n | \"croatian\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"estonian\"\n | \"finnish\"\n | \"french\"\n | \"german\"\n | \"greek\"\n | \"gujarati\"\n | \"hebrew\"\n | \"hindi\"\n | \"hungarian\"\n | \"icelandic\"\n | \"indonesian\"\n | \"italian\"\n | \"kannada\"\n | \"latvian\"\n | \"lithuanian\"\n | \"malay\"\n | \"malayalam\"\n | \"marathi\"\n | \"norwegianBokmaal\"\n | \"polish\"\n | \"portuguese\"\n | \"portugueseBrazilian\"\n | \"punjabi\"\n | \"romanian\"\n | \"russian\"\n | \"serbianCyrillic\"\n | \"serbianLatin\"\n | \"slovak\"\n | \"slovenian\"\n | \"spanish\"\n | \"swedish\"\n | \"tamil\"\n | \"telugu\"\n | \"turkish\"\n | \"ukrainian\"\n | \"urdu\";\n/** Defines values for CjkBigramTokenFilterScripts. */\nexport type CjkBigramTokenFilterScripts =\n | \"han\"\n | \"hiragana\"\n | \"katakana\"\n | \"hangul\";\n/** Defines values for EdgeNGramTokenFilterSide. */\nexport type EdgeNGramTokenFilterSide = \"front\" | \"back\";\n/** Defines values for PhoneticEncoder. */\nexport type PhoneticEncoder =\n | \"metaphone\"\n | \"doubleMetaphone\"\n | \"soundex\"\n | \"refinedSoundex\"\n | \"caverphone1\"\n | \"caverphone2\"\n | \"cologne\"\n | \"nysiis\"\n | \"koelnerPhonetik\"\n | \"haasePhonetik\"\n | \"beiderMorse\";\n/** Defines values for SnowballTokenFilterLanguage. */\nexport type SnowballTokenFilterLanguage =\n | \"armenian\"\n | \"basque\"\n | \"catalan\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"finnish\"\n | \"french\"\n | \"german\"\n | \"german2\"\n | \"hungarian\"\n | \"italian\"\n | \"kp\"\n | \"lovins\"\n | \"norwegian\"\n | \"porter\"\n | \"portuguese\"\n | \"romanian\"\n | \"russian\"\n | \"spanish\"\n | \"swedish\"\n | \"turkish\";\n/** Defines values for StemmerTokenFilterLanguage. 
*/\nexport type StemmerTokenFilterLanguage =\n | \"arabic\"\n | \"armenian\"\n | \"basque\"\n | \"brazilian\"\n | \"bulgarian\"\n | \"catalan\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"dutchKp\"\n | \"english\"\n | \"lightEnglish\"\n | \"minimalEnglish\"\n | \"possessiveEnglish\"\n | \"porter2\"\n | \"lovins\"\n | \"finnish\"\n | \"lightFinnish\"\n | \"french\"\n | \"lightFrench\"\n | \"minimalFrench\"\n | \"galician\"\n | \"minimalGalician\"\n | \"german\"\n | \"german2\"\n | \"lightGerman\"\n | \"minimalGerman\"\n | \"greek\"\n | \"hindi\"\n | \"hungarian\"\n | \"lightHungarian\"\n | \"indonesian\"\n | \"irish\"\n | \"italian\"\n | \"lightItalian\"\n | \"sorani\"\n | \"latvian\"\n | \"norwegian\"\n | \"lightNorwegian\"\n | \"minimalNorwegian\"\n | \"lightNynorsk\"\n | \"minimalNynorsk\"\n | \"portuguese\"\n | \"lightPortuguese\"\n | \"minimalPortuguese\"\n | \"portugueseRslp\"\n | \"romanian\"\n | \"russian\"\n | \"lightRussian\"\n | \"spanish\"\n | \"lightSpanish\"\n | \"swedish\"\n | \"lightSwedish\"\n | \"turkish\";\n/** Defines values for StopwordsList. */\nexport type StopwordsList =\n | \"arabic\"\n | \"armenian\"\n | \"basque\"\n | \"brazilian\"\n | \"bulgarian\"\n | \"catalan\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"finnish\"\n | \"french\"\n | \"galician\"\n | \"german\"\n | \"greek\"\n | \"hindi\"\n | \"hungarian\"\n | \"indonesian\"\n | \"irish\"\n | \"italian\"\n | \"latvian\"\n | \"norwegian\"\n | \"persian\"\n | \"portuguese\"\n | \"romanian\"\n | \"russian\"\n | \"sorani\"\n | \"spanish\"\n | \"swedish\"\n | \"thai\"\n | \"turkish\";\n\n/** Optional parameters. */\nexport interface DataSourcesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type DataSourcesCreateOrUpdateResponse = SearchIndexerDataSource;\n\n/** Optional parameters. */\nexport interface DataSourcesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface DataSourcesGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type DataSourcesGetResponse = SearchIndexerDataSource;\n\n/** Optional parameters. */\nexport interface DataSourcesListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the data sources to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type DataSourcesListResponse = ListDataSourcesResult;\n\n/** Optional parameters. */\nexport interface DataSourcesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. 
*/\nexport type DataSourcesCreateResponse = SearchIndexerDataSource;\n\n/** Optional parameters. */\nexport interface IndexersResetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Optional parameters. */\nexport interface IndexersRunOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Optional parameters. */\nexport interface IndexersCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type IndexersCreateOrUpdateResponse = SearchIndexer;\n\n/** Optional parameters. */\nexport interface IndexersDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface IndexersGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type IndexersGetResponse = SearchIndexer;\n\n/** Optional parameters. */\nexport interface IndexersListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type IndexersListResponse = ListIndexersResult;\n\n/** Optional parameters. */\nexport interface IndexersCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type IndexersCreateResponse = SearchIndexer;\n\n/** Optional parameters. */\nexport interface IndexersGetStatusOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getStatus operation. */\nexport type IndexersGetStatusResponse = SearchIndexerStatus;\n\n/** Optional parameters. */\nexport interface SkillsetsCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type SkillsetsCreateOrUpdateResponse = SearchIndexerSkillset;\n\n/** Optional parameters. */\nexport interface SkillsetsDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. 
*/\nexport interface SkillsetsGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type SkillsetsGetResponse = SearchIndexerSkillset;\n\n/** Optional parameters. */\nexport interface SkillsetsListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the skillsets to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type SkillsetsListResponse = ListSkillsetsResult;\n\n/** Optional parameters. */\nexport interface SkillsetsCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type SkillsetsCreateResponse = SearchIndexerSkillset;\n\n/** Optional parameters. */\nexport interface SynonymMapsCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type SynonymMapsCreateOrUpdateResponse = SynonymMap;\n\n/** Optional parameters. */\nexport interface SynonymMapsDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface SynonymMapsGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type SynonymMapsGetResponse = SynonymMap;\n\n/** Optional parameters. */\nexport interface SynonymMapsListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the synonym maps to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type SynonymMapsListResponse = ListSynonymMapsResult;\n\n/** Optional parameters. */\nexport interface SynonymMapsCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type SynonymMapsCreateResponse = SynonymMap;\n\n/** Optional parameters. */\nexport interface IndexesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type IndexesCreateResponse = SearchIndex;\n\n/** Optional parameters. */\nexport interface IndexesListOptionalParams extends coreClient.OperationOptions {\n /** Selects which top-level properties of the index definitions to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type IndexesListResponse = ListIndexesResult;\n\n/** Optional parameters. 
*/\nexport interface IndexesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */\n allowIndexDowntime?: boolean;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type IndexesCreateOrUpdateResponse = SearchIndex;\n\n/** Optional parameters. */\nexport interface IndexesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface IndexesGetOptionalParams extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type IndexesGetResponse = SearchIndex;\n\n/** Optional parameters. */\nexport interface IndexesGetStatisticsOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getStatistics operation. */\nexport type IndexesGetStatisticsResponse = GetIndexStatisticsResult;\n\n/** Optional parameters. */\nexport interface IndexesAnalyzeOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the analyze operation. */\nexport type IndexesAnalyzeResponse = AnalyzeResult;\n\n/** Optional parameters. */\nexport interface GetServiceStatisticsOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getServiceStatistics operation. */\nexport type GetServiceStatisticsResponse = ServiceStatistics;\n\n/** Optional parameters. */\nexport interface SearchServiceClientOptionalParams\n extends coreHttpCompat.ExtendedServiceClientOptions {\n /** Overrides client endpoint. */\n endpoint?: string;\n}\n"]}
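Worth noting in the declarations above: every Known* enum is paired with a plain-string type alias (so values that a newer service API version returns, but that the enum does not yet list, still type-check and round-trip), and every *CreateOrUpdateOptionalParams / *DeleteOptionalParams bag carries the same ifMatch/ifNoneMatch ETag preconditions, with IndexesCreateOrUpdateOptionalParams additionally exposing allowIndexDowntime. A minimal TypeScript sketch of the three shapes, assuming nothing beyond the declarations in this hunk (the ETag string and the "paragraphs" mode are made up for illustration):

    // Extensible string enum, copied from this hunk: the alias stays `string`.
    enum KnownTextSplitMode {
      Pages = "pages",
      Sentences = "sentences",
    }
    type TextSplitMode = string;

    const documented: TextSplitMode = KnownTextSplitMode.Sentences; // "sentences"
    const notYetListed: TextSplitMode = "paragraphs"; // hypothetical value; still assignable

    // ETag preconditions shared by the createOrUpdate/delete options bags:
    const onlyIfUnchanged = { ifMatch: '"0x1D9EXAMPLE"' }; // fabricated ETag value
    const onlyIfMissing = { ifNoneMatch: "*" }; // fail when the resource already exists

    // Index updates may additionally opt in to brief downtime so that new
    // analyzers, tokenizers, token filters, or char filters can be added:
    const indexUpdateOptions = { ...onlyIfUnchanged, allowIndexDowntime: true };

Because the aliases widen to string, code that switches on one of these values should keep a default branch rather than assume membership in the corresponding Known* enum.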
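The added line that follows is the regenerated source map for the service models module: a standard version-3 map whose sources entry points back at src/generated/service/models/index.ts and whose mappings field is one long Base64-VLQ blob. The blob is not meant to be read directly; as a sketch, it can be decoded with the widely used source-map package (an assumed dev dependency here, with an illustrative file path, not anything this package ships or documents):

    import { SourceMapConsumer } from "source-map";
    import { readFile } from "node:fs/promises";

    async function whereFrom(line: number, column: number): Promise<void> {
      // Illustrative path; point this at the installed package's map file.
      const raw = await readFile(
        "node_modules/@azure/search-documents/dist/browser/generated/service/models/index.js.map",
        "utf8",
      );
      const consumer = await new SourceMapConsumer(JSON.parse(raw));
      // Maps a position in the generated index.js back to the TypeScript source.
      console.log(consumer.originalPositionFor({ line, column }));
      consumer.destroy();
    }

    whereFrom(1, 120).catch(console.error);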
+ {"version":3,"file":"index.js","sourceRoot":"","sources":["../../../../../src/generated/service/models/index.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAk6FH,kFAAkF;AAClF,MAAM,CAAN,IAAY,8BAGX;AAHD,WAAY,8BAA8B;IACxC,uCAAuC;IACvC,yFAAuD,CAAA;AACzD,CAAC,EAHW,8BAA8B,KAA9B,8BAA8B,QAGzC;AAWD,+EAA+E;AAC/E,MAAM,CAAN,IAAY,2BAGX;AAHD,WAAY,2BAA2B;IACrC,mDAAmD;IACnD,0DAA2B,CAAA;AAC7B,CAAC,EAHW,2BAA2B,KAA3B,2BAA2B,QAGtC;AAWD,8FAA8F;AAC9F,MAAM,CAAN,IAAY,0CAOX;AAPD,WAAY,0CAA0C;IACpD,2FAA2F;IAC3F,iEAAmB,CAAA;IACnB,0CAA0C;IAC1C,yDAAW,CAAA;IACX,2DAA2D;IAC3D,+DAAiB,CAAA;AACnB,CAAC,EAPW,0CAA0C,KAA1C,0CAA0C,QAOrD;AAaD,qFAAqF;AACrF,MAAM,CAAN,IAAY,iCAKX;AALD,WAAY,iCAAiC;IAC3C,qFAAqF;IACrF,sEAAiC,CAAA;IACjC,qDAAqD;IACrD,wEAAmC,CAAA;AACrC,CAAC,EALW,iCAAiC,KAAjC,iCAAiC,QAK5C;AAYD,4EAA4E;AAC5E,MAAM,CAAN,IAAY,wBAaX;AAbD,WAAY,wBAAwB;IAClC,kEAAkE;IAClE,uDAA2B,CAAA;IAC3B,oGAAoG;IACpG,mDAAuB,CAAA;IACvB,2DAA2D;IAC3D,uCAAW,CAAA;IACX,gFAAgF;IAChF,iEAAqC,CAAA;IACrC,4FAA4F;IAC5F,mEAAuC,CAAA;IACvC,yFAAyF;IACzF,6DAAiC,CAAA;AACnC,CAAC,EAbW,wBAAwB,KAAxB,wBAAwB,QAanC;AAgBD,6FAA6F;AAC7F,MAAM,CAAN,IAAY,yCAOX;AAPD,WAAY,yCAAyC;IACnD,iDAAiD;IACjD,kEAAqB,CAAA;IACrB,6EAA6E;IAC7E,8DAAiB,CAAA;IACjB,2EAA2E;IAC3E,kEAAqB,CAAA;AACvB,CAAC,EAPW,yCAAyC,KAAzC,yCAAyC,QAOpD;AAaD,oFAAoF;AACpF,MAAM,CAAN,IAAY,gCAiBX;AAjBD,WAAY,gCAAgC;IAC1C,yCAAyC;IACzC,yDAAqB,CAAA;IACrB,uCAAuC;IACvC,yDAAqB,CAAA;IACrB,0CAA0C;IAC1C,2DAAuB,CAAA;IACvB,2CAA2C;IAC3C,6DAAyB,CAAA;IACzB,oCAAoC;IACpC,mDAAe,CAAA;IACf,yCAAyC;IACzC,yDAAqB,CAAA;IACrB,uDAAuD;IACvD,uDAAmB,CAAA;IACnB,yCAAyC;IACzC,6DAAyB,CAAA;AAC3B,CAAC,EAjBW,gCAAgC,KAAhC,gCAAgC,QAiB3C;AAkBD,gFAAgF;AAChF,MAAM,CAAN,IAAY,4BAOX;AAPD,WAAY,4BAA4B;IACtC,+DAA+D;IAC/D,mDAAmB,CAAA;IACnB,gEAAgE;IAChE,qDAAqB,CAAA;IACrB,oEAAoE;IACpE,uDAAuB,CAAA;AACzB,CAAC,EAPW,4BAA4B,KAA5B,4BAA4B,QAOvC;AAaD,4EAA4E;AAC5E,MAAM,CAAN,IAAY,wBAGX;AAHD,WAAY,wBAAwB;IAClC,oFAAoF;IACpF,uDAA2B,CAAA;AAC7B,CAAC,EAHW,wBAAwB,KAAxB,wBAAwB,QAGnC;AAWD,+EAA+E;AAC/E,MAAM,CAAN,IAAY,2BAeX;AAfD,WAAY,2BAA2B;IACrC,iDAAiD;IACjD,kDAAmB,CAAA;IACnB,uFAAuF;IACvF,4CAAa,CAAA;IACb,2DAA2D;IAC3D,8DAA+B,CAAA;IAC/B,iEAAiE;IACjE,4CAAa,CAAA;IACb,6FAA6F;IAC7F,sDAAuB,CAAA;IACvB,4GAA4G;IAC5G,sDAAuB,CAAA;IACvB,8DAA8D;IAC9D,oDAAqB,CAAA;AACvB,CAAC,EAfW,2BAA2B,KAA3B,2BAA2B,QAetC;AAiBD,+EAA+E;AAC/E,MAAM,CAAN,IAAY,2BAKX;AALD,WAAY,2BAA2B;IACrC,+OAA+O;IAC/O,sDAAuB,CAAA;IACvB,sFAAsF;IACtF,oDAAqB,CAAA;AACvB,CAAC,EALW,2BAA2B,KAA3B,2BAA2B,QAKtC;AAYD,4EAA4E;AAC5E,MAAM,CAAN,IAAY,wBAaX;AAbD,WAAY,wBAAwB;IAClC,qGAAqG;IACrG,qCAAS,CAAA;IACT,qGAAqG;IACrG,qCAAS,CAAA;IACT,qGAAqG;IACrG,qCAAS,CAAA;IACT,qGAAqG;IACrG,qCAAS,CAAA;IACT,qGAAqG;IACrG,qCAAS,CAAA;IACT,0HAA0H;IAC1H,qCAAS,CAAA;AACX,CAAC,EAbW,wBAAwB,KAAxB,wBAAwB,QAanC;AAgBD,iFAAiF;AACjF,MAAM,CAAN,IAAY,6BAOX;AAPD,WAAY,6BAA6B;IACvC,6EAA6E;IAC7E,oEAAmC,CAAA;IACnC,2KAA2K;IAC3K,4DAA2B,CAAA;IAC3B,gEAAgE;IAChE,0EAAyC,CAAA;AAC3C,CAAC,EAPW,6BAA6B,KAA7B,6BAA6B,QAOxC;AAaD,+EAA+E;AAC/E,MAAM,CAAN,IAAY,2BAOX;AAPD,WAAY,2BAA2B;IACrC,oFAAoF;IACpF,4CAAa,CAAA;IACb,4cAA4c;IAC5c,oFAAqD,CAAA;IACrD,yWAAyW;IACzW,gGAAiE,CAAA;AACnE,CAAC,EAPW,2BAA2B,KAA3B,2BAA2B,QAOtC;AAaD,4FAA4F;AAC5F,MAAM,CAAN,IAAY,wCAKX;AALD,WAAY,wCAAwC;IAClD,8DAA8D;IAC9D,yDAAa,CAAA;IACb,qXAAqX;IACrX,yEAA6B,CAAA;AAC/B,CAAC,EALW,wCAAwC,KAAxC,wCAAwC,QAKnD;AAYD,oFAAoF;AACpF,MAAM,CAAN,IAAY,gCAKX;AALD,WAAY,gCAAgC;IAC1C,gLAAgL;IAChL,yDAAqB,CAAA;IACrB,mQAAmQ;IACnQ,uDAAmB,CAAA;AACrB,CAAC,EALW,gCAAgC,KAAhC,gCAAgC,QAK3C;AAYD,qFAAqF;AACrF,MAAM,CAAN,IAAY,iCAKX;AALD,WAAY,iCAAiC;IAC3C,0EAA0E;IAC1E,4DAAuB,CAAA;IACvB,2EAA2E;IAC3E,sDAAiB,CAAA;AACnB,CAAC,EALW,iCAAiC,KAAjC,iCAAiC,QAK5C;AAYD,qEAAqE;AACrE,MAAM,CAAN,IAAY,iBAOX;AAPD,WA
AY,iBAAiB;IAC3B,+DAA+D;IAC/D,wDAAmC,CAAA;IACnC,uIAAuI;IACvI,4DAAuC,CAAA;IACvC,qFAAqF;IACrF,sDAAiC,CAAA;AACnC,CAAC,EAPW,iBAAiB,KAAjB,iBAAiB,QAO5B;AAaD,4EAA4E;AAC5E,MAAM,CAAN,IAAY,wBAKX;AALD,WAAY,wBAAwB;IAClC,wFAAwF;IACxF,uFAA2D,CAAA;IAC3D,wGAAwG;IACxG,6FAAiE,CAAA;AACnE,CAAC,EALW,wBAAwB,KAAxB,wBAAwB,QAKnC;AAYD,4EAA4E;AAC5E,MAAM,CAAN,IAAY,wBA2BX;AA3BD,WAAY,wBAAwB;IAClC,gDAAgD;IAChD,iDAAqB,CAAA;IACrB,+DAA+D;IAC/D,+CAAmB,CAAA;IACnB,+DAA+D;IAC/D,+CAAmB,CAAA;IACnB,sFAAsF;IACtF,iDAAqB,CAAA;IACrB,uEAAuE;IACvE,mDAAuB,CAAA;IACvB,0FAA0F;IAC1F,iEAAqC,CAAA;IACrC,yFAAyF;IACzF,iEAAqC,CAAA;IACrC,+GAA+G;IAC/G,uDAA2B,CAAA;IAC3B,0IAA0I;IAC1I,iDAAqB,CAAA;IACrB,sIAAsI;IACtI,6CAAiB,CAAA;IACjB,wHAAwH;IACxH,+CAAmB,CAAA;IACnB,uHAAuH;IACvH,+CAAmB,CAAA;IACnB,wHAAwH;IACxH,6CAAiB,CAAA;AACnB,CAAC,EA3BW,wBAAwB,KAAxB,wBAAwB,QA2BnC;AAuBD,yEAAyE;AACzE,MAAM,CAAN,IAAY,qBAOX;AAPD,WAAY,qBAAqB;IAC/B,0FAA0F;IAC1F,4CAAmB,CAAA;IACnB,2FAA2F;IAC3F,8CAAqB,CAAA;IACrB,+FAA+F;IAC/F,gDAAuB,CAAA;AACzB,CAAC,EAPW,qBAAqB,KAArB,qBAAqB,QAOhC;AAaD,4EAA4E;AAC5E,MAAM,CAAN,IAAY,wBA2LX;AA3LD,WAAY,wBAAwB;IAClC,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,oCAAoC;IACpC,kDAAsB,CAAA;IACtB,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,wCAAwC;IACxC,wDAA4B,CAAA;IAC5B,qCAAqC;IACrC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,mDAAmD;IACnD,iEAAqC,CAAA;IACrC,gDAAgD;IAChD,2DAA+B,CAAA;IAC/B,oDAAoD;IACpD,iEAAqC,CAAA;IACrC,iDAAiD;IACjD,2DAA+B,CAAA;IAC/B,uCAAuC;IACvC,wDAA4B,CAAA;IAC5B,oCAAoC;IACpC,wDAA4B,CAAA;IAC5B,iCAAiC;IACjC,kDAAsB,CAAA;IACtB,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,oCAAoC;IACpC,wDAA4B,CAAA;IAC5B,iCAAiC;IACjC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,uCAAuC;IACvC,wDAA4B,CAAA;IAC5B,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,oCAAoC;IACpC,kDAAsB,CAAA;IACtB,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,oCAAoC;IACpC,wDAA4B,CAAA;IAC5B,iCAAiC;IACjC,kDAAsB,CAAA;IACtB,uCAAuC;IACvC,wDAA4B,CAAA;IAC5B,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,oCAAoC;IACpC,wDAA4B,CAAA;IAC5B,iCAAiC;IACjC,kDAAsB,CAAA;IACtB,wCAAwC;IACxC,wDAA4B,CAAA;IAC5B,qCAAqC;IACrC,kDAAsB,CAAA;IACtB,wCAAwC;IACxC,wDAA4B,CAAA;IAC5B,kDAAkD;IAClD,wDAA4B,CAAA;IAC5B,sCAAsC;IACtC,kDAAsB,CAAA;IACtB,iCAAiC;IACjC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,uCAAuC;IACvC,wDAA4B,CAAA;IAC5B,oCAAoC;IACpC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,yCAAyC;IACzC,wDAA4B,CAAA;IAC5B,wCAAwC;IACxC,wDAA4B,CAAA;IAC5B,4CAA4C;IAC5C,wDAA4B,CAAA;IAC5B,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,iDAAiD;IACjD,wDAA4B,CAAA;IAC5B,qCAAqC;IACrC,kDAAsB,CAAA;IACtB,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,kCAAkC;IAClC,kDAAsB,CAAA;IACtB,kDAAkD;IAClD,6DAAiC,CAAA;IACjC,+CAA+C;IAC/C,uDAA2B,CAAA;IAC3B,oDAAoD;IACpD,6DAAiC,CAAA;IACjC,iDAAiD;IACjD,uDAA2B,CAAA;IAC3B,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,uCAAuC;IACvC,wDAA4B,CAAA;IAC5B,oCAAoC;IACpC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,iDAAiD;IACjD,yEAA6C,CAAA;IAC7C,8CAA8C;IAC9C,mEAAuC,CAAA;IACvC,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,wCAAwC;IACxC,wDAA4B,CAAA;IAC5B,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,oCAAoC;IACpC,wDAA4B,CAAA;IAC5B,qCAAqC;IACrC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,wDAA4B,CAAA;IAC5B,gCAAgC;IA
ChC,kDAAsB,CAAA;IACtB,sCAAsC;IACtC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,kDAAsB,CAAA;IACtB,wCAAwC;IACxC,wDAA4B,CAAA;IAC5B,mCAAmC;IACnC,wDAA4B,CAAA;IAC5B,yCAAyC;IACzC,wDAA4B,CAAA;IAC5B,gCAAgC;IAChC,8DAAkC,CAAA;IAClC,qJAAqJ;IACrJ,sFAA0D,CAAA;IAC1D,4PAA4P;IAC5P,+CAAmB,CAAA;IACnB,6MAA6M;IAC7M,+CAAmB,CAAA;IACnB,2LAA2L;IAC3L,6CAAiB,CAAA;IACjB,2MAA2M;IAC3M,yCAAa,CAAA;IACb,kLAAkL;IAClL,qDAAyB,CAAA;AAC3B,CAAC,EA3LW,wBAAwB,KAAxB,wBAAwB,QA2LnC;AAuGD,8EAA8E;AAC9E,MAAM,CAAN,IAAY,0BAWX;AAXD,WAAY,0BAA0B;IACpC,oVAAoV;IACpV,2DAA6B,CAAA;IAC7B,wNAAwN;IACxN,iDAAmB,CAAA;IACnB,mKAAmK;IACnK,qDAAuB,CAAA;IACvB,yMAAyM;IACzM,mDAAqB,CAAA;IACrB,mKAAmK;IACnK,qDAAuB,CAAA;AACzB,CAAC,EAXW,0BAA0B,KAA1B,0BAA0B,QAWrC;AAeD,6EAA6E;AAC7E,MAAM,CAAN,IAAY,yBAGX;AAHD,WAAY,yBAAyB;IACnC,uEAAuE;IACvE,oDAAuB,CAAA;AACzB,CAAC,EAHW,yBAAyB,KAAzB,yBAAyB,QAGpC;AAWD,qEAAqE;AACrE,MAAM,CAAN,IAAY,iBAKX;AALD,WAAY,iBAAiB;IAC3B,8CAA8C;IAC9C,kEAA6C,CAAA;IAC7C,uCAAuC;IACvC,oDAA+B,CAAA;AACjC,CAAC,EALW,iBAAiB,KAAjB,iBAAiB,QAK5B;AAYD,kFAAkF;AAClF,MAAM,CAAN,IAAY,8BAKX;AALD,WAAY,8BAA8B;IACxC,oGAAoG;IACpG,+CAAa,CAAA;IACb,sEAAsE;IACtE,iEAA+B,CAAA;AACjC,CAAC,EALW,8BAA8B,KAA9B,8BAA8B,QAKzC;AAYD,mFAAmF;AACnF,MAAM,CAAN,IAAY,+BASX;AATD,WAAY,+BAA+B;IACzC,wEAAwE;IACxE,8DAA2B,CAAA;IAC3B,qEAAqE;IACrE,gEAA6B,CAAA;IAC7B,qHAAqH;IACrH,wEAAqC,CAAA;IACrC,kIAAkI;IAClI,8CAAW,CAAA;AACb,CAAC,EATW,+BAA+B,KAA/B,+BAA+B,QAS1C;AAcD,oFAAoF;AACpF,MAAM,CAAN,IAAY,gCAKX;AALD,WAAY,gCAAgC;IAC1C,4RAA4R;IAC5R,6EAAyC,CAAA;IACzC,iRAAiR;IACjR,6EAAyC,CAAA;AAC3C,CAAC,EALW,gCAAgC,KAAhC,gCAAgC,QAK3C;AAYD,oGAAoG;AACpG,MAAM,CAAN,IAAY,gDAKX;AALD,WAAY,gDAAgD;IAC1D,oOAAoO;IACpO,2FAAuC,CAAA;IACvC,+OAA+O;IAC/O,yFAAqC,CAAA;AACvC,CAAC,EALW,gDAAgD,KAAhD,gDAAgD,QAK3D;AAYD,0FAA0F;AAC1F,MAAM,CAAN,IAAY,sCAKX;AALD,WAAY,sCAAsC;IAChD,cAAc;IACd,6DAAmB,CAAA;IACnB,eAAe;IACf,+DAAqB,CAAA;AACvB,CAAC,EALW,sCAAsC,KAAtC,sCAAsC,QAKjD;AAYD,6EAA6E;AAC7E,MAAM,CAAN,IAAY,yBAuBX;AAvBD,WAAY,yBAAyB;IACnC,0BAA0B;IAC1B,2EAA8C,CAAA;IAC9C,0BAA0B;IAC1B,2EAA8C,CAAA;IAC9C,0BAA0B;IAC1B,2EAA8C,CAAA;IAC9C,YAAY;IACZ,6CAAgB,CAAA;IAChB,gBAAgB;IAChB,sDAAyB,CAAA;IACzB,YAAY;IACZ,8CAAiB,CAAA;IACjB,gBAAgB;IAChB,uDAA0B,CAAA;IAC1B,gBAAgB;IAChB,uDAA0B,CAAA;IAC1B,WAAW;IACX,2CAAc,CAAA;IACd,eAAe;IACf,oDAAuB,CAAA;IACvB,eAAe;IACf,oDAAuB,CAAA;AACzB,CAAC,EAvBW,yBAAyB,KAAzB,yBAAyB,QAuBpC;AAqBD,iGAAiG;AACjG,MAAM,CAAN,IAAY,6CAOX;AAPD,WAAY,6CAA6C;IACvD,mEAAmE;IACnE,oEAAmB,CAAA;IACnB,2DAA2D;IAC3D,sEAAqB,CAAA;IACrB,gEAAgE;IAChE,wEAAuB,CAAA;AACzB,CAAC,EAPW,6CAA6C,KAA7C,6CAA6C,QAOxD;AAaD,6FAA6F;AAC7F,MAAM,CAAN,IAAY,yCAKX;AALD,WAAY,yCAAyC;IACnD,gFAAgF;IAChF,gEAAmB,CAAA;IACnB,6DAA6D;IAC7D,kEAAqB,CAAA;AACvB,CAAC,EALW,yCAAyC,KAAzC,yCAAyC,QAKpD;AAYD,uFAAuF;AACvF,MAAM,CAAN,IAAY,mCAOX;AAPD,WAAY,mCAAmC;IAC7C,8DAA8D;IAC9D,gFAAyC,CAAA;IACzC,6DAA6D;IAC7D,4EAAqC,CAAA;IACrC,mFAAmF;IACnF,4DAAqB,CAAA;AACvB,CAAC,EAPW,mCAAmC,KAAnC,mCAAmC,QAO9C;AAaD,wEAAwE;AACxE,MAAM,CAAN,IAAY,oBAqEX;AArED,WAAY,oBAAoB;IAC9B,uNAAuN;IACvN,oEAA4C,CAAA;IAC5C,2MAA2M;IAC3M,iDAAyB,CAAA;IACzB,oVAAoV;IACpV,qDAA6B,CAAA;IAC7B,yMAAyM;IACzM,gDAAwB,CAAA;IACxB,0RAA0R;IAC1R,8CAAsB,CAAA;IACtB,sLAAsL;IACtL,2CAAmB,CAAA;IACnB,oQAAoQ;IACpQ,mDAA2B,CAAA;IAC3B,mOAAmO;IACnO,kDAA0B,CAAA;IAC1B,wNAAwN;IACxN,2CAAmB,CAAA;IACnB,mOAAmO;IACnO,oEAA4C,CAAA;IAC5C,kNAAkN;IAClN,kEAA0C,CAAA;IAC1C,yMAAyM;IACzM,kEAA0C,CAAA;IAC1C,sNAAsN;IACtN,wDAAgC,CAAA;IAChC,sKAAsK;IACtK,uCAAe,CAAA;IACf,mLAAmL;IACnL,yCAAiB,CAAA;IACjB,0LAA0L;IAC1L,uCAAe,CAAA;IACf,oKAAoK;IACpK,+CAAuB,CAAA;IACvB,yKAAyK;IACzK,0CAAkB,CAAA;IAClB,2KAA2K;IAC3K,sEAA8C,CAAA;IAC9C,uKAAuK;IACvK,6CAAqB,CAAA;IACrB,yHAAyH;IACzH,kDAA0B,CAAA;IAC1B,iKAAiK;IACjK,2CAAmB,CAAA;IACnB,uNAAuN;IACvN,gFAAwD
,CAAA;IACxD,mTAAmT;IACnT,iFAAyD,CAAA;IACzD,kLAAkL;IAClL,2CAAmB,CAAA;IACnB,gMAAgM;IAChM,6CAAqB,CAAA;IACrB,8LAA8L;IAC9L,oEAA4C,CAAA;IAC5C,mJAAmJ;IACnJ,2CAAmB,CAAA;IACnB,kKAAkK;IAClK,+CAAuB,CAAA;IACvB,sLAAsL;IACtL,qCAAa,CAAA;IACb,sLAAsL;IACtL,6CAAqB,CAAA;IACrB,6MAA6M;IAC7M,yCAAiB,CAAA;IACjB,oKAAoK;IACpK,+CAAuB,CAAA;IACvB,0FAA0F;IAC1F,wDAAgC,CAAA;AAClC,CAAC,EArEW,oBAAoB,KAApB,oBAAoB,QAqE/B;AA4CD,uEAAuE;AACvE,MAAM,CAAN,IAAY,mBAGX;AAHD,WAAY,mBAAmB;IAC7B,yMAAyM;IACzM,+CAAwB,CAAA;AAC1B,CAAC,EAHW,mBAAmB,KAAnB,mBAAmB,QAG9B;AAWD,oFAAoF;AACpF,MAAM,CAAN,IAAY,gCASX;AATD,WAAY,gCAAgC;IAC1C,iJAAiJ;IACjJ,qDAAiB,CAAA;IACjB,6IAA6I;IAC7I,2DAAuB,CAAA;IACvB,wJAAwJ;IACxJ,6DAAyB,CAAA;IACzB,qLAAqL;IACrL,uDAAmB,CAAA;AACrB,CAAC,EATW,gCAAgC,KAAhC,gCAAgC,QAS3C;AAcD,sFAAsF;AACtF,MAAM,CAAN,IAAY,kCAGX;AAHD,WAAY,kCAAkC;IAC5C,WAAW;IACX,mDAAa,CAAA;AACf,CAAC,EAHW,kCAAkC,KAAlC,kCAAkC,QAG7C;AAWD,kFAAkF;AAClF,MAAM,CAAN,IAAY,8BAeX;AAfD,WAAY,8BAA8B;IACxC,kDAAkD;IAClD,oIAAkG,CAAA;IAClG,sDAAsD;IACtD,6IAA2G,CAAA;IAC3G,2CAA2C;IAC3C,oHAAkF,CAAA;IAClF,4CAA4C;IAC5C,sHAAoF,CAAA;IACpF,2BAA2B;IAC3B,kFAAgD,CAAA;IAChD,gCAAgC;IAChC,4FAA0D,CAAA;IAC1D,iFAAiF;IACjF,mEAAiC,CAAA;AACnC,CAAC,EAfW,8BAA8B,KAA9B,8BAA8B,QAezC;AAiBD,yFAAyF;AACzF,MAAM,CAAN,IAAY,qCAiCX;AAjCD,WAAY,qCAAqC;IAC/C,aAAa;IACb,kDAAS,CAAA;IACT,YAAY;IACZ,kDAAS,CAAA;IACT,cAAc;IACd,kDAAS,CAAA;IACT,cAAc;IACd,kDAAS,CAAA;IACT,aAAa;IACb,kDAAS,CAAA;IACT,aAAa;IACb,kDAAS,CAAA;IACT,cAAc;IACd,kDAAS,CAAA;IACT,eAAe;IACf,kDAAS,CAAA;IACT,aAAa;IACb,kDAAS,CAAA;IACT,0BAA0B;IAC1B,kDAAS,CAAA;IACT,aAAa;IACb,kDAAS,CAAA;IACT,4BAA4B;IAC5B,uDAAc,CAAA;IACd,0BAA0B;IAC1B,uDAAc,CAAA;IACd,cAAc;IACd,kDAAS,CAAA;IACT,cAAc;IACd,kDAAS,CAAA;IACT,cAAc;IACd,kDAAS,CAAA;AACX,CAAC,EAjCW,qCAAqC,KAArC,qCAAqC,QAiChD;AA0BD,yEAAyE;AACzE,MAAM,CAAN,IAAY,qBAqVX;AArVD,WAAY,qBAAqB;IAC/B,gBAAgB;IAChB,kCAAS,CAAA;IACT,eAAe;IACf,kCAAS,CAAA;IACT,0BAA0B;IAC1B,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,eAAe;IACf,oCAAW,CAAA;IACX,gCAAgC;IAChC,oCAAW,CAAA;IACX,0BAA0B;IAC1B,kCAAS,CAAA;IACT,cAAc;IACd,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,sCAAsC;IACtC,kCAAS,CAAA;IACT,4BAA4B;IAC5B,2CAAkB,CAAA;IAClB,yBAAyB;IACzB,2CAAkB,CAAA;IAClB,kCAAkC;IAClC,oCAAW,CAAA;IACX,cAAc;IACd,kCAAS,CAAA;IACT,wBAAwB;IACxB,oCAAW,CAAA;IACX,oBAAoB;IACpB,kCAAS,CAAA;IACT,cAAc;IACd,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,gBAAgB;IAChB,kCAAS,CAAA;IACT,cAAc;IACd,oCAAW,CAAA;IACX,wBAAwB;IACxB,oCAAW,CAAA;IACX,cAAc;IACd,kCAAS,CAAA;IACT,cAAc;IACd,oCAAW,CAAA;IACX,eAAe;IACf,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,iCAAiC;IACjC,oCAAW,CAAA;IACX,yBAAyB;IACzB,2CAAkB,CAAA;IAClB,0BAA0B;IAC1B,2CAAkB,CAAA;IAClB,cAAc;IACd,kCAAS,CAAA;IACT,eAAe;IACf,kCAAS,CAAA;IACT,4BAA4B;IAC5B,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,YAAY;IACZ,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,WAAW;IACX,oCAAW,CAAA;IACX,0BAA0B;IAC1B,oCAAW,CAAA;IACX,yBAAyB;IACzB,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,uBAAuB;IACvB,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,eAAe;IACf,oCAAW,CAAA;IACX,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,cAAc;IACd,oCAAW,CAAA;IACX,qBAAqB;IACrB,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,iBAAiB;IACjB,oCAAW,CAAA;IACX,yBAAyB;IACzB,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,kBAAkB;IAClB,kCAAS,CAAA;IACT,0BAA0B;IAC1B,oCAAW,CAAA;IACX,qBAAqB;IACrB,kCAAS,CAAA;IACT,yBAAyB;IACzB,oCAAW,CAAA;IACX,WAAW;IACX,oCAAW,CAAA;IACX,eAAe;IACf,oCAAW,CAAA;IACX,eAAe;IACf,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,wBAAwB;IACxB,oCAAW,CAAA;IACX,sBAAsB;IACtB,oCAAW,CAAA;IACX,gBAAgB;IAChB,kCAAS,CAAA;IACT,gBAAgB;IAChB,kCAAS,CAAA
;IACT,iBAAiB;IACjB,oCAAW,CAAA;IACX,iBAAiB;IACjB,kCAAS,CAAA;IACT,kBAAkB;IAClB,kCAAS,CAAA;IACT,wBAAwB;IACxB,kCAAS,CAAA;IACT,YAAY;IACZ,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,eAAe;IACf,kCAAS,CAAA;IACT,4BAA4B;IAC5B,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,mBAAmB;IACnB,oCAAW,CAAA;IACX,qBAAqB;IACrB,oCAAW,CAAA;IACX,0BAA0B;IAC1B,oCAAW,CAAA;IACX,sBAAsB;IACtB,oCAAW,CAAA;IACX,6BAA6B;IAC7B,6CAAoB,CAAA;IACpB,0BAA0B;IAC1B,oCAAW,CAAA;IACX,gBAAgB;IAChB,oCAAW,CAAA;IACX,wBAAwB;IACxB,2CAAkB,CAAA;IAClB,qBAAqB;IACrB,2CAAkB,CAAA;IAClB,cAAc;IACd,oCAAW,CAAA;IACX,YAAY;IACZ,oCAAW,CAAA;IACX,cAAc;IACd,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,YAAY;IACZ,oCAAW,CAAA;IACX,aAAa;IACb,oCAAW,CAAA;IACX,eAAe;IACf,oCAAW,CAAA;IACX,uBAAuB;IACvB,oCAAW,CAAA;IACX,uBAAuB;IACvB,2CAAkB,CAAA;IAClB,sBAAsB;IACtB,2CAAkB,CAAA;IAClB,0BAA0B;IAC1B,oCAAW,CAAA;IACX,wBAAwB;IACxB,kCAAS,CAAA;IACT,aAAa;IACb,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,iBAAiB;IACjB,kCAAS,CAAA;IACT,oBAAoB;IACpB,oCAAW,CAAA;IACX,gBAAgB;IAChB,oCAAW,CAAA;IACX,oBAAoB;IACpB,kCAAS,CAAA;IACT,iCAAiC;IACjC,oCAAW,CAAA;IACX,oBAAoB;IACpB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,yBAAyB;IACzB,oCAAW,CAAA;IACX,WAAW;IACX,kCAAS,CAAA;IACT,YAAY;IACZ,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,2BAA2B;IAC3B,kCAAS,CAAA;IACT,6BAA6B;IAC7B,6CAAoB,CAAA;IACpB,0BAA0B;IAC1B,6CAAoB,CAAA;IACpB,iBAAiB;IACjB,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,aAAa;IACb,oCAAW,CAAA;IACX,YAAY;IACZ,oCAAW,CAAA;IACX,4BAA4B;IAC5B,oCAAW,CAAA;IACX,gBAAgB;IAChB,kCAAS,CAAA;IACT,gBAAgB;IAChB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,kCAAS,CAAA;IACT,iBAAiB;IACjB,kCAAS,CAAA;IACT,uBAAuB;IACvB,kCAAS,CAAA;IACT,gBAAgB;IAChB,oCAAW,CAAA;IACX,eAAe;IACf,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,yBAAyB;IACzB,oCAAW,CAAA;IACX,qBAAqB;IACrB,kCAAS,CAAA;IACT,4BAA4B;IAC5B,kCAAS,CAAA;IACT,2BAA2B;IAC3B,oCAAW,CAAA;IACX,YAAY;IACZ,oCAAW,CAAA;IACX,sBAAsB;IACtB,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,yBAAyB;IACzB,2CAAkB,CAAA;IAClB,sBAAsB;IACtB,2CAAkB,CAAA;IAClB,0BAA0B;IAC1B,oCAAW,CAAA;IACX,4BAA4B;IAC5B,oCAAW,CAAA;IACX,iBAAiB;IACjB,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,gBAAgB;IAChB,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,oBAAoB;IACpB,oCAAW,CAAA;IACX,cAAc;IACd,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,uBAAuB;IACvB,kCAAS,CAAA;IACT,oBAAoB;IACpB,kCAAS,CAAA;IACT,YAAY;IACZ,oCAAW,CAAA;IACX,cAAc;IACd,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,YAAY;IACZ,oCAAW,CAAA;IACX,oBAAoB;IACpB,oCAAW,CAAA;IACX,WAAW;IACX,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,qBAAqB;IACrB,2CAAkB,CAAA;IAClB,uBAAuB;IACvB,2CAAkB,CAAA;IAClB,oBAAoB;IACpB,kCAAS,CAAA;IACT,cAAc;IACd,kCAAS,CAAA;IACT,aAAa;IACb,oCAAW,CAAA;IACX,YAAY;IACZ,kCAAS,CAAA;IACT,sBAAsB;IACtB,kCAAS,CAAA;IACT,mBAAmB;IACnB,oCAAW,CAAA;IACX,aAAa;IACb,kCAAS,CAAA;IACT,WAAW;IACX,kCAAS,CAAA;IACT,oBAAoB;IACpB,oCAAW,CAAA;AACb,CAAC,EArVW,qBAAqB,KAArB,qBAAqB,QAqVhC;AAoLD,sEAAsE;AACtE,MAAM,CAAN,IAAY,kBASX;AATD,WAAY,kBAAkB;IAC5B,uDAAuD;IACvD,qCAAe,CAAA;IACf,iEAAiE;IACjE,uDAAiC,CAAA;IACjC,kEAAkE;IAClE,2CAAqB,CAAA;IACrB,mFAAmF;IACnF,uEAAiD,CAAA;AACnD,CAAC,EATW,kBAAkB,KAAlB,kBAAkB,QAS7B;AAcD,mFAAmF;AACnF,MAAM,CAAN,IAAY,+BAyGX;AAzGD,WAAY,+BAA+B;IACzC,aAAa;IACb,4CAAS,CAAA;IACT,kBAAkB;IAClB,4CAAS,CAAA;IACT,gBAAgB;IAChB,4CAAS,CAAA;IACT,oBAAoB;IACpB,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,aAAa;IACb,4CAAS,CAAA;IACT,YAAY;IACZ,4CAAS,CAAA;IACT,cAAc;IACd,4CAAS,CAA
…(base64 VLQ "mappings" data omitted; not human-readable)…","sourcesContent":["/*\n * Copyright (c) Microsoft Corporation.\n * Licensed under the MIT License.\n *\n * Code generated by Microsoft (R) AutoRest Code Generator.\n * Changes may cause incorrect behavior and will be lost if the code is regenerated.\n */\n\nimport * as coreClient from \"@azure/core-client\";\nimport * as coreHttpCompat from \"@azure/core-http-compat\";\n\nexport type KnowledgeBaseModelUnion =\n | KnowledgeBaseModel\n | KnowledgeBaseAzureOpenAIModel;\nexport type KnowledgeRetrievalReasoningEffortUnion =\n | KnowledgeRetrievalReasoningEffort\n | KnowledgeRetrievalMinimalReasoningEffort\n | KnowledgeRetrievalLowReasoningEffort\n | KnowledgeRetrievalMediumReasoningEffort;\nexport type SearchIndexerDataIdentityUnion =\n | SearchIndexerDataIdentity\n | SearchIndexerDataNoneIdentity\n | SearchIndexerDataUserAssignedIdentity;\nexport type KnowledgeSourceUnion =\n | KnowledgeSource\n | SearchIndexKnowledgeSource\n | AzureBlobKnowledgeSource\n | IndexedSharePointKnowledgeSource\n | IndexedOneLakeKnowledgeSource\n | 
WebKnowledgeSource\n | RemoteSharePointKnowledgeSource;\nexport type DataChangeDetectionPolicyUnion =\n | DataChangeDetectionPolicy\n | HighWaterMarkChangeDetectionPolicy\n | SqlIntegratedChangeTrackingPolicy;\nexport type DataDeletionDetectionPolicyUnion =\n | DataDeletionDetectionPolicy\n | SoftDeleteColumnDeletionDetectionPolicy\n | NativeBlobSoftDeleteDeletionDetectionPolicy;\nexport type SearchIndexerSkillUnion =\n | SearchIndexerSkill\n | ConditionalSkill\n | KeyPhraseExtractionSkill\n | OcrSkill\n | ImageAnalysisSkill\n | LanguageDetectionSkill\n | ShaperSkill\n | MergeSkill\n | EntityRecognitionSkill\n | SentimentSkill\n | SentimentSkillV3\n | EntityLinkingSkill\n | EntityRecognitionSkillV3\n | PIIDetectionSkill\n | SplitSkill\n | CustomEntityLookupSkill\n | TextTranslationSkill\n | DocumentExtractionSkill\n | DocumentIntelligenceLayoutSkill\n | WebApiSkillUnion\n | ContentUnderstandingSkill\n | AzureMachineLearningSkill\n | AzureOpenAIEmbeddingSkill\n | VisionVectorizeSkill;\nexport type CognitiveServicesAccountUnion =\n | CognitiveServicesAccount\n | DefaultCognitiveServicesAccount\n | CognitiveServicesAccountKey\n | AIServicesAccountKey\n | AIServicesAccountIdentity;\nexport type ScoringFunctionUnion =\n | ScoringFunction\n | DistanceScoringFunction\n | FreshnessScoringFunction\n | MagnitudeScoringFunction\n | TagScoringFunction;\nexport type LexicalAnalyzerUnion =\n | LexicalAnalyzer\n | CustomAnalyzer\n | PatternAnalyzer\n | LuceneStandardAnalyzer\n | StopAnalyzer;\nexport type LexicalTokenizerUnion =\n | LexicalTokenizer\n | ClassicTokenizer\n | EdgeNGramTokenizer\n | KeywordTokenizer\n | KeywordTokenizerV2\n | MicrosoftLanguageTokenizer\n | MicrosoftLanguageStemmingTokenizer\n | NGramTokenizer\n | PathHierarchyTokenizerV2\n | PatternTokenizer\n | LuceneStandardTokenizer\n | LuceneStandardTokenizerV2\n | UaxUrlEmailTokenizer;\nexport type TokenFilterUnion =\n | TokenFilter\n | AsciiFoldingTokenFilter\n | CjkBigramTokenFilter\n | CommonGramTokenFilter\n | DictionaryDecompounderTokenFilter\n | EdgeNGramTokenFilter\n | EdgeNGramTokenFilterV2\n | ElisionTokenFilter\n | KeepTokenFilter\n | KeywordMarkerTokenFilter\n | LengthTokenFilter\n | LimitTokenFilter\n | NGramTokenFilter\n | NGramTokenFilterV2\n | PatternCaptureTokenFilter\n | PatternReplaceTokenFilter\n | PhoneticTokenFilter\n | ShingleTokenFilter\n | SnowballTokenFilter\n | StemmerTokenFilter\n | StemmerOverrideTokenFilter\n | StopwordsTokenFilter\n | SynonymTokenFilter\n | TruncateTokenFilter\n | UniqueTokenFilter\n | WordDelimiterTokenFilter;\nexport type CharFilterUnion =\n | CharFilter\n | MappingCharFilter\n | PatternReplaceCharFilter;\nexport type LexicalNormalizerUnion = LexicalNormalizer | CustomNormalizer;\nexport type SimilarityUnion = Similarity | ClassicSimilarity | BM25Similarity;\nexport type VectorSearchAlgorithmConfigurationUnion =\n | VectorSearchAlgorithmConfiguration\n | HnswAlgorithmConfiguration\n | ExhaustiveKnnAlgorithmConfiguration;\nexport type VectorSearchVectorizerUnion =\n | VectorSearchVectorizer\n | AzureOpenAIVectorizer\n | WebApiVectorizer\n | AIServicesVisionVectorizer\n | AMLVectorizer;\nexport type VectorSearchCompressionUnion =\n | VectorSearchCompression\n | ScalarQuantizationCompression\n | BinaryQuantizationCompression;\nexport type KnowledgeSourceVectorizerUnion =\n | KnowledgeSourceVectorizer\n | KnowledgeSourceAzureOpenAIVectorizer;\nexport type WebApiSkillUnion = WebApiSkill | ChatCompletionSkill;\n\nexport interface KnowledgeBase {\n /** The name of the knowledge base. 
*/\n name: string;\n knowledgeSources: KnowledgeSourceReference[];\n /** Contains configuration options on how to connect to AI models. */\n models?: KnowledgeBaseModelUnion[];\n retrievalReasoningEffort?: KnowledgeRetrievalReasoningEffortUnion;\n /** The output configuration for this retrieval. */\n outputMode?: KnowledgeRetrievalOutputMode;\n /** The ETag of the knowledge base. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your knowledge base definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your knowledge base definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your knowledge base definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n /** The description of the knowledge base. */\n description?: string;\n /** Instructions considered by the knowledge base when developing the query plan. */\n retrievalInstructions?: string;\n /** Instructions considered by the knowledge base when generating answers. */\n answerInstructions?: string;\n}\n\nexport interface KnowledgeSourceReference {\n /** The name of the knowledge source. */\n name: string;\n}\n\n/** Specifies the connection parameters for the model to use for query planning. */\nexport interface KnowledgeBaseModel {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n}\n\nexport interface KnowledgeRetrievalReasoningEffort {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"minimal\" | \"low\" | \"medium\";\n}\n\n/** A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be used to encrypt or decrypt data-at-rest, such as indexes and synonym maps. */\nexport interface SearchResourceEncryptionKey {\n /** The name of your Azure Key Vault key to be used to encrypt your data at rest. */\n keyName: string;\n /** The version of your Azure Key Vault key to be used to encrypt your data at rest. */\n keyVersion?: string;\n /** The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be used to encrypt your data at rest. An example URI might be `https://my-keyvault-name.vault.azure.net`. */\n vaultUri: string;\n /** Optional Azure Active Directory credentials used for accessing your Azure Key Vault. Not required if using managed identity instead. */\n accessCredentials?: AzureActiveDirectoryApplicationCredentials;\n /** An explicit managed identity to use for this encryption key. If not specified and the access credentials property is null, the system-assigned managed identity is used. On update to the resource, if the explicit identity is unspecified, it remains unchanged. If \"none\" is specified, the value of this property is cleared. */\n identity?: SearchIndexerDataIdentityUnion;\n}\n\n/** Credentials of a registered application created for your search service, used for authenticated access to the encryption keys stored in Azure Key Vault. 
*/\nexport interface AzureActiveDirectoryApplicationCredentials {\n /** An AAD Application ID that was granted the required access permissions to the Azure Key Vault that is to be used when encrypting your data at rest. The Application ID should not be confused with the Object ID for your AAD Application. */\n applicationId: string;\n /** The authentication key of the specified AAD application. */\n applicationSecret?: string;\n}\n\n/** Abstract base type for data identities. */\nexport interface SearchIndexerDataIdentity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.DataNoneIdentity\"\n | \"#Microsoft.Azure.Search.DataUserAssignedIdentity\";\n}\n\n/** Common error response for all Azure Resource Manager APIs to return error details for failed operations. (This also follows the OData error response format.). */\nexport interface ErrorResponse {\n /** The error object. */\n error?: ErrorDetail;\n}\n\n/** The error detail. */\nexport interface ErrorDetail {\n /**\n * The error code.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly code?: string;\n /**\n * The error message.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly message?: string;\n /**\n * The error target.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly target?: string;\n /**\n * The error details.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly details?: ErrorDetail[];\n /**\n * The error additional info.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly additionalInfo?: ErrorAdditionalInfo[];\n}\n\n/** The resource management error additional info. */\nexport interface ErrorAdditionalInfo {\n /**\n * The additional info type.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly type?: string;\n /**\n * The additional info.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly info?: Record<string, unknown>;\n}\n\nexport interface ListKnowledgeBasesResult {\n knowledgeBases: KnowledgeBase[];\n}\n\n/** Represents a knowledge source definition. */\nexport interface KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind:\n | \"searchIndex\"\n | \"azureBlob\"\n | \"indexedSharePoint\"\n | \"indexedOneLake\"\n | \"web\"\n | \"remoteSharePoint\";\n /** The name of the knowledge source. */\n name: string;\n /** Optional user-defined description. */\n description?: string;\n /** The ETag of the knowledge base. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your knowledge base definition when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your knowledge base definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your knowledge base definition will be unaffected. 
Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\nexport interface ListKnowledgeSourcesResult {\n knowledgeSources: KnowledgeSourceUnion[];\n}\n\n/** Represents the status and synchronization history of a knowledge source. */\nexport interface KnowledgeSourceStatus {\n /** The current synchronization status of the knowledge source. */\n synchronizationStatus: KnowledgeSourceSynchronizationStatus;\n /** The synchronization interval (e.g., '1d' for daily). Null if no schedule is configured. */\n synchronizationInterval?: string;\n /** Current synchronization state that spans multiple indexer runs. */\n currentSynchronizationState?: SynchronizationState;\n /** Details of the last completed synchronization. Null on first sync. */\n lastSynchronizationState?: CompletedSynchronizationState;\n /** Statistical information about the knowledge source synchronization history. Null on first sync. */\n statistics?: KnowledgeSourceStatistics;\n}\n\n/** Represents the current state of an ongoing synchronization that spans multiple indexer runs. */\nexport interface SynchronizationState {\n /** The start time of the current synchronization. */\n startTime: Date;\n /** The number of item updates successfully processed in the current synchronization. */\n itemsUpdatesProcessed: number;\n /** The number of item updates that failed in the current synchronization. */\n itemsUpdatesFailed: number;\n /** The number of items skipped in the current synchronization. */\n itemsSkipped: number;\n}\n\n/** Represents the completed state of the last synchronization. */\nexport interface CompletedSynchronizationState {\n /** The start time of the last completed synchronization. */\n startTime: Date;\n /** The end time of the last completed synchronization. */\n endTime: Date;\n /** The number of item updates successfully processed in the last synchronization. */\n itemsUpdatesProcessed: number;\n /** The number of item updates that failed in the last synchronization. */\n itemsUpdatesFailed: number;\n /** The number of items skipped in the last synchronization. */\n itemsSkipped: number;\n}\n\n/** Statistical information about knowledge source synchronization history. */\nexport interface KnowledgeSourceStatistics {\n /** The total number of synchronizations completed. */\n totalSynchronization: number;\n /** The average duration of synchronizations in HH:MM:SS format. */\n averageSynchronizationDuration: string;\n /** The average number of items processed per synchronization. */\n averageItemsProcessedPerSynchronization: number;\n}\n\n/** Represents a datasource definition, which can be used to configure an indexer. */\nexport interface SearchIndexerDataSource {\n /** The name of the datasource. */\n name: string;\n /** The description of the datasource. */\n description?: string;\n /** The type of the datasource. */\n type: SearchIndexerDataSourceType;\n /**\n * A specific type of the data source, in case the resource is capable of different modalities. For example, 'MongoDb' for certain 'cosmosDb' accounts.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly subType?: string;\n /** Credentials for the datasource. */\n credentials: DataSourceCredentials;\n /** The data container for the datasource. */\n container: SearchIndexerDataContainer;\n /** An explicit managed identity to use for this datasource. 
If not specified and the connection string is a managed identity, the system-assigned managed identity is used. If not specified, the value remains unchanged. If \"none\" is specified, the value of this property is cleared. */\n identity?: SearchIndexerDataIdentityUnion;\n /** Ingestion options with various types of permission data. */\n indexerPermissionOptions?: IndexerPermissionOption[];\n /** The data change detection policy for the datasource. */\n dataChangeDetectionPolicy?: DataChangeDetectionPolicyUnion;\n /** The data deletion detection policy for the datasource. */\n dataDeletionDetectionPolicy?: DataDeletionDetectionPolicyUnion;\n /** The ETag of the data source. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your datasource definition when you want full assurance that no one, not even Microsoft, can decrypt your data source definition. Once you have encrypted your data source definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your datasource definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/** Represents credentials that can be used to connect to a datasource. */\nexport interface DataSourceCredentials {\n /** The connection string for the datasource. Set to `<unchanged>` (with brackets) if you don't want the connection string updated. Set to `<redacted>` if you want to remove the connection string value from the datasource. */\n connectionString?: string;\n}\n\n/** Represents information about the entity (such as Azure SQL table or CosmosDB collection) that will be indexed. */\nexport interface SearchIndexerDataContainer {\n /** The name of the table or view (for Azure SQL data source) or collection (for CosmosDB data source) that will be indexed. */\n name: string;\n /** A query that is applied to this data container. The syntax and meaning of this parameter is datasource-specific. Not supported by Azure SQL datasources. */\n query?: string;\n}\n\n/** Base type for data change detection policies. */\nexport interface DataChangeDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy\"\n | \"#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy\";\n}\n\n/** Base type for data deletion detection policies. */\nexport interface DataDeletionDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy\"\n | \"#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy\";\n}\n\n/** Response from a List Datasources request. If successful, it includes the full definitions of all datasources. */\nexport interface ListDataSourcesResult {\n /**\n * The datasources in the Search service.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly dataSources: SearchIndexerDataSource[];\n}\n\nexport interface DocumentKeysOrIds {\n /** document keys to be reset */\n documentKeys?: string[];\n /** datasource document identifiers to be reset */\n datasourceDocumentIds?: string[];\n}\n\nexport interface IndexerResyncBody {\n /** Pre-defined re-sync options for the data source. */\n options?: IndexerResyncOption[];\n}\n\n/** Represents an indexer. */\nexport interface SearchIndexer {\n /** The name of the indexer. */\n name: string;\n /** The description of the indexer. */\n description?: string;\n /** The name of the datasource from which this indexer reads data. */\n dataSourceName: string;\n /** The name of the skillset executing with this indexer. */\n skillsetName?: string;\n /** The name of the index to which this indexer writes data. */\n targetIndexName: string;\n /** The schedule for this indexer. */\n schedule?: IndexingSchedule;\n /** Parameters for indexer execution. */\n parameters?: IndexingParameters;\n /** Defines mappings between fields in the data source and corresponding target fields in the index. */\n fieldMappings?: FieldMapping[];\n /** Output field mappings are applied after enrichment and immediately before indexing. */\n outputFieldMappings?: FieldMapping[];\n /** A value indicating whether the indexer is disabled. Default is false. */\n isDisabled?: boolean;\n /** The ETag of the indexer. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your indexer definition (as well as indexer execution status) when you want full assurance that no one, not even Microsoft, can decrypt them. Once you have encrypted your indexer definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your indexer definition (and indexer execution status) will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n /** Adds caching to an enrichment pipeline to allow for incremental modification steps without having to rebuild the index every time. */\n cache?: SearchIndexerCache;\n}\n\n/** Represents a schedule for indexer execution. */\nexport interface IndexingSchedule {\n /** The interval of time between indexer executions. */\n interval: string;\n /** The time when an indexer should start running. */\n startTime?: Date;\n}\n\n/** Represents parameters for indexer execution. */\nexport interface IndexingParameters {\n /** The number of items that are read from the data source and indexed as a single batch in order to improve performance. The default depends on the data source type. */\n batchSize?: number;\n /** The maximum number of items that can fail indexing for indexer execution to still be considered successful. -1 means no limit. Default is 0. */\n maxFailedItems?: number;\n /** The maximum number of items in a single batch that can fail indexing for the batch to still be considered successful. -1 means no limit. Default is 0. */\n maxFailedItemsPerBatch?: number;\n /** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. 
*/\n configuration?: IndexingParametersConfiguration;\n}\n\n/** A dictionary of indexer-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface IndexingParametersConfiguration {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Represents the parsing mode for indexing from an Azure blob data source. */\n parsingMode?: BlobIndexerParsingMode;\n /** Comma-delimited list of filename extensions to ignore when processing from Azure blob storage. For example, you could exclude \".png, .mp4\" to skip over those files during indexing. */\n excludedFileNameExtensions?: string;\n /** Comma-delimited list of filename extensions to select when processing from Azure blob storage. For example, you could focus indexing on specific application files \".docx, .pptx, .msg\" to specifically include those file types. */\n indexedFileNameExtensions?: string;\n /** For Azure blobs, set to false if you want to continue indexing when an unsupported content type is encountered, and you don't know all the content types (file extensions) in advance. */\n failOnUnsupportedContentType?: boolean;\n /** For Azure blobs, set to false if you want to continue indexing if a document fails indexing. */\n failOnUnprocessableDocument?: boolean;\n /** For Azure blobs, set this property to true to still index storage metadata for blob content that is too large to process. Oversized blobs are treated as errors by default. For limits on blob size, see https://learn.microsoft.com/azure/search/search-limits-quotas-capacity. */\n indexStorageMetadataOnlyForOversizedDocuments?: boolean;\n /** For CSV blobs, specifies a comma-delimited list of column headers, useful for mapping source fields to destination fields in an index. */\n delimitedTextHeaders?: string;\n /** For CSV blobs, specifies the end-of-line single-character delimiter for CSV files where each line starts a new document (for example, \"|\"). */\n delimitedTextDelimiter?: string;\n /** For CSV blobs, indicates that the first (non-blank) line of each blob contains headers. */\n firstLineContainsHeaders?: boolean;\n /** Specifies the submode that will determine whether a markdown file will be parsed into exactly one search document or multiple search documents. Default is `oneToMany`. */\n markdownParsingSubmode?: MarkdownParsingSubmode;\n /** Specifies the max header depth that will be considered while grouping markdown content. Default is `h6`. */\n markdownHeaderDepth?: MarkdownHeaderDepth;\n /** For JSON arrays, given a structured or semi-structured document, you can specify a path to the array using this property. */\n documentRoot?: string;\n /** Specifies the data to extract from Azure blob storage and tells the indexer which data to extract from image content when \"imageAction\" is set to a value other than \"none\". This applies to embedded image content in a .PDF or other application, or image files such as .jpg and .png, in Azure blobs. */\n dataToExtract?: BlobIndexerDataToExtract;\n /** Determines how to process embedded images and image files in Azure blob storage. Setting the \"imageAction\" configuration to any value other than \"none\" requires that a skillset also be attached to that indexer. */\n imageAction?: BlobIndexerImageAction;\n /** If true, will create a path //document//file_data that is an object representing the original file data downloaded from your blob data source. 
This allows you to pass the original file data to a custom skill for processing within the enrichment pipeline, or to the Document Extraction skill. */\n allowSkillsetToReadFileData?: boolean;\n /** Determines algorithm for text extraction from PDF files in Azure blob storage. */\n pdfTextRotationAlgorithm?: BlobIndexerPDFTextRotationAlgorithm;\n /** Specifies the environment in which the indexer should execute. */\n executionEnvironment?: IndexerExecutionEnvironment;\n /** Increases the timeout beyond the 5-minute default for Azure SQL database data sources, specified in the format \"hh:mm:ss\". */\n queryTimeout?: string;\n}\n\n/** Defines a mapping between a field in a data source and a target field in an index. */\nexport interface FieldMapping {\n /** The name of the field in the data source. */\n sourceFieldName: string;\n /** The name of the target field in the index. Same as the source field name by default. */\n targetFieldName?: string;\n /** A function to apply to each source field value before indexing. */\n mappingFunction?: FieldMappingFunction;\n}\n\n/** Represents a function that transforms a value from a data source before indexing. */\nexport interface FieldMappingFunction {\n /** The name of the field mapping function. */\n name: string;\n /** A dictionary of parameter name/value pairs to pass to the function. Each value must be of a primitive type. */\n parameters?: { [propertyName: string]: any };\n}\n\nexport interface SearchIndexerCache {\n /** A guid for the SearchIndexerCache. */\n id?: string;\n /** The connection string to the storage account where the cache data will be persisted. */\n storageConnectionString?: string;\n /** Specifies whether incremental reprocessing is enabled. */\n enableReprocessing?: boolean;\n /** The user-assigned managed identity used for connections to the enrichment cache. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n identity?: SearchIndexerDataIdentityUnion;\n}\n\n/** Response from a List Indexers request. If successful, it includes the full definitions of all indexers. */\nexport interface ListIndexersResult {\n /**\n * The indexers in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly indexers: SearchIndexer[];\n}\n\n/** Represents the current status and execution history of an indexer. */\nexport interface SearchIndexerStatus {\n /**\n * The name of the indexer.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly name: string;\n /**\n * Overall indexer status.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly status: IndexerStatus;\n /**\n * Snapshot of the indexer’s cumulative runtime consumption for the service over the current UTC period.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly runtime: IndexerRuntime;\n /**\n * The result of the most recent or an in-progress indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly lastResult?: IndexerExecutionResult;\n /**\n * History of the recent indexer executions, sorted in reverse chronological order.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly executionHistory: IndexerExecutionResult[];\n /**\n * The execution limits for the indexer.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly limits: SearchIndexerLimits;\n /**\n * All of the state that defines and dictates the indexer's current execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly currentState?: IndexerState;\n}\n\n/** Represents the indexer's cumulative runtime consumption in the service. */\nexport interface IndexerRuntime {\n /** Cumulative runtime of the indexer from the beginningTime to endingTime, in seconds. */\n usedSeconds: number;\n /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. */\n remainingSeconds?: number;\n /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */\n beginningTime: Date;\n /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */\n endingTime: Date;\n}\n\n/** Represents the result of an individual indexer execution. */\nexport interface IndexerExecutionResult {\n /**\n * The outcome of this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly status: IndexerExecutionStatus;\n /**\n * The detailed outcome of this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly statusDetail?: IndexerExecutionStatusDetail;\n /**\n * The mode the indexer is running in.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly mode?: IndexingMode;\n /**\n * The error message indicating the top-level error, if any.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly errorMessage?: string;\n /**\n * The start time of this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly startTime?: Date;\n /**\n * The end time of this indexer execution, if the execution has already completed.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly endTime?: Date;\n /**\n * The item-level indexing errors.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly errors: SearchIndexerError[];\n /**\n * The item-level indexing warnings.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly warnings: SearchIndexerWarning[];\n /**\n * The number of items that were processed during this indexer execution. This includes both successfully processed items and items where indexing was attempted but failed.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly itemCount: number;\n /**\n * The number of items that failed to be indexed during this indexer execution.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly failedItemCount: number;\n /**\n * Change tracking state with which an indexer execution started.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly initialTrackingState?: string;\n /**\n * Change tracking state with which an indexer execution finished.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly finalTrackingState?: string;\n}\n\n/** Represents an item- or document-level indexing error. */\nexport interface SearchIndexerError {\n /**\n * The key of the item for which indexing failed.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly key?: string;\n /**\n * The message describing the error that occurred while processing the item.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly errorMessage: string;\n /**\n * The status code indicating why the indexing operation failed. Possible values include: 400 for a malformed input document, 404 for document not found, 409 for a version conflict, 422 when the index is temporarily unavailable, or 503 for when the service is too busy.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly statusCode: number;\n /**\n * The name of the source at which the error originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly name?: string;\n /**\n * Additional, verbose details about the error to assist in debugging the indexer. This may not always be available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly details?: string;\n /**\n * A link to a troubleshooting guide for these classes of errors. This may not always be available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentationLink?: string;\n}\n\n/** Represents an item-level warning. */\nexport interface SearchIndexerWarning {\n /**\n * The key of the item which generated a warning.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly key?: string;\n /**\n * The message describing the warning that occurred while processing the item.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly message: string;\n /**\n * The name of the source at which the warning originated. For example, this could refer to a particular skill in the attached skillset. This may not always be available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly name?: string;\n /**\n * Additional, verbose details about the warning to assist in debugging the indexer. This may not always be available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly details?: string;\n /**\n * A link to a troubleshooting guide for these classes of warnings. This may not always be available.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentationLink?: string;\n}\n\nexport interface SearchIndexerLimits {\n /**\n * The maximum duration that the indexer is permitted to run for one execution.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly maxRunTime?: string;\n /**\n * The maximum size of a document, in bytes, which will be considered valid for indexing.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly maxDocumentExtractionSize?: number;\n /**\n * The maximum number of characters that will be extracted from a document picked up for indexing.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly maxDocumentContentCharactersToExtract?: number;\n}\n\n/** Represents all of the state that defines and dictates the indexer's current execution. */\nexport interface IndexerState {\n /**\n * The mode the indexer is running in.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly mode?: IndexingMode;\n /**\n * Change tracking state used when indexing starts on all documents in the datasource.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly allDocsInitialTrackingState?: string;\n /**\n * Change tracking state value when indexing finishes on all documents in the datasource.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly allDocsFinalTrackingState?: string;\n /**\n * Change tracking state used when indexing starts on select, reset documents in the datasource.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly resetDocsInitialTrackingState?: string;\n /**\n * Change tracking state value when indexing finishes on select, reset documents in the datasource.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly resetDocsFinalTrackingState?: string;\n /**\n * The list of document keys that have been reset. The document key is the document's unique identifier for the data in the search index. The indexer will prioritize selectively re-ingesting these keys.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly resetDocumentKeys?: string[];\n /**\n * The list of datasource document ids that have been reset. The datasource document id is the unique identifier for the data in the datasource. The indexer will prioritize selectively re-ingesting these ids.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly resetDatasourceDocumentIds?: string[];\n /**\n * Change tracking state used when indexing starts on selective options from the datasource.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly resyncInitialTrackingState?: string;\n /**\n * Change tracking state value when indexing finishes on selective options from the datasource.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly resyncFinalTrackingState?: string;\n}\n\n/** A list of skills. */\nexport interface SearchIndexerSkillset {\n /** The name of the skillset. */\n name: string;\n /** The description of the skillset. */\n description?: string;\n /** A list of skills in the skillset. */\n skills: SearchIndexerSkillUnion[];\n /** Details about the Azure AI service to be used when running skills. */\n cognitiveServicesAccount?: CognitiveServicesAccountUnion;\n /** Definition of additional projections to Azure blob, table, or files, of enriched data. 
*/\n knowledgeStore?: SearchIndexerKnowledgeStore;\n /** Definition of additional projections to secondary search index(es). */\n indexProjection?: SearchIndexerIndexProjection;\n /** The ETag of the skillset. */\n etag?: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your skillset definition when you want full assurance that no one, not even Microsoft, can decrypt your skillset definition. Once you have encrypted your skillset definition, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your skillset definition will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n}\n\n/** Base type for skills. */\nexport interface SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Skills.Util.ConditionalSkill\"\n | \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\"\n | \"#Microsoft.Skills.Vision.OcrSkill\"\n | \"#Microsoft.Skills.Vision.ImageAnalysisSkill\"\n | \"#Microsoft.Skills.Text.LanguageDetectionSkill\"\n | \"#Microsoft.Skills.Util.ShaperSkill\"\n | \"#Microsoft.Skills.Text.MergeSkill\"\n | \"#Microsoft.Skills.Text.EntityRecognitionSkill\"\n | \"#Microsoft.Skills.Text.SentimentSkill\"\n | \"#Microsoft.Skills.Text.V3.SentimentSkill\"\n | \"#Microsoft.Skills.Text.V3.EntityLinkingSkill\"\n | \"#Microsoft.Skills.Text.V3.EntityRecognitionSkill\"\n | \"#Microsoft.Skills.Text.PIIDetectionSkill\"\n | \"#Microsoft.Skills.Text.SplitSkill\"\n | \"#Microsoft.Skills.Text.CustomEntityLookupSkill\"\n | \"#Microsoft.Skills.Text.TranslationSkill\"\n | \"#Microsoft.Skills.Util.DocumentExtractionSkill\"\n | \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\"\n | \"#Microsoft.Skills.Custom.WebApiSkill\"\n | \"#Microsoft.Skills.Custom.ChatCompletionSkill\"\n | \"#Microsoft.Skills.Util.ContentUnderstandingSkill\"\n | \"#Microsoft.Skills.Custom.AmlSkill\"\n | \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\"\n | \"#Microsoft.Skills.Vision.VectorizeSkill\";\n /** The name of the skill which uniquely identifies it within the skillset. A skill with no name defined will be given a default name of its 1-based index in the skills array, prefixed with the character '#'. */\n name?: string;\n /** The description of the skill which describes the inputs, outputs, and usage of the skill. */\n description?: string;\n /** Represents the level at which operations take place, such as the document root or document content (for example, /document or /document/content). The default is /document. */\n context?: string;\n /** Inputs of the skills could be a column in the source data set, or the output of an upstream skill. */\n inputs: InputFieldMappingEntry[];\n /** The output of a skill is either a field in a search index, or a value that can be consumed as an input by another skill. */\n outputs: OutputFieldMappingEntry[];\n}\n\n/** Input field mapping for a skill. */\nexport interface InputFieldMappingEntry {\n /** The name of the input. */\n name: string;\n /** The source of the input. */\n source?: string;\n /** The source context used for selecting recursive inputs. 
*/\n sourceContext?: string;\n /** The recursive inputs used when creating a complex type. */\n inputs?: InputFieldMappingEntry[];\n}\n\n/** Output field mapping for a skill. */\nexport interface OutputFieldMappingEntry {\n /** The name of the output defined by the skill. */\n name: string;\n /** The target name of the output. It is optional and defaults to the name. */\n targetName?: string;\n}\n\n/** Base type for describing any Azure AI service resource attached to a skillset. */\nexport interface CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.DefaultCognitiveServices\"\n | \"#Microsoft.Azure.Search.CognitiveServicesByKey\"\n | \"#Microsoft.Azure.Search.AIServicesByKey\"\n | \"#Microsoft.Azure.Search.AIServicesByIdentity\";\n /** Description of the Azure AI service resource attached to a skillset. */\n description?: string;\n}\n\n/** Definition of additional projections to Azure blob, table, or files, of enriched data. */\nexport interface SearchIndexerKnowledgeStore {\n /** The connection string to the storage account projections will be stored in. */\n storageConnectionString: string;\n /** A list of additional projections to perform during indexing. */\n projections: SearchIndexerKnowledgeStoreProjection[];\n /** The user-assigned managed identity used for connections to Azure Storage when writing knowledge store projections. If the connection string indicates an identity (ResourceId) and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n identity?: SearchIndexerDataIdentityUnion;\n /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\n parameters?: SearchIndexerKnowledgeStoreParameters;\n}\n\n/** Container object for various projection selectors. */\nexport interface SearchIndexerKnowledgeStoreProjection {\n /** Projections to Azure Table storage. */\n tables?: SearchIndexerKnowledgeStoreTableProjectionSelector[];\n /** Projections to Azure Blob storage. */\n objects?: SearchIndexerKnowledgeStoreObjectProjectionSelector[];\n /** Projections to Azure File storage. */\n files?: SearchIndexerKnowledgeStoreFileProjectionSelector[];\n}\n\n/** Abstract class to share properties between concrete selectors. */\nexport interface SearchIndexerKnowledgeStoreProjectionSelector {\n /** Name of reference key to different projection. */\n referenceKeyName?: string;\n /** Name of generated key to store projection under. */\n generatedKeyName?: string;\n /** Source data to project. */\n source?: string;\n /** Source context for complex projections. */\n sourceContext?: string;\n /** Nested inputs for complex projections. */\n inputs?: InputFieldMappingEntry[];\n}\n\n/** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface SearchIndexerKnowledgeStoreParameters {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Whether or not projections should synthesize a generated key name if one isn't already present. */\n synthesizeGeneratedKeyName?: boolean;\n}\n\n/** Definition of additional projections to secondary search indexes. 
*/\nexport interface SearchIndexerIndexProjection {\n /** A list of projections to be performed to secondary search indexes. */\n selectors: SearchIndexerIndexProjectionSelector[];\n /** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\n parameters?: SearchIndexerIndexProjectionParameters;\n}\n\n/** Description for what data to store in the designated search index. */\nexport interface SearchIndexerIndexProjectionSelector {\n /** Name of the search index to project to. Must have a key field with the 'keyword' analyzer set. */\n targetIndexName: string;\n /** Name of the field in the search index to map the parent document's key value to. Must be a string field that is filterable and not the key field. */\n parentKeyFieldName: string;\n /** Source context for the projections. Represents the cardinality at which the document will be split into multiple sub documents. */\n sourceContext: string;\n /** Mappings for the projection, or which source should be mapped to which field in the target index. */\n mappings: InputFieldMappingEntry[];\n}\n\n/** A dictionary of index projection-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */\nexport interface SearchIndexerIndexProjectionParameters {\n /** Describes unknown properties. The value of an unknown property can be of \"any\" type. */\n [property: string]: any;\n /** Defines behavior of the index projections in relation to the rest of the indexer. */\n projectionMode?: IndexProjectionMode;\n}\n\n/** Response from a list skillset request. If successful, it includes the full definitions of all skillsets. */\nexport interface ListSkillsetsResult {\n /**\n * The skillsets defined in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly skillsets: SearchIndexerSkillset[];\n}\n\nexport interface SkillNames {\n /** the names of skills to be reset. */\n skillNames?: string[];\n}\n\n/** Represents a synonym map definition. */\nexport interface SynonymMap {\n /** The name of the synonym map. */\n name: string;\n /** The format of the synonym map. Only the 'solr' format is currently supported. */\n format: \"solr\";\n /** A series of synonym rules in the specified synonym map format. The rules must be separated by newlines. */\n synonyms: string;\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n /** The ETag of the synonym map. */\n etag?: string;\n}\n\n/** Response from a List SynonymMaps request. If successful, it includes the full definitions of all synonym maps. */\nexport interface ListSynonymMapsResult {\n /**\n * The synonym maps in the Search service.\n * NOTE: This property will not be serialized. 
It can only be populated by the server.\n */\n readonly synonymMaps: SynonymMap[];\n}\n\n/** Represents a search index definition, which describes the fields and search behavior of an index. */\nexport interface SearchIndex {\n /** The name of the index. */\n name: string;\n /** The description of the index. */\n description?: string;\n /** The fields of the index. */\n fields: SearchField[];\n /** The scoring profiles for the index. */\n scoringProfiles?: ScoringProfile[];\n /** The name of the scoring profile to use if none is specified in the query. If this property is not set and no scoring profile is specified in the query, then default scoring (tf-idf) will be used. */\n defaultScoringProfile?: string;\n /** Options to control Cross-Origin Resource Sharing (CORS) for the index. */\n corsOptions?: CorsOptions;\n /** The suggesters for the index. */\n suggesters?: Suggester[];\n /** The analyzers for the index. */\n analyzers?: LexicalAnalyzerUnion[];\n /** The tokenizers for the index. */\n tokenizers?: LexicalTokenizerUnion[];\n /** The token filters for the index. */\n tokenFilters?: TokenFilterUnion[];\n /** The character filters for the index. */\n charFilters?: CharFilterUnion[];\n /** The normalizers for the index. */\n normalizers?: LexicalNormalizerUnion[];\n /** A description of an encryption key that you create in Azure Key Vault. This key is used to provide an additional level of encryption-at-rest for your data when you want full assurance that no one, not even Microsoft, can decrypt your data. Once you have encrypted your data, it will always remain encrypted. The search service will ignore attempts to set this property to null. You can change this property as needed if you want to rotate your encryption key; Your data will be unaffected. Encryption with customer-managed keys is not available for free search services, and is only available for paid services created on or after January 1, 2019. */\n encryptionKey?: SearchResourceEncryptionKey;\n /** The type of similarity algorithm to be used when scoring and ranking the documents matching a search query. The similarity algorithm can only be defined at index creation time and cannot be modified on existing indexes. If null, the ClassicSimilarity algorithm is used. */\n similarity?: SimilarityUnion;\n /** Defines parameters for a search index that influence semantic capabilities. */\n semanticSearch?: SemanticSearch;\n /** Contains configuration options related to vector search. */\n vectorSearch?: VectorSearch;\n /** A value indicating whether permission filtering is enabled for the index. */\n permissionFilterOption?: SearchIndexPermissionFilterOption;\n /** A value indicating whether the index is leveraging Purview-specific features. This property defaults to false and cannot be changed after index creation. */\n purviewEnabled?: boolean;\n /** The ETag of the index. */\n etag?: string;\n}\n\n/** Represents a field in an index definition, which describes the name, data type, and search behavior of a field. */\nexport interface SearchField {\n /** The name of the field, which must be unique within the fields collection of the index or parent field. */\n name: string;\n /** The data type of the field. */\n type: SearchFieldDataType;\n /** A value indicating whether the field uniquely identifies documents in the index. Exactly one top-level field in each index must be chosen as the key field and it must be of type Edm.String. Key fields can be used to look up documents directly and update or delete specific documents. 
Default is false for simple fields and null for complex fields. */\n key?: boolean;\n /** A value indicating whether the field can be returned in a search result. You can disable this option if you want to use a field (for example, margin) as a filter, sorting, or scoring mechanism but do not want the field to be visible to the end user. This property must be true for key fields, and it must be null for complex fields. This property can be changed on existing fields. Enabling this property does not cause any increase in index storage requirements. Default is true for simple fields, false for vector fields, and null for complex fields. */\n retrievable?: boolean;\n /** An immutable value indicating whether the field will be persisted separately on disk to be returned in a search result. You can disable this option if you don't plan to return the field contents in a search response to save on storage overhead. This can only be set during index creation and only for vector fields. This property cannot be changed for existing fields or set as false for new fields. If this property is set as false, the property 'retrievable' must also be set to false. This property must be true or unset for key fields, for new fields, and for non-vector fields, and it must be null for complex fields. Disabling this property will reduce index storage requirements. The default is true for vector fields. */\n stored?: boolean;\n /** A value indicating whether the field is full-text searchable. This means it will undergo analysis such as word-breaking during indexing. If you set a searchable field to a value like \"sunny day\", internally it will be split into the individual tokens \"sunny\" and \"day\". This enables full-text searches for these terms. Fields of type Edm.String or Collection(Edm.String) are searchable by default. This property must be false for simple fields of other non-string data types, and it must be null for complex fields. Note: searchable fields consume extra space in your index to accommodate additional tokenized versions of the field value for full-text searches. If you want to save space in your index and you don't need a field to be included in searches, set searchable to false. */\n searchable?: boolean;\n /** A value indicating whether to enable the field to be referenced in $filter queries. filterable differs from searchable in how strings are handled. Fields of type Edm.String or Collection(Edm.String) that are filterable do not undergo word-breaking, so comparisons are for exact matches only. For example, if you set such a field f to \"sunny day\", $filter=f eq 'sunny' will find no matches, but $filter=f eq 'sunny day' will. This property must be null for complex fields. Default is true for simple fields and null for complex fields. */\n filterable?: boolean;\n /** A value indicating whether to enable the field to be referenced in $orderby expressions. By default, the search engine sorts results by score, but in many experiences users will want to sort by fields in the documents. A simple field can be sortable only if it is single-valued (it has a single value in the scope of the parent document). Simple collection fields cannot be sortable, since they are multi-valued. Simple sub-fields of complex collections are also multi-valued, and therefore cannot be sortable. This is true whether it's an immediate parent field, or an ancestor field, that's the complex collection. Complex fields cannot be sortable and the sortable property must be null for such fields. 
The default for sortable is true for single-valued simple fields, false for multi-valued simple fields, and null for complex fields. */\n sortable?: boolean;\n /** A value indicating whether to enable the field to be referenced in facet queries. Typically used in a presentation of search results that includes hit count by category (for example, search for digital cameras and see hits by brand, by megapixels, by price, and so on). This property must be null for complex fields. Fields of type Edm.GeographyPoint or Collection(Edm.GeographyPoint) cannot be facetable. Default is true for all other simple fields. */\n facetable?: boolean;\n /** A value indicating whether the field should be used as a permission filter. */\n permissionFilter?: PermissionFilter;\n /** A value indicating whether the field should be used for sensitivity label filtering. This enables document-level filtering based on Microsoft Purview sensitivity labels. */\n sensitivityLabel?: boolean;\n /** The name of the analyzer to use for the field. This option can be used only with searchable fields and it can't be set together with either searchAnalyzer or indexAnalyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */\n analyzer?: LexicalAnalyzerName;\n /** The name of the analyzer used at search time for the field. This option can be used only with searchable fields. It must be set together with indexAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. This analyzer can be updated on an existing field. Must be null for complex fields. */\n searchAnalyzer?: LexicalAnalyzerName;\n /** The name of the analyzer used at indexing time for the field. This option can be used only with searchable fields. It must be set together with searchAnalyzer and it cannot be set together with the analyzer option. This property cannot be set to the name of a language analyzer; use the analyzer property instead if you need a language analyzer. Once the analyzer is chosen, it cannot be changed for the field. Must be null for complex fields. */\n indexAnalyzer?: LexicalAnalyzerName;\n /** The name of the normalizer to use for the field. This option can be used only with fields with filterable, sortable, or facetable enabled. Once the normalizer is chosen, it cannot be changed for the field. Must be null for complex fields. */\n normalizer?: LexicalNormalizerName;\n /** The dimensionality of the vector field. */\n vectorSearchDimensions?: number;\n /** The name of the vector search profile that specifies the algorithm and vectorizer to use when searching the vector field. */\n vectorSearchProfileName?: string;\n /** The encoding format to interpret the field contents. */\n vectorEncodingFormat?: VectorEncodingFormat;\n /** A list of the names of synonym maps to associate with this field. This option can be used only with searchable fields. Currently only one synonym map per field is supported. Assigning a synonym map to a field ensures that query terms targeting that field are expanded at query-time using the rules in the synonym map. This attribute can be changed on existing fields. Must be null or an empty collection for complex fields. */\n synonymMaps?: string[];\n /** A list of sub-fields if this is a field of type Edm.ComplexType or Collection(Edm.ComplexType). Must be null or empty for simple fields. 
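// Illustrative sketch (not part of the generated models): a minimal
// SearchIndex literal showing how the SearchField attributes documented
// above combine. The index name, field names, and the "my-profile" vector
// search profile are hypothetical.
const hotelsIndex: SearchIndex = {
  name: "hotels",
  fields: [
    // Exactly one top-level Edm.String field must be the key.
    { name: "id", type: "Edm.String", key: true, filterable: true },
    // Full-text searchable field with an explicit language analyzer.
    { name: "description", type: "Edm.String", searchable: true, analyzer: "en.lucene" },
    // Vector field: dimensions and a vector search profile go together.
    {
      name: "descriptionVector",
      type: "Collection(Edm.Single)",
      searchable: true,
      vectorSearchDimensions: 1536,
      vectorSearchProfileName: "my-profile",
    },
  ],
};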
*/\n fields?: SearchField[];\n}\n\n/** Defines parameters for a search index that influence scoring in search queries. */\nexport interface ScoringProfile {\n /** The name of the scoring profile. */\n name: string;\n /** Parameters that boost scoring based on text matches in certain index fields. */\n textWeights?: TextWeights;\n /** The collection of functions that influence the scoring of documents. */\n functions?: ScoringFunctionUnion[];\n /** A value indicating how the results of individual scoring functions should be combined. Defaults to \"Sum\". Ignored if there are no scoring functions. */\n functionAggregation?: ScoringFunctionAggregation;\n}\n\n/** Defines weights on index fields for which matches should boost scoring in search queries. */\nexport interface TextWeights {\n /** The dictionary of per-field weights to boost document scoring. The keys are field names and the values are the weights for each field. */\n weights: { [propertyName: string]: number };\n}\n\n/** Base type for functions that can modify document scores during ranking. */\nexport interface ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"distance\" | \"freshness\" | \"magnitude\" | \"tag\";\n /** The name of the field used as input to the scoring function. */\n fieldName: string;\n /** A multiplier for the raw score. Must be a positive number not equal to 1.0. */\n boost: number;\n /** A value indicating how boosting will be interpolated across document scores; defaults to \"Linear\". */\n interpolation?: ScoringFunctionInterpolation;\n}\n\n/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */\nexport interface CorsOptions {\n /** The list of origins from which JavaScript code will be granted access to your index. Can contain a list of hosts of the form {protocol}://{fully-qualified-domain-name}[:{port#}], or a single '*' to allow all origins (not recommended). */\n allowedOrigins: string[];\n /** The duration for which browsers should cache CORS preflight responses. Defaults to 5 minutes. */\n maxAgeInSeconds?: number;\n}\n\n/** Defines how the Suggest API should apply to a group of fields in the index. */\nexport interface Suggester {\n /** The name of the suggester. */\n name: string;\n /** A value indicating the capabilities of the suggester. */\n searchMode: \"analyzingInfixMatching\";\n /** The list of field names to which the suggester applies. Each field must be searchable. */\n sourceFields: string[];\n}\n\n/** Base type for analyzers. */\nexport interface LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.CustomAnalyzer\"\n | \"#Microsoft.Azure.Search.PatternAnalyzer\"\n | \"#Microsoft.Azure.Search.StandardAnalyzer\"\n | \"#Microsoft.Azure.Search.StopAnalyzer\";\n /** The name of the analyzer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for tokenizers. 
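// Sketch: a ScoringProfile that boosts text matches via TextWeights. The
// field names ("title", "tags") are hypothetical; scoring functions from
// ScoringFunctionUnion could be added alongside the weights.
const boostProfile: ScoringProfile = {
  name: "boostByField",
  textWeights: { weights: { title: 5, tags: 2 } },
};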
*/\nexport interface LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.ClassicTokenizer\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenizer\"\n | \"#Microsoft.Azure.Search.KeywordTokenizer\"\n | \"#Microsoft.Azure.Search.KeywordTokenizerV2\"\n | \"#Microsoft.Azure.Search.MicrosoftLanguageTokenizer\"\n | \"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer\"\n | \"#Microsoft.Azure.Search.NGramTokenizer\"\n | \"#Microsoft.Azure.Search.PathHierarchyTokenizerV2\"\n | \"#Microsoft.Azure.Search.PatternTokenizer\"\n | \"#Microsoft.Azure.Search.StandardTokenizer\"\n | \"#Microsoft.Azure.Search.StandardTokenizerV2\"\n | \"#Microsoft.Azure.Search.UaxUrlEmailTokenizer\";\n /** The name of the tokenizer. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for token filters. */\nexport interface TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.AsciiFoldingTokenFilter\"\n | \"#Microsoft.Azure.Search.CjkBigramTokenFilter\"\n | \"#Microsoft.Azure.Search.CommonGramTokenFilter\"\n | \"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\"\n | \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.ElisionTokenFilter\"\n | \"#Microsoft.Azure.Search.KeepTokenFilter\"\n | \"#Microsoft.Azure.Search.KeywordMarkerTokenFilter\"\n | \"#Microsoft.Azure.Search.LengthTokenFilter\"\n | \"#Microsoft.Azure.Search.LimitTokenFilter\"\n | \"#Microsoft.Azure.Search.NGramTokenFilter\"\n | \"#Microsoft.Azure.Search.NGramTokenFilterV2\"\n | \"#Microsoft.Azure.Search.PatternCaptureTokenFilter\"\n | \"#Microsoft.Azure.Search.PatternReplaceTokenFilter\"\n | \"#Microsoft.Azure.Search.PhoneticTokenFilter\"\n | \"#Microsoft.Azure.Search.ShingleTokenFilter\"\n | \"#Microsoft.Azure.Search.SnowballTokenFilter\"\n | \"#Microsoft.Azure.Search.StemmerTokenFilter\"\n | \"#Microsoft.Azure.Search.StemmerOverrideTokenFilter\"\n | \"#Microsoft.Azure.Search.StopwordsTokenFilter\"\n | \"#Microsoft.Azure.Search.SynonymTokenFilter\"\n | \"#Microsoft.Azure.Search.TruncateTokenFilter\"\n | \"#Microsoft.Azure.Search.UniqueTokenFilter\"\n | \"#Microsoft.Azure.Search.WordDelimiterTokenFilter\";\n /** The name of the token filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for character filters. */\nexport interface CharFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.MappingCharFilter\"\n | \"#Microsoft.Azure.Search.PatternReplaceCharFilter\";\n /** The name of the char filter. It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. */\n name: string;\n}\n\n/** Base type for normalizers. */\nexport interface LexicalNormalizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CustomNormalizer\";\n /** The name of the normalizer. 
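// The analyzer/tokenizer/filter models above are discriminated unions keyed
// on `odatatype`. A small sketch of narrowing by that discriminator:
function isMappingCharFilter(filter: CharFilter): boolean {
  return filter.odatatype === "#Microsoft.Azure.Search.MappingCharFilter";
}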
It must only contain letters, digits, spaces, dashes or underscores, can only start and end with alphanumeric characters, and is limited to 128 characters. It cannot end in '.microsoft' nor '.lucene', nor be named 'asciifolding', 'standard', 'lowercase', 'uppercase', or 'elision'. */\n name: string;\n}\n\n/** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */\nexport interface Similarity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Azure.Search.ClassicSimilarity\"\n | \"#Microsoft.Azure.Search.BM25Similarity\";\n}\n\n/** Defines parameters for a search index that influence semantic capabilities. */\nexport interface SemanticSearch {\n /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */\n defaultConfigurationName?: string;\n /** The semantic configurations for the index. */\n configurations?: SemanticConfiguration[];\n}\n\n/** Defines a specific configuration to be used in the context of semantic capabilities. */\nexport interface SemanticConfiguration {\n /** The name of the semantic configuration. */\n name: string;\n /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. */\n prioritizedFields: SemanticPrioritizedFields;\n /** Specifies the score type to be used for the sort order of the search results. */\n rankingOrder?: RankingOrder;\n /** Determines which semantic or query rewrite models to use during model flighting/upgrades. */\n flightingOptIn?: boolean;\n}\n\n/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */\nexport interface SemanticPrioritizedFields {\n /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */\n titleField?: SemanticField;\n /** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */\n contentFields?: SemanticField[];\n /** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */\n keywordsFields?: SemanticField[];\n}\n\n/** A field that is used as part of the semantic configuration. */\nexport interface SemanticField {\n name: string;\n}\n\n/** Contains configuration options related to vector search. */\nexport interface VectorSearch {\n /** Defines combinations of configurations to use with vector search. */\n profiles?: VectorSearchProfile[];\n /** Contains configuration options specific to the algorithm used during indexing or querying. 
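// Sketch: a SemanticSearch configuration wiring up the prioritized fields
// described above. Field names are hypothetical.
const semanticSearch: SemanticSearch = {
  defaultConfigurationName: "default",
  configurations: [
    {
      name: "default",
      prioritizedFields: {
        titleField: { name: "title" },
        contentFields: [{ name: "description" }],
        keywordsFields: [{ name: "tags" }],
      },
    },
  ],
};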
*/\n algorithms?: VectorSearchAlgorithmConfigurationUnion[];\n /** Contains configuration options on how to vectorize text vector queries. */\n vectorizers?: VectorSearchVectorizerUnion[];\n /** Contains configuration options specific to the compression method used during indexing or querying. */\n compressions?: VectorSearchCompressionUnion[];\n}\n\n/** Defines a combination of configurations to use with vector search. */\nexport interface VectorSearchProfile {\n /** The name to associate with this particular vector search profile. */\n name: string;\n /** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */\n algorithmConfigurationName: string;\n /** The name of the vectorization being configured for use with vector search. */\n vectorizerName?: string;\n /** The name of the compression method configuration that specifies the compression method and optional parameters. */\n compressionName?: string;\n}\n\n/** Contains configuration options specific to the algorithm used during indexing or querying. */\nexport interface VectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"hnsw\" | \"exhaustiveKnn\";\n /** The name to associate with this particular configuration. */\n name: string;\n}\n\n/** Specifies the vectorization method to be used during query time. */\nexport interface VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\" | \"customWebApi\" | \"aiServicesVision\" | \"aml\";\n /** The name to associate with this particular vectorization method. */\n vectorizerName: string;\n}\n\n/** Contains configuration options specific to the compression method used during indexing or querying. */\nexport interface VectorSearchCompression {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"scalarQuantization\" | \"binaryQuantization\";\n /** The name to associate with this particular configuration. */\n compressionName: string;\n /** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */\n rerankWithOriginalVectors?: boolean;\n /** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */\n defaultOversampling?: number;\n /** Contains the options for rescoring. */\n rescoringOptions?: RescoringOptions;\n /** The number of dimensions to truncate the vectors to. Truncating the vectors reduces the size of the vectors and the amount of data that needs to be transferred during search. This can save storage cost and improve search performance at the expense of recall. It should be only used for embeddings trained with Matryoshka Representation Learning (MRL) such as OpenAI text-embedding-3-large (small). The default value is null, which means no truncation. */\n truncationDimension?: number;\n}\n\n/** Contains the options for rescoring. 
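// Sketch: a VectorSearch configuration pairing an HNSW algorithm with a
// profile. The `parameters` property on the hnsw configuration is assumed
// from the HnswParameters model defined later in this file; names are
// hypothetical.
const vectorSearch: VectorSearch = {
  algorithms: [
    {
      kind: "hnsw",
      name: "hnsw-1",
      // Assumed property; tune m/efConstruction/efSearch per the HNSW docs below.
      parameters: { m: 4, efConstruction: 400, efSearch: 500, metric: "cosine" },
    },
  ],
  profiles: [{ name: "my-profile", algorithmConfigurationName: "hnsw-1" }],
};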
*/\nexport interface RescoringOptions {\n /** If set to true, after the initial search on the compressed vectors, the similarity scores are recalculated using the full-precision vectors. This will improve recall at the expense of latency. */\n enableRescoring?: boolean;\n /** Default oversampling factor. Oversampling retrieves a greater set of potential documents to offset the resolution loss due to quantization. This increases the set of results that will be rescored on full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when 'enableRescoring' is true. Higher values improve recall at the expense of latency. */\n defaultOversampling?: number;\n /** Controls the storage method for original vectors. This setting is immutable. */\n rescoreStorageMethod?: VectorSearchCompressionRescoreStorageMethod;\n}\n\n/** Response from a List Indexes request. If successful, it includes the full definitions of all indexes. */\nexport interface ListIndexesResult {\n /**\n * The indexes in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly indexes: SearchIndex[];\n}\n\n/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */\nexport interface GetIndexStatisticsResult {\n /**\n * The number of documents in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly storageSize: number;\n /**\n * The amount of memory in bytes consumed by vectors in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly vectorIndexSize: number;\n}\n\n/** Specifies some text and analysis components used to break that text into tokens. */\nexport interface AnalyzeRequest {\n /** The text to break into tokens. */\n text: string;\n /** The name of the analyzer to use to break the given text. If this parameter is not specified, you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownAnalyzerNames is an enum containing known values. */\n analyzer?: string;\n /** The name of the tokenizer to use to break the given text. If this parameter is not specified, you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually exclusive. KnownTokenizerNames is an enum containing known values. */\n tokenizer?: string;\n /** The name of the normalizer to use to normalize the given text. */\n normalizer?: LexicalNormalizerName;\n /** An optional list of token filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */\n tokenFilters?: string[];\n /** An optional list of character filters to use when breaking the given text. This parameter can only be set when using the tokenizer parameter. */\n charFilters?: string[];\n}\n\n/** The result of testing an analyzer on text. */\nexport interface AnalyzeResult {\n /** The list of tokens returned by the analyzer specified in the request. */\n tokens: AnalyzedTokenInfo[];\n}\n\n/** Information about a token returned by an analyzer. */\nexport interface AnalyzedTokenInfo {\n /**\n * The token returned by the analyzer.\n * NOTE: This property will not be serialized. 
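// Sketch: an AnalyzeRequest using a named analyzer. `analyzer` and
// `tokenizer` are mutually exclusive; "standard.lucene" is one of the
// KnownAnalyzerNames values.
const analyzeRequest: AnalyzeRequest = {
  text: "The quick brown fox",
  analyzer: "standard.lucene",
};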
It can only be populated by the server.\n */\n readonly token: string;\n /**\n * The index of the first character of the token in the input text.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly startOffset: number;\n /**\n * The index of the last character of the token in the input text.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly endOffset: number;\n /**\n * The position of the token in the input text relative to other tokens. The first token in the input text has position 0, the next has position 1, and so on. Depending on the analyzer used, some tokens might have the same position, for example if they are synonyms of each other.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly position: number;\n}\n\n/** Represents an index alias, which describes a mapping from the alias name to an index. The alias name can be used in place of the index name for supported operations. */\nexport interface SearchAlias {\n /** The name of the alias. */\n name: string;\n /** The name of the index this alias maps to. Only one index name may be specified. */\n indexes: string[];\n /** The ETag of the alias. */\n etag?: string;\n}\n\n/** Response from a List Aliases request. If successful, it includes the associated index mappings for all aliases. */\nexport interface ListAliasesResult {\n /**\n * The aliases in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly aliases: SearchAlias[];\n}\n\n/** Response from a get service statistics request. If successful, it includes service level counters, indexer runtime information, and limits. */\nexport interface ServiceStatistics {\n /** Service level resource counters. */\n counters: ServiceCounters;\n /** Service level indexers runtime information. */\n indexersRuntime: ServiceIndexersRuntime;\n /** Service level general limits. */\n limits: ServiceLimits;\n}\n\n/** Represents service-level resource counters and quotas. */\nexport interface ServiceCounters {\n /** Total number of aliases. */\n aliasCounter: ResourceCounter;\n /** Total number of documents across all indexes in the service. */\n documentCounter: ResourceCounter;\n /** Total number of indexes. */\n indexCounter: ResourceCounter;\n /** Total number of indexers. */\n indexerCounter: ResourceCounter;\n /** Total number of data sources. */\n dataSourceCounter: ResourceCounter;\n /** Total size of used storage in bytes. */\n storageSizeCounter: ResourceCounter;\n /** Total number of synonym maps. */\n synonymMapCounter: ResourceCounter;\n /** Total number of skillsets. */\n skillsetCounter: ResourceCounter;\n /** Total memory consumption of all vector indexes within the service, in bytes. */\n vectorIndexSizeCounter: ResourceCounter;\n}\n\n/** Represents a resource's usage and quota. */\nexport interface ResourceCounter {\n /** The resource usage amount. */\n usage: number;\n /** The resource amount quota. */\n quota?: number;\n}\n\n/** Represents service level indexers runtime information. */\nexport interface ServiceIndexersRuntime {\n /** Cumulative runtime of all indexers in the service from the beginningTime to endingTime, in seconds. */\n usedSeconds: number;\n /** Cumulative runtime remaining for all indexers in the service from the beginningTime to endingTime, in seconds. 
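// Small helper sketch over the ResourceCounter model above: fraction of
// quota consumed, treating an absent (or zero) quota as unlimited.
function quotaUtilization(counter: ResourceCounter): number | undefined {
  return counter.quota ? counter.usage / counter.quota : undefined;
}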
*/\n remainingSeconds?: number;\n /** Beginning UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */\n beginningTime: Date;\n /** End UTC time of the 24-hour period considered for indexer runtime usage (inclusive). */\n endingTime: Date;\n}\n\n/** Represents various service level limits. */\nexport interface ServiceLimits {\n /** The maximum allowed fields per index. */\n maxFieldsPerIndex?: number;\n /** The maximum depth which you can nest sub-fields in an index, including the top-level complex field. For example, a/b/c has a nesting depth of 3. */\n maxFieldNestingDepthPerIndex?: number;\n /** The maximum number of fields of type Collection(Edm.ComplexType) allowed in an index. */\n maxComplexCollectionFieldsPerIndex?: number;\n /** The maximum number of objects in complex collections allowed per document. */\n maxComplexObjectsInCollectionsPerDocument?: number;\n /** The maximum amount of storage in bytes allowed per index. */\n maxStoragePerIndexInBytes?: number;\n /** The maximum cumulative runtime in seconds allowed for all indexers in the service over the current UTC period. */\n maxCumulativeIndexerRuntimeSeconds?: number;\n}\n\n/** Response from a request to retrieve stats summary of all indexes. If successful, it includes the stats of each index in the service. */\nexport interface ListIndexStatsSummary {\n /**\n * The Statistics summary of all indexes in the Search service.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly indexesStatistics: IndexStatisticsSummary[];\n}\n\n/** Statistics for a given index. Statistics are collected periodically and are not guaranteed to always be up-to-date. */\nexport interface IndexStatisticsSummary {\n /** The name of the index. */\n name: string;\n /**\n * The number of documents in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly documentCount: number;\n /**\n * The amount of storage in bytes consumed by the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly storageSize: number;\n /**\n * The amount of memory in bytes consumed by vectors in the index.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly vectorIndexSize: number;\n}\n\n/** Specifies the parameters for connecting to the Azure OpenAI resource. */\nexport interface AzureOpenAIParameters {\n /** The resource URI of the Azure OpenAI resource. */\n resourceUrl?: string;\n /** ID of the Azure OpenAI model deployment on the designated resource. */\n deploymentId?: string;\n /** API key of the designated Azure OpenAI resource. */\n apiKey?: string;\n /** The user-assigned managed identity used for outbound connections. */\n authIdentity?: SearchIndexerDataIdentityUnion;\n /** The name of the embedding model that is deployed at the provided deploymentId path. */\n modelName?: AzureOpenAIModelName;\n}\n\n/** Parameters for search index knowledge source. */\nexport interface SearchIndexKnowledgeSourceParameters {\n /** The name of the Search index. */\n searchIndexName: string;\n /** Used to request additional fields for referenced source data. */\n sourceDataFields?: SearchIndexKnowledgeSourceParametersSourceDataFieldsItem[];\n /** Used to restrict which fields to search on the search index. 
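// Sketch: AzureOpenAIParameters relying on the search service's managed
// identity rather than an apiKey. The resource URL and deployment name are
// hypothetical; "text-embedding-3-large" is one of the documented
// AzureOpenAIModelName values.
const aoaiParams: AzureOpenAIParameters = {
  resourceUrl: "https://my-openai.openai.azure.com",
  deploymentId: "text-embedding-3-large",
  modelName: "text-embedding-3-large",
};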
*/\n searchFields?: SearchIndexKnowledgeSourceParametersSearchFieldsItem[];\n /** Used to specify a different semantic configuration on the target search index other than the default one. */\n semanticConfigurationName?: string;\n}\n\nexport interface SearchIndexKnowledgeSourceParametersSourceDataFieldsItem {\n name: string;\n}\n\nexport interface SearchIndexKnowledgeSourceParametersSearchFieldsItem {\n name: string;\n}\n\n/** Parameters for Azure Blob Storage knowledge source. */\nexport interface AzureBlobKnowledgeSourceParameters {\n /** Key-based connection string or the ResourceId format if using a managed identity. */\n connectionString: string;\n /** The name of the blob storage container. */\n containerName: string;\n /** Optional folder path within the container. */\n folderPath?: string;\n /** Set to true if connecting to an ADLS Gen2 storage account. Default is false. */\n isAdlsGen2?: boolean;\n /** Consolidates all general ingestion settings. */\n ingestionParameters?: KnowledgeSourceIngestionParameters;\n /**\n * Resources created by the knowledge source.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly createdResources?: { [propertyName: string]: string };\n}\n\n/** Consolidates all general ingestion settings for knowledge sources. */\nexport interface KnowledgeSourceIngestionParameters {\n /** An explicit identity to use for this knowledge source. */\n identity?: SearchIndexerDataIdentityUnion;\n /** Optional vectorizer configuration for vectorizing content. */\n embeddingModel?: KnowledgeSourceVectorizerUnion;\n /** Optional chat completion model for image verbalization or context extraction. */\n chatCompletionModel?: KnowledgeBaseModelUnion;\n /** Indicates whether image verbalization should be disabled. Default is false. */\n disableImageVerbalization?: boolean;\n /** Optional schedule for data ingestion. */\n ingestionSchedule?: IndexingSchedule;\n /** Optional list of permission types to ingest together with document content. If specified, it will set the indexer permission options for the data source. */\n ingestionPermissionOptions?: KnowledgeSourceIngestionPermissionOption[];\n /** Optional content extraction mode. Default is 'minimal'. */\n contentExtractionMode?: KnowledgeSourceContentExtractionMode;\n /** Optional AI Services configuration for content processing. */\n aiServices?: AIServices;\n}\n\n/** Specifies the vectorization method to be used for knowledge source embedding model, with optional name. */\nexport interface KnowledgeSourceVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n}\n\n/** AI Services configuration used for content processing. */\nexport interface AIServices {\n /** The URI of the AI Services endpoint. */\n uri: string;\n /** The API key for accessing AI Services. */\n apiKey?: string;\n}\n\n/** Parameters for SharePoint knowledge source. */\nexport interface IndexedSharePointKnowledgeSourceParameters {\n /** SharePoint connection string with format: SharePointOnlineEndpoint=[SharePoint site url];ApplicationId=[Azure AD App ID];ApplicationSecret=[Azure AD App client secret];TenantId=[SharePoint site tenant id] */\n connectionString: string;\n /** Specifies which SharePoint libraries to access. */\n containerName: IndexedSharePointContainerName;\n /** Optional query to filter SharePoint content. */\n query?: string;\n /** Consolidates all general ingestion settings. 
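// Sketch: AzureBlobKnowledgeSourceParameters for a blob container. The
// container/folder values and the connection-string placeholder are
// hypothetical; `createdResources` is server-populated and therefore omitted.
const blobParameters: AzureBlobKnowledgeSourceParameters = {
  connectionString: "<connection-string-or-ResourceId>",
  containerName: "docs",
  folderPath: "reports/2025",
  ingestionParameters: {
    // Skip image verbalization during ingestion.
    disableImageVerbalization: true,
  },
};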
*/\n ingestionParameters?: KnowledgeSourceIngestionParameters;\n /**\n * Resources created by the knowledge source.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly createdResources?: { [propertyName: string]: string };\n}\n\n/** Parameters for OneLake knowledge source. */\nexport interface IndexedOneLakeKnowledgeSourceParameters {\n /** OneLake workspace ID. */\n fabricWorkspaceId: string;\n /** Specifies which OneLake lakehouse to access. */\n lakehouseId: string;\n /** Optional OneLakehouse folder or shortcut to filter OneLake content. */\n targetPath?: string;\n /** Consolidates all general ingestion settings. */\n ingestionParameters?: KnowledgeSourceIngestionParameters;\n /**\n * Resources created by the knowledge source.\n * NOTE: This property will not be serialized. It can only be populated by the server.\n */\n readonly createdResources?: { [propertyName: string]: string };\n}\n\n/** Parameters for web knowledge source. */\nexport interface WebKnowledgeSourceParameters {\n /** Domain allow/block configuration for web results. */\n domains?: WebKnowledgeSourceDomains;\n}\n\n/** Domain allow/block configuration for web knowledge source. */\nexport interface WebKnowledgeSourceDomains {\n /** Domains that are allowed for web results */\n allowedDomains?: WebKnowledgeSourceDomain[];\n /** Domains that are blocked from web results */\n blockedDomains?: WebKnowledgeSourceDomain[];\n}\n\n/** Configuration for web knowledge source domain. */\nexport interface WebKnowledgeSourceDomain {\n /** The address of the domain. */\n address: string;\n /** Whether or not to include subpages from this domain. */\n includeSubpages?: boolean;\n}\n\n/** Parameters for remote SharePoint knowledge source. */\nexport interface RemoteSharePointKnowledgeSourceParameters {\n /** Keyword Query Language (KQL) expression with queryable SharePoint properties and attributes to scope the retrieval before the query runs. See documentation: https://learn.microsoft.com/en-us/sharepoint/dev/general-development/keyword-query-language-kql-syntax-reference */\n filterExpression?: string;\n /** A list of metadata fields to be returned for each item in the response. Only retrievable metadata properties can be included in this list. By default, no metadata is returned. Optional. */\n resourceMetadata?: string[];\n /** Container ID for SharePoint Embedded connection. When this is null, it will use SharePoint Online. */\n containerTypeId?: string;\n}\n\n/** Contains the parameters specific to the HNSW algorithm. */\nexport interface HnswParameters {\n /** The number of bi-directional links created for every new element during construction. Increasing this parameter value may improve recall and reduce retrieval times for datasets with high intrinsic dimensionality at the expense of increased memory consumption and longer indexing time. */\n m?: number;\n /** The size of the dynamic list containing the nearest neighbors, which is used during index time. Increasing this parameter may improve index quality, at the expense of increased indexing time. At a certain point, increasing this parameter leads to diminishing returns. */\n efConstruction?: number;\n /** The size of the dynamic list containing the nearest neighbors, which is used during search time. Increasing this parameter may improve search results, at the expense of slower search. At a certain point, increasing this parameter leads to diminishing returns. 
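// Sketch: a domain allow/block configuration for the web knowledge source
// defined above. The domain addresses are examples only.
const webDomains: WebKnowledgeSourceDomains = {
  allowedDomains: [{ address: "learn.microsoft.com", includeSubpages: true }],
  blockedDomains: [{ address: "example.com" }],
};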
*/\n efSearch?: number;\n /** The similarity metric to use for vector comparisons. */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/** Contains the parameters specific to exhaustive KNN algorithm. */\nexport interface ExhaustiveKnnParameters {\n /** The similarity metric to use for vector comparisons. */\n metric?: VectorSearchAlgorithmMetric;\n}\n\n/** Contains the parameters specific to Scalar Quantization. */\nexport interface ScalarQuantizationParameters {\n /** The quantized data type of compressed vector values. */\n quantizedDataType?: VectorSearchCompressionTarget;\n}\n\n/** Specifies the properties for connecting to a user-defined vectorizer. */\nexport interface WebApiParameters {\n /** The URI of the Web API providing the vectorizer. */\n uri?: string;\n /** The headers required to make the HTTP request. */\n httpHeaders?: { [propertyName: string]: string };\n /** The method for the HTTP request. */\n httpMethod?: string;\n /** The desired timeout for the request. Default is 30 seconds. */\n timeout?: string;\n /** Applies to custom endpoints that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the vectorization connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */\n authResourceId?: string;\n /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n authIdentity?: SearchIndexerDataIdentityUnion;\n}\n\n/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */\nexport interface AIServicesVisionParameters {\n /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */\n modelVersion: string | null;\n /** The resource URI of the AI Services resource. */\n resourceUri: string;\n /** API key of the designated AI Services resource. */\n apiKey?: string;\n /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the index, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n authIdentity?: SearchIndexerDataIdentityUnion;\n}\n\n/** Specifies the properties for connecting to an AML vectorizer. */\nexport interface AMLParameters {\n /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */\n scoringUri: string | null;\n /** (Required for key authentication) The key for the AML service. */\n authenticationKey?: string;\n /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. 
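// Sketch: WebApiParameters for a custom vectorizer endpoint. The URI and
// header are hypothetical; `timeout` is assumed to be an ISO 8601 duration
// string, defaulting to 30 seconds.
const customVectorizerParams: WebApiParameters = {
  uri: "https://example.azurewebsites.net/api/vectorize",
  httpMethod: "POST",
  timeout: "PT30S",
  httpHeaders: { "api-key": "<key>" },
};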
*/\n resourceId?: string;\n /** (Optional) When specified, indicates the timeout for the http client making the API call. */\n timeout?: string;\n /** (Optional for token authentication). The region the AML service is deployed in. */\n region?: string;\n /** The name of the embedding model from the Azure AI Foundry Catalog that is deployed at the provided endpoint. */\n modelName?: AIFoundryModelCatalogName;\n}\n\n/** Provides parameter values to a distance scoring function. */\nexport interface DistanceScoringParameters {\n /** The name of the parameter passed in search queries to specify the reference location. */\n referencePointParameter: string;\n /** The distance in kilometers from the reference location where the boosting range ends. */\n boostingDistance: number;\n}\n\n/** Provides parameter values to a freshness scoring function. */\nexport interface FreshnessScoringParameters {\n /** The expiration period after which boosting will stop for a particular document. */\n boostingDuration: string;\n}\n\n/** Provides parameter values to a magnitude scoring function. */\nexport interface MagnitudeScoringParameters {\n /** The field value at which boosting starts. */\n boostingRangeStart: number;\n /** The field value at which boosting ends. */\n boostingRangeEnd: number;\n /** A value indicating whether to apply a constant boost for field values beyond the range end value; default is false. */\n shouldBoostBeyondRangeByConstant?: boolean;\n}\n\n/** Provides parameter values to a tag scoring function. */\nexport interface TagScoringParameters {\n /** The name of the parameter passed in search queries to specify the list of tags to compare against the target field. */\n tagsParameter: string;\n}\n\n/** An object that contains information about the matches that were found, and related metadata. */\nexport interface CustomEntity {\n /** The top-level entity descriptor. Matches in the skill output will be grouped by this name, and it should represent the \"normalized\" form of the text being found. */\n name: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n description?: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n type?: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n subtype?: string;\n /** This field can be used as a passthrough for custom metadata about the matched text(s). The value of this field will appear with every match of its entity in the skill output. */\n id?: string;\n /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to character casing. Sample case insensitive matches of \"Microsoft\" could be: microsoft, microSoft, MICROSOFT. */\n caseSensitive?: boolean;\n /** Defaults to false. Boolean value denoting whether comparisons with the entity name should be sensitive to accent. */\n accentSensitive?: boolean;\n /** Defaults to 0. Maximum value of 5. Denotes the acceptable number of divergent characters that would still constitute a match with the entity name. The smallest possible fuzziness for any given match is returned. 
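// Sketch: a CustomEntity using the matching controls documented above. The
// `aliases` property and the CustomEntityAlias model are defined just below.
const entity: CustomEntity = {
  name: "Microsoft",
  description: "Software company",
  caseSensitive: false,
  fuzzyEditDistance: 1,
  aliases: [{ text: "MSFT", caseSensitive: true }],
};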
For instance, if the edit distance is set to 3, \"Windows10\" would still match \"Windows\", \"Windows10\" and \"Windows 7\". When case sensitivity is set to false, case differences do NOT count towards fuzziness tolerance, but otherwise do. */\n fuzzyEditDistance?: number;\n /** Changes the default case sensitivity value for this entity. It can be used to change the default value of all aliases caseSensitive values. */\n defaultCaseSensitive?: boolean;\n /** Changes the default accent sensitivity value for this entity. It can be used to change the default value of all aliases accentSensitive values. */\n defaultAccentSensitive?: boolean;\n /** Changes the default fuzzy edit distance value for this entity. It can be used to change the default value of all aliases fuzzyEditDistance values. */\n defaultFuzzyEditDistance?: number;\n /** An array of complex objects that can be used to specify alternative spellings or synonyms to the root entity name. */\n aliases?: CustomEntityAlias[];\n}\n\n/** A complex object that can be used to specify alternative spellings or synonyms to the root entity name. */\nexport interface CustomEntityAlias {\n /** The text of the alias. */\n text: string;\n /** Determine if the alias is case sensitive. */\n caseSensitive?: boolean;\n /** Determine if the alias is accent sensitive. */\n accentSensitive?: boolean;\n /** Determine the fuzzy edit distance of the alias. */\n fuzzyEditDistance?: number;\n}\n\nexport interface AzureOpenAITokenizerParameters {\n /** Only applies if the unit is set to azureOpenAITokens. Options include 'R50k_base', 'P50k_base', 'P50k_edit' and 'CL100k_base'. The default value is 'CL100k_base'. */\n encoderModelName?: SplitSkillEncoderModelName;\n /** (Optional) Only applies if the unit is set to azureOpenAITokens. This parameter defines a collection of special tokens that are permitted within the tokenization process. */\n allowedSpecialTokens?: string[];\n}\n\n/** Controls the cardinality for chunking the content. */\nexport interface DocumentIntelligenceLayoutSkillChunkingProperties {\n /** The unit of the chunk. */\n unit?: DocumentIntelligenceLayoutSkillChunkingUnit;\n /** The maximum chunk length in characters. Default is 500. */\n maximumLength?: number;\n /** The length of overlap provided between two text chunks. Default is 0. */\n overlapLength?: number;\n}\n\n/** Common language model parameters for Chat Completions. If omitted, default values are used. */\nexport interface CommonModelParameters {\n /** The name of the model to use (e.g., 'gpt-4o', etc.). Default is null if not specified. */\n model?: string;\n /** A float in the range [-2,2] that reduces or increases likelihood of repeated tokens. Default is 0. */\n frequencyPenalty?: number;\n /** A float in the range [-2,2] that penalizes new tokens based on their existing presence. Default is 0. */\n presencePenalty?: number;\n /** Maximum number of tokens to generate. */\n maxTokens?: number;\n /** Sampling temperature. Default is 0.7. */\n temperature?: number;\n /** Random seed for controlling deterministic outputs. If omitted, randomization is used. */\n seed?: number;\n /** List of stop sequences that will cut off text generation. Default is none. */\n stop?: string[];\n}\n\n/** Determines how the language model's response should be serialized. Defaults to 'text'. */\nexport interface ChatCompletionResponseFormat {\n /** Specifies how the LLM should format the response. 
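// Sketch: CommonModelParameters with conservative sampling settings. The
// model name 'gpt-4o' is taken from the doc comment above; all values are
// illustrative.
const modelParams: CommonModelParameters = {
  model: "gpt-4o",
  temperature: 0.2,
  maxTokens: 512,
  seed: 42,
};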
Possible values: 'text' (plain string), 'json_object' (arbitrary JSON), or 'json_schema' (adheres to provided schema). */\n type?: ChatCompletionResponseFormatType;\n /** An open dictionary for extended properties. Required if 'type' == 'json_schema' */\n chatCompletionSchemaProperties?: ChatCompletionResponseFormatJsonSchemaProperties;\n}\n\n/** An open dictionary for extended properties. Required if 'type' == 'json_schema' */\nexport interface ChatCompletionResponseFormatJsonSchemaProperties {\n /** Name of the json schema the model will adhere to */\n name?: string;\n /** Description of the json schema the model will adhere to. */\n description?: string;\n /** Whether or not the model's response should use structured outputs. Default is true */\n strict?: boolean;\n /** Object defining the custom schema the model will use to structure its output. */\n schema?: ChatCompletionSchema;\n}\n\n/** Object defining the custom schema the model will use to structure its output. */\nexport interface ChatCompletionSchema {\n /** Type of schema representation. Usually 'object'. Default is 'object'. */\n type?: string;\n /** A JSON-formatted string that defines the output schema's properties and constraints for the model. */\n properties?: string;\n /** An array of the property names that are required to be part of the model's response. All properties must be included for structured outputs. */\n required?: string[];\n /** Controls whether it is allowable for an object to contain additional keys / values that were not defined in the JSON Schema. Default is false. */\n additionalProperties?: boolean;\n}\n\n/** Controls the cardinality for chunking the content. */\nexport interface ContentUnderstandingSkillChunkingProperties {\n /** The unit of the chunk. */\n unit?: ContentUnderstandingSkillChunkingUnit;\n /** The maximum chunk length in characters. Default is 500. */\n maximumLength?: number;\n /** The length of overlap provided between two text chunks. Default is 0. */\n overlapLength?: number;\n}\n\n/** Specifies the Azure OpenAI resource used to do query planning. */\nexport interface KnowledgeBaseAzureOpenAIModel extends KnowledgeBaseModel {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n /** Contains the parameters specific to Azure OpenAI model endpoint. */\n azureOpenAIParameters: AzureOpenAIParameters;\n}\n\n/** Run knowledge retrieval with minimal reasoning effort. */\nexport interface KnowledgeRetrievalMinimalReasoningEffort\n extends KnowledgeRetrievalReasoningEffort {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"minimal\";\n}\n\n/** Run knowledge retrieval with low reasoning effort. */\nexport interface KnowledgeRetrievalLowReasoningEffort\n extends KnowledgeRetrievalReasoningEffort {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"low\";\n}\n\n/** Run knowledge retrieval with medium reasoning effort. */\nexport interface KnowledgeRetrievalMediumReasoningEffort\n extends KnowledgeRetrievalReasoningEffort {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"medium\";\n}\n\n/** Clears the identity property of a datasource. 
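// Sketch: requesting structured output via the 'json_schema' response
// format documented above. The schema name and properties are hypothetical;
// note that `properties` is a JSON-formatted string, not a nested object.
const responseFormat: ChatCompletionResponseFormat = {
  type: "json_schema",
  chatCompletionSchemaProperties: {
    name: "extracted_fields",
    strict: true,
    schema: {
      type: "object",
      properties: '{"title": {"type": "string"}}',
      required: ["title"],
      additionalProperties: false,
    },
  },
};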
*/\nexport interface SearchIndexerDataNoneIdentity\n extends SearchIndexerDataIdentity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DataNoneIdentity\";\n}\n\n/** Specifies the identity for a datasource to use. */\nexport interface SearchIndexerDataUserAssignedIdentity\n extends SearchIndexerDataIdentity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DataUserAssignedIdentity\";\n /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form \"/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId\" that should have been assigned to the search service. */\n resourceId: string;\n}\n\n/** Knowledge Source targeting a search index. */\nexport interface SearchIndexKnowledgeSource extends KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"searchIndex\";\n /** The parameters for the knowledge source. */\n searchIndexParameters: SearchIndexKnowledgeSourceParameters;\n}\n\n/** Configuration for Azure Blob Storage knowledge source. */\nexport interface AzureBlobKnowledgeSource extends KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureBlob\";\n /** The parameters for the Azure Blob knowledge source. */\n azureBlobParameters: AzureBlobKnowledgeSourceParameters;\n}\n\n/** Configuration for SharePoint knowledge source. */\nexport interface IndexedSharePointKnowledgeSource extends KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"indexedSharePoint\";\n /** The parameters for the SharePoint knowledge source. */\n indexedSharePointParameters: IndexedSharePointKnowledgeSourceParameters;\n}\n\n/** Configuration for OneLake knowledge source. */\nexport interface IndexedOneLakeKnowledgeSource extends KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"indexedOneLake\";\n /** The parameters for the OneLake knowledge source. */\n indexedOneLakeParameters: IndexedOneLakeKnowledgeSourceParameters;\n}\n\n/** Knowledge Source targeting web results. */\nexport interface WebKnowledgeSource extends KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"web\";\n /** The parameters for the web knowledge source. */\n webParameters?: WebKnowledgeSourceParameters;\n}\n\n/** Configuration for remote SharePoint knowledge source. */\nexport interface RemoteSharePointKnowledgeSource extends KnowledgeSource {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"remoteSharePoint\";\n /** The parameters for the knowledge source. */\n remoteSharePointParameters: RemoteSharePointKnowledgeSourceParameters;\n}\n\n/** Defines a data change detection policy that captures changes based on the value of a high water mark column. */\nexport interface HighWaterMarkChangeDetectionPolicy\n extends DataChangeDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy\";\n /** The name of the high water mark column. 
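// Sketch: a knowledge source of kind "searchIndex" targeting an existing
// index. The `name` property is assumed from the KnowledgeSource base model
// defined elsewhere in this file; the index/source names are hypothetical.
const knowledgeSource: SearchIndexKnowledgeSource = {
  name: "hotels-source",
  kind: "searchIndex",
  searchIndexParameters: { searchIndexName: "hotels" },
};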
*/\n highWaterMarkColumnName: string;\n}\n\n/** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */\nexport interface SqlIntegratedChangeTrackingPolicy\n extends DataChangeDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy\";\n}\n\n/** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. */\nexport interface SoftDeleteColumnDeletionDetectionPolicy\n extends DataDeletionDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy\";\n /** The name of the column to use for soft-deletion detection. */\n softDeleteColumnName?: string;\n /** The marker value that identifies an item as deleted. */\n softDeleteMarkerValue?: string;\n}\n\n/** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */\nexport interface NativeBlobSoftDeleteDeletionDetectionPolicy\n extends DataDeletionDetectionPolicy {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy\";\n}\n\n/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */\nexport interface ConditionalSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.ConditionalSkill\";\n}\n\n/** A skill that uses text analytics for key phrase extraction. */\nexport interface KeyPhraseExtractionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.KeyPhraseExtractionSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: KeyPhraseExtractionSkillLanguage;\n /** A number indicating how many key phrases to return. If absent, all identified key phrases will be returned. */\n maxKeyPhraseCount?: number;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** A skill that extracts text from image files. */\nexport interface OcrSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.OcrSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: OcrSkillLanguage;\n /** A value indicating to turn orientation detection on or not. Default is false. */\n shouldDetectOrientation?: boolean;\n /** Defines the sequence of characters to use between the lines of text recognized by the OCR skill. The default value is \"space\". */\n lineEnding?: OcrLineEnding;\n}\n\n/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. 
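// Sketch: a soft-delete deletion detection policy keyed on a hypothetical
// "isDeleted" column whose marker value "true" flags deleted rows.
const deletionPolicy: SoftDeleteColumnDeletionDetectionPolicy = {
  odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
  softDeleteColumnName: "isDeleted",
  softDeleteMarkerValue: "true",
};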
*/\nexport interface ImageAnalysisSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.ImageAnalysisSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: ImageAnalysisSkillLanguage;\n /** A list of visual features. */\n visualFeatures?: VisualFeature[];\n /** A string indicating which domain-specific details to return. */\n details?: ImageDetail[];\n}\n\n/** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. */\nexport interface LanguageDetectionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.LanguageDetectionSkill\";\n /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. */\n defaultCountryHint?: string;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */\nexport interface ShaperSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.ShaperSkill\";\n}\n\n/** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */\nexport interface MergeSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.MergeSkill\";\n /** The tag that indicates the start of the merged text. By default, the tag is an empty space. */\n insertPreTag?: string;\n /** The tag that indicates the end of the merged text. By default, the tag is an empty space. */\n insertPostTag?: string;\n}\n\n/**\n * This skill is deprecated. Use the V3.EntityRecognitionSkill instead.\n *\n * @deprecated\n */\nexport interface EntityRecognitionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.EntityRecognitionSkill\";\n /** A list of entity categories that should be extracted. */\n categories?: EntityCategory[];\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: EntityRecognitionSkillLanguage;\n /** Determines whether or not to include entities which are well known but don't conform to a pre-defined type. If this configuration is not set (default), set to null or set to false, entities which don't conform to one of the pre-defined types will not be surfaced. */\n includeTypelessEntities?: boolean;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n}\n\n/**\n * This skill is deprecated. 
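As a sketch, the classic OCR-merge pattern pairs the OcrSkill above with a MergeSkill. The `inputs`/`outputs` arrays are assumed from the SearchIndexerSkill base type declared earlier in this file, and the annotation paths are illustrative:

import type { MergeSkill } from "@azure/search-documents";

// Re-insert OCR'd image text into the main content at the recorded offsets.
const merge: MergeSkill = {
  odatatype: "#Microsoft.Skills.Text.MergeSkill",
  insertPreTag: " ",
  insertPostTag: " ",
  // inputs/outputs are inherited from SearchIndexerSkill.
  inputs: [
    { name: "text", source: "/document/content" },
    { name: "itemsToInsert", source: "/document/normalized_images/*/text" },
    { name: "offsets", source: "/document/normalized_images/*/contentOffset" },
  ],
  outputs: [{ name: "mergedText", targetName: "merged_text" }],
};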
Use the V3.SentimentSkill instead.\n *\n * @deprecated\n */\nexport interface SentimentSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.SentimentSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: SentimentSkillLanguage;\n}\n\n/** Using the Text Analytics API, evaluates unstructured text and, for each record, provides sentiment labels (such as \"negative\", \"neutral\", and \"positive\") based on the highest confidence score found by the service at the sentence and document level. */\nexport interface SentimentSkillV3 extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.V3.SentimentSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. */\n includeOpinionMining?: boolean;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** Using the Text Analytics API, extracts linked entities from text. */\nexport interface EntityLinkingSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.V3.EntityLinkingSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** Using the Text Analytics API, extracts entities of different types from text. */\nexport interface EntityRecognitionSkillV3 extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.V3.EntityRecognitionSkill\";\n /** A list of entity categories that should be extracted. */\n categories?: string[];\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n}\n\n/** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. 
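A minimal sketch of the V3 entity recognition skill that the deprecated variant above points to, filtering low-confidence entities; the inputs/outputs shape is assumed from the SearchIndexerSkill base type:

import type { EntityRecognitionSkillV3 } from "@azure/search-documents";

const entities: EntityRecognitionSkillV3 = {
  odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill",
  categories: ["Person", "Organization", "Location"],
  defaultLanguageCode: "en",
  minimumPrecision: 0.7, // only entities scoring above 0.7 are included
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "persons", targetName: "people" }],
};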
*/\nexport interface PIIDetectionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.PIIDetectionSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: string;\n /** A value between 0 and 1 that can be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */\n minimumPrecision?: number;\n /** A parameter that provides various ways to mask the personal information detected in the input text. Default is 'none'. */\n maskingMode?: PIIDetectionSkillMaskingMode;\n /** The character used to mask the text if the maskingMode parameter is set to replace. Default is '*'. */\n maskingCharacter?: string;\n /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */\n modelVersion?: string;\n /** A list of PII entity categories that should be extracted and masked. */\n categories?: string[];\n /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */\n domain?: string;\n}\n\n/** A skill to split a string into chunks of text. */\nexport interface SplitSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.SplitSkill\";\n /** A value indicating which language code to use. Default is `en`. */\n defaultLanguageCode?: SplitSkillLanguage;\n /** A value indicating which split mode to perform. */\n textSplitMode?: TextSplitMode;\n /** The desired maximum page length. Default is 10000. */\n maxPageLength?: number;\n /** Only applicable when textSplitMode is set to 'pages'. If specified, the (n+1)th chunk will start with this number of characters/tokens from the end of the nth chunk. */\n pageOverlapLength?: number;\n /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */\n maximumPagesToTake?: number;\n /** Only applies if textSplitMode is set to 'pages'. There are two possible values, and the chosen value determines the unit in which length (maxPageLength and pageOverlapLength) is measured. The default is 'characters', which means the length will be measured by character. */\n unit?: SplitSkillUnit;\n /** Only applies if the unit is set to azureOpenAITokens. If specified, the splitSkill will use these parameters when performing the tokenization. The parameters are a valid 'encoderModelName' and an optional 'allowedSpecialTokens' property. */\n azureOpenAITokenizerParameters?: AzureOpenAITokenizerParameters;\n}\n\n/** A skill that looks for text from a custom, user-defined list of words and phrases. */\nexport interface CustomEntityLookupSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.CustomEntityLookupSkill\";\n /** A value indicating which language code to use. Default is `en`. 
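A sketch of an overlapping-chunk configuration with the SplitSkill above; note that pageOverlapLength only applies in 'pages' mode, and the base-type inputs/outputs are assumed from SearchIndexerSkill:

import type { SplitSkill } from "@azure/search-documents";

const split: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  textSplitMode: "pages",
  maxPageLength: 2000,
  pageOverlapLength: 500, // each chunk repeats the last 500 units of the previous one
  unit: "characters", // switch to "azureOpenAITokens" to measure in tokens
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};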
*/\n defaultLanguageCode?: CustomEntityLookupSkillLanguage;\n /** Path to a JSON or CSV file containing all the target text to match against. This entity definition is read at the beginning of an indexer run. Any updates to this file during an indexer run will not take effect until subsequent runs. This config must be accessible over HTTPS. */\n entitiesDefinitionUri?: string;\n /** The inline CustomEntity definition. */\n inlineEntitiesDefinition?: CustomEntity[];\n /** A global flag for CaseSensitive. If CaseSensitive is not set in CustomEntity, this value will be the default value. */\n globalDefaultCaseSensitive?: boolean;\n /** A global flag for AccentSensitive. If AccentSensitive is not set in CustomEntity, this value will be the default value. */\n globalDefaultAccentSensitive?: boolean;\n /** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */\n globalDefaultFuzzyEditDistance?: number;\n}\n\n/** A skill to translate text from one language to another. */\nexport interface TextTranslationSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.TranslationSkill\";\n /** The language code to translate documents into for documents that don't specify the 'to' language explicitly. */\n defaultToLanguageCode: TextTranslationSkillLanguage;\n /** The language code to translate documents from for documents that don't specify the 'from' language explicitly. */\n defaultFromLanguageCode?: TextTranslationSkillLanguage;\n /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is `en`. */\n suggestedFrom?: TextTranslationSkillLanguage;\n}\n\n/** A skill that extracts content from a file within the enrichment pipeline. */\nexport interface DocumentExtractionSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.DocumentExtractionSkill\";\n /** The parsingMode for the skill. Will be set to 'default' if not defined. */\n parsingMode?: string;\n /** The type of data to be extracted for the skill. Will be set to 'contentAndMetadata' if not defined. */\n dataToExtract?: string;\n /** A dictionary of configurations for the skill. */\n configuration?: { [propertyName: string]: any };\n}\n\n/** A skill that extracts content and layout information, via Azure AI Services, from files within the enrichment pipeline. */\nexport interface DocumentIntelligenceLayoutSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.DocumentIntelligenceLayoutSkill\";\n /** Controls the format of the output. Default is 'markdown'. */\n outputFormat?: DocumentIntelligenceLayoutSkillOutputFormat;\n /** Controls the cardinality of the output produced by the skill. Default is 'oneToMany'. */\n outputMode?: DocumentIntelligenceLayoutSkillOutputMode;\n /** The depth of headers in the markdown output. Default is h6. 
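For instance, a translation skill that normalizes all content to English might look like the following sketch; the language-code literals assume the extensible string enums used throughout this file, and inputs/outputs come from the SearchIndexerSkill base type:

import type { TextTranslationSkill } from "@azure/search-documents";

const translate: TextTranslationSkill = {
  odatatype: "#Microsoft.Skills.Text.TranslationSkill",
  defaultToLanguageCode: "en", // required: target language when none is supplied
  suggestedFrom: "fr", // fallback when automatic language detection fails
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "translatedText", targetName: "content_en" }],
};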
*/\n markdownHeaderDepth?: DocumentIntelligenceLayoutSkillMarkdownHeaderDepth;\n /** Controls which content is extracted from the document by the skill. */\n extractionOptions?: DocumentIntelligenceLayoutSkillExtractionOptions[];\n /** Controls how the extracted content is chunked. */\n chunkingProperties?: DocumentIntelligenceLayoutSkillChunkingProperties;\n}\n\n/** A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call your custom code. */\nexport interface WebApiSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype:\n | \"#Microsoft.Skills.Custom.WebApiSkill\"\n | \"#Microsoft.Skills.Custom.ChatCompletionSkill\";\n /** The URL for the Web API. */\n uri: string;\n /** The headers required to make the http request. */\n httpHeaders?: { [propertyName: string]: string };\n /** The method for the http request. */\n httpMethod?: string;\n /** The desired timeout for the request. Default is 30 seconds. */\n timeout?: string;\n /** The desired batch size which indicates number of documents. */\n batchSize?: number;\n /** If set, the number of parallel calls that can be made to the Web API. */\n degreeOfParallelism?: number;\n /** Applies to custom skills that connect to external code in an Azure function or some other application that provides the transformations. This value should be the application ID created for the function or app when it was registered with Azure Active Directory. When specified, the custom skill connects to the function or app using a managed ID (either system or user-assigned) of the search service and the access token of the function or app, using this value as the resource id for creating the scope of the access token. */\n authResourceId?: string;\n /** The user-assigned managed identity used for outbound connections. If an authResourceId is provided and it's not specified, the system-assigned managed identity is used. On updates to the indexer, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n authIdentity?: SearchIndexerDataIdentityUnion;\n}\n\n/** A skill that leverages Azure AI Content Understanding to process and extract structured insights from documents, enabling enriched, searchable content for enhanced document indexing and retrieval. */\nexport interface ContentUnderstandingSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Util.ContentUnderstandingSkill\";\n /** Controls which content is extracted from the document by the skill. */\n extractionOptions?: ContentUnderstandingSkillExtractionOptions[];\n /** Controls how the extracted content is chunked. */\n chunkingProperties?: ContentUnderstandingSkillChunkingProperties;\n}\n\n/** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */\nexport interface AzureMachineLearningSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Custom.AmlSkill\";\n /** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. 
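A sketch of a custom WebApiSkill calling an Azure Function with managed-identity auth, using only the properties declared above; the endpoint and application ID are placeholders:

import type { WebApiSkill } from "@azure/search-documents";

const custom: WebApiSkill = {
  odatatype: "#Microsoft.Skills.Custom.WebApiSkill",
  uri: "https://my-fn.azurewebsites.net/api/enrich", // placeholder endpoint
  httpMethod: "POST",
  timeout: "PT90S", // ISO 8601 duration; default is 30 seconds
  batchSize: 4,
  degreeOfParallelism: 2,
  // Scope access tokens to the AAD app registered for the function (placeholder).
  authResourceId: "api://00000000-0000-0000-0000-000000000000",
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "labels", targetName: "labels" }],
};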
*/\n scoringUri?: string;\n /** (Required for key authentication) The key for the AML service. */\n authenticationKey?: string;\n /** (Required for token authentication). The Azure Resource Manager resource ID of the AML service. It should be in the format subscriptions/{guid}/resourceGroups/{resource-group-name}/Microsoft.MachineLearningServices/workspaces/{workspace-name}/services/{service_name}. */\n resourceId?: string;\n /** (Optional) When specified, indicates the timeout for the http client making the API call. */\n timeout?: string;\n /** (Optional for token authentication). The region the AML service is deployed in. */\n region?: string;\n /** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */\n degreeOfParallelism?: number;\n}\n\n/** Allows you to generate a vector embedding for a given text input using the Azure OpenAI resource. */\nexport interface AzureOpenAIEmbeddingSkill\n extends SearchIndexerSkill,\n AzureOpenAIParameters {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill\";\n /** The number of dimensions the resulting output embeddings should have. Only supported in text-embedding-3 and later models. */\n dimensions?: number;\n}\n\n/** Allows you to generate a vector embedding for a given image or text input using the Azure AI Services Vision Vectorize API. */\nexport interface VisionVectorizeSkill extends SearchIndexerSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Vision.VectorizeSkill\";\n /** The version of the model to use when calling the AI Services Vision service. It will default to the latest available when not specified. */\n modelVersion: string | null;\n}\n\n/** An empty object that represents the default Azure AI service resource for a skillset. */\nexport interface DefaultCognitiveServicesAccount\n extends CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DefaultCognitiveServices\";\n}\n\n/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */\nexport interface CognitiveServicesAccountKey extends CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CognitiveServicesByKey\";\n /** The key used to provision the Azure AI service resource attached to a skillset. */\n key: string;\n}\n\n/** The account key of an Azure AI service resource that's attached to a skillset, to be used with the resource's subdomain. */\nexport interface AIServicesAccountKey extends CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.AIServicesByKey\";\n /** The key used to provision the Azure AI service resource attached to a skillset. */\n key: string;\n /** The subdomain url for the corresponding AI Service. 
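The embedding skill above mixes in AzureOpenAIParameters, so its connection settings sit alongside the skill's own properties. A sketch, assuming the mixin exposes resourceUrl/deploymentId/modelName (its declaration sits elsewhere in this file):

import type { AzureOpenAIEmbeddingSkill } from "@azure/search-documents";

const embed: AzureOpenAIEmbeddingSkill = {
  odatatype: "#Microsoft.Skills.Text.AzureOpenAIEmbeddingSkill",
  // Assumed AzureOpenAIParameters fields (declared elsewhere in this file):
  resourceUrl: "https://my-openai.openai.azure.com",
  deploymentId: "text-embedding-3-large",
  modelName: "text-embedding-3-large",
  dimensions: 1024, // supported by text-embedding-3 and later models only
  inputs: [{ name: "text", source: "/document/pages/*" }],
  outputs: [{ name: "embedding", targetName: "vector" }],
};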
*/\n subdomainUrl: string;\n}\n\n/** The multi-region account of an Azure AI service resource that's attached to a skillset. */\nexport interface AIServicesAccountIdentity extends CognitiveServicesAccount {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.AIServicesByIdentity\";\n /** The user-assigned managed identity used for connections to AI Service. If not specified, the system-assigned managed identity is used. On updates to the skillset, if the identity is unspecified, the value remains unchanged. If set to \"none\", the value of this property is cleared. */\n identity?: SearchIndexerDataIdentityUnion;\n /** The subdomain url for the corresponding AI Service. */\n subdomainUrl: string;\n}\n\n/** Description for what data to store in Azure Tables. */\nexport interface SearchIndexerKnowledgeStoreTableProjectionSelector\n extends SearchIndexerKnowledgeStoreProjectionSelector {\n /** Name of the Azure table to store projected data in. */\n tableName: string;\n}\n\n/** Abstract class to share properties between concrete selectors. */\nexport interface SearchIndexerKnowledgeStoreBlobProjectionSelector\n extends SearchIndexerKnowledgeStoreProjectionSelector {\n /** Blob container to store projections in. */\n storageContainer: string;\n}\n\n/** Defines a function that boosts scores based on distance from a geographic location. */\nexport interface DistanceScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"distance\";\n /** Parameter values for the distance scoring function. */\n parameters: DistanceScoringParameters;\n}\n\n/** Defines a function that boosts scores based on the value of a date-time field. */\nexport interface FreshnessScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"freshness\";\n /** Parameter values for the freshness scoring function. */\n parameters: FreshnessScoringParameters;\n}\n\n/** Defines a function that boosts scores based on the magnitude of a numeric field. */\nexport interface MagnitudeScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"magnitude\";\n /** Parameter values for the magnitude scoring function. */\n parameters: MagnitudeScoringParameters;\n}\n\n/** Defines a function that boosts scores of documents with string values matching a given list of tags. */\nexport interface TagScoringFunction extends ScoringFunction {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n type: \"tag\";\n /** Parameter values for the tag scoring function. */\n parameters: TagScoringParameters;\n}\n\n/** Allows you to take control over the process of converting text into indexable/searchable tokens. It's a user-defined configuration consisting of a single predefined tokenizer and one or more filters. The tokenizer is responsible for breaking text into tokens, and the filters for modifying tokens emitted by the tokenizer. */\nexport interface CustomAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CustomAnalyzer\";\n /** The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as breaking a sentence into words. 
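Each scoring function above pairs a `type` discriminator with a matching `parameters` object; `fieldName`, `boost`, and `interpolation` are assumed from the ScoringFunction base type defined earlier in this file. A sketch that boosts recently updated documents:

import type { FreshnessScoringFunction } from "@azure/search-documents";

const freshness: FreshnessScoringFunction = {
  type: "freshness",
  fieldName: "lastUpdated", // must reference a date-time field
  boost: 2,
  interpolation: "quadratic",
  parameters: { boostingDuration: "P7D" }, // boost fades over 7 days (ISO 8601)
};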
KnownTokenizerNames is an enum containing known values. */\n tokenizerName: string;\n /** A list of token filters used to filter out or modify the tokens generated by a tokenizer. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */\n tokenFilters?: string[];\n /** A list of character filters used to prepare input text before it is processed by the tokenizer. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */\n charFilters?: string[];\n}\n\n/** Flexibly separates text into terms via a regular expression pattern. This analyzer is implemented using Apache Lucene. */\nexport interface PatternAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternAnalyzer\";\n /** A value indicating whether terms should be lower-cased. Default is true. */\n lowerCaseTerms?: boolean;\n /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */\n pattern?: string;\n /** Regular expression flags. */\n flags?: string;\n /** A list of stopwords. */\n stopwords?: string[];\n}\n\n/** Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. */\nexport interface LuceneStandardAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StandardAnalyzer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n /** A list of stopwords. */\n stopwords?: string[];\n}\n\n/** Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */\nexport interface StopAnalyzer extends LexicalAnalyzer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StopAnalyzer\";\n /** A list of stopwords. */\n stopwords?: string[];\n}\n\n/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */\nexport interface ClassicTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ClassicTokenizer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */\nexport interface EdgeNGramTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.EdgeNGramTokenizer\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n /** Character classes to keep in the tokens. */\n tokenChars?: TokenCharacterKind[];\n}\n\n/** Emits the entire input as a single token. 
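A sketch of a CustomAnalyzer wiring together known tokenizer and filter names; the string values correspond to KnownTokenizerNames and its token/char filter counterparts, and `name` comes from the LexicalAnalyzer base type:

import type { CustomAnalyzer } from "@azure/search-documents";

const analyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "folded_standard", // base LexicalAnalyzer property
  tokenizerName: "standard_v2",
  tokenFilters: ["lowercase", "asciifolding"], // run in listed order
  charFilters: ["html_strip"], // applied before tokenization
};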
This tokenizer is implemented using Apache Lucene. */\nexport interface KeywordTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeywordTokenizer\";\n /** The read buffer size in bytes. Default is 256. */\n bufferSize?: number;\n}\n\n/** Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene. */\nexport interface KeywordTokenizerV2 extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeywordTokenizerV2\";\n /** The maximum token length. Default is 256. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Divides text using language-specific rules. */\nexport interface MicrosoftLanguageTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.MicrosoftLanguageTokenizer\";\n /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */\n maxTokenLength?: number;\n /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */\n isSearchTokenizer?: boolean;\n /** The language to use. The default is English. */\n language?: MicrosoftTokenizerLanguage;\n}\n\n/** Divides text using language-specific rules and reduces words to their base forms. */\nexport interface MicrosoftLanguageStemmingTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer\";\n /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */\n maxTokenLength?: number;\n /** A value indicating how the tokenizer is used. Set to true if used as the search tokenizer, set to false if used as the indexing tokenizer. Default is false. */\n isSearchTokenizer?: boolean;\n /** The language to use. The default is English. */\n language?: MicrosoftStemmingTokenizerLanguage;\n}\n\n/** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */\nexport interface NGramTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NGramTokenizer\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n /** Character classes to keep in the tokens. */\n tokenChars?: TokenCharacterKind[];\n}\n\n/** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. 
*/\nexport interface PathHierarchyTokenizerV2 extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PathHierarchyTokenizerV2\";\n /** The delimiter character to use. Default is \"/\". */\n delimiter?: string;\n /** A value that, if set, replaces the delimiter character. Default is \"/\". */\n replacement?: string;\n /** The maximum token length. Default and maximum is 300. */\n maxTokenLength?: number;\n /** A value indicating whether to generate tokens in reverse order. Default is false. */\n reverseTokenOrder?: boolean;\n /** The number of initial tokens to skip. Default is 0. */\n numberOfTokensToSkip?: number;\n}\n\n/** Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is implemented using Apache Lucene. */\nexport interface PatternTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternTokenizer\";\n /** A regular expression pattern to match token separators. Default is an expression that matches one or more non-word characters. */\n pattern?: string;\n /** Regular expression flags. */\n flags?: string;\n /** The zero-based ordinal of the matching group in the regular expression pattern to extract into tokens. Use -1 if you want to use the entire pattern to split the input into tokens, irrespective of matching groups. Default is -1. */\n group?: number;\n}\n\n/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */\nexport interface LuceneStandardTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StandardTokenizer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. */\n maxTokenLength?: number;\n}\n\n/** Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using Apache Lucene. */\nexport interface LuceneStandardTokenizerV2 extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StandardTokenizerV2\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. */\nexport interface UaxUrlEmailTokenizer extends LexicalTokenizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.UaxUrlEmailTokenizer\";\n /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */\n maxTokenLength?: number;\n}\n\n/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. 
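For example, a PatternTokenizer can split comma-separated values by using the whole match as the separator (group -1), as in this sketch:

import type { PatternTokenizer } from "@azure/search-documents";

const csvTokenizer: PatternTokenizer = {
  odatatype: "#Microsoft.Azure.Search.PatternTokenizer",
  name: "csv_tokenizer", // base LexicalTokenizer property
  pattern: ",\\s*", // split on commas plus optional whitespace
  group: -1, // -1 = split on the pattern rather than extract a matching group
};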
*/\nexport interface AsciiFoldingTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.AsciiFoldingTokenFilter\";\n /** A value indicating whether the original token will be kept. Default is false. */\n preserveOriginal?: boolean;\n}\n\n/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */\nexport interface CjkBigramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CjkBigramTokenFilter\";\n /** The scripts to ignore. */\n ignoreScripts?: CjkBigramTokenFilterScripts[];\n /** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */\n outputUnigrams?: boolean;\n}\n\n/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */\nexport interface CommonGramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CommonGramTokenFilter\";\n /** The set of common words. */\n commonWords: string[];\n /** A value indicating whether common words matching will be case insensitive. Default is false. */\n ignoreCase?: boolean;\n /** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */\n useQueryMode?: boolean;\n}\n\n/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */\nexport interface DictionaryDecompounderTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter\";\n /** The list of words to match against. */\n wordList: string[];\n /** The minimum word size. Only words longer than this get processed. Default is 5. Maximum is 300. */\n minWordSize?: number;\n /** The minimum subword size. Only subwords longer than this are outputted. Default is 2. Maximum is 300. */\n minSubwordSize?: number;\n /** The maximum subword size. Only subwords shorter than this are outputted. Default is 15. Maximum is 300. */\n maxSubwordSize?: number;\n /** A value indicating whether to add only the longest matching subword to the output. Default is false. */\n onlyLongestMatch?: boolean;\n}\n\n/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */\nexport interface EdgeNGramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.EdgeNGramTokenFilter\";\n /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. */\n maxGram?: number;\n /** Specifies which side of the input the n-gram should be generated from. Default is \"front\". 
*/\n side?: EdgeNGramTokenFilterSide;\n}\n\n/** Generates n-grams of the given size(s) starting from the front or the back of an input token. This token filter is implemented using Apache Lucene. */\nexport interface EdgeNGramTokenFilterV2 extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.EdgeNGramTokenFilterV2\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n /** Specifies which side of the input the n-gram should be generated from. Default is \"front\". */\n side?: EdgeNGramTokenFilterSide;\n}\n\n/** Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). This token filter is implemented using Apache Lucene. */\nexport interface ElisionTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ElisionTokenFilter\";\n /** The set of articles to remove. */\n articles?: string[];\n}\n\n/** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */\nexport interface KeepTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeepTokenFilter\";\n /** The list of words to keep. */\n keepWords: string[];\n /** A value indicating whether to lower case all words first. Default is false. */\n lowerCaseKeepWords?: boolean;\n}\n\n/** Marks terms as keywords. This token filter is implemented using Apache Lucene. */\nexport interface KeywordMarkerTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.KeywordMarkerTokenFilter\";\n /** A list of words to mark as keywords. */\n keywords: string[];\n /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */\n ignoreCase?: boolean;\n}\n\n/** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */\nexport interface LengthTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.LengthTokenFilter\";\n /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */\n minLength?: number;\n /** The maximum length in characters. Default and maximum is 300. */\n maxLength?: number;\n}\n\n/** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */\nexport interface LimitTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.LimitTokenFilter\";\n /** The maximum number of tokens to produce. Default is 1. */\n maxTokenCount?: number;\n /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */\n consumeAllTokens?: boolean;\n}\n\n/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. 
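A sketch of the usual autocomplete building block from the filters above, emitting front edge n-grams of 1 to 20 characters:

import type { EdgeNGramTokenFilterV2 } from "@azure/search-documents";

const autocomplete: EdgeNGramTokenFilterV2 = {
  odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenFilterV2",
  name: "autocomplete_edge", // base TokenFilter property
  minGram: 1,
  maxGram: 20, // must stay >= minGram; both are capped at 300
  side: "front",
};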
*/\nexport interface NGramTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NGramTokenFilter\";\n /** The minimum n-gram length. Default is 1. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. */\n maxGram?: number;\n}\n\n/** Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene. */\nexport interface NGramTokenFilterV2 extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.NGramTokenFilterV2\";\n /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */\n minGram?: number;\n /** The maximum n-gram length. Default is 2. Maximum is 300. */\n maxGram?: number;\n}\n\n/** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */\nexport interface PatternCaptureTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternCaptureTokenFilter\";\n /** A list of patterns to match against each token. */\n patterns: string[];\n /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */\n preserveOriginal?: boolean;\n}\n\n/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text \"aa bb aa bb\", pattern \"(aa)\\s+(bb)\", and replacement \"$1#$2\", the result would be \"aa#bb aa#bb\". This token filter is implemented using Apache Lucene. */\nexport interface PatternReplaceTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternReplaceTokenFilter\";\n /** A regular expression pattern. */\n pattern: string;\n /** The replacement text. */\n replacement: string;\n}\n\n/** Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. */\nexport interface PhoneticTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PhoneticTokenFilter\";\n /** The phonetic encoder to use. Default is \"metaphone\". */\n encoder?: PhoneticEncoder;\n /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */\n replaceOriginalTokens?: boolean;\n}\n\n/** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. */\nexport interface ShingleTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ShingleTokenFilter\";\n /** The maximum shingle size. Default and minimum value is 2. */\n maxShingleSize?: number;\n /** The minimum shingle size. Default and minimum value is 2. Must be less than the value of maxShingleSize. */\n minShingleSize?: number;\n /** A value indicating whether the output stream will contain the input tokens (unigrams) as well as shingles. 
Default is true. */\n outputUnigrams?: boolean;\n /** A value indicating whether to output unigrams for those times when no shingles are available. This property takes precedence when outputUnigrams is set to false. Default is false. */\n outputUnigramsIfNoShingles?: boolean;\n /** The string to use when joining adjacent tokens to form a shingle. Default is a single space (\" \"). */\n tokenSeparator?: string;\n /** The string to insert for each position at which there is no token. Default is an underscore (\"_\"). */\n filterToken?: string;\n}\n\n/** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */\nexport interface SnowballTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SnowballTokenFilter\";\n /** The language to use. */\n language: SnowballTokenFilterLanguage;\n}\n\n/** Language specific stemming filter. This token filter is implemented using Apache Lucene. */\nexport interface StemmerTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StemmerTokenFilter\";\n /** The language to use. */\n language: StemmerTokenFilterLanguage;\n}\n\n/** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */\nexport interface StemmerOverrideTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StemmerOverrideTokenFilter\";\n /** A list of stemming rules in the following format: \"word => stem\", for example: \"ran => run\". */\n rules: string[];\n}\n\n/** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */\nexport interface StopwordsTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.StopwordsTokenFilter\";\n /** The list of stopwords. This property and the stopwordsList property cannot both be set. */\n stopwords?: string[];\n /** A predefined list of stopwords to use. This property and the stopwords property cannot both be set. Default is English. */\n stopwordsList?: StopwordsList;\n /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */\n ignoreCase?: boolean;\n /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */\n removeTrailingStopWords?: boolean;\n}\n\n/** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */\nexport interface SynonymTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.SynonymTokenFilter\";\n /** A list of synonyms, specified in one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of the => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - a comma-separated list of equivalent words. 
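A sketch using the predefined English stopword list from the filter above (the `stopwords` and `stopwordsList` properties are mutually exclusive, as noted):

import type { StopwordsTokenFilter } from "@azure/search-documents";

const stopFilter: StopwordsTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter",
  name: "english_stop", // base TokenFilter property
  stopwordsList: "english", // predefined list; do not also set `stopwords`
  ignoreCase: true,
};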
Set the expand option to change how this list is interpreted. */\n synonyms: string[];\n /** A value indicating whether to case-fold input for matching. Default is false. */\n ignoreCase?: boolean;\n /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, the list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */\n expand?: boolean;\n}\n\n/** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */\nexport interface TruncateTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.TruncateTokenFilter\";\n /** The length at which terms will be truncated. Default and maximum is 300. */\n length?: number;\n}\n\n/** Filters out tokens with the same text as the previous token. This token filter is implemented using Apache Lucene. */\nexport interface UniqueTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.UniqueTokenFilter\";\n /** A value indicating whether to remove duplicates only at the same position. Default is false. */\n onlyOnSamePosition?: boolean;\n}\n\n/** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */\nexport interface WordDelimiterTokenFilter extends TokenFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.WordDelimiterTokenFilter\";\n /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true. */\n generateWordParts?: boolean;\n /** A value indicating whether to generate number subwords. Default is true. */\n generateNumberParts?: boolean;\n /** A value indicating whether maximum runs of word parts will be catenated. For example, if this is set to true, \"Azure-Search\" becomes \"AzureSearch\". Default is false. */\n catenateWords?: boolean;\n /** A value indicating whether maximum runs of number parts will be catenated. For example, if this is set to true, \"1-2\" becomes \"12\". Default is false. */\n catenateNumbers?: boolean;\n /** A value indicating whether all subword parts will be catenated. For example, if this is set to true, \"Azure-Search-1\" becomes \"AzureSearch1\". Default is false. */\n catenateAll?: boolean;\n /** A value indicating whether to split words on caseChange. For example, if this is set to true, \"AzureSearch\" becomes \"Azure\" \"Search\". Default is true. */\n splitOnCaseChange?: boolean;\n /** A value indicating whether original words will be preserved and added to the subword list. Default is false. */\n preserveOriginal?: boolean;\n /** A value indicating whether to split on numbers. For example, if this is set to true, \"Azure1Search\" becomes \"Azure\" \"1\" \"Search\". Default is true. 
*/\n splitOnNumerics?: boolean;\n /** A value indicating whether to remove trailing \"'s\" for each subword. Default is true. */\n stemEnglishPossessive?: boolean;\n /** A list of tokens to protect from being delimited. */\n protectedWords?: string[];\n}\n\n/** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */\nexport interface MappingCharFilter extends CharFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.MappingCharFilter\";\n /** A list of mappings of the following format: \"a=>b\" (all occurrences of the character \"a\" will be replaced with character \"b\"). */\n mappings: string[];\n}\n\n/** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text \"aa bb aa bb\", pattern \"(aa)\\s+(bb)\", and replacement \"$1#$2\", the result would be \"aa#bb aa#bb\". This character filter is implemented using Apache Lucene. */\nexport interface PatternReplaceCharFilter extends CharFilter {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.PatternReplaceCharFilter\";\n /** A regular expression pattern. */\n pattern: string;\n /** The replacement text. */\n replacement: string;\n}\n\n/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of one or more filters, which modify the token that is stored. */\nexport interface CustomNormalizer extends LexicalNormalizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.CustomNormalizer\";\n /** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */\n tokenFilters?: TokenFilterName[];\n /** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */\n charFilters?: CharFilterName[];\n}\n\n/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */\nexport interface ClassicSimilarity extends Similarity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.ClassicSimilarity\";\n}\n\n/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). 
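A sketch of a CustomNormalizer for case- and accent-insensitive filtering and sorting; `name` is assumed from the LexicalNormalizer base type, and the filter names are known TokenFilterName values:

import type { CustomNormalizer } from "@azure/search-documents";

const normalizer: CustomNormalizer = {
  odatatype: "#Microsoft.Azure.Search.CustomNormalizer",
  name: "casefold", // base LexicalNormalizer property
  tokenFilters: ["lowercase", "asciifolding"],
};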
*/\nexport interface BM25Similarity extends Similarity {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Azure.Search.BM25Similarity\";\n /** This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */\n k1?: number;\n /** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */\n b?: number;\n}\n\n/** Contains configuration options specific to the HNSW approximate nearest neighbors algorithm used during indexing and querying. The HNSW algorithm offers a tunable trade-off between search speed and accuracy. */\nexport interface HnswAlgorithmConfiguration\n extends VectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"hnsw\";\n /** Contains the parameters specific to HNSW algorithm. */\n parameters?: HnswParameters;\n}\n\n/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */\nexport interface ExhaustiveKnnAlgorithmConfiguration\n extends VectorSearchAlgorithmConfiguration {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"exhaustiveKnn\";\n /** Contains the parameters specific to exhaustive KNN algorithm. */\n parameters?: ExhaustiveKnnParameters;\n}\n\n/** Specifies the Azure OpenAI resource used to vectorize a query string. */\nexport interface AzureOpenAIVectorizer extends VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n /** Contains the parameters specific to Azure OpenAI embedding vectorization. */\n parameters?: AzureOpenAIParameters;\n}\n\n/** Specifies a user-defined vectorizer for generating the vector embedding of a query string. Integration of an external vectorizer is achieved using the custom Web API interface of a skillset. */\nexport interface WebApiVectorizer extends VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"customWebApi\";\n /** Specifies the properties of the user-defined vectorizer. */\n parameters?: WebApiParameters;\n}\n\n/** Specifies the AI Services Vision parameters for vectorizing a query image or text. */\nexport interface AIServicesVisionVectorizer extends VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"aiServicesVision\";\n /** Contains the parameters specific to AI Services Vision embedding vectorization. */\n aIServicesVisionParameters?: AIServicesVisionParameters;\n}\n\n/** Specifies an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog for generating the vector embedding of a query string. */\nexport interface AMLVectorizer extends VectorSearchVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"aml\";\n /** Specifies the properties of the AML vectorizer. 
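A sketch of an HNSW configuration; the `name` property comes from the VectorSearchAlgorithmConfiguration base type, and the parameter names follow the HnswParameters type declared elsewhere in this file:

import type { HnswAlgorithmConfiguration } from "@azure/search-documents";

const hnsw: HnswAlgorithmConfiguration = {
  kind: "hnsw",
  name: "hnsw-default", // base VectorSearchAlgorithmConfiguration property
  parameters: {
    m: 4, // bi-directional links created per node
    efConstruction: 400, // build-time candidate list size
    efSearch: 500, // query-time candidate list size
    metric: "cosine",
  },
};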
*/\n aMLParameters?: AMLParameters;\n}\n\n/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */\nexport interface ScalarQuantizationCompression extends VectorSearchCompression {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"scalarQuantization\";\n /** Contains the parameters specific to Scalar Quantization. */\n parameters?: ScalarQuantizationParameters;\n}\n\n/** Contains configuration options specific to the binary quantization compression method used during indexing and querying. */\nexport interface BinaryQuantizationCompression extends VectorSearchCompression {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"binaryQuantization\";\n}\n\n/** Specifies the Azure OpenAI resource used to vectorize a query string. */\nexport interface KnowledgeSourceAzureOpenAIVectorizer\n extends KnowledgeSourceVectorizer {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n kind: \"azureOpenAI\";\n /** Contains the parameters specific to Azure OpenAI embedding vectorization. */\n azureOpenAIParameters?: AzureOpenAIParameters;\n}\n\n/** A skill that calls a language model via Azure AI Foundry's Chat Completions endpoint. */\nexport interface ChatCompletionSkill extends WebApiSkill {\n /** Polymorphic discriminator, which specifies the different types this object can be */\n odatatype: \"#Microsoft.Skills.Custom.ChatCompletionSkill\";\n /** API key for authenticating to the model. apiKey and authIdentity cannot both be specified at the same time. */\n apiKey?: string;\n /** Common language model parameters that customers can tweak. If omitted, reasonable defaults will be applied. */\n commonModelParameters?: CommonModelParameters;\n /** Open-type dictionary for model-specific parameters that should be appended to the chat completions call. Follows Azure AI Foundry’s extensibility pattern. */\n extraParameters?: { [propertyName: string]: any };\n /** How extra parameters are handled by Azure AI Foundry. Default is 'error'. */\n extraParametersBehavior?: ChatCompletionExtraParametersBehavior;\n /** Determines how the LLM should format its response. Defaults to 'text' response type. */\n responseFormat?: ChatCompletionResponseFormat;\n}\n\n/** Projection definition for what data to store in Azure Blob. */\nexport interface SearchIndexerKnowledgeStoreObjectProjectionSelector\n extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}\n\n/** Projection definition for what data to store in Azure Files. */\nexport interface SearchIndexerKnowledgeStoreFileProjectionSelector\n extends SearchIndexerKnowledgeStoreBlobProjectionSelector {}\n\n/** Known values of {@link ApiVersion20251101Preview} that the service accepts. */\nexport enum KnownApiVersion20251101Preview {\n /** Api Version '2025-11-01-preview' */\n TwoThousandTwentyFive1101Preview = \"2025-11-01-preview\",\n}\n\n/**\n * Defines values for ApiVersion20251101Preview. \\\n * {@link KnownApiVersion20251101Preview} can be used interchangeably with ApiVersion20251101Preview,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **2025-11-01-preview**: Api Version '2025-11-01-preview'\n */\nexport type ApiVersion20251101Preview = string;\n\n/** Known values of {@link KnowledgeBaseModelKind} that the service accepts. 
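// Editor's example (not part of the generated file): a hedged sketch of a
// ChatCompletionSkill literal. The uri/inputs/outputs members come from the
// WebApiSkill and SearchIndexerSkill bases declared elsewhere in this file;
// the endpoint, field names, and the CommonModelParameters shape used here
// are illustrative assumptions, and the root re-export is assumed.
import type { ChatCompletionSkill } from "@azure/search-documents";

const summarize: ChatCompletionSkill = {
  odatatype: "#Microsoft.Skills.Custom.ChatCompletionSkill",
  uri: "https://contoso-foundry.example.com/chat/completions", // hypothetical endpoint
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "response", targetName: "summary" }],
  apiKey: "<api-key>", // mutually exclusive with authIdentity (see above)
  commonModelParameters: { temperature: 0.2 }, // assumed parameter name
  extraParametersBehavior: "error", // the documented default
};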
*/\nexport enum KnownKnowledgeBaseModelKind {\n /** Use Azure Open AI models for query planning. */\n AzureOpenAI = \"azureOpenAI\",\n}\n\n/**\n * Defines values for KnowledgeBaseModelKind. \\\n * {@link KnownKnowledgeBaseModelKind} can be used interchangeably with KnowledgeBaseModelKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **azureOpenAI**: Use Azure Open AI models for query planning.\n */\nexport type KnowledgeBaseModelKind = string;\n\n/** Known values of {@link KnowledgeRetrievalReasoningEffortKind} that the service accepts. */\nexport enum KnownKnowledgeRetrievalReasoningEffortKind {\n /** Does not perform any source selections, any query planning, or any iterative search. */\n Minimal = \"minimal\",\n /** Use low reasoning during retrieval. */\n Low = \"low\",\n /** Use a moderate amount of reasoning during retrieval. */\n Medium = \"medium\",\n}\n\n/**\n * Defines values for KnowledgeRetrievalReasoningEffortKind. \\\n * {@link KnownKnowledgeRetrievalReasoningEffortKind} can be used interchangeably with KnowledgeRetrievalReasoningEffortKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **minimal**: Does not perform any source selections, any query planning, or any iterative search. \\\n * **low**: Use low reasoning during retrieval. \\\n * **medium**: Use a moderate amount of reasoning during retrieval.\n */\nexport type KnowledgeRetrievalReasoningEffortKind = string;\n\n/** Known values of {@link KnowledgeRetrievalOutputMode} that the service accepts. */\nexport enum KnownKnowledgeRetrievalOutputMode {\n /** Return data from the knowledge sources directly without generative alteration. */\n ExtractiveData = \"extractiveData\",\n /** Synthesize an answer for the response payload. */\n AnswerSynthesis = \"answerSynthesis\",\n}\n\n/**\n * Defines values for KnowledgeRetrievalOutputMode. \\\n * {@link KnownKnowledgeRetrievalOutputMode} can be used interchangeably with KnowledgeRetrievalOutputMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **extractiveData**: Return data from the knowledge sources directly without generative alteration. \\\n * **answerSynthesis**: Synthesize an answer for the response payload.\n */\nexport type KnowledgeRetrievalOutputMode = string;\n\n/** Known values of {@link KnowledgeSourceKind} that the service accepts. */\nexport enum KnownKnowledgeSourceKind {\n /** A knowledge source that retrieves data from a Search Index. */\n SearchIndex = \"searchIndex\",\n /** A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index. */\n AzureBlob = \"azureBlob\",\n /** A knowledge source that retrieves data from the web. */\n Web = \"web\",\n /** A knowledge source that retrieves data from a remote SharePoint endpoint. */\n RemoteSharePoint = \"remoteSharePoint\",\n /** A knowledge source that retrieves and ingests data from SharePoint to a Search Index. */\n IndexedSharePoint = \"indexedSharePoint\",\n /** A knowledge source that retrieves and ingests data from OneLake to a Search Index. */\n IndexedOneLake = \"indexedOneLake\",\n}\n\n/**\n * Defines values for KnowledgeSourceKind. 
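// Editor's example (not part of the generated file): the "Known*" enum plus
// string-alias pairing above is the SDK's extensible-enum pattern. Both the
// enum member and a raw string satisfy the alias, so values the service adds
// later still compile. Assumes the types are re-exported from the package root.
import {
  KnownKnowledgeRetrievalOutputMode,
  type KnowledgeRetrievalOutputMode,
} from "@azure/search-documents";

const viaEnum: KnowledgeRetrievalOutputMode =
  KnownKnowledgeRetrievalOutputMode.AnswerSynthesis; // "answerSynthesis"
const viaString: KnowledgeRetrievalOutputMode = "extractiveData";
const futureValue: KnowledgeRetrievalOutputMode = "someNewerMode"; // still type-checks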
\\\n * {@link KnownKnowledgeSourceKind} can be used interchangeably with KnowledgeSourceKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **searchIndex**: A knowledge source that retrieves data from a Search Index. \\\n * **azureBlob**: A knowledge source that retrieves and ingests data from Azure Blob Storage to a Search Index. \\\n * **web**: A knowledge source that retrieves data from the web. \\\n * **remoteSharePoint**: A knowledge source that retrieves data from a remote SharePoint endpoint. \\\n * **indexedSharePoint**: A knowledge source that retrieves and ingests data from SharePoint to a Search Index. \\\n * **indexedOneLake**: A knowledge source that retrieves and ingests data from OneLake to a Search Index.\n */\nexport type KnowledgeSourceKind = string;\n\n/** Known values of {@link KnowledgeSourceSynchronizationStatus} that the service accepts. */\nexport enum KnownKnowledgeSourceSynchronizationStatus {\n /** The knowledge source is being provisioned. */\n Creating = \"creating\",\n /** The knowledge source is active and synchronization runs are occurring. */\n Active = \"active\",\n /** The knowledge source is being deleted and synchronization is paused. */\n Deleting = \"deleting\",\n}\n\n/**\n * Defines values for KnowledgeSourceSynchronizationStatus. \\\n * {@link KnownKnowledgeSourceSynchronizationStatus} can be used interchangeably with KnowledgeSourceSynchronizationStatus,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **creating**: The knowledge source is being provisioned. \\\n * **active**: The knowledge source is active and synchronization runs are occurring. \\\n * **deleting**: The knowledge source is being deleted and synchronization is paused.\n */\nexport type KnowledgeSourceSynchronizationStatus = string;\n\n/** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */\nexport enum KnownSearchIndexerDataSourceType {\n /** Indicates an Azure SQL datasource. */\n AzureSql = \"azuresql\",\n /** Indicates a CosmosDB datasource. */\n CosmosDb = \"cosmosdb\",\n /** Indicates an Azure Blob datasource. */\n AzureBlob = \"azureblob\",\n /** Indicates an Azure Table datasource. */\n AzureTable = \"azuretable\",\n /** Indicates a MySql datasource. */\n MySql = \"mysql\",\n /** Indicates an ADLS Gen2 datasource. */\n AdlsGen2 = \"adlsgen2\",\n /** Indicates a Microsoft Fabric OneLake datasource. */\n OneLake = \"onelake\",\n /** Indicates a SharePoint datasource. */\n SharePoint = \"sharepoint\",\n}\n\n/**\n * Defines values for SearchIndexerDataSourceType. \\\n * {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **azuresql**: Indicates an Azure SQL datasource. \\\n * **cosmosdb**: Indicates a CosmosDB datasource. \\\n * **azureblob**: Indicates an Azure Blob datasource. \\\n * **azuretable**: Indicates an Azure Table datasource. \\\n * **mysql**: Indicates a MySql datasource. \\\n * **adlsgen2**: Indicates an ADLS Gen2 datasource. \\\n * **onelake**: Indicates a Microsoft Fabric OneLake datasource. \\\n * **sharepoint**: Indicates a SharePoint datasource.\n */\nexport type SearchIndexerDataSourceType = string;\n\n/** Known values of {@link IndexerPermissionOption} that the service accepts. 
*/\nexport enum KnownIndexerPermissionOption {\n /** Indexer to ingest ACL userIds from data source to index. */\n UserIds = \"userIds\",\n /** Indexer to ingest ACL groupIds from data source to index. */\n GroupIds = \"groupIds\",\n /** Indexer to ingest Azure RBAC scope from data source to index. */\n RbacScope = \"rbacScope\",\n}\n\n/**\n * Defines values for IndexerPermissionOption. \\\n * {@link KnownIndexerPermissionOption} can be used interchangeably with IndexerPermissionOption,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **userIds**: Indexer to ingest ACL userIds from data source to index. \\\n * **groupIds**: Indexer to ingest ACL groupIds from data source to index. \\\n * **rbacScope**: Indexer to ingest Azure RBAC scope from data source to index.\n */\nexport type IndexerPermissionOption = string;\n\n/** Known values of {@link IndexerResyncOption} that the service accepts. */\nexport enum KnownIndexerResyncOption {\n /** Indexer to re-ingest pre-selected permissions data from data source to index. */\n Permissions = \"permissions\",\n}\n\n/**\n * Defines values for IndexerResyncOption. \\\n * {@link KnownIndexerResyncOption} can be used interchangeably with IndexerResyncOption,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **permissions**: Indexer to re-ingest pre-selected permissions data from data source to index.\n */\nexport type IndexerResyncOption = string;\n\n/** Known values of {@link BlobIndexerParsingMode} that the service accepts. */\nexport enum KnownBlobIndexerParsingMode {\n /** Set to default for normal file processing. */\n Default = \"default\",\n /** Set to text to improve indexing performance on plain text files in blob storage. */\n Text = \"text\",\n /** Set to delimitedText when blobs are plain CSV files. */\n DelimitedText = \"delimitedText\",\n /** Set to json to extract structured content from JSON files. */\n Json = \"json\",\n /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */\n JsonArray = \"jsonArray\",\n /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */\n JsonLines = \"jsonLines\",\n /** Set to markdown to extract content from markdown files. */\n Markdown = \"markdown\",\n}\n\n/**\n * Defines values for BlobIndexerParsingMode. \\\n * {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **default**: Set to default for normal file processing. \\\n * **text**: Set to text to improve indexing performance on plain text files in blob storage. \\\n * **delimitedText**: Set to delimitedText when blobs are plain CSV files. \\\n * **json**: Set to json to extract structured content from JSON files. \\\n * **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents. \\\n * **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. \\\n * **markdown**: Set to markdown to extract content from markdown files.\n */\nexport type BlobIndexerParsingMode = string;\n\n/** Known values of {@link MarkdownParsingSubmode} that the service accepts. 
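// Editor's example (not part of the generated file): choosing a blob parsing
// mode from the enum above. The wrapping object follows the service's indexer
// parameters (IndexingParameters.configuration); treat the exact shape as an
// assumption in this sketch.
import { KnownBlobIndexerParsingMode } from "@azure/search-documents";

// jsonArray splits one blob into one search document per array element;
// delimitedText would instead treat each CSV row as a document.
const blobParameters = {
  configuration: {
    parsingMode: KnownBlobIndexerParsingMode.JsonArray,
  },
};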
*/\nexport enum KnownMarkdownParsingSubmode {\n /** Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. */\n OneToMany = \"oneToMany\",\n /** Indicates that each markdown file will be parsed into a single search document. */\n OneToOne = \"oneToOne\",\n}\n\n/**\n * Defines values for MarkdownParsingSubmode. \\\n * {@link KnownMarkdownParsingSubmode} can be used interchangeably with MarkdownParsingSubmode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **oneToMany**: Indicates that each section of the markdown file (up to a specified depth) will be parsed into individual search documents. This can result in a single markdown file producing multiple search documents. This is the default sub-mode. \\\n * **oneToOne**: Indicates that each markdown file will be parsed into a single search document.\n */\nexport type MarkdownParsingSubmode = string;\n\n/** Known values of {@link MarkdownHeaderDepth} that the service accepts. */\nexport enum KnownMarkdownHeaderDepth {\n /** Indicates that headers up to a level of h1 will be considered while grouping markdown content. */\n H1 = \"h1\",\n /** Indicates that headers up to a level of h2 will be considered while grouping markdown content. */\n H2 = \"h2\",\n /** Indicates that headers up to a level of h3 will be considered while grouping markdown content. */\n H3 = \"h3\",\n /** Indicates that headers up to a level of h4 will be considered while grouping markdown content. */\n H4 = \"h4\",\n /** Indicates that headers up to a level of h5 will be considered while grouping markdown content. */\n H5 = \"h5\",\n /** Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default. */\n H6 = \"h6\",\n}\n\n/**\n * Defines values for MarkdownHeaderDepth. \\\n * {@link KnownMarkdownHeaderDepth} can be used interchangeably with MarkdownHeaderDepth,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **h1**: Indicates that headers up to a level of h1 will be considered while grouping markdown content. \\\n * **h2**: Indicates that headers up to a level of h2 will be considered while grouping markdown content. \\\n * **h3**: Indicates that headers up to a level of h3 will be considered while grouping markdown content. \\\n * **h4**: Indicates that headers up to a level of h4 will be considered while grouping markdown content. \\\n * **h5**: Indicates that headers up to a level of h5 will be considered while grouping markdown content. \\\n * **h6**: Indicates that headers up to a level of h6 will be considered while grouping markdown content. This is the default.\n */\nexport type MarkdownHeaderDepth = string;\n\n/** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */\nexport enum KnownBlobIndexerDataToExtract {\n /** Indexes just the standard blob properties and user-specified metadata. */\n StorageMetadata = \"storageMetadata\",\n /** Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). */\n AllMetadata = \"allMetadata\",\n /** Extracts all metadata and textual content from each blob. 
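// Editor's example (not part of the generated file): the two markdown knobs
// above combine with the markdown parsing mode. The configuration key names
// (markdownParsingSubmode, markdownHeaderDepth) mirror the enum names and are
// assumptions; confirm them against the service REST reference.
import {
  KnownBlobIndexerParsingMode,
  KnownMarkdownParsingSubmode,
  KnownMarkdownHeaderDepth,
} from "@azure/search-documents";

const markdownConfiguration = {
  parsingMode: KnownBlobIndexerParsingMode.Markdown,
  markdownParsingSubmode: KnownMarkdownParsingSubmode.OneToMany, // one doc per section
  markdownHeaderDepth: KnownMarkdownHeaderDepth.H3, // group content under headers up to h3
};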
*/\n ContentAndMetadata = \"contentAndMetadata\",\n}\n\n/**\n * Defines values for BlobIndexerDataToExtract. \\\n * {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \\\n * **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \\\n * **contentAndMetadata**: Extracts all metadata and textual content from each blob.\n */\nexport type BlobIndexerDataToExtract = string;\n\n/** Known values of {@link BlobIndexerImageAction} that the service accepts. */\nexport enum KnownBlobIndexerImageAction {\n /** Ignores embedded images or image files in the data set. This is the default. */\n None = \"none\",\n /** Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field. This action requires that \"dataToExtract\" is set to \"contentAndMetadata\". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. */\n GenerateNormalizedImages = \"generateNormalizedImages\",\n /** Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if \"generateNormalizedImages\" was set. */\n GenerateNormalizedImagePerPage = \"generateNormalizedImagePerPage\",\n}\n\n/**\n * Defines values for BlobIndexerImageAction. \\\n * {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **none**: Ignores embedded images or image files in the data set. This is the default. \\\n * **generateNormalizedImages**: Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field. This action requires that \"dataToExtract\" is set to \"contentAndMetadata\". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \\\n * **generateNormalizedImagePerPage**: Extracts text from images (for example, the word \"STOP\" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if \"generateNormalizedImages\" was set.\n */\nexport type BlobIndexerImageAction = string;\n\n/** Known values of {@link BlobIndexerPDFTextRotationAlgorithm} that the service accepts. */\nexport enum KnownBlobIndexerPDFTextRotationAlgorithm {\n /** Leverages normal text extraction. This is the default. 
*/\n None = \"none\",\n /** May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply. */\n DetectAngles = \"detectAngles\",\n}\n\n/**\n * Defines values for BlobIndexerPDFTextRotationAlgorithm. \\\n * {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **none**: Leverages normal text extraction. This is the default. \\\n * **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.\n */\nexport type BlobIndexerPDFTextRotationAlgorithm = string;\n\n/** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */\nexport enum KnownIndexerExecutionEnvironment {\n /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */\n Standard = \"standard\",\n /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */\n Private = \"private\",\n}\n\n/**\n * Defines values for IndexerExecutionEnvironment. \\\n * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **standard**: Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \\\n * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.\n */\nexport type IndexerExecutionEnvironment = string;\n\n/** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */\nexport enum KnownIndexerExecutionStatusDetail {\n /** Indicates that the reset that occurred was for a call to ResetDocs. */\n ResetDocs = \"resetDocs\",\n /** Indicates to selectively resync based on option(s) from data source. */\n Resync = \"resync\",\n}\n\n/**\n * Defines values for IndexerExecutionStatusDetail. \\\n * {@link KnownIndexerExecutionStatusDetail} can be used interchangeably with IndexerExecutionStatusDetail,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **resetDocs**: Indicates that the reset that occurred was for a call to ResetDocs. 
\\\n * **resync**: Indicates to selectively resync based on option(s) from data source.\n */\nexport type IndexerExecutionStatusDetail = string;\n\n/** Known values of {@link IndexingMode} that the service accepts. */\nexport enum KnownIndexingMode {\n /** The indexer is indexing all documents in the datasource. */\n IndexingAllDocs = \"indexingAllDocs\",\n /** The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. */\n IndexingResetDocs = \"indexingResetDocs\",\n /** The indexer is resyncing and indexing selective option(s) from the datasource. */\n IndexingResync = \"indexingResync\",\n}\n\n/**\n * Defines values for IndexingMode. \\\n * {@link KnownIndexingMode} can be used interchangeably with IndexingMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **indexingAllDocs**: The indexer is indexing all documents in the datasource. \\\n * **indexingResetDocs**: The indexer is indexing selective, reset documents in the datasource. The documents being indexed are defined on indexer status. \\\n * **indexingResync**: The indexer is resyncing and indexing selective option(s) from the datasource.\n */\nexport type IndexingMode = string;\n\n/** Known values of {@link IndexProjectionMode} that the service accepts. */\nexport enum KnownIndexProjectionMode {\n /** The source document will be skipped from writing into the indexer's target index. */\n SkipIndexingParentDocuments = \"skipIndexingParentDocuments\",\n /** The source document will be written into the indexer's target index. This is the default pattern. */\n IncludeIndexingParentDocuments = \"includeIndexingParentDocuments\",\n}\n\n/**\n * Defines values for IndexProjectionMode. \\\n * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \\\n * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.\n */\nexport type IndexProjectionMode = string;\n\n/** Known values of {@link SearchFieldDataType} that the service accepts. */\nexport enum KnownSearchFieldDataType {\n /** Indicates that a field contains a string. */\n String = \"Edm.String\",\n /** Indicates that a field contains a 32-bit signed integer. */\n Int32 = \"Edm.Int32\",\n /** Indicates that a field contains a 64-bit signed integer. */\n Int64 = \"Edm.Int64\",\n /** Indicates that a field contains an IEEE double-precision floating point number. */\n Double = \"Edm.Double\",\n /** Indicates that a field contains a Boolean value (true or false). */\n Boolean = \"Edm.Boolean\",\n /** Indicates that a field contains a date\\/time value, including timezone information. */\n DateTimeOffset = \"Edm.DateTimeOffset\",\n /** Indicates that a field contains a geo-location in terms of longitude and latitude. */\n GeographyPoint = \"Edm.GeographyPoint\",\n /** Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. */\n Complex = \"Edm.ComplexType\",\n /** Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). 
*/\n Single = \"Edm.Single\",\n /** Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). */\n Half = \"Edm.Half\",\n /** Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). */\n Int16 = \"Edm.Int16\",\n /** Indicates that a field contains a 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). */\n SByte = \"Edm.SByte\",\n /** Indicates that a field contains a 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte). */\n Byte = \"Edm.Byte\",\n}\n\n/**\n * Defines values for SearchFieldDataType. \\\n * {@link KnownSearchFieldDataType} can be used interchangeably with SearchFieldDataType,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **Edm.String**: Indicates that a field contains a string. \\\n * **Edm.Int32**: Indicates that a field contains a 32-bit signed integer. \\\n * **Edm.Int64**: Indicates that a field contains a 64-bit signed integer. \\\n * **Edm.Double**: Indicates that a field contains an IEEE double-precision floating point number. \\\n * **Edm.Boolean**: Indicates that a field contains a Boolean value (true or false). \\\n * **Edm.DateTimeOffset**: Indicates that a field contains a date\\/time value, including timezone information. \\\n * **Edm.GeographyPoint**: Indicates that a field contains a geo-location in terms of longitude and latitude. \\\n * **Edm.ComplexType**: Indicates that a field contains one or more complex objects that in turn have sub-fields of other types. \\\n * **Edm.Single**: Indicates that a field contains a single-precision floating point number. This is only valid when used with Collection(Edm.Single). \\\n * **Edm.Half**: Indicates that a field contains a half-precision floating point number. This is only valid when used with Collection(Edm.Half). \\\n * **Edm.Int16**: Indicates that a field contains a 16-bit signed integer. This is only valid when used with Collection(Edm.Int16). \\\n * **Edm.SByte**: Indicates that a field contains a 8-bit signed integer. This is only valid when used with Collection(Edm.SByte). \\\n * **Edm.Byte**: Indicates that a field contains a 8-bit unsigned integer. This is only valid when used with Collection(Edm.Byte).\n */\nexport type SearchFieldDataType = string;\n\n/** Known values of {@link PermissionFilter} that the service accepts. */\nexport enum KnownPermissionFilter {\n /** Field represents user IDs that should be used to filter document access on queries. */\n UserIds = \"userIds\",\n /** Field represents group IDs that should be used to filter document access on queries. */\n GroupIds = \"groupIds\",\n /** Field represents an RBAC scope that should be used to filter document access on queries. */\n RbacScope = \"rbacScope\",\n}\n\n/**\n * Defines values for PermissionFilter. \\\n * {@link KnownPermissionFilter} can be used interchangeably with PermissionFilter,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **userIds**: Field represents user IDs that should be used to filter document access on queries. \\\n * **groupIds**: Field represents group IDs that should be used to filter document access on queries. 
\\\n * **rbacScope**: Field represents an RBAC scope that should be used to filter document access on queries.\n */\nexport type PermissionFilter = string;\n\n/** Known values of {@link LexicalAnalyzerName} that the service accepts. */\nexport enum KnownLexicalAnalyzerName {\n /** Microsoft analyzer for Arabic. */\n ArMicrosoft = \"ar.microsoft\",\n /** Lucene analyzer for Arabic. */\n ArLucene = \"ar.lucene\",\n /** Lucene analyzer for Armenian. */\n HyLucene = \"hy.lucene\",\n /** Microsoft analyzer for Bangla. */\n BnMicrosoft = \"bn.microsoft\",\n /** Lucene analyzer for Basque. */\n EuLucene = \"eu.lucene\",\n /** Microsoft analyzer for Bulgarian. */\n BgMicrosoft = \"bg.microsoft\",\n /** Lucene analyzer for Bulgarian. */\n BgLucene = \"bg.lucene\",\n /** Microsoft analyzer for Catalan. */\n CaMicrosoft = \"ca.microsoft\",\n /** Lucene analyzer for Catalan. */\n CaLucene = \"ca.lucene\",\n /** Microsoft analyzer for Chinese (Simplified). */\n ZhHansMicrosoft = \"zh-Hans.microsoft\",\n /** Lucene analyzer for Chinese (Simplified). */\n ZhHansLucene = \"zh-Hans.lucene\",\n /** Microsoft analyzer for Chinese (Traditional). */\n ZhHantMicrosoft = \"zh-Hant.microsoft\",\n /** Lucene analyzer for Chinese (Traditional). */\n ZhHantLucene = \"zh-Hant.lucene\",\n /** Microsoft analyzer for Croatian. */\n HrMicrosoft = \"hr.microsoft\",\n /** Microsoft analyzer for Czech. */\n CsMicrosoft = \"cs.microsoft\",\n /** Lucene analyzer for Czech. */\n CsLucene = \"cs.lucene\",\n /** Microsoft analyzer for Danish. */\n DaMicrosoft = \"da.microsoft\",\n /** Lucene analyzer for Danish. */\n DaLucene = \"da.lucene\",\n /** Microsoft analyzer for Dutch. */\n NlMicrosoft = \"nl.microsoft\",\n /** Lucene analyzer for Dutch. */\n NlLucene = \"nl.lucene\",\n /** Microsoft analyzer for English. */\n EnMicrosoft = \"en.microsoft\",\n /** Lucene analyzer for English. */\n EnLucene = \"en.lucene\",\n /** Microsoft analyzer for Estonian. */\n EtMicrosoft = \"et.microsoft\",\n /** Microsoft analyzer for Finnish. */\n FiMicrosoft = \"fi.microsoft\",\n /** Lucene analyzer for Finnish. */\n FiLucene = \"fi.lucene\",\n /** Microsoft analyzer for French. */\n FrMicrosoft = \"fr.microsoft\",\n /** Lucene analyzer for French. */\n FrLucene = \"fr.lucene\",\n /** Lucene analyzer for Galician. */\n GlLucene = \"gl.lucene\",\n /** Microsoft analyzer for German. */\n DeMicrosoft = \"de.microsoft\",\n /** Lucene analyzer for German. */\n DeLucene = \"de.lucene\",\n /** Microsoft analyzer for Greek. */\n ElMicrosoft = \"el.microsoft\",\n /** Lucene analyzer for Greek. */\n ElLucene = \"el.lucene\",\n /** Microsoft analyzer for Gujarati. */\n GuMicrosoft = \"gu.microsoft\",\n /** Microsoft analyzer for Hebrew. */\n HeMicrosoft = \"he.microsoft\",\n /** Microsoft analyzer for Hindi. */\n HiMicrosoft = \"hi.microsoft\",\n /** Lucene analyzer for Hindi. */\n HiLucene = \"hi.lucene\",\n /** Microsoft analyzer for Hungarian. */\n HuMicrosoft = \"hu.microsoft\",\n /** Lucene analyzer for Hungarian. */\n HuLucene = \"hu.lucene\",\n /** Microsoft analyzer for Icelandic. */\n IsMicrosoft = \"is.microsoft\",\n /** Microsoft analyzer for Indonesian (Bahasa). */\n IdMicrosoft = \"id.microsoft\",\n /** Lucene analyzer for Indonesian. */\n IdLucene = \"id.lucene\",\n /** Lucene analyzer for Irish. */\n GaLucene = \"ga.lucene\",\n /** Microsoft analyzer for Italian. */\n ItMicrosoft = \"it.microsoft\",\n /** Lucene analyzer for Italian. */\n ItLucene = \"it.lucene\",\n /** Microsoft analyzer for Japanese. 
*/\n JaMicrosoft = \"ja.microsoft\",\n /** Lucene analyzer for Japanese. */\n JaLucene = \"ja.lucene\",\n /** Microsoft analyzer for Kannada. */\n KnMicrosoft = \"kn.microsoft\",\n /** Microsoft analyzer for Korean. */\n KoMicrosoft = \"ko.microsoft\",\n /** Lucene analyzer for Korean. */\n KoLucene = \"ko.lucene\",\n /** Microsoft analyzer for Latvian. */\n LvMicrosoft = \"lv.microsoft\",\n /** Lucene analyzer for Latvian. */\n LvLucene = \"lv.lucene\",\n /** Microsoft analyzer for Lithuanian. */\n LtMicrosoft = \"lt.microsoft\",\n /** Microsoft analyzer for Malayalam. */\n MlMicrosoft = \"ml.microsoft\",\n /** Microsoft analyzer for Malay (Latin). */\n MsMicrosoft = \"ms.microsoft\",\n /** Microsoft analyzer for Marathi. */\n MrMicrosoft = \"mr.microsoft\",\n /** Microsoft analyzer for Norwegian (Bokmål). */\n NbMicrosoft = \"nb.microsoft\",\n /** Lucene analyzer for Norwegian. */\n NoLucene = \"no.lucene\",\n /** Lucene analyzer for Persian. */\n FaLucene = \"fa.lucene\",\n /** Microsoft analyzer for Polish. */\n PlMicrosoft = \"pl.microsoft\",\n /** Lucene analyzer for Polish. */\n PlLucene = \"pl.lucene\",\n /** Microsoft analyzer for Portuguese (Brazil). */\n PtBrMicrosoft = \"pt-BR.microsoft\",\n /** Lucene analyzer for Portuguese (Brazil). */\n PtBrLucene = \"pt-BR.lucene\",\n /** Microsoft analyzer for Portuguese (Portugal). */\n PtPtMicrosoft = \"pt-PT.microsoft\",\n /** Lucene analyzer for Portuguese (Portugal). */\n PtPtLucene = \"pt-PT.lucene\",\n /** Microsoft analyzer for Punjabi. */\n PaMicrosoft = \"pa.microsoft\",\n /** Microsoft analyzer for Romanian. */\n RoMicrosoft = \"ro.microsoft\",\n /** Lucene analyzer for Romanian. */\n RoLucene = \"ro.lucene\",\n /** Microsoft analyzer for Russian. */\n RuMicrosoft = \"ru.microsoft\",\n /** Lucene analyzer for Russian. */\n RuLucene = \"ru.lucene\",\n /** Microsoft analyzer for Serbian (Cyrillic). */\n SrCyrillicMicrosoft = \"sr-cyrillic.microsoft\",\n /** Microsoft analyzer for Serbian (Latin). */\n SrLatinMicrosoft = \"sr-latin.microsoft\",\n /** Microsoft analyzer for Slovak. */\n SkMicrosoft = \"sk.microsoft\",\n /** Microsoft analyzer for Slovenian. */\n SlMicrosoft = \"sl.microsoft\",\n /** Microsoft analyzer for Spanish. */\n EsMicrosoft = \"es.microsoft\",\n /** Lucene analyzer for Spanish. */\n EsLucene = \"es.lucene\",\n /** Microsoft analyzer for Swedish. */\n SvMicrosoft = \"sv.microsoft\",\n /** Lucene analyzer for Swedish. */\n SvLucene = \"sv.lucene\",\n /** Microsoft analyzer for Tamil. */\n TaMicrosoft = \"ta.microsoft\",\n /** Microsoft analyzer for Telugu. */\n TeMicrosoft = \"te.microsoft\",\n /** Microsoft analyzer for Thai. */\n ThMicrosoft = \"th.microsoft\",\n /** Lucene analyzer for Thai. */\n ThLucene = \"th.lucene\",\n /** Microsoft analyzer for Turkish. */\n TrMicrosoft = \"tr.microsoft\",\n /** Lucene analyzer for Turkish. */\n TrLucene = \"tr.lucene\",\n /** Microsoft analyzer for Ukrainian. */\n UkMicrosoft = \"uk.microsoft\",\n /** Microsoft analyzer for Urdu. */\n UrMicrosoft = \"ur.microsoft\",\n /** Microsoft analyzer for Vietnamese. */\n ViMicrosoft = \"vi.microsoft\",\n /** Standard Lucene analyzer. */\n StandardLucene = \"standard.lucene\",\n /** Standard ASCII Folding Lucene analyzer. See https:\\//learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#Analyzers */\n StandardAsciiFoldingLucene = \"standardasciifolding.lucene\",\n /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. 
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordAnalyzer.html */\n Keyword = \"keyword\",\n /** Flexibly separates text into terms via a regular expression pattern. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/PatternAnalyzer.html */\n Pattern = \"pattern\",\n /** Divides text at non-letters and converts them to lower case. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/SimpleAnalyzer.html */\n Simple = \"simple\",\n /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopAnalyzer.html */\n Stop = \"stop\",\n /** An analyzer that uses the whitespace tokenizer. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceAnalyzer.html */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Defines values for LexicalAnalyzerName. \\\n * {@link KnownLexicalAnalyzerName} can be used interchangeably with LexicalAnalyzerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar.microsoft**: Microsoft analyzer for Arabic. \\\n * **ar.lucene**: Lucene analyzer for Arabic. \\\n * **hy.lucene**: Lucene analyzer for Armenian. \\\n * **bn.microsoft**: Microsoft analyzer for Bangla. \\\n * **eu.lucene**: Lucene analyzer for Basque. \\\n * **bg.microsoft**: Microsoft analyzer for Bulgarian. \\\n * **bg.lucene**: Lucene analyzer for Bulgarian. \\\n * **ca.microsoft**: Microsoft analyzer for Catalan. \\\n * **ca.lucene**: Lucene analyzer for Catalan. \\\n * **zh-Hans.microsoft**: Microsoft analyzer for Chinese (Simplified). \\\n * **zh-Hans.lucene**: Lucene analyzer for Chinese (Simplified). \\\n * **zh-Hant.microsoft**: Microsoft analyzer for Chinese (Traditional). \\\n * **zh-Hant.lucene**: Lucene analyzer for Chinese (Traditional). \\\n * **hr.microsoft**: Microsoft analyzer for Croatian. \\\n * **cs.microsoft**: Microsoft analyzer for Czech. \\\n * **cs.lucene**: Lucene analyzer for Czech. \\\n * **da.microsoft**: Microsoft analyzer for Danish. \\\n * **da.lucene**: Lucene analyzer for Danish. \\\n * **nl.microsoft**: Microsoft analyzer for Dutch. \\\n * **nl.lucene**: Lucene analyzer for Dutch. \\\n * **en.microsoft**: Microsoft analyzer for English. \\\n * **en.lucene**: Lucene analyzer for English. \\\n * **et.microsoft**: Microsoft analyzer for Estonian. \\\n * **fi.microsoft**: Microsoft analyzer for Finnish. \\\n * **fi.lucene**: Lucene analyzer for Finnish. \\\n * **fr.microsoft**: Microsoft analyzer for French. \\\n * **fr.lucene**: Lucene analyzer for French. \\\n * **gl.lucene**: Lucene analyzer for Galician. \\\n * **de.microsoft**: Microsoft analyzer for German. \\\n * **de.lucene**: Lucene analyzer for German. \\\n * **el.microsoft**: Microsoft analyzer for Greek. \\\n * **el.lucene**: Lucene analyzer for Greek. \\\n * **gu.microsoft**: Microsoft analyzer for Gujarati. \\\n * **he.microsoft**: Microsoft analyzer for Hebrew. \\\n * **hi.microsoft**: Microsoft analyzer for Hindi. \\\n * **hi.lucene**: Lucene analyzer for Hindi. \\\n * **hu.microsoft**: Microsoft analyzer for Hungarian. \\\n * **hu.lucene**: Lucene analyzer for Hungarian. \\\n * **is.microsoft**: Microsoft analyzer for Icelandic. 
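// Editor's example (not part of the generated file): analyzers from the list
// above are assigned per field. The analyzerName property follows the public
// SDK surface (the generated models may differ), and the field shapes are
// simplified placeholders.
import { KnownLexicalAnalyzerName } from "@azure/search-documents";

const analyzedFields = [
  { name: "title_en", type: "Edm.String", analyzerName: KnownLexicalAnalyzerName.EnMicrosoft },
  { name: "title_ja", type: "Edm.String", analyzerName: KnownLexicalAnalyzerName.JaLucene },
  // keyword emits the whole value as one token: useful for SKUs and zip codes.
  { name: "productCode", type: "Edm.String", analyzerName: KnownLexicalAnalyzerName.Keyword },
];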
\\\n * **id.microsoft**: Microsoft analyzer for Indonesian (Bahasa). \\\n * **id.lucene**: Lucene analyzer for Indonesian. \\\n * **ga.lucene**: Lucene analyzer for Irish. \\\n * **it.microsoft**: Microsoft analyzer for Italian. \\\n * **it.lucene**: Lucene analyzer for Italian. \\\n * **ja.microsoft**: Microsoft analyzer for Japanese. \\\n * **ja.lucene**: Lucene analyzer for Japanese. \\\n * **kn.microsoft**: Microsoft analyzer for Kannada. \\\n * **ko.microsoft**: Microsoft analyzer for Korean. \\\n * **ko.lucene**: Lucene analyzer for Korean. \\\n * **lv.microsoft**: Microsoft analyzer for Latvian. \\\n * **lv.lucene**: Lucene analyzer for Latvian. \\\n * **lt.microsoft**: Microsoft analyzer for Lithuanian. \\\n * **ml.microsoft**: Microsoft analyzer for Malayalam. \\\n * **ms.microsoft**: Microsoft analyzer for Malay (Latin). \\\n * **mr.microsoft**: Microsoft analyzer for Marathi. \\\n * **nb.microsoft**: Microsoft analyzer for Norwegian (Bokmål). \\\n * **no.lucene**: Lucene analyzer for Norwegian. \\\n * **fa.lucene**: Lucene analyzer for Persian. \\\n * **pl.microsoft**: Microsoft analyzer for Polish. \\\n * **pl.lucene**: Lucene analyzer for Polish. \\\n * **pt-BR.microsoft**: Microsoft analyzer for Portuguese (Brazil). \\\n * **pt-BR.lucene**: Lucene analyzer for Portuguese (Brazil). \\\n * **pt-PT.microsoft**: Microsoft analyzer for Portuguese (Portugal). \\\n * **pt-PT.lucene**: Lucene analyzer for Portuguese (Portugal). \\\n * **pa.microsoft**: Microsoft analyzer for Punjabi. \\\n * **ro.microsoft**: Microsoft analyzer for Romanian. \\\n * **ro.lucene**: Lucene analyzer for Romanian. \\\n * **ru.microsoft**: Microsoft analyzer for Russian. \\\n * **ru.lucene**: Lucene analyzer for Russian. \\\n * **sr-cyrillic.microsoft**: Microsoft analyzer for Serbian (Cyrillic). \\\n * **sr-latin.microsoft**: Microsoft analyzer for Serbian (Latin). \\\n * **sk.microsoft**: Microsoft analyzer for Slovak. \\\n * **sl.microsoft**: Microsoft analyzer for Slovenian. \\\n * **es.microsoft**: Microsoft analyzer for Spanish. \\\n * **es.lucene**: Lucene analyzer for Spanish. \\\n * **sv.microsoft**: Microsoft analyzer for Swedish. \\\n * **sv.lucene**: Lucene analyzer for Swedish. \\\n * **ta.microsoft**: Microsoft analyzer for Tamil. \\\n * **te.microsoft**: Microsoft analyzer for Telugu. \\\n * **th.microsoft**: Microsoft analyzer for Thai. \\\n * **th.lucene**: Lucene analyzer for Thai. \\\n * **tr.microsoft**: Microsoft analyzer for Turkish. \\\n * **tr.lucene**: Lucene analyzer for Turkish. \\\n * **uk.microsoft**: Microsoft analyzer for Ukrainian. \\\n * **ur.microsoft**: Microsoft analyzer for Urdu. \\\n * **vi.microsoft**: Microsoft analyzer for Vietnamese. \\\n * **standard.lucene**: Standard Lucene analyzer. \\\n * **standardasciifolding.lucene**: Standard ASCII Folding Lucene analyzer. See https:\\/\\/learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#Analyzers \\\n * **keyword**: Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordAnalyzer.html \\\n * **pattern**: Flexibly separates text into terms via a regular expression pattern. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/PatternAnalyzer.html \\\n * **simple**: Divides text at non-letters and converts them to lower case. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/SimpleAnalyzer.html \\\n * **stop**: Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopAnalyzer.html \\\n * **whitespace**: An analyzer that uses the whitespace tokenizer. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceAnalyzer.html\n */\nexport type LexicalAnalyzerName = string;\n\n/** Known values of {@link LexicalNormalizerName} that the service accepts. */\nexport enum KnownLexicalNormalizerName {\n /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html */\n AsciiFolding = \"asciifolding\",\n /** Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html */\n Elision = \"elision\",\n /** Normalizes token text to lowercase. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html */\n Lowercase = \"lowercase\",\n /** Standard normalizer, which consists of lowercase and asciifolding. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/reverse\\/ReverseStringFilter.html */\n Standard = \"standard\",\n /** Normalizes token text to uppercase. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html */\n Uppercase = \"uppercase\",\n}\n\n/**\n * Defines values for LexicalNormalizerName. \\\n * {@link KnownLexicalNormalizerName} can be used interchangeably with LexicalNormalizerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html \\\n * **elision**: Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html \\\n * **lowercase**: Normalizes token text to lowercase. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html \\\n * **standard**: Standard normalizer, which consists of lowercase and asciifolding. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/reverse\\/ReverseStringFilter.html \\\n * **uppercase**: Normalizes token text to uppercase. 
See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html\n */\nexport type LexicalNormalizerName = string;\n\n/** Known values of {@link VectorEncodingFormat} that the service accepts. */\nexport enum KnownVectorEncodingFormat {\n /** Encoding format representing bits packed into a wider data type. */\n PackedBit = \"packedBit\",\n}\n\n/**\n * Defines values for VectorEncodingFormat. \\\n * {@link KnownVectorEncodingFormat} can be used interchangeably with VectorEncodingFormat,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **packedBit**: Encoding format representing bits packed into a wider data type.\n */\nexport type VectorEncodingFormat = string;\n\n/** Known values of {@link RankingOrder} that the service accepts. */\nexport enum KnownRankingOrder {\n /** Sets sort order as BoostedRerankerScore */\n BoostedRerankerScore = \"BoostedRerankerScore\",\n /** Sets sort order as ReRankerScore */\n ReRankerScore = \"RerankerScore\",\n}\n\n/**\n * Defines values for RankingOrder. \\\n * {@link KnownRankingOrder} can be used interchangeably with RankingOrder,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **BoostedRerankerScore**: Sets sort order as BoostedRerankerScore \\\n * **RerankerScore**: Sets sort order as ReRankerScore\n */\nexport type RankingOrder = string;\n\n/** Known values of {@link VectorSearchAlgorithmKind} that the service accepts. */\nexport enum KnownVectorSearchAlgorithmKind {\n /** HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. */\n Hnsw = \"hnsw\",\n /** Exhaustive KNN algorithm which will perform brute-force search. */\n ExhaustiveKnn = \"exhaustiveKnn\",\n}\n\n/**\n * Defines values for VectorSearchAlgorithmKind. \\\n * {@link KnownVectorSearchAlgorithmKind} can be used interchangeably with VectorSearchAlgorithmKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **hnsw**: HNSW (Hierarchical Navigable Small World), a type of approximate nearest neighbors algorithm. \\\n * **exhaustiveKnn**: Exhaustive KNN algorithm which will perform brute-force search.\n */\nexport type VectorSearchAlgorithmKind = string;\n\n/** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */\nexport enum KnownVectorSearchVectorizerKind {\n /** Generate embeddings using an Azure OpenAI resource at query time. */\n AzureOpenAI = \"azureOpenAI\",\n /** Generate embeddings using a custom web endpoint at query time. */\n CustomWebApi = \"customWebApi\",\n /** Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. */\n AIServicesVision = \"aiServicesVision\",\n /** Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time. */\n AML = \"aml\",\n}\n\n/**\n * Defines values for VectorSearchVectorizerKind. \\\n * {@link KnownVectorSearchVectorizerKind} can be used interchangeably with VectorSearchVectorizerKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **azureOpenAI**: Generate embeddings using an Azure OpenAI resource at query time. \\\n * **customWebApi**: Generate embeddings using a custom web endpoint at query time. 
\\\n * **aiServicesVision**: Generate embeddings for an image or text input at query time using the Azure AI Services Vision Vectorize API. \\\n * **aml**: Generate embeddings using an Azure Machine Learning endpoint deployed via the Azure AI Foundry Model Catalog at query time.\n */\nexport type VectorSearchVectorizerKind = string;\n\n/** Known values of {@link VectorSearchCompressionKind} that the service accepts. */\nexport enum KnownVectorSearchCompressionKind {\n /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */\n ScalarQuantization = \"scalarQuantization\",\n /** Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */\n BinaryQuantization = \"binaryQuantization\",\n}\n\n/**\n * Defines values for VectorSearchCompressionKind. \\\n * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. \\\n * **binaryQuantization**: Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size.\n */\nexport type VectorSearchCompressionKind = string;\n\n/** Known values of {@link VectorSearchCompressionRescoreStorageMethod} that the service accepts. */\nexport enum KnownVectorSearchCompressionRescoreStorageMethod {\n /** This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. */\n PreserveOriginals = \"preserveOriginals\",\n /** This option discards the original full-precision vectors. Choose this option for maximum storage savings. Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality. */\n DiscardOriginals = \"discardOriginals\",\n}\n\n/**\n * Defines values for VectorSearchCompressionRescoreStorageMethod. \\\n * {@link KnownVectorSearchCompressionRescoreStorageMethod} can be used interchangeably with VectorSearchCompressionRescoreStorageMethod,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **preserveOriginals**: This option preserves the original full-precision vectors. Choose this option for maximum flexibility and highest quality of compressed search results. This consumes more storage but allows for rescoring and oversampling. \\\n * **discardOriginals**: This option discards the original full-precision vectors. Choose this option for maximum storage savings. 
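// Editor's example (not part of the generated file): a hedged sketch of how
// the algorithm, compression, and rescore-storage choices above compose inside
// a vector-search section. The profile/compression property names used here
// (compressionName, rescoringOptions, algorithmConfigurationName) are
// assumptions drawn from the wider SDK surface, not from this excerpt.
const vectorSearch = {
  algorithms: [{ name: "hnsw-1", kind: "hnsw" }],
  compressions: [
    {
      compressionName: "sq-1", // assumed base property
      kind: "scalarQuantization",
      rescoringOptions: {
        // preserveOriginals keeps full-precision vectors for rescoring and
        // oversampling; discardOriginals trades quality for storage savings.
        rescoreStorageMethod: "preserveOriginals",
      },
    },
  ],
  profiles: [
    {
      name: "vec-profile",
      algorithmConfigurationName: "hnsw-1",
      compressionName: "sq-1", // assumed wiring property
    },
  ],
};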
Since this option does not allow for rescoring and oversampling, it will often cause slight to moderate reductions in quality.\n */\nexport type VectorSearchCompressionRescoreStorageMethod = string;\n\n/** Known values of {@link SearchIndexPermissionFilterOption} that the service accepts. */\nexport enum KnownSearchIndexPermissionFilterOption {\n /** Enabled */\n Enabled = \"enabled\",\n /** Disabled */\n Disabled = \"disabled\",\n}\n\n/**\n * Defines values for SearchIndexPermissionFilterOption. \\\n * {@link KnownSearchIndexPermissionFilterOption} can be used interchangeably with SearchIndexPermissionFilterOption,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **enabled** \\\n * **disabled**\n */\nexport type SearchIndexPermissionFilterOption = string;\n\n/** Known values of {@link AzureOpenAIModelName} that the service accepts. */\nexport enum KnownAzureOpenAIModelName {\n /** TextEmbeddingAda002 */\n TextEmbeddingAda002 = \"text-embedding-ada-002\",\n /** TextEmbedding3Large */\n TextEmbedding3Large = \"text-embedding-3-large\",\n /** TextEmbedding3Small */\n TextEmbedding3Small = \"text-embedding-3-small\",\n /** Gpt4O */\n Gpt4O = \"gpt-4o\",\n /** Gpt4OMini */\n Gpt4OMini = \"gpt-4o-mini\",\n /** Gpt41 */\n Gpt41 = \"gpt-4.1\",\n /** Gpt41Mini */\n Gpt41Mini = \"gpt-4.1-mini\",\n /** Gpt41Nano */\n Gpt41Nano = \"gpt-4.1-nano\",\n /** Gpt5 */\n Gpt5 = \"gpt-5\",\n /** Gpt5Mini */\n Gpt5Mini = \"gpt-5-mini\",\n /** Gpt5Nano */\n Gpt5Nano = \"gpt-5-nano\",\n}\n\n/**\n * Defines values for AzureOpenAIModelName. \\\n * {@link KnownAzureOpenAIModelName} can be used interchangeably with AzureOpenAIModelName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **text-embedding-ada-002** \\\n * **text-embedding-3-large** \\\n * **text-embedding-3-small** \\\n * **gpt-4o** \\\n * **gpt-4o-mini** \\\n * **gpt-4.1** \\\n * **gpt-4.1-mini** \\\n * **gpt-4.1-nano** \\\n * **gpt-5** \\\n * **gpt-5-mini** \\\n * **gpt-5-nano**\n */\nexport type AzureOpenAIModelName = string;\n\n/** Known values of {@link KnowledgeSourceIngestionPermissionOption} that the service accepts. */\nexport enum KnownKnowledgeSourceIngestionPermissionOption {\n /** Ingest explicit user identifiers alongside document content. */\n UserIds = \"userIds\",\n /** Ingest group identifiers alongside document content. */\n GroupIds = \"groupIds\",\n /** Ingest RBAC scope information alongside document content. */\n RbacScope = \"rbacScope\",\n}\n\n/**\n * Defines values for KnowledgeSourceIngestionPermissionOption. \\\n * {@link KnownKnowledgeSourceIngestionPermissionOption} can be used interchangeably with KnowledgeSourceIngestionPermissionOption,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **userIds**: Ingest explicit user identifiers alongside document content. \\\n * **groupIds**: Ingest group identifiers alongside document content. \\\n * **rbacScope**: Ingest RBAC scope information alongside document content.\n */\nexport type KnowledgeSourceIngestionPermissionOption = string;\n\n/** Known values of {@link KnowledgeSourceContentExtractionMode} that the service accepts. */\nexport enum KnownKnowledgeSourceContentExtractionMode {\n /** Extracts only essential metadata while deferring most content processing. */\n Minimal = \"minimal\",\n /** Performs the full default content extraction pipeline. 
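// Editor's example (not part of the generated file): the model names above
// feed the parameters of the azureOpenAI vectorizer declared earlier in this
// file. The endpoint, deployment, and parameter key names are illustrative
// assumptions.
import { KnownAzureOpenAIModelName } from "@azure/search-documents";

const aoaiVectorizer = {
  kind: "azureOpenAI",
  vectorizerName: "aoai-vec", // assumed base property
  parameters: {
    resourceUrl: "https://contoso.openai.azure.com", // hypothetical resource
    deploymentId: "embedding-3-small", // hypothetical deployment
    modelName: KnownAzureOpenAIModelName.TextEmbedding3Small,
  },
};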
*/\n Standard = \"standard\",\n}\n\n/**\n * Defines values for KnowledgeSourceContentExtractionMode. \\\n * {@link KnownKnowledgeSourceContentExtractionMode} can be used interchangeably with KnowledgeSourceContentExtractionMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **minimal**: Extracts only essential metadata while deferring most content processing. \\\n * **standard**: Performs the full default content extraction pipeline.\n */\nexport type KnowledgeSourceContentExtractionMode = string;\n\n/** Known values of {@link IndexedSharePointContainerName} that the service accepts. */\nexport enum KnownIndexedSharePointContainerName {\n /** Index content from the site's default document library. */\n DefaultSiteLibrary = \"defaultSiteLibrary\",\n /** Index content from every document library in the site. */\n AllSiteLibraries = \"allSiteLibraries\",\n /** Index only content that matches the query specified in the knowledge source. */\n UseQuery = \"useQuery\",\n}\n\n/**\n * Defines values for IndexedSharePointContainerName. \\\n * {@link KnownIndexedSharePointContainerName} can be used interchangeably with IndexedSharePointContainerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **defaultSiteLibrary**: Index content from the site's default document library. \\\n * **allSiteLibraries**: Index content from every document library in the site. \\\n * **useQuery**: Index only content that matches the query specified in the knowledge source.\n */\nexport type IndexedSharePointContainerName = string;\n\n/** Known values of {@link TokenFilterName} that the service accepts. */\nexport enum KnownTokenFilterName {\n /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ar\\/ArabicNormalizationFilter.html */\n ArabicNormalization = \"arabic_normalization\",\n /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/tr\\/ApostropheFilter.html */\n Apostrophe = \"apostrophe\",\n /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html */\n AsciiFolding = \"asciifolding\",\n /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKBigramFilter.html */\n CjkBigram = \"cjk_bigram\",\n /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKWidthFilter.html */\n CjkWidth = \"cjk_width\",\n /** Removes English possessives, and dots from acronyms. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicFilter.html */\n Classic = \"classic\",\n /** Construct bigrams for frequently occurring terms while indexing. 
Single terms are still indexed too, with bigrams overlaid. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/commongrams\\/CommonGramsFilter.html */\n CommonGram = \"common_grams\",\n /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenFilter.html */\n EdgeNGram = \"edgeNGram_v2\",\n /** Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html */\n Elision = \"elision\",\n /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/de\\/GermanNormalizationFilter.html */\n GermanNormalization = \"german_normalization\",\n /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/hi\\/HindiNormalizationFilter.html */\n HindiNormalization = \"hindi_normalization\",\n /** Normalizes the Unicode representation of text in Indian languages. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/in\\/IndicNormalizationFilter.html */\n IndicNormalization = \"indic_normalization\",\n /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/KeywordRepeatFilter.html */\n KeywordRepeat = \"keyword_repeat\",\n /** A high-performance kstem filter for English. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/en\\/KStemFilter.html */\n KStem = \"kstem\",\n /** Removes words that are too long or too short. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LengthFilter.html */\n Length = \"length\",\n /** Limits the number of tokens while indexing. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LimitTokenCountFilter.html */\n Limit = \"limit\",\n /** Normalizes token text to lower case. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html */\n Lowercase = \"lowercase\",\n /** Generates n-grams of the given size(s). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenFilter.html */\n NGram = \"nGram_v2\",\n /** Applies normalization for Persian. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/fa\\/PersianNormalizationFilter.html */\n PersianNormalization = \"persian_normalization\",\n /** Create tokens for phonetic matches. See https:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-phonetic\\/org\\/apache\\/lucene\\/analysis\\/phonetic\\/package-tree.html */\n Phonetic = \"phonetic\",\n /** Uses the Porter stemming algorithm to transform the token stream. See http:\\//tartarus.org\\/~martin\\/PorterStemmer */\n PorterStem = \"porter_stem\",\n /** Reverses the token string. 
See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/reverse\\/ReverseStringFilter.html */\n Reverse = \"reverse\",\n /** Normalizes use of the interchangeable Scandinavian characters. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianNormalizationFilter.html */\n ScandinavianNormalization = \"scandinavian_normalization\",\n /** Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianFoldingFilter.html */\n ScandinavianFoldingNormalization = \"scandinavian_folding\",\n /** Creates combinations of tokens as a single token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/shingle\\/ShingleFilter.html */\n Shingle = \"shingle\",\n /** A filter that stems words using a Snowball-generated stemmer. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/snowball\\/SnowballFilter.html */\n Snowball = \"snowball\",\n /** Normalizes the Unicode representation of Sorani text. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ckb\\/SoraniNormalizationFilter.html */\n SoraniNormalization = \"sorani_normalization\",\n /** Language specific stemming filter. See https:\\//learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#TokenFilters */\n Stemmer = \"stemmer\",\n /** Removes stop words from a token stream. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopFilter.html */\n Stopwords = \"stopwords\",\n /** Trims leading and trailing whitespace from tokens. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TrimFilter.html */\n Trim = \"trim\",\n /** Truncates the terms to a specific length. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TruncateTokenFilter.html */\n Truncate = \"truncate\",\n /** Filters out tokens with same text as the previous token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/RemoveDuplicatesTokenFilter.html */\n Unique = \"unique\",\n /** Normalizes token text to upper case. See https:\\//lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html */\n Uppercase = \"uppercase\",\n /** Splits words into subwords and performs optional transformations on subword groups. */\n WordDelimiter = \"word_delimiter\",\n}\n\n/**\n * Defines values for TokenFilterName. \\\n * {@link KnownTokenFilterName} can be used interchangeably with TokenFilterName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **arabic_normalization**: A token filter that applies the Arabic normalizer to normalize the orthography. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ar\\/ArabicNormalizationFilter.html \\\n * **apostrophe**: Strips all characters after an apostrophe (including the apostrophe itself). 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/tr\\/ApostropheFilter.html \\\n * **asciifolding**: Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the \"Basic Latin\" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ASCIIFoldingFilter.html \\\n * **cjk_bigram**: Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKBigramFilter.html \\\n * **cjk_width**: Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/cjk\\/CJKWidthFilter.html \\\n * **classic**: Removes English possessives, and dots from acronyms. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicFilter.html \\\n * **common_grams**: Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/commongrams\\/CommonGramsFilter.html \\\n * **edgeNGram_v2**: Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenFilter.html \\\n * **elision**: Removes elisions. For example, \"l'avion\" (the plane) will be converted to \"avion\" (plane). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/util\\/ElisionFilter.html \\\n * **german_normalization**: Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/de\\/GermanNormalizationFilter.html \\\n * **hindi_normalization**: Normalizes text in Hindi to remove some differences in spelling variations. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/hi\\/HindiNormalizationFilter.html \\\n * **indic_normalization**: Normalizes the Unicode representation of text in Indian languages. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/in\\/IndicNormalizationFilter.html \\\n * **keyword_repeat**: Emits each incoming token twice, once as keyword and once as non-keyword. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/KeywordRepeatFilter.html \\\n * **kstem**: A high-performance kstem filter for English. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/en\\/KStemFilter.html \\\n * **length**: Removes words that are too long or too short. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LengthFilter.html \\\n * **limit**: Limits the number of tokens while indexing. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/LimitTokenCountFilter.html \\\n * **lowercase**: Normalizes token text to lower case. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseFilter.html \\\n * **nGram_v2**: Generates n-grams of the given size(s). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenFilter.html \\\n * **persian_normalization**: Applies normalization for Persian. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/fa\\/PersianNormalizationFilter.html \\\n * **phonetic**: Create tokens for phonetic matches. See https:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-phonetic\\/org\\/apache\\/lucene\\/analysis\\/phonetic\\/package-tree.html \\\n * **porter_stem**: Uses the Porter stemming algorithm to transform the token stream. See http:\\/\\/tartarus.org\\/~martin\\/PorterStemmer \\\n * **reverse**: Reverses the token string. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/reverse\\/ReverseStringFilter.html \\\n * **scandinavian_normalization**: Normalizes use of the interchangeable Scandinavian characters. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianNormalizationFilter.html \\\n * **scandinavian_folding**: Folds Scandinavian characters åÅäæÄÆ-&gt;a and öÖøØ-&gt;o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/ScandinavianFoldingFilter.html \\\n * **shingle**: Creates combinations of tokens as a single token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/shingle\\/ShingleFilter.html \\\n * **snowball**: A filter that stems words using a Snowball-generated stemmer. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/snowball\\/SnowballFilter.html \\\n * **sorani_normalization**: Normalizes the Unicode representation of Sorani text. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ckb\\/SoraniNormalizationFilter.html \\\n * **stemmer**: Language specific stemming filter. See https:\\/\\/learn.microsoft.com\\/rest\\/api\\/searchservice\\/Custom-analyzers-in-Azure-Search#TokenFilters \\\n * **stopwords**: Removes stop words from a token stream. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/StopFilter.html \\\n * **trim**: Trims leading and trailing whitespace from tokens. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TrimFilter.html \\\n * **truncate**: Truncates the terms to a specific length. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/TruncateTokenFilter.html \\\n * **unique**: Filters out tokens with same text as the previous token. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/miscellaneous\\/RemoveDuplicatesTokenFilter.html \\\n * **uppercase**: Normalizes token text to upper case. See https:\\/\\/lucene.apache.org\\/core\\/6_6_1\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/UpperCaseFilter.html \\\n * **word_delimiter**: Splits words into subwords and performs optional transformations on subword groups.\n */\nexport type TokenFilterName = string;\n\n/** Known values of {@link CharFilterName} that the service accepts. */\nexport enum KnownCharFilterName {\n /** A character filter that attempts to strip out HTML constructs. See https:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/charfilter\\/HTMLStripCharFilter.html */\n HtmlStrip = \"html_strip\",\n}\n\n/**\n * Defines values for CharFilterName. \\\n * {@link KnownCharFilterName} can be used interchangeably with CharFilterName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **html_strip**: A character filter that attempts to strip out HTML constructs. See https:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/charfilter\\/HTMLStripCharFilter.html\n */\nexport type CharFilterName = string;\n\n/** Known values of {@link VectorSearchAlgorithmMetric} that the service accepts. */\nexport enum KnownVectorSearchAlgorithmMetric {\n /** Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. */\n Cosine = \"cosine\",\n /** Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. */\n Euclidean = \"euclidean\",\n /** Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. */\n DotProduct = \"dotProduct\",\n /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */\n Hamming = \"hamming\",\n}\n\n/**\n * Defines values for VectorSearchAlgorithmMetric. \\\n * {@link KnownVectorSearchAlgorithmMetric} can be used interchangeably with VectorSearchAlgorithmMetric,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **cosine**: Measures the angle between vectors to quantify their similarity, disregarding magnitude. The smaller the angle, the closer the similarity. \\\n * **euclidean**: Computes the straight-line distance between vectors in a multi-dimensional space. The smaller the distance, the closer the similarity. \\\n * **dotProduct**: Calculates the sum of element-wise products to gauge alignment and magnitude similarity. The larger and more positive, the closer the similarity. \\\n * **hamming**: Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity.\n */\nexport type VectorSearchAlgorithmMetric = string;\n\n/** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */\nexport enum KnownVectorSearchCompressionTarget {\n /** Int8 */\n Int8 = \"int8\",\n}\n\n/**\n * Defines values for VectorSearchCompressionTarget. 
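The predefined filter names above plug directly into a custom analyzer, and the similarity metrics feed a vector algorithm configuration. Two minimal sketches, assuming `CustomAnalyzer` and `HnswAlgorithmConfiguration` keep the shape of the stable 12.x public API; the analyzer name and `"standard_v2"` tokenizer choice are illustrative:

```ts
import {
  KnownCharFilterName,
  KnownTokenFilterName,
  KnownVectorSearchAlgorithmMetric,
  type CustomAnalyzer,
  type HnswAlgorithmConfiguration,
} from "@azure/search-documents";

// Custom analyzer: strip HTML, tokenize, lowercase, fold to ASCII, dedupe.
const analyzer: CustomAnalyzer = {
  odatatype: "#Microsoft.Azure.Search.CustomAnalyzer",
  name: "folded-lowercase",
  tokenizerName: "standard_v2", // the service's standard tokenizer
  tokenFilters: [
    KnownTokenFilterName.Lowercase,
    KnownTokenFilterName.AsciiFolding,
    KnownTokenFilterName.Unique,
  ],
  charFilters: [KnownCharFilterName.HtmlStrip],
};

// HNSW configuration: cosine suits normalized text embeddings; hamming is
// only valid for bit-packed binary vector fields.
const hnsw: HnswAlgorithmConfiguration = {
  name: "hnsw-cosine",
  kind: "hnsw",
  parameters: { metric: KnownVectorSearchAlgorithmMetric.Cosine },
};
```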
\\\n * {@link KnownVectorSearchCompressionTarget} can be used interchangeably with VectorSearchCompressionTarget,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **int8**\n */\nexport type VectorSearchCompressionTarget = string;\n\n/** Known values of {@link AIFoundryModelCatalogName} that the service accepts. */\nexport enum KnownAIFoundryModelCatalogName {\n /** OpenAIClipImageTextEmbeddingsVitBasePatch32 */\n OpenAIClipImageTextEmbeddingsVitBasePatch32 = \"OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32\",\n /** OpenAIClipImageTextEmbeddingsViTLargePatch14336 */\n OpenAIClipImageTextEmbeddingsViTLargePatch14336 = \"OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336\",\n /** FacebookDinoV2ImageEmbeddingsViTBase */\n FacebookDinoV2ImageEmbeddingsViTBase = \"Facebook-DinoV2-Image-Embeddings-ViT-Base\",\n /** FacebookDinoV2ImageEmbeddingsViTGiant */\n FacebookDinoV2ImageEmbeddingsViTGiant = \"Facebook-DinoV2-Image-Embeddings-ViT-Giant\",\n /** CohereEmbedV3English */\n CohereEmbedV3English = \"Cohere-embed-v3-english\",\n /** CohereEmbedV3Multilingual */\n CohereEmbedV3Multilingual = \"Cohere-embed-v3-multilingual\",\n /** Cohere embed v4 model for generating embeddings from both text and images. */\n CohereEmbedV4 = \"Cohere-embed-v4\",\n}\n\n/**\n * Defines values for AIFoundryModelCatalogName. \\\n * {@link KnownAIFoundryModelCatalogName} can be used interchangeably with AIFoundryModelCatalogName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **OpenAI-CLIP-Image-Text-Embeddings-vit-base-patch32** \\\n * **OpenAI-CLIP-Image-Text-Embeddings-ViT-Large-Patch14-336** \\\n * **Facebook-DinoV2-Image-Embeddings-ViT-Base** \\\n * **Facebook-DinoV2-Image-Embeddings-ViT-Giant** \\\n * **Cohere-embed-v3-english** \\\n * **Cohere-embed-v3-multilingual** \\\n * **Cohere-embed-v4**: Cohere embed v4 model for generating embeddings from both text and images.\n */\nexport type AIFoundryModelCatalogName = string;\n\n/** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */\nexport enum KnownKeyPhraseExtractionSkillLanguage {\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Korean */\n Ko = \"ko\",\n /** Norwegian (Bokmaal) */\n No = \"no\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Portuguese (Brazil) */\n PtBR = \"pt-BR\",\n /** Russian */\n Ru = \"ru\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n}\n\n/**\n * Defines values for KeyPhraseExtractionSkillLanguage. 
\\\n * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **ko**: Korean \\\n * **no**: Norwegian (Bokmaal) \\\n * **pl**: Polish \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **pt-BR**: Portuguese (Brazil) \\\n * **ru**: Russian \\\n * **es**: Spanish \\\n * **sv**: Swedish\n */\nexport type KeyPhraseExtractionSkillLanguage = string;\n\n/** Known values of {@link OcrSkillLanguage} that the service accepts. */\nexport enum KnownOcrSkillLanguage {\n /** Afrikaans */\n Af = \"af\",\n /** Albanian */\n Sq = \"sq\",\n /** Angika (Devanagiri) */\n Anp = \"anp\",\n /** Arabic */\n Ar = \"ar\",\n /** Asturian */\n Ast = \"ast\",\n /** Awadhi-Hindi (Devanagiri) */\n Awa = \"awa\",\n /** Azerbaijani (Latin) */\n Az = \"az\",\n /** Bagheli */\n Bfy = \"bfy\",\n /** Basque */\n Eu = \"eu\",\n /** Belarusian (Cyrillic and Latin) */\n Be = \"be\",\n /** Belarusian (Cyrillic) */\n BeCyrl = \"be-cyrl\",\n /** Belarusian (Latin) */\n BeLatn = \"be-latn\",\n /** Bhojpuri-Hindi (Devanagiri) */\n Bho = \"bho\",\n /** Bislama */\n Bi = \"bi\",\n /** Bodo (Devanagiri) */\n Brx = \"brx\",\n /** Bosnian Latin */\n Bs = \"bs\",\n /** Brajbha */\n Bra = \"bra\",\n /** Breton */\n Br = \"br\",\n /** Bulgarian */\n Bg = \"bg\",\n /** Bundeli */\n Bns = \"bns\",\n /** Buryat (Cyrillic) */\n Bua = \"bua\",\n /** Catalan */\n Ca = \"ca\",\n /** Cebuano */\n Ceb = \"ceb\",\n /** Chamling */\n Rab = \"rab\",\n /** Chamorro */\n Ch = \"ch\",\n /** Chhattisgarhi (Devanagiri) */\n Hne = \"hne\",\n /** Chinese Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese Traditional */\n ZhHant = \"zh-Hant\",\n /** Cornish */\n Kw = \"kw\",\n /** Corsican */\n Co = \"co\",\n /** Crimean Tatar (Latin) */\n Crh = \"crh\",\n /** Croatian */\n Hr = \"hr\",\n /** Czech */\n Cs = \"cs\",\n /** Danish */\n Da = \"da\",\n /** Dari */\n Prs = \"prs\",\n /** Dhimal (Devanagiri) */\n Dhi = \"dhi\",\n /** Dogri (Devanagiri) */\n Doi = \"doi\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Erzya (Cyrillic) */\n Myv = \"myv\",\n /** Estonian */\n Et = \"et\",\n /** Faroese */\n Fo = \"fo\",\n /** Fijian */\n Fj = \"fj\",\n /** Filipino */\n Fil = \"fil\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Frulian */\n Fur = \"fur\",\n /** Gagauz (Latin) */\n Gag = \"gag\",\n /** Galician */\n Gl = \"gl\",\n /** German */\n De = \"de\",\n /** Gilbertese */\n Gil = \"gil\",\n /** Gondi (Devanagiri) */\n Gon = \"gon\",\n /** Greek */\n El = \"el\",\n /** Greenlandic */\n Kl = \"kl\",\n /** Gurung (Devanagiri) */\n Gvr = \"gvr\",\n /** Haitian Creole */\n Ht = \"ht\",\n /** Halbi (Devanagiri) */\n Hlb = \"hlb\",\n /** Hani */\n Hni = \"hni\",\n /** Haryanvi */\n Bgc = \"bgc\",\n /** Hawaiian */\n Haw = \"haw\",\n /** Hindi */\n Hi = \"hi\",\n /** Hmong Daw (Latin) */\n Mww = \"mww\",\n /** Ho (Devanagiri) */\n Hoc = \"hoc\",\n /** Hungarian */\n Hu = \"hu\",\n /** Icelandic */\n Is = \"is\",\n /** Inari Sami */\n Smn = \"smn\",\n /** Indonesian */\n Id = \"id\",\n /** Interlingua */\n Ia = \"ia\",\n /** Inuktitut (Latin) */\n Iu = \"iu\",\n /** Irish */\n Ga = \"ga\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Jaunsari (Devanagiri) 
*/\n Jns = \"Jns\",\n /** Javanese */\n Jv = \"jv\",\n /** Kabuverdianu */\n Kea = \"kea\",\n /** Kachin (Latin) */\n Kac = \"kac\",\n /** Kangri (Devanagiri) */\n Xnr = \"xnr\",\n /** Karachay-Balkar */\n Krc = \"krc\",\n /** Kara-Kalpak (Cyrillic) */\n KaaCyrl = \"kaa-cyrl\",\n /** Kara-Kalpak (Latin) */\n Kaa = \"kaa\",\n /** Kashubian */\n Csb = \"csb\",\n /** Kazakh (Cyrillic) */\n KkCyrl = \"kk-cyrl\",\n /** Kazakh (Latin) */\n KkLatn = \"kk-latn\",\n /** Khaling */\n Klr = \"klr\",\n /** Khasi */\n Kha = \"kha\",\n /** K'iche' */\n Quc = \"quc\",\n /** Korean */\n Ko = \"ko\",\n /** Korku */\n Kfq = \"kfq\",\n /** Koryak */\n Kpy = \"kpy\",\n /** Kosraean */\n Kos = \"kos\",\n /** Kumyk (Cyrillic) */\n Kum = \"kum\",\n /** Kurdish (Arabic) */\n KuArab = \"ku-arab\",\n /** Kurdish (Latin) */\n KuLatn = \"ku-latn\",\n /** Kurukh (Devanagiri) */\n Kru = \"kru\",\n /** Kyrgyz (Cyrillic) */\n Ky = \"ky\",\n /** Lakota */\n Lkt = \"lkt\",\n /** Latin */\n La = \"la\",\n /** Lithuanian */\n Lt = \"lt\",\n /** Lower Sorbian */\n Dsb = \"dsb\",\n /** Lule Sami */\n Smj = \"smj\",\n /** Luxembourgish */\n Lb = \"lb\",\n /** Mahasu Pahari (Devanagiri) */\n Bfz = \"bfz\",\n /** Malay (Latin) */\n Ms = \"ms\",\n /** Maltese */\n Mt = \"mt\",\n /** Malto (Devanagiri) */\n Kmj = \"kmj\",\n /** Manx */\n Gv = \"gv\",\n /** Maori */\n Mi = \"mi\",\n /** Marathi */\n Mr = \"mr\",\n /** Mongolian (Cyrillic) */\n Mn = \"mn\",\n /** Montenegrin (Cyrillic) */\n CnrCyrl = \"cnr-cyrl\",\n /** Montenegrin (Latin) */\n CnrLatn = \"cnr-latn\",\n /** Neapolitan */\n Nap = \"nap\",\n /** Nepali */\n Ne = \"ne\",\n /** Niuean */\n Niu = \"niu\",\n /** Nogay */\n Nog = \"nog\",\n /** Northern Sami (Latin) */\n Sme = \"sme\",\n /** Norwegian */\n Nb = \"nb\",\n /** Norwegian */\n No = \"no\",\n /** Occitan */\n Oc = \"oc\",\n /** Ossetic */\n Os = \"os\",\n /** Pashto */\n Ps = \"ps\",\n /** Persian */\n Fa = \"fa\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese */\n Pt = \"pt\",\n /** Punjabi (Arabic) */\n Pa = \"pa\",\n /** Ripuarian */\n Ksh = \"ksh\",\n /** Romanian */\n Ro = \"ro\",\n /** Romansh */\n Rm = \"rm\",\n /** Russian */\n Ru = \"ru\",\n /** Sadri (Devanagiri) */\n Sck = \"sck\",\n /** Samoan (Latin) */\n Sm = \"sm\",\n /** Sanskrit (Devanagiri) */\n Sa = \"sa\",\n /** Santali (Devanagiri) */\n Sat = \"sat\",\n /** Scots */\n Sco = \"sco\",\n /** Scottish Gaelic */\n Gd = \"gd\",\n /** Serbian (Latin) */\n Sr = \"sr\",\n /** Serbian (Cyrillic) */\n SrCyrl = \"sr-Cyrl\",\n /** Serbian (Latin) */\n SrLatn = \"sr-Latn\",\n /** Sherpa (Devanagiri) */\n Xsr = \"xsr\",\n /** Sirmauri (Devanagiri) */\n Srx = \"srx\",\n /** Skolt Sami */\n Sms = \"sms\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Somali (Arabic) */\n So = \"so\",\n /** Southern Sami */\n Sma = \"sma\",\n /** Spanish */\n Es = \"es\",\n /** Swahili (Latin) */\n Sw = \"sw\",\n /** Swedish */\n Sv = \"sv\",\n /** Tajik (Cyrillic) */\n Tg = \"tg\",\n /** Tatar (Latin) */\n Tt = \"tt\",\n /** Tetum */\n Tet = \"tet\",\n /** Thangmi */\n Thf = \"thf\",\n /** Tongan */\n To = \"to\",\n /** Turkish */\n Tr = \"tr\",\n /** Turkmen (Latin) */\n Tk = \"tk\",\n /** Tuvan */\n Tyv = \"tyv\",\n /** Upper Sorbian */\n Hsb = \"hsb\",\n /** Urdu */\n Ur = \"ur\",\n /** Uyghur (Arabic) */\n Ug = \"ug\",\n /** Uzbek (Arabic) */\n UzArab = \"uz-arab\",\n /** Uzbek (Cyrillic) */\n UzCyrl = \"uz-cyrl\",\n /** Uzbek (Latin) */\n Uz = \"uz\",\n /** Volapük */\n Vo = \"vo\",\n /** Walser */\n Wae = \"wae\",\n /** Welsh */\n Cy = 
\"cy\",\n /** Western Frisian */\n Fy = \"fy\",\n /** Yucatec Maya */\n Yua = \"yua\",\n /** Zhuang */\n Za = \"za\",\n /** Zulu */\n Zu = \"zu\",\n /** Unknown (All) */\n Unk = \"unk\",\n}\n\n/**\n * Defines values for OcrSkillLanguage. \\\n * {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **af**: Afrikaans \\\n * **sq**: Albanian \\\n * **anp**: Angika (Devanagiri) \\\n * **ar**: Arabic \\\n * **ast**: Asturian \\\n * **awa**: Awadhi-Hindi (Devanagiri) \\\n * **az**: Azerbaijani (Latin) \\\n * **bfy**: Bagheli \\\n * **eu**: Basque \\\n * **be**: Belarusian (Cyrillic and Latin) \\\n * **be-cyrl**: Belarusian (Cyrillic) \\\n * **be-latn**: Belarusian (Latin) \\\n * **bho**: Bhojpuri-Hindi (Devanagiri) \\\n * **bi**: Bislama \\\n * **brx**: Bodo (Devanagiri) \\\n * **bs**: Bosnian Latin \\\n * **bra**: Brajbha \\\n * **br**: Breton \\\n * **bg**: Bulgarian \\\n * **bns**: Bundeli \\\n * **bua**: Buryat (Cyrillic) \\\n * **ca**: Catalan \\\n * **ceb**: Cebuano \\\n * **rab**: Chamling \\\n * **ch**: Chamorro \\\n * **hne**: Chhattisgarhi (Devanagiri) \\\n * **zh-Hans**: Chinese Simplified \\\n * **zh-Hant**: Chinese Traditional \\\n * **kw**: Cornish \\\n * **co**: Corsican \\\n * **crh**: Crimean Tatar (Latin) \\\n * **hr**: Croatian \\\n * **cs**: Czech \\\n * **da**: Danish \\\n * **prs**: Dari \\\n * **dhi**: Dhimal (Devanagiri) \\\n * **doi**: Dogri (Devanagiri) \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **myv**: Erzya (Cyrillic) \\\n * **et**: Estonian \\\n * **fo**: Faroese \\\n * **fj**: Fijian \\\n * **fil**: Filipino \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **fur**: Frulian \\\n * **gag**: Gagauz (Latin) \\\n * **gl**: Galician \\\n * **de**: German \\\n * **gil**: Gilbertese \\\n * **gon**: Gondi (Devanagiri) \\\n * **el**: Greek \\\n * **kl**: Greenlandic \\\n * **gvr**: Gurung (Devanagiri) \\\n * **ht**: Haitian Creole \\\n * **hlb**: Halbi (Devanagiri) \\\n * **hni**: Hani \\\n * **bgc**: Haryanvi \\\n * **haw**: Hawaiian \\\n * **hi**: Hindi \\\n * **mww**: Hmong Daw (Latin) \\\n * **hoc**: Ho (Devanagiri) \\\n * **hu**: Hungarian \\\n * **is**: Icelandic \\\n * **smn**: Inari Sami \\\n * **id**: Indonesian \\\n * **ia**: Interlingua \\\n * **iu**: Inuktitut (Latin) \\\n * **ga**: Irish \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **Jns**: Jaunsari (Devanagiri) \\\n * **jv**: Javanese \\\n * **kea**: Kabuverdianu \\\n * **kac**: Kachin (Latin) \\\n * **xnr**: Kangri (Devanagiri) \\\n * **krc**: Karachay-Balkar \\\n * **kaa-cyrl**: Kara-Kalpak (Cyrillic) \\\n * **kaa**: Kara-Kalpak (Latin) \\\n * **csb**: Kashubian \\\n * **kk-cyrl**: Kazakh (Cyrillic) \\\n * **kk-latn**: Kazakh (Latin) \\\n * **klr**: Khaling \\\n * **kha**: Khasi \\\n * **quc**: K'iche' \\\n * **ko**: Korean \\\n * **kfq**: Korku \\\n * **kpy**: Koryak \\\n * **kos**: Kosraean \\\n * **kum**: Kumyk (Cyrillic) \\\n * **ku-arab**: Kurdish (Arabic) \\\n * **ku-latn**: Kurdish (Latin) \\\n * **kru**: Kurukh (Devanagiri) \\\n * **ky**: Kyrgyz (Cyrillic) \\\n * **lkt**: Lakota \\\n * **la**: Latin \\\n * **lt**: Lithuanian \\\n * **dsb**: Lower Sorbian \\\n * **smj**: Lule Sami \\\n * **lb**: Luxembourgish \\\n * **bfz**: Mahasu Pahari (Devanagiri) \\\n * **ms**: Malay (Latin) \\\n * **mt**: Maltese \\\n * **kmj**: Malto (Devanagiri) \\\n * **gv**: Manx \\\n * **mi**: Maori \\\n * **mr**: Marathi \\\n * **mn**: Mongolian (Cyrillic) \\\n * 
**cnr-cyrl**: Montenegrin (Cyrillic) \\\n * **cnr-latn**: Montenegrin (Latin) \\\n * **nap**: Neapolitan \\\n * **ne**: Nepali \\\n * **niu**: Niuean \\\n * **nog**: Nogay \\\n * **sme**: Northern Sami (Latin) \\\n * **nb**: Norwegian \\\n * **no**: Norwegian \\\n * **oc**: Occitan \\\n * **os**: Ossetic \\\n * **ps**: Pashto \\\n * **fa**: Persian \\\n * **pl**: Polish \\\n * **pt**: Portuguese \\\n * **pa**: Punjabi (Arabic) \\\n * **ksh**: Ripuarian \\\n * **ro**: Romanian \\\n * **rm**: Romansh \\\n * **ru**: Russian \\\n * **sck**: Sadri (Devanagiri) \\\n * **sm**: Samoan (Latin) \\\n * **sa**: Sanskrit (Devanagiri) \\\n * **sat**: Santali (Devanagiri) \\\n * **sco**: Scots \\\n * **gd**: Scottish Gaelic \\\n * **sr**: Serbian (Latin) \\\n * **sr-Cyrl**: Serbian (Cyrillic) \\\n * **sr-Latn**: Serbian (Latin) \\\n * **xsr**: Sherpa (Devanagiri) \\\n * **srx**: Sirmauri (Devanagiri) \\\n * **sms**: Skolt Sami \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **so**: Somali (Arabic) \\\n * **sma**: Southern Sami \\\n * **es**: Spanish \\\n * **sw**: Swahili (Latin) \\\n * **sv**: Swedish \\\n * **tg**: Tajik (Cyrillic) \\\n * **tt**: Tatar (Latin) \\\n * **tet**: Tetum \\\n * **thf**: Thangmi \\\n * **to**: Tongan \\\n * **tr**: Turkish \\\n * **tk**: Turkmen (Latin) \\\n * **tyv**: Tuvan \\\n * **hsb**: Upper Sorbian \\\n * **ur**: Urdu \\\n * **ug**: Uyghur (Arabic) \\\n * **uz-arab**: Uzbek (Arabic) \\\n * **uz-cyrl**: Uzbek (Cyrillic) \\\n * **uz**: Uzbek (Latin) \\\n * **vo**: Volapük \\\n * **wae**: Walser \\\n * **cy**: Welsh \\\n * **fy**: Western Frisian \\\n * **yua**: Yucatec Maya \\\n * **za**: Zhuang \\\n * **zu**: Zulu \\\n * **unk**: Unknown (All)\n */\nexport type OcrSkillLanguage = string;\n\n/** Known values of {@link OcrLineEnding} that the service accepts. */\nexport enum KnownOcrLineEnding {\n /** Lines are separated by a single space character. */\n Space = \"space\",\n /** Lines are separated by a carriage return ('\\r') character. */\n CarriageReturn = \"carriageReturn\",\n /** Lines are separated by a single line feed ('\\n') character. */\n LineFeed = \"lineFeed\",\n /** Lines are separated by a carriage return and a line feed ('\\r\\n') character. */\n CarriageReturnLineFeed = \"carriageReturnLineFeed\",\n}\n\n/**\n * Defines values for OcrLineEnding. \\\n * {@link KnownOcrLineEnding} can be used interchangeably with OcrLineEnding,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **space**: Lines are separated by a single space character. \\\n * **carriageReturn**: Lines are separated by a carriage return ('\\r') character. \\\n * **lineFeed**: Lines are separated by a single line feed ('\\n') character. \\\n * **carriageReturnLineFeed**: Lines are separated by a carriage return and a line feed ('\\r\\n') character.\n */\nexport type OcrLineEnding = string;\n\n/** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. 
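A minimal OCR skill sketch using the language and line-ending values above. The `OcrSkill` shape is assumed to match the stable 12.x public API; the annotation paths and target names are illustrative:

```ts
import {
  KnownOcrLineEnding,
  KnownOcrSkillLanguage,
  type OcrSkill,
} from "@azure/search-documents";

// Pin the language rather than relying on "unk" auto-detection, and join
// recognized lines with "\r\n".
const ocrSkill: OcrSkill = {
  odatatype: "#Microsoft.Skills.Vision.OcrSkill",
  defaultLanguageCode: KnownOcrSkillLanguage.En,
  lineEnding: KnownOcrLineEnding.CarriageReturnLineFeed,
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [{ name: "text", targetName: "ocrText" }],
};
```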
*/\nexport enum KnownImageAnalysisSkillLanguage {\n /** Arabic */\n Ar = \"ar\",\n /** Azerbaijani */\n Az = \"az\",\n /** Bulgarian */\n Bg = \"bg\",\n /** Bosnian Latin */\n Bs = \"bs\",\n /** Catalan */\n Ca = \"ca\",\n /** Czech */\n Cs = \"cs\",\n /** Welsh */\n Cy = \"cy\",\n /** Danish */\n Da = \"da\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** English */\n En = \"en\",\n /** Spanish */\n Es = \"es\",\n /** Estonian */\n Et = \"et\",\n /** Basque */\n Eu = \"eu\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Irish */\n Ga = \"ga\",\n /** Galician */\n Gl = \"gl\",\n /** Hebrew */\n He = \"he\",\n /** Hindi */\n Hi = \"hi\",\n /** Croatian */\n Hr = \"hr\",\n /** Hungarian */\n Hu = \"hu\",\n /** Indonesian */\n Id = \"id\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Kazakh */\n Kk = \"kk\",\n /** Korean */\n Ko = \"ko\",\n /** Lithuanian */\n Lt = \"lt\",\n /** Latvian */\n Lv = \"lv\",\n /** Macedonian */\n Mk = \"mk\",\n /** Malay Malaysia */\n Ms = \"ms\",\n /** Norwegian (Bokmal) */\n Nb = \"nb\",\n /** Dutch */\n Nl = \"nl\",\n /** Polish */\n Pl = \"pl\",\n /** Dari */\n Prs = \"prs\",\n /** Portuguese-Brazil */\n PtBR = \"pt-BR\",\n /** Portuguese-Portugal */\n Pt = \"pt\",\n /** Portuguese-Portugal */\n PtPT = \"pt-PT\",\n /** Romanian */\n Ro = \"ro\",\n /** Russian */\n Ru = \"ru\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Serbian - Cyrillic RS */\n SrCyrl = \"sr-Cyrl\",\n /** Serbian - Latin RS */\n SrLatn = \"sr-Latn\",\n /** Swedish */\n Sv = \"sv\",\n /** Thai */\n Th = \"th\",\n /** Turkish */\n Tr = \"tr\",\n /** Ukrainian */\n Uk = \"uk\",\n /** Vietnamese */\n Vi = \"vi\",\n /** Chinese Simplified */\n Zh = \"zh\",\n /** Chinese Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese Traditional */\n ZhHant = \"zh-Hant\",\n}\n\n/**\n * Defines values for ImageAnalysisSkillLanguage. 
\\\n * {@link KnownImageAnalysisSkillLanguage} can be used interchangeably with ImageAnalysisSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar**: Arabic \\\n * **az**: Azerbaijani \\\n * **bg**: Bulgarian \\\n * **bs**: Bosnian Latin \\\n * **ca**: Catalan \\\n * **cs**: Czech \\\n * **cy**: Welsh \\\n * **da**: Danish \\\n * **de**: German \\\n * **el**: Greek \\\n * **en**: English \\\n * **es**: Spanish \\\n * **et**: Estonian \\\n * **eu**: Basque \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **ga**: Irish \\\n * **gl**: Galician \\\n * **he**: Hebrew \\\n * **hi**: Hindi \\\n * **hr**: Croatian \\\n * **hu**: Hungarian \\\n * **id**: Indonesian \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **kk**: Kazakh \\\n * **ko**: Korean \\\n * **lt**: Lithuanian \\\n * **lv**: Latvian \\\n * **mk**: Macedonian \\\n * **ms**: Malay Malaysia \\\n * **nb**: Norwegian (Bokmal) \\\n * **nl**: Dutch \\\n * **pl**: Polish \\\n * **prs**: Dari \\\n * **pt-BR**: Portuguese-Brazil \\\n * **pt**: Portuguese-Portugal \\\n * **pt-PT**: Portuguese-Portugal \\\n * **ro**: Romanian \\\n * **ru**: Russian \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **sr-Cyrl**: Serbian - Cyrillic RS \\\n * **sr-Latn**: Serbian - Latin RS \\\n * **sv**: Swedish \\\n * **th**: Thai \\\n * **tr**: Turkish \\\n * **uk**: Ukrainian \\\n * **vi**: Vietnamese \\\n * **zh**: Chinese Simplified \\\n * **zh-Hans**: Chinese Simplified \\\n * **zh-Hant**: Chinese Traditional\n */\nexport type ImageAnalysisSkillLanguage = string;\n\n/** Known values of {@link VisualFeature} that the service accepts. */\nexport enum KnownVisualFeature {\n /** Visual features recognized as adult persons. */\n Adult = \"adult\",\n /** Visual features recognized as commercial brands. */\n Brands = \"brands\",\n /** Categories. */\n Categories = \"categories\",\n /** Description. */\n Description = \"description\",\n /** Visual features recognized as people faces. */\n Faces = \"faces\",\n /** Visual features recognized as objects. */\n Objects = \"objects\",\n /** Tags. */\n Tags = \"tags\",\n}\n\n/**\n * Defines values for VisualFeature. \\\n * {@link KnownVisualFeature} can be used interchangeably with VisualFeature,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **adult**: Visual features recognized as adult persons. \\\n * **brands**: Visual features recognized as commercial brands. \\\n * **categories**: Categories. \\\n * **description**: Description. \\\n * **faces**: Visual features recognized as people faces. \\\n * **objects**: Visual features recognized as objects. \\\n * **tags**: Tags.\n */\nexport type VisualFeature = string;\n\n/** Known values of {@link ImageDetail} that the service accepts. */\nexport enum KnownImageDetail {\n /** Details recognized as celebrities. */\n Celebrities = \"celebrities\",\n /** Details recognized as landmarks. */\n Landmarks = \"landmarks\",\n}\n\n/**\n * Defines values for ImageDetail. \\\n * {@link KnownImageDetail} can be used interchangeably with ImageDetail,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **celebrities**: Details recognized as celebrities. \\\n * **landmarks**: Details recognized as landmarks.\n */\nexport type ImageDetail = string;\n\n/** Known values of {@link EntityCategory} that the service accepts. 
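The visual features and detail levels combine in an image analysis skill. A sketch under the same assumption that the public `ImageAnalysisSkill` shape is unchanged; paths and target names are illustrative:

```ts
import {
  KnownImageAnalysisSkillLanguage,
  KnownImageDetail,
  KnownVisualFeature,
  type ImageAnalysisSkill,
} from "@azure/search-documents";

// Extract tags and a caption, with landmark details, per normalized image.
const imageAnalysis: ImageAnalysisSkill = {
  odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill",
  defaultLanguageCode: KnownImageAnalysisSkillLanguage.En,
  visualFeatures: [KnownVisualFeature.Tags, KnownVisualFeature.Description],
  details: [KnownImageDetail.Landmarks],
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [
    { name: "tags", targetName: "imageTags" },
    { name: "description", targetName: "imageCaption" },
  ],
};
```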
*/\nexport enum KnownEntityCategory {\n /** Entities describing a physical location. */\n Location = \"location\",\n /** Entities describing an organization. */\n Organization = \"organization\",\n /** Entities describing a person. */\n Person = \"person\",\n /** Entities describing a quantity. */\n Quantity = \"quantity\",\n /** Entities describing a date and time. */\n Datetime = \"datetime\",\n /** Entities describing a URL. */\n Url = \"url\",\n /** Entities describing an email address. */\n Email = \"email\",\n}\n\n/**\n * Defines values for EntityCategory. \\\n * {@link KnownEntityCategory} can be used interchangeably with EntityCategory,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **location**: Entities describing a physical location. \\\n * **organization**: Entities describing an organization. \\\n * **person**: Entities describing a person. \\\n * **quantity**: Entities describing a quantity. \\\n * **datetime**: Entities describing a date and time. \\\n * **url**: Entities describing a URL. \\\n * **email**: Entities describing an email address.\n */\nexport type EntityCategory = string;\n\n/** Known values of {@link EntityRecognitionSkillLanguage} that the service accepts. */\nexport enum KnownEntityRecognitionSkillLanguage {\n /** Arabic */\n Ar = \"ar\",\n /** Czech */\n Cs = \"cs\",\n /** Chinese-Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese-Traditional */\n ZhHant = \"zh-Hant\",\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** Hungarian */\n Hu = \"hu\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Korean */\n Ko = \"ko\",\n /** Norwegian (Bokmaal) */\n No = \"no\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Portuguese (Brazil) */\n PtBR = \"pt-BR\",\n /** Russian */\n Ru = \"ru\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n /** Turkish */\n Tr = \"tr\",\n}\n\n/**\n * Defines values for EntityRecognitionSkillLanguage. \\\n * {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **ar**: Arabic \\\n * **cs**: Czech \\\n * **zh-Hans**: Chinese-Simplified \\\n * **zh-Hant**: Chinese-Traditional \\\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **el**: Greek \\\n * **hu**: Hungarian \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **ko**: Korean \\\n * **no**: Norwegian (Bokmaal) \\\n * **pl**: Polish \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **pt-BR**: Portuguese (Brazil) \\\n * **ru**: Russian \\\n * **es**: Spanish \\\n * **sv**: Swedish \\\n * **tr**: Turkish\n */\nexport type EntityRecognitionSkillLanguage = string;\n\n/** Known values of {@link SentimentSkillLanguage} that the service accepts. 
*/\nexport enum KnownSentimentSkillLanguage {\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** Italian */\n It = \"it\",\n /** Norwegian (Bokmaal) */\n No = \"no\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Russian */\n Ru = \"ru\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n /** Turkish */\n Tr = \"tr\",\n}\n\n/**\n * Defines values for SentimentSkillLanguage. \\\n * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **el**: Greek \\\n * **it**: Italian \\\n * **no**: Norwegian (Bokmaal) \\\n * **pl**: Polish \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **ru**: Russian \\\n * **es**: Spanish \\\n * **sv**: Swedish \\\n * **tr**: Turkish\n */\nexport type SentimentSkillLanguage = string;\n\n/** Known values of {@link PIIDetectionSkillMaskingMode} that the service accepts. */\nexport enum KnownPIIDetectionSkillMaskingMode {\n /** No masking occurs and the maskedText output will not be returned. */\n None = \"none\",\n /** Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText. */\n Replace = \"replace\",\n}\n\n/**\n * Defines values for PIIDetectionSkillMaskingMode. \\\n * {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **none**: No masking occurs and the maskedText output will not be returned. \\\n * **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText.\n */\nexport type PIIDetectionSkillMaskingMode = string;\n\n/** Known values of {@link SplitSkillLanguage} that the service accepts. 
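The masking modes drive how the skill rewrites `maskedText`. A sketch, assuming the `PIIDetectionSkill` type matches the 12.x public API; paths and target names are illustrative:

```ts
import {
  KnownPIIDetectionSkillMaskingMode,
  type PIIDetectionSkill,
} from "@azure/search-documents";

// "replace" repeats the masking character over each detected entity so the
// offsets in maskedText still line up with the input text.
const piiSkill: PIIDetectionSkill = {
  odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill",
  maskingMode: KnownPIIDetectionSkillMaskingMode.Replace,
  maskingCharacter: "*",
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "maskedText", targetName: "redactedContent" }],
};
```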
*/\nexport enum KnownSplitSkillLanguage {\n /** Amharic */\n Am = \"am\",\n /** Bosnian */\n Bs = \"bs\",\n /** Czech */\n Cs = \"cs\",\n /** Danish */\n Da = \"da\",\n /** German */\n De = \"de\",\n /** English */\n En = \"en\",\n /** Spanish */\n Es = \"es\",\n /** Estonian */\n Et = \"et\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Hebrew */\n He = \"he\",\n /** Hindi */\n Hi = \"hi\",\n /** Croatian */\n Hr = \"hr\",\n /** Hungarian */\n Hu = \"hu\",\n /** Indonesian */\n Id = \"id\",\n /** Icelandic */\n Is = \"is\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Korean */\n Ko = \"ko\",\n /** Latvian */\n Lv = \"lv\",\n /** Norwegian */\n Nb = \"nb\",\n /** Dutch */\n Nl = \"nl\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese (Portugal) */\n Pt = \"pt\",\n /** Portuguese (Brazil) */\n PtBr = \"pt-br\",\n /** Russian */\n Ru = \"ru\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Serbian */\n Sr = \"sr\",\n /** Swedish */\n Sv = \"sv\",\n /** Turkish */\n Tr = \"tr\",\n /** Urdu */\n Ur = \"ur\",\n /** Chinese (Simplified) */\n Zh = \"zh\",\n}\n\n/**\n * Defines values for SplitSkillLanguage. \\\n * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **am**: Amharic \\\n * **bs**: Bosnian \\\n * **cs**: Czech \\\n * **da**: Danish \\\n * **de**: German \\\n * **en**: English \\\n * **es**: Spanish \\\n * **et**: Estonian \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **he**: Hebrew \\\n * **hi**: Hindi \\\n * **hr**: Croatian \\\n * **hu**: Hungarian \\\n * **id**: Indonesian \\\n * **is**: Icelandic \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **ko**: Korean \\\n * **lv**: Latvian \\\n * **nb**: Norwegian \\\n * **nl**: Dutch \\\n * **pl**: Polish \\\n * **pt**: Portuguese (Portugal) \\\n * **pt-br**: Portuguese (Brazil) \\\n * **ru**: Russian \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **sr**: Serbian \\\n * **sv**: Swedish \\\n * **tr**: Turkish \\\n * **ur**: Urdu \\\n * **zh**: Chinese (Simplified)\n */\nexport type SplitSkillLanguage = string;\n\n/** Known values of {@link TextSplitMode} that the service accepts. */\nexport enum KnownTextSplitMode {\n /** Split the text into individual pages. */\n Pages = \"pages\",\n /** Split the text into individual sentences. */\n Sentences = \"sentences\",\n}\n\n/**\n * Defines values for TextSplitMode. \\\n * {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **pages**: Split the text into individual pages. \\\n * **sentences**: Split the text into individual sentences.\n */\nexport type TextSplitMode = string;\n\n/** Known values of {@link SplitSkillUnit} that the service accepts. */\nexport enum KnownSplitSkillUnit {\n /** The length will be measured by character. */\n Characters = \"characters\",\n /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */\n AzureOpenAITokens = \"azureOpenAITokens\",\n}\n\n/**\n * Defines values for SplitSkillUnit. \\\n * {@link KnownSplitSkillUnit} can be used interchangeably with SplitSkillUnit,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **characters**: The length will be measured by character. 
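Split mode and measurement unit combine in the split skill. A sketch: `textSplitMode` and `maximumPageLength` are long-standing public properties, while surfacing `unit` on the public `SplitSkill` type is an assumption based on recent preview releases:

```ts
import {
  KnownSplitSkillLanguage,
  KnownSplitSkillUnit,
  KnownTextSplitMode,
  type SplitSkill,
} from "@azure/search-documents";

// Chunk into pages of at most 512 Azure OpenAI tokens (not characters).
const splitSkill: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  defaultLanguageCode: KnownSplitSkillLanguage.En,
  textSplitMode: KnownTextSplitMode.Pages,
  maximumPageLength: 512,
  unit: KnownSplitSkillUnit.AzureOpenAITokens, // assumed preview property
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "chunks" }],
};
```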
\\\n * **azureOpenAITokens**: The length will be measured by an AzureOpenAI tokenizer from the tiktoken library.\n */\nexport type SplitSkillUnit = string;\n\n/** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */\nexport enum KnownSplitSkillEncoderModelName {\n /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */\n R50KBase = \"r50k_base\",\n /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */\n P50KBase = \"p50k_base\",\n /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */\n P50KEdit = \"p50k_edit\",\n /** A base model with a 100,000 token vocabulary. */\n CL100KBase = \"cl100k_base\",\n}\n\n/**\n * Defines values for SplitSkillEncoderModelName. \\\n * {@link KnownSplitSkillEncoderModelName} can be used interchangeably with SplitSkillEncoderModelName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **r50k_base**: Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. \\\n * **p50k_base**: A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. \\\n * **p50k_edit**: Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. \\\n * **cl100k_base**: A base model with a 100,000 token vocabulary.\n */\nexport type SplitSkillEncoderModelName = string;\n\n/** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */\nexport enum KnownCustomEntityLookupSkillLanguage {\n /** Danish */\n Da = \"da\",\n /** German */\n De = \"de\",\n /** English */\n En = \"en\",\n /** Spanish */\n Es = \"es\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** Italian */\n It = \"it\",\n /** Korean */\n Ko = \"ko\",\n /** Portuguese */\n Pt = \"pt\",\n}\n\n/**\n * Defines values for CustomEntityLookupSkillLanguage. \\\n * {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **da**: Danish \\\n * **de**: German \\\n * **en**: English \\\n * **es**: Spanish \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **it**: Italian \\\n * **ko**: Korean \\\n * **pt**: Portuguese\n */\nexport type CustomEntityLookupSkillLanguage = string;\n\n/** Known values of {@link TextTranslationSkillLanguage} that the service accepts. 
*/\nexport enum KnownTextTranslationSkillLanguage {\n /** Afrikaans */\n Af = \"af\",\n /** Arabic */\n Ar = \"ar\",\n /** Bangla */\n Bn = \"bn\",\n /** Bosnian (Latin) */\n Bs = \"bs\",\n /** Bulgarian */\n Bg = \"bg\",\n /** Cantonese (Traditional) */\n Yue = \"yue\",\n /** Catalan */\n Ca = \"ca\",\n /** Chinese Simplified */\n ZhHans = \"zh-Hans\",\n /** Chinese Traditional */\n ZhHant = \"zh-Hant\",\n /** Croatian */\n Hr = \"hr\",\n /** Czech */\n Cs = \"cs\",\n /** Danish */\n Da = \"da\",\n /** Dutch */\n Nl = \"nl\",\n /** English */\n En = \"en\",\n /** Estonian */\n Et = \"et\",\n /** Fijian */\n Fj = \"fj\",\n /** Filipino */\n Fil = \"fil\",\n /** Finnish */\n Fi = \"fi\",\n /** French */\n Fr = \"fr\",\n /** German */\n De = \"de\",\n /** Greek */\n El = \"el\",\n /** Haitian Creole */\n Ht = \"ht\",\n /** Hebrew */\n He = \"he\",\n /** Hindi */\n Hi = \"hi\",\n /** Hmong Daw */\n Mww = \"mww\",\n /** Hungarian */\n Hu = \"hu\",\n /** Icelandic */\n Is = \"is\",\n /** Indonesian */\n Id = \"id\",\n /** Italian */\n It = \"it\",\n /** Japanese */\n Ja = \"ja\",\n /** Kiswahili */\n Sw = \"sw\",\n /** Klingon */\n Tlh = \"tlh\",\n /** Klingon (Latin script) */\n TlhLatn = \"tlh-Latn\",\n /** Klingon (Klingon script) */\n TlhPiqd = \"tlh-Piqd\",\n /** Korean */\n Ko = \"ko\",\n /** Latvian */\n Lv = \"lv\",\n /** Lithuanian */\n Lt = \"lt\",\n /** Malagasy */\n Mg = \"mg\",\n /** Malay */\n Ms = \"ms\",\n /** Maltese */\n Mt = \"mt\",\n /** Norwegian */\n Nb = \"nb\",\n /** Persian */\n Fa = \"fa\",\n /** Polish */\n Pl = \"pl\",\n /** Portuguese */\n Pt = \"pt\",\n /** Portuguese (Brazil) */\n PtBr = \"pt-br\",\n /** Portuguese (Portugal) */\n PtPT = \"pt-PT\",\n /** Queretaro Otomi */\n Otq = \"otq\",\n /** Romanian */\n Ro = \"ro\",\n /** Russian */\n Ru = \"ru\",\n /** Samoan */\n Sm = \"sm\",\n /** Serbian (Cyrillic) */\n SrCyrl = \"sr-Cyrl\",\n /** Serbian (Latin) */\n SrLatn = \"sr-Latn\",\n /** Slovak */\n Sk = \"sk\",\n /** Slovenian */\n Sl = \"sl\",\n /** Spanish */\n Es = \"es\",\n /** Swedish */\n Sv = \"sv\",\n /** Tahitian */\n Ty = \"ty\",\n /** Tamil */\n Ta = \"ta\",\n /** Telugu */\n Te = \"te\",\n /** Thai */\n Th = \"th\",\n /** Tongan */\n To = \"to\",\n /** Turkish */\n Tr = \"tr\",\n /** Ukrainian */\n Uk = \"uk\",\n /** Urdu */\n Ur = \"ur\",\n /** Vietnamese */\n Vi = \"vi\",\n /** Welsh */\n Cy = \"cy\",\n /** Yucatec Maya */\n Yua = \"yua\",\n /** Irish */\n Ga = \"ga\",\n /** Kannada */\n Kn = \"kn\",\n /** Maori */\n Mi = \"mi\",\n /** Malayalam */\n Ml = \"ml\",\n /** Punjabi */\n Pa = \"pa\",\n}\n\n/**\n * Defines values for TextTranslationSkillLanguage. 
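A translation skill sketch using the language codes above; `defaultToLanguageCode` is the one required property, and the shape is assumed to match the stable 12.x public API:

```ts
import {
  KnownTextTranslationSkillLanguage,
  type TextTranslationSkill,
} from "@azure/search-documents";

// Translate content to English, hinting French when detection is ambiguous.
const translationSkill: TextTranslationSkill = {
  odatatype: "#Microsoft.Skills.Text.TranslationSkill",
  defaultToLanguageCode: KnownTextTranslationSkillLanguage.En,
  suggestedFrom: KnownTextTranslationSkillLanguage.Fr,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "translatedText", targetName: "contentEn" }],
};
```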
\\\n * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **af**: Afrikaans \\\n * **ar**: Arabic \\\n * **bn**: Bangla \\\n * **bs**: Bosnian (Latin) \\\n * **bg**: Bulgarian \\\n * **yue**: Cantonese (Traditional) \\\n * **ca**: Catalan \\\n * **zh-Hans**: Chinese Simplified \\\n * **zh-Hant**: Chinese Traditional \\\n * **hr**: Croatian \\\n * **cs**: Czech \\\n * **da**: Danish \\\n * **nl**: Dutch \\\n * **en**: English \\\n * **et**: Estonian \\\n * **fj**: Fijian \\\n * **fil**: Filipino \\\n * **fi**: Finnish \\\n * **fr**: French \\\n * **de**: German \\\n * **el**: Greek \\\n * **ht**: Haitian Creole \\\n * **he**: Hebrew \\\n * **hi**: Hindi \\\n * **mww**: Hmong Daw \\\n * **hu**: Hungarian \\\n * **is**: Icelandic \\\n * **id**: Indonesian \\\n * **it**: Italian \\\n * **ja**: Japanese \\\n * **sw**: Kiswahili \\\n * **tlh**: Klingon \\\n * **tlh-Latn**: Klingon (Latin script) \\\n * **tlh-Piqd**: Klingon (Klingon script) \\\n * **ko**: Korean \\\n * **lv**: Latvian \\\n * **lt**: Lithuanian \\\n * **mg**: Malagasy \\\n * **ms**: Malay \\\n * **mt**: Maltese \\\n * **nb**: Norwegian \\\n * **fa**: Persian \\\n * **pl**: Polish \\\n * **pt**: Portuguese \\\n * **pt-br**: Portuguese (Brazil) \\\n * **pt-PT**: Portuguese (Portugal) \\\n * **otq**: Queretaro Otomi \\\n * **ro**: Romanian \\\n * **ru**: Russian \\\n * **sm**: Samoan \\\n * **sr-Cyrl**: Serbian (Cyrillic) \\\n * **sr-Latn**: Serbian (Latin) \\\n * **sk**: Slovak \\\n * **sl**: Slovenian \\\n * **es**: Spanish \\\n * **sv**: Swedish \\\n * **ty**: Tahitian \\\n * **ta**: Tamil \\\n * **te**: Telugu \\\n * **th**: Thai \\\n * **to**: Tongan \\\n * **tr**: Turkish \\\n * **uk**: Ukrainian \\\n * **ur**: Urdu \\\n * **vi**: Vietnamese \\\n * **cy**: Welsh \\\n * **yua**: Yucatec Maya \\\n * **ga**: Irish \\\n * **kn**: Kannada \\\n * **mi**: Maori \\\n * **ml**: Malayalam \\\n * **pa**: Punjabi\n */\nexport type TextTranslationSkillLanguage = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillOutputFormat} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillOutputFormat {\n /** Specify the format of the output as text. */\n Text = \"text\",\n /** Specify the format of the output as markdown. */\n Markdown = \"markdown\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillOutputFormat. \\\n * {@link KnownDocumentIntelligenceLayoutSkillOutputFormat} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputFormat,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **text**: Specify the format of the output as text. \\\n * **markdown**: Specify the format of the output as markdown.\n */\nexport type DocumentIntelligenceLayoutSkillOutputFormat = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillOutputMode} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillOutputMode {\n /** Specify that the output should be parsed as 'oneToMany'. */\n OneToMany = \"oneToMany\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillOutputMode. 
\\\n * {@link KnownDocumentIntelligenceLayoutSkillOutputMode} can be used interchangeably with DocumentIntelligenceLayoutSkillOutputMode,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **oneToMany**: Specify that the output should be parsed as 'oneToMany'.\n */\nexport type DocumentIntelligenceLayoutSkillOutputMode = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillMarkdownHeaderDepth} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth {\n /** Header level 1. */\n H1 = \"h1\",\n /** Header level 2. */\n H2 = \"h2\",\n /** Header level 3. */\n H3 = \"h3\",\n /** Header level 4. */\n H4 = \"h4\",\n /** Header level 5. */\n H5 = \"h5\",\n /** Header level 6. */\n H6 = \"h6\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillMarkdownHeaderDepth. \\\n * {@link KnownDocumentIntelligenceLayoutSkillMarkdownHeaderDepth} can be used interchangeably with DocumentIntelligenceLayoutSkillMarkdownHeaderDepth,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **h1**: Header level 1. \\\n * **h2**: Header level 2. \\\n * **h3**: Header level 3. \\\n * **h4**: Header level 4. \\\n * **h5**: Header level 5. \\\n * **h6**: Header level 6.\n */\nexport type DocumentIntelligenceLayoutSkillMarkdownHeaderDepth = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillExtractionOptions} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillExtractionOptions {\n /** Specify that image content should be extracted from the document. */\n Images = \"images\",\n /** Specify that location metadata should be extracted from the document. */\n LocationMetadata = \"locationMetadata\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillExtractionOptions. \\\n * {@link KnownDocumentIntelligenceLayoutSkillExtractionOptions} can be used interchangeably with DocumentIntelligenceLayoutSkillExtractionOptions,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **images**: Specify that image content should be extracted from the document. \\\n * **locationMetadata**: Specify that location metadata should be extracted from the document.\n */\nexport type DocumentIntelligenceLayoutSkillExtractionOptions = string;\n\n/** Known values of {@link DocumentIntelligenceLayoutSkillChunkingUnit} that the service accepts. */\nexport enum KnownDocumentIntelligenceLayoutSkillChunkingUnit {\n /** Specifies chunk by characters. */\n Characters = \"characters\",\n}\n\n/**\n * Defines values for DocumentIntelligenceLayoutSkillChunkingUnit. \\\n * {@link KnownDocumentIntelligenceLayoutSkillChunkingUnit} can be used interchangeably with DocumentIntelligenceLayoutSkillChunkingUnit,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **characters**: Specifies chunk by characters.\n */\nexport type DocumentIntelligenceLayoutSkillChunkingUnit = string;\n\n/** Known values of {@link ChatCompletionExtraParametersBehavior} that the service accepts. */\nexport enum KnownChatCompletionExtraParametersBehavior {\n /** Passes any extra parameters directly to the model. */\n PassThrough = \"passThrough\",\n /** Drops all extra parameters. */\n Drop = \"drop\",\n /** Raises an error if any extra parameter is present. 
*/\n Error = \"error\",\n}\n\n/**\n * Defines values for ChatCompletionExtraParametersBehavior. \\\n * {@link KnownChatCompletionExtraParametersBehavior} can be used interchangeably with ChatCompletionExtraParametersBehavior,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **passThrough**: Passes any extra parameters directly to the model. \\\n * **drop**: Drops all extra parameters. \\\n * **error**: Raises an error if any extra parameter is present.\n */\nexport type ChatCompletionExtraParametersBehavior = string;\n\n/** Known values of {@link ChatCompletionResponseFormatType} that the service accepts. */\nexport enum KnownChatCompletionResponseFormatType {\n /** Text */\n Text = \"text\",\n /** JsonObject */\n JsonObject = \"jsonObject\",\n /** JsonSchema */\n JsonSchema = \"jsonSchema\",\n}\n\n/**\n * Defines values for ChatCompletionResponseFormatType. \\\n * {@link KnownChatCompletionResponseFormatType} can be used interchangeably with ChatCompletionResponseFormatType,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **text** \\\n * **jsonObject** \\\n * **jsonSchema**\n */\nexport type ChatCompletionResponseFormatType = string;\n\n/** Known values of {@link ContentUnderstandingSkillExtractionOptions} that the service accepts. */\nexport enum KnownContentUnderstandingSkillExtractionOptions {\n /** Specify that image content should be extracted from the document. */\n Images = \"images\",\n /** Specify that location metadata should be extracted from the document. */\n LocationMetadata = \"locationMetadata\",\n}\n\n/**\n * Defines values for ContentUnderstandingSkillExtractionOptions. \\\n * {@link KnownContentUnderstandingSkillExtractionOptions} can be used interchangeably with ContentUnderstandingSkillExtractionOptions,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **images**: Specify that image content should be extracted from the document. \\\n * **locationMetadata**: Specify that location metadata should be extracted from the document.\n */\nexport type ContentUnderstandingSkillExtractionOptions = string;\n\n/** Known values of {@link ContentUnderstandingSkillChunkingUnit} that the service accepts. */\nexport enum KnownContentUnderstandingSkillChunkingUnit {\n /** Specifies chunk by characters. */\n Characters = \"characters\",\n}\n\n/**\n * Defines values for ContentUnderstandingSkillChunkingUnit. \\\n * {@link KnownContentUnderstandingSkillChunkingUnit} can be used interchangeably with ContentUnderstandingSkillChunkingUnit,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **characters**: Specifies chunk by characters.\n */\nexport type ContentUnderstandingSkillChunkingUnit = string;\n\n/** Known values of {@link LexicalTokenizerName} that the service accepts. */\nexport enum KnownLexicalTokenizerName {\n /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicTokenizer.html */\n Classic = \"classic\",\n /** Tokenizes the input from an edge into n-grams of the given size(s). 
See https:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenizer.html */\n EdgeNGram = \"edgeNGram\",\n /** Emits the entire input as a single token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordTokenizer.html */\n Keyword = \"keyword_v2\",\n /** Divides text at non-letters. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LetterTokenizer.html */\n Letter = \"letter\",\n /** Divides text at non-letters and converts them to lower case. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseTokenizer.html */\n Lowercase = \"lowercase\",\n /** Divides text using language-specific rules. */\n MicrosoftLanguageTokenizer = \"microsoft_language_tokenizer\",\n /** Divides text using language-specific rules and reduces words to their base forms. */\n MicrosoftLanguageStemmingTokenizer = \"microsoft_language_stemming_tokenizer\",\n /** Tokenizes the input into n-grams of the given size(s). See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenizer.html */\n NGram = \"nGram\",\n /** Tokenizer for path-like hierarchies. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/path\\/PathHierarchyTokenizer.html */\n PathHierarchy = \"path_hierarchy_v2\",\n /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/pattern\\/PatternTokenizer.html */\n Pattern = \"pattern\",\n /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/StandardTokenizer.html */\n Standard = \"standard_v2\",\n /** Tokenizes urls and emails as one token. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/UAX29URLEmailTokenizer.html */\n UaxUrlEmail = \"uax_url_email\",\n /** Divides text at whitespace. See http:\\//lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceTokenizer.html */\n Whitespace = \"whitespace\",\n}\n\n/**\n * Defines values for LexicalTokenizerName. \\\n * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/ClassicTokenizer.html \\\n * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/EdgeNGramTokenizer.html \\\n * **keyword_v2**: Emits the entire input as a single token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/KeywordTokenizer.html \\\n * **letter**: Divides text at non-letters. 
See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LetterTokenizer.html \\\n * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/LowerCaseTokenizer.html \\\n * **microsoft_language_tokenizer**: Divides text using language-specific rules. \\\n * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \\\n * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/ngram\\/NGramTokenizer.html \\\n * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/path\\/PathHierarchyTokenizer.html \\\n * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/pattern\\/PatternTokenizer.html \\\n * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/StandardTokenizer.html \\\n * **uax_url_email**: Tokenizes urls and emails as one token. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/standard\\/UAX29URLEmailTokenizer.html \\\n * **whitespace**: Divides text at whitespace. See http:\\/\\/lucene.apache.org\\/core\\/4_10_3\\/analyzers-common\\/org\\/apache\\/lucene\\/analysis\\/core\\/WhitespaceTokenizer.html\n */\nexport type LexicalTokenizerName = string;\n\n/** Known values of {@link RegexFlags} that the service accepts. */\nexport enum KnownRegexFlags {\n /** Enables canonical equivalence. */\n CanonEq = \"CANON_EQ\",\n /** Enables case-insensitive matching. */\n CaseInsensitive = \"CASE_INSENSITIVE\",\n /** Permits whitespace and comments in the pattern. */\n Comments = \"COMMENTS\",\n /** Enables dotall mode. */\n DotAll = \"DOTALL\",\n /** Enables literal parsing of the pattern. */\n Literal = \"LITERAL\",\n /** Enables multiline mode. */\n Multiline = \"MULTILINE\",\n /** Enables Unicode-aware case folding. */\n UnicodeCase = \"UNICODE_CASE\",\n /** Enables Unix lines mode. */\n UnixLines = \"UNIX_LINES\",\n}\n\n/**\n * Defines values for RegexFlags. \\\n * {@link KnownRegexFlags} can be used interchangeably with RegexFlags,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **CANON_EQ**: Enables canonical equivalence. \\\n * **CASE_INSENSITIVE**: Enables case-insensitive matching. \\\n * **COMMENTS**: Permits whitespace and comments in the pattern. \\\n * **DOTALL**: Enables dotall mode. \\\n * **LITERAL**: Enables literal parsing of the pattern. \\\n * **MULTILINE**: Enables multiline mode. \\\n * **UNICODE_CASE**: Enables Unicode-aware case folding. \\\n * **UNIX_LINES**: Enables Unix lines mode.\n */\nexport type RegexFlags = string;\n/** Defines values for IndexerStatus. */\nexport type IndexerStatus = \"unknown\" | \"error\" | \"running\";\n/** Defines values for IndexerExecutionStatus. 
*/\nexport type IndexerExecutionStatus =\n | \"transientFailure\"\n | \"success\"\n | \"inProgress\"\n | \"reset\";\n/** Defines values for ScoringFunctionInterpolation. */\nexport type ScoringFunctionInterpolation =\n | \"linear\"\n | \"constant\"\n | \"quadratic\"\n | \"logarithmic\";\n/** Defines values for ScoringFunctionAggregation. */\nexport type ScoringFunctionAggregation =\n | \"sum\"\n | \"average\"\n | \"minimum\"\n | \"maximum\"\n | \"firstMatching\"\n | \"product\";\n/** Defines values for TokenCharacterKind. */\nexport type TokenCharacterKind =\n | \"letter\"\n | \"digit\"\n | \"whitespace\"\n | \"punctuation\"\n | \"symbol\";\n/** Defines values for MicrosoftTokenizerLanguage. */\nexport type MicrosoftTokenizerLanguage =\n | \"bangla\"\n | \"bulgarian\"\n | \"catalan\"\n | \"chineseSimplified\"\n | \"chineseTraditional\"\n | \"croatian\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"french\"\n | \"german\"\n | \"greek\"\n | \"gujarati\"\n | \"hindi\"\n | \"icelandic\"\n | \"indonesian\"\n | \"italian\"\n | \"japanese\"\n | \"kannada\"\n | \"korean\"\n | \"malay\"\n | \"malayalam\"\n | \"marathi\"\n | \"norwegianBokmaal\"\n | \"polish\"\n | \"portuguese\"\n | \"portugueseBrazilian\"\n | \"punjabi\"\n | \"romanian\"\n | \"russian\"\n | \"serbianCyrillic\"\n | \"serbianLatin\"\n | \"slovenian\"\n | \"spanish\"\n | \"swedish\"\n | \"tamil\"\n | \"telugu\"\n | \"thai\"\n | \"ukrainian\"\n | \"urdu\"\n | \"vietnamese\";\n/** Defines values for MicrosoftStemmingTokenizerLanguage. */\nexport type MicrosoftStemmingTokenizerLanguage =\n | \"arabic\"\n | \"bangla\"\n | \"bulgarian\"\n | \"catalan\"\n | \"croatian\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"estonian\"\n | \"finnish\"\n | \"french\"\n | \"german\"\n | \"greek\"\n | \"gujarati\"\n | \"hebrew\"\n | \"hindi\"\n | \"hungarian\"\n | \"icelandic\"\n | \"indonesian\"\n | \"italian\"\n | \"kannada\"\n | \"latvian\"\n | \"lithuanian\"\n | \"malay\"\n | \"malayalam\"\n | \"marathi\"\n | \"norwegianBokmaal\"\n | \"polish\"\n | \"portuguese\"\n | \"portugueseBrazilian\"\n | \"punjabi\"\n | \"romanian\"\n | \"russian\"\n | \"serbianCyrillic\"\n | \"serbianLatin\"\n | \"slovak\"\n | \"slovenian\"\n | \"spanish\"\n | \"swedish\"\n | \"tamil\"\n | \"telugu\"\n | \"turkish\"\n | \"ukrainian\"\n | \"urdu\";\n/** Defines values for CjkBigramTokenFilterScripts. */\nexport type CjkBigramTokenFilterScripts =\n | \"han\"\n | \"hiragana\"\n | \"katakana\"\n | \"hangul\";\n/** Defines values for EdgeNGramTokenFilterSide. */\nexport type EdgeNGramTokenFilterSide = \"front\" | \"back\";\n/** Defines values for PhoneticEncoder. */\nexport type PhoneticEncoder =\n | \"metaphone\"\n | \"doubleMetaphone\"\n | \"soundex\"\n | \"refinedSoundex\"\n | \"caverphone1\"\n | \"caverphone2\"\n | \"cologne\"\n | \"nysiis\"\n | \"koelnerPhonetik\"\n | \"haasePhonetik\"\n | \"beiderMorse\";\n/** Defines values for SnowballTokenFilterLanguage. */\nexport type SnowballTokenFilterLanguage =\n | \"armenian\"\n | \"basque\"\n | \"catalan\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"finnish\"\n | \"french\"\n | \"german\"\n | \"german2\"\n | \"hungarian\"\n | \"italian\"\n | \"kp\"\n | \"lovins\"\n | \"norwegian\"\n | \"porter\"\n | \"portuguese\"\n | \"romanian\"\n | \"russian\"\n | \"spanish\"\n | \"swedish\"\n | \"turkish\";\n/** Defines values for StemmerTokenFilterLanguage. 
*/\nexport type StemmerTokenFilterLanguage =\n | \"arabic\"\n | \"armenian\"\n | \"basque\"\n | \"brazilian\"\n | \"bulgarian\"\n | \"catalan\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"dutchKp\"\n | \"english\"\n | \"lightEnglish\"\n | \"minimalEnglish\"\n | \"possessiveEnglish\"\n | \"porter2\"\n | \"lovins\"\n | \"finnish\"\n | \"lightFinnish\"\n | \"french\"\n | \"lightFrench\"\n | \"minimalFrench\"\n | \"galician\"\n | \"minimalGalician\"\n | \"german\"\n | \"german2\"\n | \"lightGerman\"\n | \"minimalGerman\"\n | \"greek\"\n | \"hindi\"\n | \"hungarian\"\n | \"lightHungarian\"\n | \"indonesian\"\n | \"irish\"\n | \"italian\"\n | \"lightItalian\"\n | \"sorani\"\n | \"latvian\"\n | \"norwegian\"\n | \"lightNorwegian\"\n | \"minimalNorwegian\"\n | \"lightNynorsk\"\n | \"minimalNynorsk\"\n | \"portuguese\"\n | \"lightPortuguese\"\n | \"minimalPortuguese\"\n | \"portugueseRslp\"\n | \"romanian\"\n | \"russian\"\n | \"lightRussian\"\n | \"spanish\"\n | \"lightSpanish\"\n | \"swedish\"\n | \"lightSwedish\"\n | \"turkish\";\n/** Defines values for StopwordsList. */\nexport type StopwordsList =\n | \"arabic\"\n | \"armenian\"\n | \"basque\"\n | \"brazilian\"\n | \"bulgarian\"\n | \"catalan\"\n | \"czech\"\n | \"danish\"\n | \"dutch\"\n | \"english\"\n | \"finnish\"\n | \"french\"\n | \"galician\"\n | \"german\"\n | \"greek\"\n | \"hindi\"\n | \"hungarian\"\n | \"indonesian\"\n | \"irish\"\n | \"italian\"\n | \"latvian\"\n | \"norwegian\"\n | \"persian\"\n | \"portuguese\"\n | \"romanian\"\n | \"russian\"\n | \"sorani\"\n | \"spanish\"\n | \"swedish\"\n | \"thai\"\n | \"turkish\";\n\n/** Optional parameters. */\nexport interface KnowledgeBasesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type KnowledgeBasesCreateOrUpdateResponse = KnowledgeBase;\n\n/** Optional parameters. */\nexport interface KnowledgeBasesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface KnowledgeBasesGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type KnowledgeBasesGetResponse = KnowledgeBase;\n\n/** Optional parameters. */\nexport interface KnowledgeBasesListOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the list operation. */\nexport type KnowledgeBasesListResponse = ListKnowledgeBasesResult;\n\n/** Optional parameters. */\nexport interface KnowledgeBasesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type KnowledgeBasesCreateResponse = KnowledgeBase;\n\n/** Optional parameters. */\nexport interface KnowledgeSourcesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. 
The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type KnowledgeSourcesCreateOrUpdateResponse = KnowledgeSourceUnion;\n\n/** Optional parameters. */\nexport interface KnowledgeSourcesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface KnowledgeSourcesGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type KnowledgeSourcesGetResponse = KnowledgeSourceUnion;\n\n/** Optional parameters. */\nexport interface KnowledgeSourcesListOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the list operation. */\nexport type KnowledgeSourcesListResponse = ListKnowledgeSourcesResult;\n\n/** Optional parameters. */\nexport interface KnowledgeSourcesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type KnowledgeSourcesCreateResponse = KnowledgeSourceUnion;\n\n/** Optional parameters. */\nexport interface KnowledgeSourcesGetStatusOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getStatus operation. */\nexport type KnowledgeSourcesGetStatusResponse = KnowledgeSourceStatus;\n\n/** Optional parameters. */\nexport interface DataSourcesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n /** Ignores cache reset requirements. */\n skipIndexerResetRequirementForCache?: boolean;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type DataSourcesCreateOrUpdateResponse = SearchIndexerDataSource;\n\n/** Optional parameters. */\nexport interface DataSourcesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface DataSourcesGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type DataSourcesGetResponse = SearchIndexerDataSource;\n\n/** Optional parameters. */\nexport interface DataSourcesListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the data sources to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. 
*/\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type DataSourcesListResponse = ListDataSourcesResult;\n\n/** Optional parameters. */\nexport interface DataSourcesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type DataSourcesCreateResponse = SearchIndexerDataSource;\n\n/** Optional parameters. */\nexport interface IndexersResetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Optional parameters. */\nexport interface IndexersResetDocsOptionalParams\n extends coreClient.OperationOptions {\n keysOrIds?: DocumentKeysOrIds;\n /** If false, keys or ids will be appended to existing ones. If true, only the keys or ids in this payload will be queued to be re-ingested. */\n overwrite?: boolean;\n}\n\n/** Optional parameters. */\nexport interface IndexersResyncOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Optional parameters. */\nexport interface IndexersRunOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Optional parameters. */\nexport interface IndexersCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n /** Ignores cache reset requirements. */\n skipIndexerResetRequirementForCache?: boolean;\n /** Disables cache reprocessing change detection. */\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type IndexersCreateOrUpdateResponse = SearchIndexer;\n\n/** Optional parameters. */\nexport interface IndexersDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface IndexersGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type IndexersGetResponse = SearchIndexer;\n\n/** Optional parameters. */\nexport interface IndexersListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the indexers to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type IndexersListResponse = ListIndexersResult;\n\n/** Optional parameters. */\nexport interface IndexersCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type IndexersCreateResponse = SearchIndexer;\n\n/** Optional parameters. */\nexport interface IndexersGetStatusOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getStatus operation. */\nexport type IndexersGetStatusResponse = SearchIndexerStatus;\n\n/** Optional parameters. 
*/\nexport interface SkillsetsCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n /** Ignores cache reset requirements. */\n skipIndexerResetRequirementForCache?: boolean;\n /** Disables cache reprocessing change detection. */\n disableCacheReprocessingChangeDetection?: boolean;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type SkillsetsCreateOrUpdateResponse = SearchIndexerSkillset;\n\n/** Optional parameters. */\nexport interface SkillsetsDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface SkillsetsGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type SkillsetsGetResponse = SearchIndexerSkillset;\n\n/** Optional parameters. */\nexport interface SkillsetsListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the skillsets to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type SkillsetsListResponse = ListSkillsetsResult;\n\n/** Optional parameters. */\nexport interface SkillsetsCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type SkillsetsCreateResponse = SearchIndexerSkillset;\n\n/** Optional parameters. */\nexport interface SkillsetsResetSkillsOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Optional parameters. */\nexport interface SynonymMapsCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type SynonymMapsCreateOrUpdateResponse = SynonymMap;\n\n/** Optional parameters. */\nexport interface SynonymMapsDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface SynonymMapsGetOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type SynonymMapsGetResponse = SynonymMap;\n\n/** Optional parameters. 
*/\nexport interface SynonymMapsListOptionalParams\n extends coreClient.OperationOptions {\n /** Selects which top-level properties of the synonym maps to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type SynonymMapsListResponse = ListSynonymMapsResult;\n\n/** Optional parameters. */\nexport interface SynonymMapsCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type SynonymMapsCreateResponse = SynonymMap;\n\n/** Optional parameters. */\nexport interface IndexesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type IndexesCreateResponse = SearchIndex;\n\n/** Optional parameters. */\nexport interface IndexesListOptionalParams extends coreClient.OperationOptions {\n /** Selects which top-level properties of the index definitions to retrieve. Specified as a comma-separated list of JSON property names, or '*' for all properties. The default is all properties. */\n select?: string;\n}\n\n/** Contains response data for the list operation. */\nexport type IndexesListResponse = ListIndexesResult;\n\n/** Optional parameters. */\nexport interface IndexesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n /** Allows new analyzers, tokenizers, token filters, or char filters to be added to an index by taking the index offline for at least a few seconds. This temporarily causes indexing and query requests to fail. Performance and write availability of the index can be impaired for several minutes after the index is updated, or longer for very large indexes. */\n allowIndexDowntime?: boolean;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type IndexesCreateOrUpdateResponse = SearchIndex;\n\n/** Optional parameters. */\nexport interface IndexesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface IndexesGetOptionalParams extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type IndexesGetResponse = SearchIndex;\n\n/** Optional parameters. */\nexport interface IndexesGetStatisticsOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getStatistics operation. */\nexport type IndexesGetStatisticsResponse = GetIndexStatisticsResult;\n\n/** Optional parameters. */\nexport interface IndexesAnalyzeOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the analyze operation. */\nexport type IndexesAnalyzeResponse = AnalyzeResult;\n\n/** Optional parameters. 
*/\nexport interface AliasesCreateOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the create operation. */\nexport type AliasesCreateResponse = SearchAlias;\n\n/** Optional parameters. */\nexport interface AliasesListOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the list operation. */\nexport type AliasesListResponse = ListAliasesResult;\n\n/** Optional parameters. */\nexport interface AliasesCreateOrUpdateOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Contains response data for the createOrUpdate operation. */\nexport type AliasesCreateOrUpdateResponse = SearchAlias;\n\n/** Optional parameters. */\nexport interface AliasesDeleteOptionalParams\n extends coreClient.OperationOptions {\n /** Defines the If-Match condition. The operation will be performed only if the ETag on the server matches this value. */\n ifMatch?: string;\n /** Defines the If-None-Match condition. The operation will be performed only if the ETag on the server does not match this value. */\n ifNoneMatch?: string;\n}\n\n/** Optional parameters. */\nexport interface AliasesGetOptionalParams extends coreClient.OperationOptions {}\n\n/** Contains response data for the get operation. */\nexport type AliasesGetResponse = SearchAlias;\n\n/** Optional parameters. */\nexport interface GetServiceStatisticsOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getServiceStatistics operation. */\nexport type GetServiceStatisticsResponse = ServiceStatistics;\n\n/** Optional parameters. */\nexport interface GetIndexStatsSummaryOptionalParams\n extends coreClient.OperationOptions {}\n\n/** Contains response data for the getIndexStatsSummary operation. */\nexport type GetIndexStatsSummaryResponse = ListIndexStatsSummary;\n\n/** Optional parameters. */\nexport interface SearchServiceClientOptionalParams\n extends coreHttpCompat.ExtendedServiceClientOptions {\n /** Overrides client endpoint. */\n endpoint?: string;\n}\n"]}
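
Notes on the generated surface above. Every Known* enum is paired with a plain string type alias (e.g. SplitSkillEncoderModelName = string). This is the azure-sdk extensible-enum convention: the alias keeps the API forward-compatible with values the service may add later, while the enum documents the values known at release time. A minimal sketch, mirroring (not importing) the SplitSkillEncoderModelName declarations above:

// Local mirror of the generated pattern, for illustration only.
enum KnownSplitSkillEncoderModelName {
  R50KBase = "r50k_base",
  P50KBase = "p50k_base",
  P50KEdit = "p50k_edit",
  CL100KBase = "cl100k_base",
}
type SplitSkillEncoderModelName = string;

// Both assignments type-check: known members are just strings, and a value
// the service adds after this release still compiles without an SDK update.
const known: SplitSkillEncoderModelName = KnownSplitSkillEncoderModelName.CL100KBase;
const future: SplitSkillEncoderModelName = "hypothetical_future_model"; // hypothetical value

The trade-off is that typos in string literals are not caught at compile time, which is why the doc comments enumerate the service-accepted values.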
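
The ifMatch/ifNoneMatch members that recur through the *CreateOrUpdateOptionalParams and *DeleteOptionalParams interfaces correspond to the HTTP If-Match/If-None-Match precondition headers. A minimal sketch of the contract they imply; evaluatePreconditions is a hypothetical helper, and the real client surfaces a failed precondition as an HTTP 412 (Precondition Failed) error rather than a boolean:

interface ETagOptions {
  ifMatch?: string;
  ifNoneMatch?: string;
}

function evaluatePreconditions(serverETag: string | undefined, options: ETagOptions): boolean {
  if (options.ifMatch !== undefined && options.ifMatch !== serverETag) {
    return false; // If-Match failed: the resource changed since it was read
  }
  if (options.ifNoneMatch === "*" && serverETag !== undefined) {
    return false; // If-None-Match: "*" requires that the resource not exist yet
  }
  if (options.ifNoneMatch !== undefined && options.ifNoneMatch !== "*" && options.ifNoneMatch === serverETag) {
    return false; // If-None-Match failed: the resource is unchanged
  }
  return true;
}

Typical usage is read-then-write: fetch the resource, then pass its etag as ifMatch on createOrUpdate or delete, so a concurrent writer causes the operation to fail instead of being silently overwritten. ifNoneMatch: "*" is the standard HTTP way to express create-only semantics.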
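
Several List* optional-params interfaces (DataSourcesListOptionalParams, IndexersListOptionalParams, SkillsetsListOptionalParams, SynonymMapsListOptionalParams, IndexesListOptionalParams) take a select string documented as "a comma-separated list of JSON property names, or '*' for all properties". A small sketch with a hypothetical buildSelect helper:

// Hypothetical helper: build the `select` string accepted by the List*
// optional params above. An empty list falls back to "*" (all properties).
function buildSelect(properties: readonly string[]): string {
  return properties.length === 0 ? "*" : properties.join(",");
}

const select = buildSelect(["name", "description"]); // "name,description"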
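
IndexersResetDocsOptionalParams documents its overwrite flag as: false (the default) appends the payload's keys or ids to those already queued for re-ingestion, while true makes the payload authoritative. A sketch of those semantics with a hypothetical applyResetDocs helper; the real queue is service-side state, modeled here as a Set only to make the behavior concrete:

function applyResetDocs(
  alreadyQueued: ReadonlySet<string>,
  payloadKeys: readonly string[],
  overwrite = false,
): Set<string> {
  // overwrite === true: only the keys or ids in this payload remain queued.
  // overwrite === false: payload keys are appended to the existing queue.
  return overwrite ? new Set(payloadKeys) : new Set([...alreadyQueued, ...payloadKeys]);
}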
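
KnownChatCompletionExtraParametersBehavior describes what happens to request parameters that fall outside the defined schema: passThrough forwards them to the model, drop discards them, and error rejects the request. The sketch below is a hypothetical stand-in for logic that actually runs service-side, shown only to make the three documented behaviors concrete; the default branch reflects that this is an extensible enum and new values may appear:

type ChatCompletionExtraParametersBehavior = string; // as declared above

function handleExtras(
  behavior: ChatCompletionExtraParametersBehavior,
  extras: Record<string, unknown>,
): Record<string, unknown> {
  switch (behavior) {
    case "passThrough":
      return extras; // forwarded to the model unchanged
    case "drop":
      return {}; // silently discarded
    case "error":
      if (Object.keys(extras).length > 0) {
        throw new Error("Extra parameters are not allowed under 'error' behavior.");
      }
      return {};
    default:
      throw new Error(`Unrecognized behavior: ${behavior}`);
  }
}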
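
Finally, the KnownRegexFlags values mirror java.util.regex.Pattern flag names, consistent with the Lucene-based pattern tokenizer and analyzer they configure. Assuming, as the REST documentation for pattern analyzers indicates, that multiple flags are combined with '|':

// Assumption: flags are pipe-combined, Java Pattern style.
const flags: string = ["CASE_INSENSITIVE", "MULTILINE"].join("|"); // "CASE_INSENSITIVE|MULTILINE"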