@azure/search-documents 12.0.0-beta.4 → 12.1.0-beta.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +52 -32
- package/dist/index.js +13493 -13180
- package/dist/index.js.map +1 -1
- package/dist-esm/src/constants.js +2 -1
- package/dist-esm/src/constants.js.map +1 -1
- package/dist-esm/src/errorModels.js +4 -0
- package/dist-esm/src/errorModels.js.map +1 -0
- package/dist-esm/src/generated/data/models/index.js +37 -53
- package/dist-esm/src/generated/data/models/index.js.map +1 -1
- package/dist-esm/src/generated/data/models/mappers.js +398 -331
- package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/data/models/parameters.js +195 -195
- package/dist-esm/src/generated/data/models/parameters.js.map +1 -1
- package/dist-esm/src/generated/data/operations/documents.js +41 -41
- package/dist-esm/src/generated/data/operations/documents.js.map +1 -1
- package/dist-esm/src/generated/data/operationsInterfaces/documents.js.map +1 -1
- package/dist-esm/src/generated/data/searchClient.js +30 -4
- package/dist-esm/src/generated/data/searchClient.js.map +1 -1
- package/dist-esm/src/generated/service/models/index.js +138 -69
- package/dist-esm/src/generated/service/models/index.js.map +1 -1
- package/dist-esm/src/generated/service/models/mappers.js +1821 -1663
- package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/service/models/parameters.js +64 -64
- package/dist-esm/src/generated/service/models/parameters.js.map +1 -1
- package/dist-esm/src/generated/service/operations/aliases.js +22 -22
- package/dist-esm/src/generated/service/operations/aliases.js.map +1 -1
- package/dist-esm/src/generated/service/operations/dataSources.js +23 -23
- package/dist-esm/src/generated/service/operations/dataSources.js.map +1 -1
- package/dist-esm/src/generated/service/operations/indexers.js +36 -36
- package/dist-esm/src/generated/service/operations/indexers.js.map +1 -1
- package/dist-esm/src/generated/service/operations/indexes.js +30 -30
- package/dist-esm/src/generated/service/operations/indexes.js.map +1 -1
- package/dist-esm/src/generated/service/operations/skillsets.js +26 -26
- package/dist-esm/src/generated/service/operations/skillsets.js.map +1 -1
- package/dist-esm/src/generated/service/operations/synonymMaps.js +22 -22
- package/dist-esm/src/generated/service/operations/synonymMaps.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/aliases.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/dataSources.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/indexers.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/indexes.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/skillsets.js.map +1 -1
- package/dist-esm/src/generated/service/operationsInterfaces/synonymMaps.js.map +1 -1
- package/dist-esm/src/generated/service/searchServiceClient.js +35 -9
- package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
- package/dist-esm/src/generatedStringLiteralUnions.js +4 -0
- package/dist-esm/src/generatedStringLiteralUnions.js.map +1 -0
- package/dist-esm/src/index.js +8 -9
- package/dist-esm/src/index.js.map +1 -1
- package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
- package/dist-esm/src/indexModels.js.map +1 -1
- package/dist-esm/src/odata.js.map +1 -1
- package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
- package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist-esm/src/searchClient.js +36 -27
- package/dist-esm/src/searchClient.js.map +1 -1
- package/dist-esm/src/searchIndexClient.js +15 -29
- package/dist-esm/src/searchIndexClient.js.map +1 -1
- package/dist-esm/src/searchIndexerClient.js +9 -6
- package/dist-esm/src/searchIndexerClient.js.map +1 -1
- package/dist-esm/src/searchIndexingBufferedSender.js +3 -8
- package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
- package/dist-esm/src/serialization.js.map +1 -1
- package/dist-esm/src/serviceModels.js.map +1 -1
- package/dist-esm/src/serviceUtils.js +44 -67
- package/dist-esm/src/serviceUtils.js.map +1 -1
- package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
- package/dist-esm/src/synonymMapHelper.js +1 -1
- package/dist-esm/src/synonymMapHelper.js.map +1 -1
- package/dist-esm/src/tracing.js +1 -1
- package/dist-esm/src/tracing.js.map +1 -1
- package/package.json +42 -43
- package/types/search-documents.d.ts +1014 -1458
|
@@ -5,6 +5,7 @@ import { ExtendedCommonClientOptions } from '@azure/core-http-compat';
|
|
|
5
5
|
import { KeyCredential } from '@azure/core-auth';
|
|
6
6
|
import { OperationOptions } from '@azure/core-client';
|
|
7
7
|
import { PagedAsyncIterableIterator } from '@azure/core-paging';
|
|
8
|
+
import { Pipeline } from '@azure/core-rest-pipeline';
|
|
8
9
|
import { RestError } from '@azure/core-rest-pipeline';
|
|
9
10
|
import { TokenCredential } from '@azure/core-auth';
|
|
10
11
|
|
|
@@ -50,31 +51,32 @@ export declare interface AnalyzeRequest {
|
|
|
50
51
|
/**
|
|
51
52
|
* The name of the analyzer to use to break the given text. If this parameter is not specified,
|
|
52
53
|
* you must specify a tokenizer instead. The tokenizer and analyzer parameters are mutually
|
|
53
|
-
* exclusive. KnownAnalyzerNames is an enum containing
|
|
54
|
+
* exclusive. {@link KnownAnalyzerNames} is an enum containing built-in analyzer names.
|
|
54
55
|
* NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.
|
|
55
56
|
*/
|
|
56
|
-
analyzerName?:
|
|
57
|
+
analyzerName?: LexicalAnalyzerName;
|
|
57
58
|
/**
|
|
58
59
|
* The name of the tokenizer to use to break the given text. If this parameter is not specified,
|
|
59
60
|
* you must specify an analyzer instead. The tokenizer and analyzer parameters are mutually
|
|
60
|
-
* exclusive. KnownTokenizerNames is an enum containing
|
|
61
|
+
* exclusive. {@link KnownTokenizerNames} is an enum containing built-in tokenizer names.
|
|
61
62
|
* NOTE: Either analyzerName or tokenizerName is required in an AnalyzeRequest.
|
|
62
63
|
*/
|
|
63
|
-
tokenizerName?:
|
|
64
|
+
tokenizerName?: LexicalTokenizerName;
|
|
64
65
|
/**
|
|
65
|
-
* The name of the normalizer to use to normalize the given text.
|
|
66
|
+
* The name of the normalizer to use to normalize the given text. {@link KnownNormalizerNames} is
|
|
67
|
+
* an enum containing built-in analyzer names.
|
|
66
68
|
*/
|
|
67
69
|
normalizerName?: LexicalNormalizerName;
|
|
68
70
|
/**
|
|
69
71
|
* An optional list of token filters to use when breaking the given text. This parameter can only
|
|
70
72
|
* be set when using the tokenizer parameter.
|
|
71
73
|
*/
|
|
72
|
-
tokenFilters?:
|
|
74
|
+
tokenFilters?: TokenFilterName[];
|
|
73
75
|
/**
|
|
74
76
|
* An optional list of character filters to use when breaking the given text. This parameter can
|
|
75
77
|
* only be set when using the tokenizer parameter.
|
|
76
78
|
*/
|
|
77
|
-
charFilters?:
|
|
79
|
+
charFilters?: CharFilterName[];
|
|
78
80
|
}
|
|
79
81
|
|
|
80
82
|
/** The result of testing an analyzer on text. */
|
|
@@ -88,76 +90,13 @@ export declare interface AnalyzeResult {
|
|
|
88
90
|
*/
|
|
89
91
|
export declare type AnalyzeTextOptions = OperationOptions & AnalyzeRequest;
|
|
90
92
|
|
|
91
|
-
/** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. */
|
|
92
|
-
export declare interface AnswerResult {
|
|
93
|
-
/** Describes unknown properties. The value of an unknown property can be of "any" type. */
|
|
94
|
-
[property: string]: any;
|
|
95
|
-
/**
|
|
96
|
-
* The score value represents how relevant the answer is to the query relative to other answers returned for the query.
|
|
97
|
-
* NOTE: This property will not be serialized. It can only be populated by the server.
|
|
98
|
-
*/
|
|
99
|
-
readonly score: number;
|
|
100
|
-
/**
|
|
101
|
-
* The key of the document the answer was extracted from.
|
|
102
|
-
* NOTE: This property will not be serialized. It can only be populated by the server.
|
|
103
|
-
*/
|
|
104
|
-
readonly key: string;
|
|
105
|
-
/**
|
|
106
|
-
* The text passage extracted from the document contents as the answer.
|
|
107
|
-
* NOTE: This property will not be serialized. It can only be populated by the server.
|
|
108
|
-
*/
|
|
109
|
-
readonly text: string;
|
|
110
|
-
/**
|
|
111
|
-
* Same text passage as in the Text property with highlighted text phrases most relevant to the query.
|
|
112
|
-
* NOTE: This property will not be serialized. It can only be populated by the server.
|
|
113
|
-
*/
|
|
114
|
-
readonly highlights?: string;
|
|
115
|
-
}
|
|
116
|
-
|
|
117
|
-
/**
|
|
118
|
-
* This parameter is only valid if the query type is 'semantic'. If set, the query returns answers
|
|
119
|
-
* extracted from key passages in the highest ranked documents. The number of answers returned can
|
|
120
|
-
* be configured by appending the pipe character '|' followed by the 'count-\<number of answers\>' option
|
|
121
|
-
* after the answers parameter value, such as 'extractive|count-3'. Default count is 1. The
|
|
122
|
-
* confidence threshold can be configured by appending the pipe character '|' followed by the
|
|
123
|
-
* 'threshold-\<confidence threshold\>' option after the answers parameter value, such as
|
|
124
|
-
* 'extractive|threshold-0.9'. Default threshold is 0.7.
|
|
125
|
-
*/
|
|
126
|
-
export declare type Answers = string;
|
|
127
|
-
|
|
128
|
-
/**
|
|
129
|
-
* A value that specifies whether answers should be returned as part of the search response.
|
|
130
|
-
* This parameter is only valid if the query type is 'semantic'. If set to `extractive`, the query
|
|
131
|
-
* returns answers extracted from key passages in the highest ranked documents.
|
|
132
|
-
*/
|
|
133
|
-
export declare type AnswersOptions = {
|
|
134
|
-
/**
|
|
135
|
-
* Extracts answer candidates from the contents of the documents returned in response to a
|
|
136
|
-
* query expressed as a question in natural language.
|
|
137
|
-
*/
|
|
138
|
-
answers: "extractive";
|
|
139
|
-
/**
|
|
140
|
-
* The number of answers returned. Default count is 1
|
|
141
|
-
*/
|
|
142
|
-
count?: number;
|
|
143
|
-
/**
|
|
144
|
-
* The confidence threshold. Default threshold is 0.7
|
|
145
|
-
*/
|
|
146
|
-
threshold?: number;
|
|
147
|
-
} | {
|
|
148
|
-
/**
|
|
149
|
-
* Do not return answers for the query.
|
|
150
|
-
*/
|
|
151
|
-
answers: "none";
|
|
152
|
-
};
|
|
153
|
-
|
|
154
93
|
/** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. This token filter is implemented using Apache Lucene. */
|
|
155
|
-
export declare
|
|
94
|
+
export declare interface AsciiFoldingTokenFilter extends BaseTokenFilter {
|
|
156
95
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
157
96
|
odatatype: "#Microsoft.Azure.Search.AsciiFoldingTokenFilter";
|
|
158
97
|
/** A value indicating whether the original token will be kept. Default is false. */
|
|
159
98
|
preserveOriginal?: boolean;
|
|
160
|
-
}
|
|
99
|
+
}
|
|
161
100
|
|
|
162
101
|
/** The result of Autocomplete requests. */
|
|
163
102
|
export declare interface AutocompleteItem {
|
|
@@ -258,7 +197,7 @@ export declare interface AzureActiveDirectoryApplicationCredentials {
|
|
|
258
197
|
export { AzureKeyCredential }
|
|
259
198
|
|
|
260
199
|
/** The AML skill allows you to extend AI enrichment with a custom Azure Machine Learning (AML) model. Once an AML model is trained and deployed, an AML skill integrates it into AI enrichment. */
|
|
261
|
-
export declare
|
|
200
|
+
export declare interface AzureMachineLearningSkill extends BaseSearchIndexerSkill {
|
|
262
201
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
263
202
|
odatatype: "#Microsoft.Skills.Custom.AmlSkill";
|
|
264
203
|
/** (Required for no authentication or key authentication) The scoring URI of the AML service to which the JSON payload will be sent. Only the https URI scheme is allowed. */
|
|
@@ -273,7 +212,7 @@ export declare type AzureMachineLearningSkill = BaseSearchIndexerSkill & {
|
|
|
273
212
|
region?: string;
|
|
274
213
|
/** (Optional) When specified, indicates the number of calls the indexer will make in parallel to the endpoint you have provided. You can decrease this value if your endpoint is failing under too high of a request load, or raise it if your endpoint is able to accept more requests and you would like an increase in the performance of the indexer. If not set, a default value of 5 is used. The degreeOfParallelism can be set to a maximum of 10 and a minimum of 1. */
|
|
275
214
|
degreeOfParallelism?: number;
|
|
276
|
-
}
|
|
215
|
+
}
|
|
277
216
|
|
|
278
217
|
/** Allows you to generate a vector embedding for a given text input using the Azure Open AI service. */
|
|
279
218
|
export declare interface AzureOpenAIEmbeddingSkill extends BaseSearchIndexerSkill {
|
|
@@ -317,11 +256,11 @@ export declare interface BaseCharFilter {
|
|
|
317
256
|
name: string;
|
|
318
257
|
}
|
|
319
258
|
|
|
320
|
-
/** Base type for describing any
|
|
259
|
+
/** Base type for describing any Azure AI service resource attached to a skillset. */
|
|
321
260
|
export declare interface BaseCognitiveServicesAccount {
|
|
322
261
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
323
262
|
odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices" | "#Microsoft.Azure.Search.CognitiveServicesByKey";
|
|
324
|
-
/** Description of the
|
|
263
|
+
/** Description of the Azure AI service resource attached to a skillset. */
|
|
325
264
|
description?: string;
|
|
326
265
|
}
|
|
327
266
|
|
|
@@ -395,6 +334,129 @@ export declare interface BaseSearchIndexerSkill {
|
|
|
395
334
|
outputs: OutputFieldMappingEntry[];
|
|
396
335
|
}
|
|
397
336
|
|
|
337
|
+
/**
|
|
338
|
+
* Parameters for filtering, sorting, faceting, paging, and other search query behaviors.
|
|
339
|
+
*/
|
|
340
|
+
export declare interface BaseSearchRequestOptions<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> {
|
|
341
|
+
/**
|
|
342
|
+
* A value that specifies whether to fetch the total count of results. Default is false. Setting
|
|
343
|
+
* this value to true may have a performance impact. Note that the count returned is an
|
|
344
|
+
* approximation.
|
|
345
|
+
*/
|
|
346
|
+
includeTotalCount?: boolean;
|
|
347
|
+
/**
|
|
348
|
+
* The list of facet expressions to apply to the search query. Each facet expression contains a
|
|
349
|
+
* field name, optionally followed by a comma-separated list of name:value pairs.
|
|
350
|
+
*/
|
|
351
|
+
facets?: string[];
|
|
352
|
+
/**
|
|
353
|
+
* The OData $filter expression to apply to the search query.
|
|
354
|
+
*/
|
|
355
|
+
filter?: string;
|
|
356
|
+
/**
|
|
357
|
+
* The comma-separated list of field names to use for hit highlights. Only searchable fields can
|
|
358
|
+
* be used for hit highlighting.
|
|
359
|
+
*/
|
|
360
|
+
highlightFields?: string;
|
|
361
|
+
/**
|
|
362
|
+
* A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is
|
|
363
|
+
* </em>.
|
|
364
|
+
*/
|
|
365
|
+
highlightPostTag?: string;
|
|
366
|
+
/**
|
|
367
|
+
* A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default
|
|
368
|
+
* is <em>.
|
|
369
|
+
*/
|
|
370
|
+
highlightPreTag?: string;
|
|
371
|
+
/**
|
|
372
|
+
* A number between 0 and 100 indicating the percentage of the index that must be covered by a
|
|
373
|
+
* search query in order for the query to be reported as a success. This parameter can be useful
|
|
374
|
+
* for ensuring search availability even for services with only one replica. The default is 100.
|
|
375
|
+
*/
|
|
376
|
+
minimumCoverage?: number;
|
|
377
|
+
/**
|
|
378
|
+
* The list of OData $orderby expressions by which to sort the results. Each
|
|
379
|
+
* expression can be either a field name or a call to either the geo.distance() or the
|
|
380
|
+
* search.score() functions. Each expression can be followed by asc to indicate ascending, or
|
|
381
|
+
* desc to indicate descending. The default is ascending order. Ties will be broken by the match
|
|
382
|
+
* scores of documents. If no $orderby is specified, the default sort order is descending by
|
|
383
|
+
* document match score. There can be at most 32 $orderby clauses.
|
|
384
|
+
*/
|
|
385
|
+
orderBy?: string[];
|
|
386
|
+
/**
|
|
387
|
+
* A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if
|
|
388
|
+
* your query uses the Lucene query syntax. Possible values include: 'simple', 'full', 'semantic'
|
|
389
|
+
*/
|
|
390
|
+
queryType?: QueryType;
|
|
391
|
+
/**
|
|
392
|
+
* The list of parameter values to be used in scoring functions (for example,
|
|
393
|
+
* referencePointParameter) using the format name-values. For example, if the scoring profile
|
|
394
|
+
* defines a function with a parameter called 'mylocation' the parameter string would be
|
|
395
|
+
* "mylocation--122.2,44.8" (without the quotes).
|
|
396
|
+
*/
|
|
397
|
+
scoringParameters?: string[];
|
|
398
|
+
/**
|
|
399
|
+
* The name of a scoring profile to evaluate match scores for matching documents in order to sort
|
|
400
|
+
* the results.
|
|
401
|
+
*/
|
|
402
|
+
scoringProfile?: string;
|
|
403
|
+
/**
|
|
404
|
+
* The comma-separated list of field names to which to scope the full-text search. When using
|
|
405
|
+
* fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each
|
|
406
|
+
* fielded search expression take precedence over any field names listed in this parameter.
|
|
407
|
+
*/
|
|
408
|
+
searchFields?: SearchFieldArray<TModel>;
|
|
409
|
+
/**
|
|
410
|
+
* The language of the query.
|
|
411
|
+
*/
|
|
412
|
+
queryLanguage?: QueryLanguage;
|
|
413
|
+
/**
|
|
414
|
+
* Improve search recall by spell-correcting individual search query terms.
|
|
415
|
+
*/
|
|
416
|
+
speller?: Speller;
|
|
417
|
+
/**
|
|
418
|
+
* A value that specifies whether any or all of the search terms must be matched in order to
|
|
419
|
+
* count the document as a match. Possible values include: 'any', 'all'
|
|
420
|
+
*/
|
|
421
|
+
searchMode?: SearchMode;
|
|
422
|
+
/**
|
|
423
|
+
* A value that specifies whether we want to calculate scoring statistics (such as document
|
|
424
|
+
* frequency) globally for more consistent scoring, or locally, for lower latency. Possible
|
|
425
|
+
* values include: 'Local', 'Global'
|
|
426
|
+
*/
|
|
427
|
+
scoringStatistics?: ScoringStatistics;
|
|
428
|
+
/**
|
|
429
|
+
* A value to be used to create a sticky session, which can help to get more consistent results.
|
|
430
|
+
* As long as the same sessionId is used, a best-effort attempt will be made to target the same
|
|
431
|
+
* replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the
|
|
432
|
+
* load balancing of the requests across replicas and adversely affect the performance of the
|
|
433
|
+
* search service. The value used as sessionId cannot start with a '_' character.
|
|
434
|
+
*/
|
|
435
|
+
sessionId?: string;
|
|
436
|
+
/**
|
|
437
|
+
* The list of fields to retrieve. If unspecified, all fields marked as
|
|
438
|
+
* retrievable in the schema are included.
|
|
439
|
+
*/
|
|
440
|
+
select?: SelectArray<TFields>;
|
|
441
|
+
/**
|
|
442
|
+
* The number of search results to skip. This value cannot be greater than 100,000. If you need
|
|
443
|
+
* to scan documents in sequence, but cannot use skip due to this limitation, consider using
|
|
444
|
+
* orderby on a totally-ordered key and filter with a range query instead.
|
|
445
|
+
*/
|
|
446
|
+
skip?: number;
|
|
447
|
+
/**
|
|
448
|
+
* The number of search results to retrieve. This can be used in conjunction with $skip to
|
|
449
|
+
* implement client-side paging of search results. If results are truncated due to server-side
|
|
450
|
+
* paging, the response will include a continuation token that can be used to issue another
|
|
451
|
+
* Search request for the next page of results.
|
|
452
|
+
*/
|
|
453
|
+
top?: number;
|
|
454
|
+
/**
|
|
455
|
+
* Defines options for vector search queries
|
|
456
|
+
*/
|
|
457
|
+
vectorSearchOptions?: VectorSearchOptions<TModel>;
|
|
458
|
+
}
|
|
459
|
+
|
|
398
460
|
/** Base type for token filters. */
|
|
399
461
|
export declare interface BaseTokenFilter {
|
|
400
462
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
@@ -411,8 +473,19 @@ export declare interface BaseVectorQuery<TModel extends object> {
|
|
|
411
473
|
kNearestNeighborsCount?: number;
|
|
412
474
|
/** Vector Fields of type Collection(Edm.Single) to be included in the vector searched. */
|
|
413
475
|
fields?: SearchFieldArray<TModel>;
|
|
414
|
-
/**
|
|
476
|
+
/**
|
|
477
|
+
* When true, triggers an exhaustive k-nearest neighbor search across all vectors within the
|
|
478
|
+
* vector index. Useful for scenarios where exact matches are critical, such as determining ground
|
|
479
|
+
* truth values.
|
|
480
|
+
*/
|
|
415
481
|
exhaustive?: boolean;
|
|
482
|
+
/**
|
|
483
|
+
* Oversampling factor. Minimum value is 1. It overrides the 'defaultOversampling' parameter
|
|
484
|
+
* configured in the index definition. It can be set only when 'rerankWithOriginalVectors' is
|
|
485
|
+
* true. This parameter is only permitted when a compression method is used on the underlying
|
|
486
|
+
* vector field.
|
|
487
|
+
*/
|
|
488
|
+
oversampling?: number;
|
|
416
489
|
}
|
|
417
490
|
|
|
418
491
|
/** Contains configuration options specific to the algorithm used during indexing and/or querying. */
|
|
@@ -423,6 +496,18 @@ export declare interface BaseVectorSearchAlgorithmConfiguration {
|
|
|
423
496
|
name: string;
|
|
424
497
|
}
|
|
425
498
|
|
|
499
|
+
/** Contains configuration options specific to the compression method used during indexing or querying. */
|
|
500
|
+
export declare interface BaseVectorSearchCompressionConfiguration {
|
|
501
|
+
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
502
|
+
kind: "scalarQuantization";
|
|
503
|
+
/** The name to associate with this particular configuration. */
|
|
504
|
+
name: string;
|
|
505
|
+
/** If set to true, once the ordered set of results calculated using compressed vectors are obtained, they will be reranked again by recalculating the full-precision similarity scores. This will improve recall at the expense of latency. */
|
|
506
|
+
rerankWithOriginalVectors?: boolean;
|
|
507
|
+
/** Default oversampling factor. Oversampling will internally request more documents (specified by this multiplier) in the initial search. This increases the set of results that will be reranked using recomputed similarity scores from full-precision vectors. Minimum value is 1, meaning no oversampling (1x). This parameter can only be set when rerankWithOriginalVectors is true. Higher values improve recall at the expense of latency. */
|
|
508
|
+
defaultOversampling?: number;
|
|
509
|
+
}
|
|
510
|
+
|
|
426
511
|
/** Contains specific details for a vectorization method to be used during query time. */
|
|
427
512
|
export declare interface BaseVectorSearchVectorizer {
|
|
428
513
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
@@ -431,88 +516,24 @@ export declare interface BaseVectorSearchVectorizer {
|
|
|
431
516
|
name: string;
|
|
432
517
|
}
|
|
433
518
|
|
|
434
|
-
|
|
435
|
-
* Defines values for BlobIndexerDataToExtract. \
|
|
436
|
-
* {@link KnownBlobIndexerDataToExtract} can be used interchangeably with BlobIndexerDataToExtract,
|
|
437
|
-
* this enum contains the known values that the service supports.
|
|
438
|
-
* ### Known values supported by the service
|
|
439
|
-
* **storageMetadata**: Indexes just the standard blob properties and user-specified metadata. \
|
|
440
|
-
* **allMetadata**: Extracts metadata provided by the Azure blob storage subsystem and the content-type specific metadata (for example, metadata unique to just .png files are indexed). \
|
|
441
|
-
* **contentAndMetadata**: Extracts all metadata and textual content from each blob.
|
|
442
|
-
*/
|
|
443
|
-
export declare type BlobIndexerDataToExtract = string;
|
|
519
|
+
export declare type BlobIndexerDataToExtract = "storageMetadata" | "allMetadata" | "contentAndMetadata";
|
|
444
520
|
|
|
445
|
-
|
|
446
|
-
* Defines values for BlobIndexerImageAction. \
|
|
447
|
-
* {@link KnownBlobIndexerImageAction} can be used interchangeably with BlobIndexerImageAction,
|
|
448
|
-
* this enum contains the known values that the service supports.
|
|
449
|
-
* ### Known values supported by the service
|
|
450
|
-
* **none**: Ignores embedded images or image files in the data set. This is the default. \
|
|
451
|
-
* **generateNormalizedImages**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field. This action requires that "dataToExtract" is set to "contentAndMetadata". A normalized image refers to additional processing resulting in uniform image output, sized and rotated to promote consistent rendering when you include images in visual search results. This information is generated for each image when you use this option. \
|
|
452
|
-
* **generateNormalizedImagePerPage**: Extracts text from images (for example, the word "STOP" from a traffic stop sign), and embeds it into the content field, but treats PDF files differently in that each page will be rendered as an image and normalized accordingly, instead of extracting embedded images. Non-PDF file types will be treated the same as if "generateNormalizedImages" was set.
|
|
453
|
-
*/
|
|
454
|
-
export declare type BlobIndexerImageAction = string;
|
|
521
|
+
export declare type BlobIndexerImageAction = "none" | "generateNormalizedImages" | "generateNormalizedImagePerPage";
|
|
455
522
|
|
|
456
|
-
|
|
457
|
-
* Defines values for BlobIndexerParsingMode. \
|
|
458
|
-
* {@link KnownBlobIndexerParsingMode} can be used interchangeably with BlobIndexerParsingMode,
|
|
459
|
-
* this enum contains the known values that the service supports.
|
|
460
|
-
* ### Known values supported by the service
|
|
461
|
-
* **default**: Set to default for normal file processing. \
|
|
462
|
-
* **text**: Set to text to improve indexing performance on plain text files in blob storage. \
|
|
463
|
-
* **delimitedText**: Set to delimitedText when blobs are plain CSV files. \
|
|
464
|
-
* **json**: Set to json to extract structured content from JSON files. \
|
|
465
|
-
* **jsonArray**: Set to jsonArray to extract individual elements of a JSON array as separate documents in Azure Cognitive Search. \
|
|
466
|
-
* **jsonLines**: Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents in Azure Cognitive Search.
|
|
467
|
-
*/
|
|
468
|
-
export declare type BlobIndexerParsingMode = string;
|
|
523
|
+
export declare type BlobIndexerParsingMode = "default" | "text" | "delimitedText" | "json" | "jsonArray" | "jsonLines";
|
|
469
524
|
|
|
470
|
-
|
|
471
|
-
* Defines values for BlobIndexerPDFTextRotationAlgorithm. \
|
|
472
|
-
* {@link KnownBlobIndexerPDFTextRotationAlgorithm} can be used interchangeably with BlobIndexerPDFTextRotationAlgorithm,
|
|
473
|
-
* this enum contains the known values that the service supports.
|
|
474
|
-
* ### Known values supported by the service
|
|
475
|
-
* **none**: Leverages normal text extraction. This is the default. \
|
|
476
|
-
* **detectAngles**: May produce better and more readable text extraction from PDF files that have rotated text within them. Note that there may be a small performance speed impact when this parameter is used. This parameter only applies to PDF files, and only to PDFs with embedded text. If the rotated text appears within an embedded image in the PDF, this parameter does not apply.
|
|
477
|
-
*/
|
|
478
|
-
export declare type BlobIndexerPDFTextRotationAlgorithm = string;
|
|
525
|
+
export declare type BlobIndexerPDFTextRotationAlgorithm = "none" | "detectAngles";
|
|
479
526
|
|
|
480
527
|
/** Ranking function based on the Okapi BM25 similarity algorithm. BM25 is a TF-IDF-like algorithm that includes length normalization (controlled by the 'b' parameter) as well as term frequency saturation (controlled by the 'k1' parameter). */
|
|
481
|
-
export declare
|
|
528
|
+
export declare interface BM25Similarity extends Similarity {
|
|
482
529
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
483
530
|
odatatype: "#Microsoft.Azure.Search.BM25Similarity";
|
|
484
531
|
/** This property controls the scaling function between the term frequency of each matching terms and the final relevance score of a document-query pair. By default, a value of 1.2 is used. A value of 0.0 means the score does not scale with an increase in term frequency. */
|
|
485
532
|
k1?: number;
|
|
486
533
|
/** This property controls how the length of a document affects the relevance score. By default, a value of 0.75 is used. A value of 0.0 means no length normalization is applied, while a value of 1.0 means the score is fully normalized by the length of the document. */
|
|
487
534
|
b?: number;
|
|
488
|
-
};
|
|
489
|
-
|
|
490
|
-
/** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.. */
|
|
491
|
-
export declare interface CaptionResult {
|
|
492
|
-
/** Describes unknown properties. The value of an unknown property can be of "any" type. */
|
|
493
|
-
[property: string]: any;
|
|
494
|
-
/**
|
|
495
|
-
* A representative text passage extracted from the document most relevant to the search query.
|
|
496
|
-
* NOTE: This property will not be serialized. It can only be populated by the server.
|
|
497
|
-
*/
|
|
498
|
-
readonly text?: string;
|
|
499
|
-
/**
|
|
500
|
-
* Same text passage as in the Text property with highlighted phrases most relevant to the query.
|
|
501
|
-
* NOTE: This property will not be serialized. It can only be populated by the server.
|
|
502
|
-
*/
|
|
503
|
-
readonly highlights?: string;
|
|
504
535
|
}
|
|
505
536
|
|
|
506
|
-
/**
|
|
507
|
-
* Defines values for Captions. \
|
|
508
|
-
* {@link KnownCaptions} can be used interchangeably with Captions,
|
|
509
|
-
* this enum contains the known values that the service supports.
|
|
510
|
-
* ### Known values supported by the service
|
|
511
|
-
* **none**: Do not return captions for the query. \
|
|
512
|
-
* **extractive**: Extracts captions from the matching documents that contain passages relevant to the search query.
|
|
513
|
-
*/
|
|
514
|
-
export declare type Captions = string;
|
|
515
|
-
|
|
516
537
|
/**
|
|
517
538
|
* Contains the possible cases for CharFilter.
|
|
518
539
|
*/
|
|
@@ -528,47 +549,47 @@ export declare type CharFilter = MappingCharFilter | PatternReplaceCharFilter;
|
|
|
528
549
|
export declare type CharFilterName = string;
|
|
529
550
|
|
|
530
551
|
/** Forms bigrams of CJK terms that are generated from the standard tokenizer. This token filter is implemented using Apache Lucene. */
|
|
531
|
-
export declare
|
|
552
|
+
export declare interface CjkBigramTokenFilter extends BaseTokenFilter {
|
|
532
553
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
533
554
|
odatatype: "#Microsoft.Azure.Search.CjkBigramTokenFilter";
|
|
534
555
|
/** The scripts to ignore. */
|
|
535
556
|
ignoreScripts?: CjkBigramTokenFilterScripts[];
|
|
536
557
|
/** A value indicating whether to output both unigrams and bigrams (if true), or just bigrams (if false). Default is false. */
|
|
537
558
|
outputUnigrams?: boolean;
|
|
538
|
-
}
|
|
559
|
+
}
|
|
539
560
|
|
|
540
561
|
/** Defines values for CjkBigramTokenFilterScripts. */
|
|
541
562
|
export declare type CjkBigramTokenFilterScripts = "han" | "hiragana" | "katakana" | "hangul";
|
|
542
563
|
|
|
543
564
|
/** Legacy similarity algorithm which uses the Lucene TFIDFSimilarity implementation of TF-IDF. This variation of TF-IDF introduces static document length normalization as well as coordinating factors that penalize documents that only partially match the searched queries. */
|
|
544
|
-
export declare
|
|
565
|
+
export declare interface ClassicSimilarity extends Similarity {
|
|
545
566
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
546
567
|
odatatype: "#Microsoft.Azure.Search.ClassicSimilarity";
|
|
547
|
-
}
|
|
568
|
+
}
|
|
548
569
|
|
|
549
570
|
/** Grammar-based tokenizer that is suitable for processing most European-language documents. This tokenizer is implemented using Apache Lucene. */
|
|
550
|
-
export declare
|
|
571
|
+
export declare interface ClassicTokenizer extends BaseLexicalTokenizer {
|
|
551
572
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
552
573
|
odatatype: "#Microsoft.Azure.Search.ClassicTokenizer";
|
|
553
574
|
/** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
|
|
554
575
|
maxTokenLength?: number;
|
|
555
|
-
}
|
|
576
|
+
}
|
|
556
577
|
|
|
557
578
|
/**
|
|
558
579
|
* Contains the possible cases for CognitiveServicesAccount.
|
|
559
580
|
*/
|
|
560
581
|
export declare type CognitiveServicesAccount = DefaultCognitiveServicesAccount | CognitiveServicesAccountKey;
|
|
561
582
|
|
|
562
|
-
/**
|
|
563
|
-
export declare
|
|
583
|
+
/** The multi-region account key of an Azure AI service resource that's attached to a skillset. */
|
|
584
|
+
export declare interface CognitiveServicesAccountKey extends BaseCognitiveServicesAccount {
|
|
564
585
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
565
586
|
odatatype: "#Microsoft.Azure.Search.CognitiveServicesByKey";
|
|
566
|
-
/** The key used to provision the
|
|
587
|
+
/** The key used to provision the Azure AI service resource attached to a skillset. */
|
|
567
588
|
key: string;
|
|
568
|
-
}
|
|
589
|
+
}
|
|
569
590
|
|
|
570
591
|
/** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. This token filter is implemented using Apache Lucene. */
|
|
571
|
-
export declare
|
|
592
|
+
export declare interface CommonGramTokenFilter extends BaseTokenFilter {
|
|
572
593
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
573
594
|
odatatype: "#Microsoft.Azure.Search.CommonGramTokenFilter";
|
|
574
595
|
/** The set of common words. */
|
|
@@ -577,7 +598,7 @@ export declare type CommonGramTokenFilter = BaseTokenFilter & {
|
|
|
577
598
|
ignoreCase?: boolean;
|
|
578
599
|
/** A value that indicates whether the token filter is in query mode. When in query mode, the token filter generates bigrams and then removes common words and single terms followed by a common word. Default is false. */
|
|
579
600
|
useQueryMode?: boolean;
|
|
580
|
-
}
|
|
601
|
+
}
|
|
581
602
|
|
|
582
603
|
/**
|
|
583
604
|
* Defines values for ComplexDataType.
|
|
@@ -608,10 +629,10 @@ export declare interface ComplexField {
|
|
|
608
629
|
}
|
|
609
630
|
|
|
610
631
|
/** A skill that enables scenarios that require a Boolean operation to determine the data to assign to an output. */
|
|
611
|
-
export declare
|
|
632
|
+
export declare interface ConditionalSkill extends BaseSearchIndexerSkill {
|
|
612
633
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
613
634
|
odatatype: "#Microsoft.Skills.Util.ConditionalSkill";
|
|
614
|
-
}
|
|
635
|
+
}
|
|
615
636
|
|
|
616
637
|
/** Defines options to control Cross-Origin Resource Sharing (CORS) for an index. */
|
|
617
638
|
export declare interface CorsOptions {
|
|
@@ -766,21 +787,21 @@ export declare interface CustomAnalyzer {
|
|
|
766
787
|
name: string;
|
|
767
788
|
/**
|
|
768
789
|
* The name of the tokenizer to use to divide continuous text into a sequence of tokens, such as
|
|
769
|
-
* breaking a sentence into words. KnownTokenizerNames is an enum containing
|
|
790
|
+
* breaking a sentence into words. {@link KnownTokenizerNames} is an enum containing built-in tokenizer names.
|
|
770
791
|
*/
|
|
771
|
-
tokenizerName:
|
|
792
|
+
tokenizerName: LexicalTokenizerName;
|
|
772
793
|
/**
|
|
773
794
|
* A list of token filters used to filter out or modify the tokens generated by a tokenizer. For
|
|
774
795
|
* example, you can specify a lowercase filter that converts all characters to lowercase. The
|
|
775
796
|
* filters are run in the order in which they are listed.
|
|
776
797
|
*/
|
|
777
|
-
tokenFilters?:
|
|
798
|
+
tokenFilters?: TokenFilterName[];
|
|
778
799
|
/**
|
|
779
800
|
* A list of character filters used to prepare input text before it is processed by the
|
|
780
801
|
* tokenizer. For instance, they can replace certain characters or symbols. The filters are run
|
|
781
802
|
* in the order in which they are listed.
|
|
782
803
|
*/
|
|
783
|
-
charFilters?:
|
|
804
|
+
charFilters?: CharFilterName[];
|
|
784
805
|
}
|
|
785
806
|
|
|
786
807
|
/** An object that contains information about the matches that were found, and related metadata. */
|
|
@@ -824,7 +845,7 @@ export declare interface CustomEntityAlias {
|
|
|
824
845
|
}
|
|
825
846
|
|
|
826
847
|
/** A skill looks for text from a custom, user-defined list of words and phrases. */
|
|
827
|
-
export declare
|
|
848
|
+
export declare interface CustomEntityLookupSkill extends BaseSearchIndexerSkill {
|
|
828
849
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
829
850
|
odatatype: "#Microsoft.Skills.Text.CustomEntityLookupSkill";
|
|
830
851
|
/** A value indicating which language code to use. Default is en. */
|
|
@@ -839,34 +860,19 @@ export declare type CustomEntityLookupSkill = BaseSearchIndexerSkill & {
|
|
|
839
860
|
globalDefaultAccentSensitive?: boolean;
|
|
840
861
|
/** A global flag for FuzzyEditDistance. If FuzzyEditDistance is not set in CustomEntity, this value will be the default value. */
|
|
841
862
|
globalDefaultFuzzyEditDistance?: number;
|
|
842
|
-
}
|
|
863
|
+
}
|
|
843
864
|
|
|
844
|
-
|
|
845
|
-
* Defines values for CustomEntityLookupSkillLanguage. \
|
|
846
|
-
* {@link KnownCustomEntityLookupSkillLanguage} can be used interchangeably with CustomEntityLookupSkillLanguage,
|
|
847
|
-
* this enum contains the known values that the service supports.
|
|
848
|
-
* ### Known values supported by the service
|
|
849
|
-
* **da**: Danish \
|
|
850
|
-
* **de**: German \
|
|
851
|
-
* **en**: English \
|
|
852
|
-
* **es**: Spanish \
|
|
853
|
-
* **fi**: Finnish \
|
|
854
|
-
* **fr**: French \
|
|
855
|
-
* **it**: Italian \
|
|
856
|
-
* **ko**: Korean \
|
|
857
|
-
* **pt**: Portuguese
|
|
858
|
-
*/
|
|
859
|
-
export declare type CustomEntityLookupSkillLanguage = string;
|
|
865
|
+
export declare type CustomEntityLookupSkillLanguage = "da" | "de" | "en" | "es" | "fi" | "fr" | "it" | "ko" | "pt";
|
|
860
866
|
|
|
861
867
|
/** Allows you to configure normalization for filterable, sortable, and facetable fields, which by default operate with strict matching. This is a user-defined configuration consisting of at least one or more filters, which modify the token that is stored. */
|
|
862
|
-
export declare
|
|
868
|
+
export declare interface CustomNormalizer extends BaseLexicalNormalizer {
|
|
863
869
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
864
870
|
odatatype: "#Microsoft.Azure.Search.CustomNormalizer";
|
|
865
871
|
/** A list of token filters used to filter out or modify the input token. For example, you can specify a lowercase filter that converts all characters to lowercase. The filters are run in the order in which they are listed. */
|
|
866
872
|
tokenFilters?: TokenFilterName[];
|
|
867
873
|
/** A list of character filters used to prepare input text before it is processed. For instance, they can replace certain characters or symbols. The filters are run in the order in which they are listed. */
|
|
868
874
|
charFilters?: CharFilterName[];
|
|
869
|
-
}
|
|
875
|
+
}
|
|
870
876
|
|
|
871
877
|
/** Contains the parameters specific to generating vector embeddings via a custom endpoint. */
|
|
872
878
|
export declare type CustomVectorizer = BaseVectorSearchVectorizer & {
|
|
@@ -917,11 +923,11 @@ export declare const DEFAULT_FLUSH_WINDOW: number;
|
|
|
917
923
|
*/
|
|
918
924
|
export declare const DEFAULT_RETRY_COUNT: number;
|
|
919
925
|
|
|
920
|
-
/** An empty object that represents the default
|
|
921
|
-
export declare
|
|
926
|
+
/** An empty object that represents the default Azure AI service resource for a skillset. */
|
|
927
|
+
export declare interface DefaultCognitiveServicesAccount extends BaseCognitiveServicesAccount {
|
|
922
928
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
923
929
|
odatatype: "#Microsoft.Azure.Search.DefaultCognitiveServices";
|
|
924
|
-
}
|
|
930
|
+
}
|
|
925
931
|
|
|
926
932
|
/**
|
|
927
933
|
* Options for delete alias operation.
|
|
@@ -989,7 +995,7 @@ export declare interface DeleteSynonymMapOptions extends OperationOptions {
|
|
|
989
995
|
}
|
|
990
996
|
|
|
991
997
|
/** Decomposes compound words found in many Germanic languages. This token filter is implemented using Apache Lucene. */
|
|
992
|
-
export declare
|
|
998
|
+
export declare interface DictionaryDecompounderTokenFilter extends BaseTokenFilter {
|
|
993
999
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
994
1000
|
odatatype: "#Microsoft.Azure.Search.DictionaryDecompounderTokenFilter";
|
|
995
1001
|
/** The list of words to match against. */
|
|
@@ -1002,15 +1008,15 @@ export declare type DictionaryDecompounderTokenFilter = BaseTokenFilter & {
|
|
|
1002
1008
|
maxSubwordSize?: number;
|
|
1003
1009
|
/** A value indicating whether to add only the longest matching subword to the output. Default is false. */
|
|
1004
1010
|
onlyLongestMatch?: boolean;
|
|
1005
|
-
}
|
|
1011
|
+
}
|
|
1006
1012
|
|
|
1007
1013
|
/** Defines a function that boosts scores based on distance from a geographic location. */
|
|
1008
|
-
export declare
|
|
1014
|
+
export declare interface DistanceScoringFunction extends BaseScoringFunction {
|
|
1009
1015
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1010
1016
|
type: "distance";
|
|
1011
1017
|
/** Parameter values for the distance scoring function. */
|
|
1012
1018
|
parameters: DistanceScoringParameters;
|
|
1013
|
-
}
|
|
1019
|
+
}
|
|
1014
1020
|
|
|
1015
1021
|
/** Provides parameter values to a distance scoring function. */
|
|
1016
1022
|
export declare interface DistanceScoringParameters {
|
|
@@ -1030,7 +1036,7 @@ export declare interface DocumentDebugInfo {
|
|
|
1030
1036
|
}
|
|
1031
1037
|
|
|
1032
1038
|
/** A skill that extracts content from a file within the enrichment pipeline. */
|
|
1033
|
-
export declare
|
|
1039
|
+
export declare interface DocumentExtractionSkill extends BaseSearchIndexerSkill {
|
|
1034
1040
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1035
1041
|
odatatype: "#Microsoft.Skills.Util.DocumentExtractionSkill";
|
|
1036
1042
|
/** The parsingMode for the skill. Will be set to 'default' if not defined. */
|
|
@@ -1041,7 +1047,7 @@ export declare type DocumentExtractionSkill = BaseSearchIndexerSkill & {
|
|
|
1041
1047
|
configuration?: {
|
|
1042
1048
|
[propertyName: string]: any;
|
|
1043
1049
|
};
|
|
1044
|
-
}
|
|
1050
|
+
}
|
|
1045
1051
|
|
|
1046
1052
|
/**
|
|
1047
1053
|
* Generates n-grams of the given size(s) starting from the front or the back of an input token.
|
|
@@ -1078,7 +1084,7 @@ export declare interface EdgeNGramTokenFilter {
|
|
|
1078
1084
|
export declare type EdgeNGramTokenFilterSide = "front" | "back";
|
|
1079
1085
|
|
|
1080
1086
|
/** Tokenizes the input from an edge into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
|
|
1081
|
-
export declare
|
|
1087
|
+
export declare interface EdgeNGramTokenizer extends BaseLexicalTokenizer {
|
|
1082
1088
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1083
1089
|
odatatype: "#Microsoft.Azure.Search.EdgeNGramTokenizer";
|
|
1084
1090
|
/** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
|
|
@@ -1087,49 +1093,36 @@ export declare type EdgeNGramTokenizer = BaseLexicalTokenizer & {
|
|
|
1087
1093
|
maxGram?: number;
|
|
1088
1094
|
/** Character classes to keep in the tokens. */
|
|
1089
1095
|
tokenChars?: TokenCharacterKind[];
|
|
1090
|
-
}
|
|
1096
|
+
}
|
|
1091
1097
|
|
|
1092
1098
|
/** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). This token filter is implemented using Apache Lucene. */
|
|
1093
|
-
export declare
|
|
1099
|
+
export declare interface ElisionTokenFilter extends BaseTokenFilter {
|
|
1094
1100
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1095
1101
|
odatatype: "#Microsoft.Azure.Search.ElisionTokenFilter";
|
|
1096
1102
|
/** The set of articles to remove. */
|
|
1097
1103
|
articles?: string[];
|
|
1098
|
-
}
|
|
1104
|
+
}
|
|
1099
1105
|
|
|
1100
|
-
|
|
1101
|
-
* Defines values for EntityCategory. \
|
|
1102
|
-
* {@link KnownEntityCategory} can be used interchangeably with EntityCategory,
|
|
1103
|
-
* this enum contains the known values that the service supports.
|
|
1104
|
-
* ### Known values supported by the service
|
|
1105
|
-
* **location**: Entities describing a physical location. \
|
|
1106
|
-
* **organization**: Entities describing an organization. \
|
|
1107
|
-
* **person**: Entities describing a person. \
|
|
1108
|
-
* **quantity**: Entities describing a quantity. \
|
|
1109
|
-
* **datetime**: Entities describing a date and time. \
|
|
1110
|
-
* **url**: Entities describing a URL. \
|
|
1111
|
-
* **email**: Entities describing an email address.
|
|
1112
|
-
*/
|
|
1113
|
-
export declare type EntityCategory = string;
|
|
1106
|
+
export declare type EntityCategory = "location" | "organization" | "person" | "quantity" | "datetime" | "url" | "email";
|
|
1114
1107
|
|
|
1115
1108
|
/** Using the Text Analytics API, extracts linked entities from text. */
|
|
1116
|
-
export declare
|
|
1109
|
+
export declare interface EntityLinkingSkill extends BaseSearchIndexerSkill {
|
|
1117
1110
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1118
1111
|
odatatype: "#Microsoft.Skills.Text.V3.EntityLinkingSkill";
|
|
1119
|
-
/** A value indicating which language code to use. Default is en
|
|
1112
|
+
/** A value indicating which language code to use. Default is `en`. */
|
|
1120
1113
|
defaultLanguageCode?: string;
|
|
1121
1114
|
/** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
|
|
1122
1115
|
minimumPrecision?: number;
|
|
1123
1116
|
/** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
|
|
1124
1117
|
modelVersion?: string;
|
|
1125
|
-
}
|
|
1118
|
+
}
|
|
1126
1119
|
|
|
1127
1120
|
/**
|
|
1128
|
-
*
|
|
1121
|
+
* Text analytics entity recognition.
|
|
1129
1122
|
*
|
|
1130
|
-
* @deprecated
|
|
1123
|
+
* @deprecated This skill has been deprecated.
|
|
1131
1124
|
*/
|
|
1132
|
-
export declare
|
|
1125
|
+
export declare interface EntityRecognitionSkill extends BaseSearchIndexerSkill {
|
|
1133
1126
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1134
1127
|
odatatype: "#Microsoft.Skills.Text.EntityRecognitionSkill";
|
|
1135
1128
|
/** A list of entity categories that should be extracted. */
|
|
@@ -1140,73 +1133,66 @@ export declare type EntityRecognitionSkill = BaseSearchIndexerSkill & {
|
|
|
1140
1133
|
includeTypelessEntities?: boolean;
|
|
1141
1134
|
/** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
|
|
1142
1135
|
minimumPrecision?: number;
|
|
1143
|
-
}
|
|
1136
|
+
}
|
|
1144
1137
|
|
|
1145
|
-
|
|
1146
|
-
* Defines values for EntityRecognitionSkillLanguage. \
|
|
1147
|
-
* {@link KnownEntityRecognitionSkillLanguage} can be used interchangeably with EntityRecognitionSkillLanguage,
|
|
1148
|
-
* this enum contains the known values that the service supports.
|
|
1149
|
-
* ### Known values supported by the service
|
|
1150
|
-
* **ar**: Arabic \
|
|
1151
|
-
* **cs**: Czech \
|
|
1152
|
-
* **zh-Hans**: Chinese-Simplified \
|
|
1153
|
-
* **zh-Hant**: Chinese-Traditional \
|
|
1154
|
-
* **da**: Danish \
|
|
1155
|
-
* **nl**: Dutch \
|
|
1156
|
-
* **en**: English \
|
|
1157
|
-
* **fi**: Finnish \
|
|
1158
|
-
* **fr**: French \
|
|
1159
|
-
* **de**: German \
|
|
1160
|
-
* **el**: Greek \
|
|
1161
|
-
* **hu**: Hungarian \
|
|
1162
|
-
* **it**: Italian \
|
|
1163
|
-
* **ja**: Japanese \
|
|
1164
|
-
* **ko**: Korean \
|
|
1165
|
-
* **no**: Norwegian (Bokmaal) \
|
|
1166
|
-
* **pl**: Polish \
|
|
1167
|
-
* **pt-PT**: Portuguese (Portugal) \
|
|
1168
|
-
* **pt-BR**: Portuguese (Brazil) \
|
|
1169
|
-
* **ru**: Russian \
|
|
1170
|
-
* **es**: Spanish \
|
|
1171
|
-
* **sv**: Swedish \
|
|
1172
|
-
* **tr**: Turkish
|
|
1173
|
-
*/
|
|
1174
|
-
export declare type EntityRecognitionSkillLanguage = string;
|
|
1138
|
+
export declare type EntityRecognitionSkillLanguage = "ar" | "cs" | "zh-Hans" | "zh-Hant" | "da" | "nl" | "en" | "fi" | "fr" | "de" | "el" | "hu" | "it" | "ja" | "ko" | "no" | "pl" | "pt-PT" | "pt-BR" | "ru" | "es" | "sv" | "tr";
|
|
1175
1139
|
|
|
1176
1140
|
/** Using the Text Analytics API, extracts entities of different types from text. */
|
|
1177
|
-
export declare
|
|
1141
|
+
export declare interface EntityRecognitionSkillV3 extends BaseSearchIndexerSkill {
|
|
1178
1142
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1179
1143
|
odatatype: "#Microsoft.Skills.Text.V3.EntityRecognitionSkill";
|
|
1180
1144
|
/** A list of entity categories that should be extracted. */
|
|
1181
1145
|
categories?: string[];
|
|
1182
|
-
/** A value indicating which language code to use. Default is en
|
|
1146
|
+
/** A value indicating which language code to use. Default is `en`. */
|
|
1183
1147
|
defaultLanguageCode?: string;
|
|
1184
1148
|
/** A value between 0 and 1 that be used to only include entities whose confidence score is greater than the value specified. If not set (default), or if explicitly set to null, all entities will be included. */
|
|
1185
1149
|
minimumPrecision?: number;
|
|
1186
|
-
/** The version of the model to use when calling the Text Analytics
|
|
1150
|
+
/** The version of the model to use when calling the Text Analytics API. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
|
|
1187
1151
|
modelVersion?: string;
|
|
1188
|
-
}
|
|
1152
|
+
}
|
|
1189
1153
|
|
|
1190
1154
|
export declare type ExcludedODataTypes = Date | GeographyPoint;
|
|
1191
1155
|
|
|
1192
|
-
/** Contains the parameters specific to exhaustive KNN algorithm. */
|
|
1193
|
-
export declare interface ExhaustiveKnnParameters {
|
|
1194
|
-
/** The similarity metric to use for vector comparisons. */
|
|
1195
|
-
metric?: VectorSearchAlgorithmMetric;
|
|
1196
|
-
}
|
|
1197
|
-
|
|
1198
1156
|
/** Contains configuration options specific to the exhaustive KNN algorithm used during querying, which will perform brute-force search across the entire vector index. */
|
|
1199
|
-
export declare type
|
|
1157
|
+
export declare type ExhaustiveKnnAlgorithmConfiguration = BaseVectorSearchAlgorithmConfiguration & {
|
|
1200
1158
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1201
1159
|
kind: "exhaustiveKnn";
|
|
1202
1160
|
/** Contains the parameters specific to exhaustive KNN algorithm. */
|
|
1203
1161
|
parameters?: ExhaustiveKnnParameters;
|
|
1204
1162
|
};
|
|
1205
1163
|
|
|
1164
|
+
/** Contains the parameters specific to exhaustive KNN algorithm. */
|
|
1165
|
+
export declare interface ExhaustiveKnnParameters {
|
|
1166
|
+
/** The similarity metric to use for vector comparisons. */
|
|
1167
|
+
metric?: VectorSearchAlgorithmMetric;
|
|
1168
|
+
}
|
|
1169
|
+
|
|
1206
1170
|
export declare type ExtractDocumentKey<TModel> = {
|
|
1207
1171
|
[K in keyof TModel as TModel[K] extends string | undefined ? K : never]: TModel[K];
|
|
1208
1172
|
};
|
|
1209
1173
|
|
|
1174
|
+
/**
|
|
1175
|
+
* Extracts answer candidates from the contents of the documents returned in response to a query
|
|
1176
|
+
* expressed as a question in natural language.
|
|
1177
|
+
*/
|
|
1178
|
+
export declare interface ExtractiveQueryAnswer {
|
|
1179
|
+
answerType: "extractive";
|
|
1180
|
+
/**
|
|
1181
|
+
* The number of answers returned. Default count is 1
|
|
1182
|
+
*/
|
|
1183
|
+
count?: number;
|
|
1184
|
+
/**
|
|
1185
|
+
* The confidence threshold. Default threshold is 0.7
|
|
1186
|
+
*/
|
|
1187
|
+
threshold?: number;
|
|
1188
|
+
}
|
|
1189
|
+
|
|
1190
|
+
/** Extracts captions from the matching documents that contain passages relevant to the search query. */
|
|
1191
|
+
export declare interface ExtractiveQueryCaption {
|
|
1192
|
+
captionType: "extractive";
|
|
1193
|
+
highlight?: boolean;
|
|
1194
|
+
}
|
|
1195
|
+
|
|
1210
1196
|
/** A single bucket of a facet query result. Reports the number of documents with a field value falling within a particular range or having a particular value or interval. */
|
|
1211
1197
|
export declare interface FacetResult {
|
|
1212
1198
|
/** Describes unknown properties. The value of an unknown property can be of "any" type. */
|
|
@@ -1239,12 +1225,12 @@ export declare interface FieldMappingFunction {
|
|
|
1239
1225
|
}
|
|
1240
1226
|
|
|
1241
1227
|
/** Defines a function that boosts scores based on the value of a date-time field. */
|
|
1242
|
-
export declare
|
|
1228
|
+
export declare interface FreshnessScoringFunction extends BaseScoringFunction {
|
|
1243
1229
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
1244
1230
|
type: "freshness";
|
|
1245
1231
|
/** Parameter values for the freshness scoring function. */
|
|
1246
1232
|
parameters: FreshnessScoringParameters;
|
|
1247
|
-
}
|
|
1233
|
+
}
|
|
1248
1234
|
|
|
1249
1235
|
/** Provides parameter values to a freshness scoring function. */
|
|
1250
1236
|
export declare interface FreshnessScoringParameters {
|
|
@@ -1336,11 +1322,27 @@ export declare type GetSkillSetOptions = OperationOptions;
 export declare type GetSynonymMapsOptions = OperationOptions;

 /** Defines a data change detection policy that captures changes based on the value of a high water mark column. */
-export declare type HighWaterMarkChangeDetectionPolicy = BaseDataChangeDetectionPolicy & {
+export declare interface HighWaterMarkChangeDetectionPolicy extends BaseDataChangeDetectionPolicy {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.HighWaterMarkChangeDetectionPolicy";
     /** The name of the high water mark column. */
     highWaterMarkColumnName: string;
+}
+
+/**
+ * Contains configuration options specific to the hnsw approximate nearest neighbors algorithm
+ * used during indexing time.
+ */
+export declare type HnswAlgorithmConfiguration = BaseVectorSearchAlgorithmConfiguration & {
+    /**
+     * Polymorphic discriminator, which specifies the different types this object can be
+     */
+    kind: "hnsw";
+    /**
+     * Contains the parameters specific to hnsw algorithm.
+     *
+     */
+    parameters?: HnswParameters;
 };

 /**
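`HnswAlgorithmConfiguration` joins the existing `ExhaustiveKnnAlgorithmConfiguration` as a second `kind`-discriminated vector search algorithm. A sketch of both shapes; the `name` property and the `"cosine"` metric value come from `BaseVectorSearchAlgorithmConfiguration` and `VectorSearchAlgorithmMetric`, which are outside the hunks shown here, so treat them as assumptions:

```ts
import type {
  ExhaustiveKnnAlgorithmConfiguration,
  HnswAlgorithmConfiguration,
} from "@azure/search-documents";

// Approximate nearest neighbors (graph-based), tunable via HnswParameters.
const hnsw: HnswAlgorithmConfiguration = {
  name: "my-hnsw", // assumed: `name` is declared on the base configuration type
  kind: "hnsw",
  parameters: { metric: "cosine" }, // assumed metric value
};

// Brute-force scoring over all vectors; exact but slower.
const exhaustive: ExhaustiveKnnAlgorithmConfiguration = {
  name: "my-eknn",
  kind: "exhaustiveKnn",
  parameters: { metric: "cosine" },
};
```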
@@ -1372,24 +1374,20 @@ export declare interface HnswParameters {
     metric?: VectorSearchAlgorithmMetric;
 }

-/**
-
-
-
-
-
-
-
-
-
-     *
-     */
-    parameters?: HnswParameters;
-};
+/** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
+export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
+    /** Polymorphic discriminator, which specifies the different types this object can be */
+    odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill";
+    /** A value indicating which language code to use. Default is en. */
+    defaultLanguageCode?: ImageAnalysisSkillLanguage;
+    /** A list of visual features. */
+    visualFeatures?: VisualFeature[];
+    /** A string indicating which domain-specific details to return. */
+    details?: ImageDetail[];
+}

 /** A skill that analyzes image files. It extracts a rich set of visual features based on the image content. */
-export declare type ImageAnalysisSkill = BaseSearchIndexerSkill & {
+export declare interface ImageAnalysisSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Vision.ImageAnalysisSkill";
     /** A value indicating which language code to use. Default is en. */
@@ -1398,77 +1396,11 @@ export declare type ImageAnalysisSkill = BaseSearchIndexerSkill & {
     visualFeatures?: VisualFeature[];
     /** A string indicating which domain-specific details to return. */
     details?: ImageDetail[];
-};
+}

-/**
-
-
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **ar**: Arabic \
- * **az**: Azerbaijani \
- * **bg**: Bulgarian \
- * **bs**: Bosnian Latin \
- * **ca**: Catalan \
- * **cs**: Czech \
- * **cy**: Welsh \
- * **da**: Danish \
- * **de**: German \
- * **el**: Greek \
- * **en**: English \
- * **es**: Spanish \
- * **et**: Estonian \
- * **eu**: Basque \
- * **fi**: Finnish \
- * **fr**: French \
- * **ga**: Irish \
- * **gl**: Galician \
- * **he**: Hebrew \
- * **hi**: Hindi \
- * **hr**: Croatian \
- * **hu**: Hungarian \
- * **id**: Indonesian \
- * **it**: Italian \
- * **ja**: Japanese \
- * **kk**: Kazakh \
- * **ko**: Korean \
- * **lt**: Lithuanian \
- * **lv**: Latvian \
- * **mk**: Macedonian \
- * **ms**: Malay Malaysia \
- * **nb**: Norwegian (Bokmal) \
- * **nl**: Dutch \
- * **pl**: Polish \
- * **prs**: Dari \
- * **pt-BR**: Portuguese-Brazil \
- * **pt**: Portuguese-Portugal \
- * **pt-PT**: Portuguese-Portugal \
- * **ro**: Romanian \
- * **ru**: Russian \
- * **sk**: Slovak \
- * **sl**: Slovenian \
- * **sr-Cyrl**: Serbian - Cyrillic RS \
- * **sr-Latn**: Serbian - Latin RS \
- * **sv**: Swedish \
- * **th**: Thai \
- * **tr**: Turkish \
- * **uk**: Ukrainian \
- * **vi**: Vietnamese \
- * **zh**: Chinese Simplified \
- * **zh-Hans**: Chinese Simplified \
- * **zh-Hant**: Chinese Traditional
- */
-export declare type ImageAnalysisSkillLanguage = string;
-
-/**
- * Defines values for ImageDetail. \
- * {@link KnownImageDetail} can be used interchangeably with ImageDetail,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **celebrities**: Details recognized as celebrities. \
- * **landmarks**: Details recognized as landmarks.
- */
-export declare type ImageDetail = string;
+export declare type ImageAnalysisSkillLanguage = "ar" | "az" | "bg" | "bs" | "ca" | "cs" | "cy" | "da" | "de" | "el" | "en" | "es" | "et" | "eu" | "fi" | "fr" | "ga" | "gl" | "he" | "hi" | "hr" | "hu" | "id" | "it" | "ja" | "kk" | "ko" | "lt" | "lv" | "mk" | "ms" | "nb" | "nl" | "pl" | "prs" | "pt-BR" | "pt" | "pt-PT" | "ro" | "ru" | "sk" | "sl" | "sr-Cyrl" | "sr-Latn" | "sv" | "th" | "tr" | "uk" | "vi" | "zh" | "zh-Hans" | "zh-Hant";
+
+export declare type ImageDetail = "celebrities" | "landmarks";

 /** Defines values for IndexActionType. */
 export declare type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete";
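`ImageAnalysisSkillLanguage` and `ImageDetail` change from open `string` aliases to closed literal unions, so unsupported codes now fail at compile time instead of at the service. A small sketch of the effect:

```ts
import type { ImageAnalysisSkillLanguage, ImageDetail } from "@azure/search-documents";

const language: ImageAnalysisSkillLanguage = "fr"; // a member of the new union
const details: ImageDetail[] = ["celebrities", "landmarks"];

// @ts-expect-error -- arbitrary strings were accepted by the old `string` alias
const invalid: ImageAnalysisSkillLanguage = "xx";
```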
@@ -1476,58 +1408,58 @@ export declare type IndexActionType = "upload" | "merge" | "mergeOrUpload" | "delete";
 /**
  * Represents an index action that operates on a document.
  */
-export declare type IndexDocumentsAction<T> = {
+export declare type IndexDocumentsAction<TModel> = {
     /**
      * The operation to perform on a document in an indexing batch. Possible values include:
      * 'upload', 'merge', 'mergeOrUpload', 'delete'
      */
     __actionType: IndexActionType;
-} & Partial<T>;
+} & Partial<TModel>;

 /**
  * Class used to perform batch operations
  * with multiple documents to the index.
  */
-export declare class IndexDocumentsBatch<T> {
+export declare class IndexDocumentsBatch<TModel> {
     /**
      * The set of actions taken in this batch.
      */
-    readonly actions: IndexDocumentsAction<T>[];
-    constructor(actions?: IndexDocumentsAction<T>[]);
+    readonly actions: IndexDocumentsAction<TModel>[];
+    constructor(actions?: IndexDocumentsAction<TModel>[]);
     /**
      * Upload an array of documents to the index.
      * @param documents - The documents to upload.
      */
-    upload(documents: T[]): void;
+    upload(documents: TModel[]): void;
     /**
      * Update a set of documents in the index.
      * For more details about how merging works, see https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents
     * @param documents - The updated documents.
     */
-    merge(documents: T[]): void;
+    merge(documents: TModel[]): void;
     /**
      * Update a set of documents in the index or uploads them if they don't exist.
      * For more details about how merging works, see https://docs.microsoft.com/en-us/rest/api/searchservice/AddUpdate-or-Delete-Documents
      * @param documents - The new/updated documents.
      */
-    mergeOrUpload(documents: T[]): void;
+    mergeOrUpload(documents: TModel[]): void;
     /**
      * Delete a set of documents.
      * @param keyName - The name of their primary key in the index.
     * @param keyValues - The primary key values of documents to delete.
      */
-    delete(keyName: keyof T, keyValues: string[]): void;
+    delete(keyName: keyof TModel, keyValues: string[]): void;
     /**
      * Delete a set of documents.
      * @param documents - Documents to be deleted.
      */
-    delete(documents: T[]): void;
+    delete(documents: TModel[]): void;
 }

 /**
  * Index Documents Client
  */
-export declare interface IndexDocumentsClient<T extends object> {
+export declare interface IndexDocumentsClient<TModel extends object> {
     /**
      * Perform a set of index modifications (upload, merge, mergeOrUpload, delete)
      * for the given set of documents.
@@ -1535,7 +1467,7 @@ export declare interface IndexDocumentsClient<T extends object> {
      * @param batch - An array of actions to perform on the index.
      * @param options - Additional options.
      */
-    indexDocuments(batch: IndexDocumentsBatch<T>, options: IndexDocumentsOptions): Promise<IndexDocumentsResult>;
+    indexDocuments(batch: IndexDocumentsBatch<TModel>, options: IndexDocumentsOptions): Promise<IndexDocumentsResult>;
 }

 /**
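The only change to the batching API is the rename of the generic parameter from `T` to `TModel`; the method shapes are unchanged, so existing call sites keep compiling. A usage sketch with a hypothetical `Hotel` model:

```ts
import { IndexDocumentsBatch } from "@azure/search-documents";

// Hypothetical document model; any object type can stand in for TModel.
interface Hotel {
  hotelId: string;
  name?: string;
  rating?: number;
}

const batch = new IndexDocumentsBatch<Hotel>();
batch.upload([{ hotelId: "1", name: "Fancy Stay", rating: 5 }]);
batch.merge([{ hotelId: "1", rating: 4 }]);
batch.delete("hotelId", ["2", "3"]);

// batch.actions now holds the queued IndexDocumentsAction<Hotel> entries,
// ready to pass to an IndexDocumentsClient via indexDocuments(batch, options).
```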
@@ -1558,15 +1490,7 @@ export declare interface IndexDocumentsResult {
     readonly results: IndexingResult[];
 }

-/**
- * Defines values for IndexerExecutionEnvironment. \
- * {@link KnownIndexerExecutionEnvironment} can be used interchangeably with IndexerExecutionEnvironment,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **standard**: Indicates that Azure Cognitive Search can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. \
- * **private**: Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources.
- */
-export declare type IndexerExecutionEnvironment = string;
+export declare type IndexerExecutionEnvironment = "standard" | "private";

 /** Represents the result of an individual indexer execution. */
 export declare interface IndexerExecutionResult {
@@ -1793,9 +1717,14 @@ export declare type IndexIterator = PagedAsyncIterableIterator<SearchIndex, SearchIndex[], {}>;
 export declare type IndexNameIterator = PagedAsyncIterableIterator<string, string[], {}>;

 /**
- * Defines
+ * Defines values for IndexProjectionMode. \
+ * {@link KnownIndexProjectionMode} can be used interchangeably with IndexProjectionMode,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **skipIndexingParentDocuments**: The source document will be skipped from writing into the indexer's target index. \
+ * **includeIndexingParentDocuments**: The source document will be written into the indexer's target index. This is the default pattern.
  */
-export declare type IndexProjectionMode =
+export declare type IndexProjectionMode = string;

 /** Input field mapping for a skill. */
 export declare interface InputFieldMappingEntry {
@@ -1810,17 +1739,17 @@ export declare interface InputFieldMappingEntry {
 }

 /** A token filter that only keeps tokens with text contained in a specified list of words. This token filter is implemented using Apache Lucene. */
-export declare type KeepTokenFilter = BaseTokenFilter & {
+export declare interface KeepTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.KeepTokenFilter";
     /** The list of words to keep. */
     keepWords: string[];
     /** A value indicating whether to lower case all words first. Default is false. */
     lowerCaseKeepWords?: boolean;
-};
+}

 /** A skill that uses text analytics for key phrase extraction. */
-export declare type KeyPhraseExtractionSkill = BaseSearchIndexerSkill & {
+export declare interface KeyPhraseExtractionSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Text.KeyPhraseExtractionSkill";
     /** A value indicating which language code to use. Default is en. */
@@ -1829,41 +1758,19 @@ export declare type KeyPhraseExtractionSkill = BaseSearchIndexerSkill & {
     maxKeyPhraseCount?: number;
     /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
     modelVersion?: string;
-};
+}

-/**
- * Defines values for KeyPhraseExtractionSkillLanguage. \
- * {@link KnownKeyPhraseExtractionSkillLanguage} can be used interchangeably with KeyPhraseExtractionSkillLanguage,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **da**: Danish \
- * **nl**: Dutch \
- * **en**: English \
- * **fi**: Finnish \
- * **fr**: French \
- * **de**: German \
- * **it**: Italian \
- * **ja**: Japanese \
- * **ko**: Korean \
- * **no**: Norwegian (Bokmaal) \
- * **pl**: Polish \
- * **pt-PT**: Portuguese (Portugal) \
- * **pt-BR**: Portuguese (Brazil) \
- * **ru**: Russian \
- * **es**: Spanish \
- * **sv**: Swedish
- */
-export declare type KeyPhraseExtractionSkillLanguage = string;
+export declare type KeyPhraseExtractionSkillLanguage = "da" | "nl" | "en" | "fi" | "fr" | "de" | "it" | "ja" | "ko" | "no" | "pl" | "pt-PT" | "pt-BR" | "ru" | "es" | "sv";

 /** Marks terms as keywords. This token filter is implemented using Apache Lucene. */
-export declare type KeywordMarkerTokenFilter = BaseTokenFilter & {
+export declare interface KeywordMarkerTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.KeywordMarkerTokenFilter";
     /** A list of words to mark as keywords. */
     keywords: string[];
     /** A value indicating whether to ignore case. If true, all words are converted to lower case first. Default is false. */
     ignoreCase?: boolean;
-};
+}

 /**
  * Emits the entire input as a single token. This tokenizer is implemented using Apache Lucene.
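These token-filter types switch from intersection-style type aliases to interfaces, which does not change the object literals users write. A sketch of a `KeepTokenFilter` definition; `name` is assumed to come from `BaseTokenFilter`, which is outside this hunk:

```ts
import type { KeepTokenFilter } from "@azure/search-documents";

const keepColors: KeepTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.KeepTokenFilter",
  name: "keep-colors", // assumed base-type property
  keepWords: ["red", "green", "blue"],
  lowerCaseKeepWords: true,
};
```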
@@ -2265,14 +2172,6 @@ export declare enum KnownAnalyzerNames {
     Whitespace = "whitespace"
 }

-/** Known values of {@link Answers} that the service accepts. */
-export declare enum KnownAnswers {
-    /** Do not return answers for the query. */
-    None = "none",
-    /** Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language. */
-    Extractive = "extractive"
-}
-
 /** Known values of {@link BlobIndexerDataToExtract} that the service accepts. */
 export declare enum KnownBlobIndexerDataToExtract {
     /** Indexes just the standard blob properties and user-specified metadata. */
@@ -2303,9 +2202,9 @@ export declare enum KnownBlobIndexerParsingMode {
     DelimitedText = "delimitedText",
     /** Set to json to extract structured content from JSON files. */
     Json = "json",
-    /** Set to jsonArray to extract individual elements of a JSON array as separate documents
+    /** Set to jsonArray to extract individual elements of a JSON array as separate documents. */
     JsonArray = "jsonArray",
-    /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents
+    /** Set to jsonLines to extract individual JSON entities, separated by a new line, as separate documents. */
     JsonLines = "jsonLines"
 }

@@ -2319,7 +2218,7 @@ export declare enum KnownBlobIndexerPDFTextRotationAlgorithm {

 /** Known values of {@link CharFilterName} that the service accepts. */
 export declare enum KnownCharFilterName {
-    /** A character filter that attempts to strip out HTML constructs. See https
+    /** A character filter that attempts to strip out HTML constructs. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/charfilter\/HTMLStripCharFilter.html */
     HtmlStrip = "html_strip"
 }

@@ -2541,6 +2440,14 @@ export declare enum KnownImageDetail {
     Landmarks = "landmarks"
 }

+/** Known values of {@link IndexerExecutionEnvironment} that the service accepts. */
+export declare enum KnownIndexerExecutionEnvironment {
+    /** Indicates that the search service can determine where the indexer should execute. This is the default environment when nothing is specified and is the recommended value. */
+    Standard = "standard",
+    /** Indicates that the indexer should run with the environment provisioned specifically for the search service. This should only be specified as the execution environment if the indexer needs to access resources securely over shared private link resources. */
+    Private = "private"
+}
+
 /** Known values of {@link IndexerExecutionStatusDetail} that the service accepts. */
 export declare enum KnownIndexerExecutionStatusDetail {
     /** Indicates that the reset that occurred was for a call to ResetDocs. */
@@ -2555,6 +2462,14 @@ export declare enum KnownIndexingMode {
     IndexingResetDocs = "indexingResetDocs"
 }

+/** Known values of {@link IndexProjectionMode} that the service accepts. */
+export declare enum KnownIndexProjectionMode {
+    /** The source document will be skipped from writing into the indexer's target index. */
+    SkipIndexingParentDocuments = "skipIndexingParentDocuments",
+    /** The source document will be written into the indexer's target index. This is the default pattern. */
+    IncludeIndexingParentDocuments = "includeIndexingParentDocuments"
+}
+
 /** Known values of {@link KeyPhraseExtractionSkillLanguage} that the service accepts. */
 export declare enum KnownKeyPhraseExtractionSkillLanguage {
     /** Danish */
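`KnownIndexProjectionMode` pairs with the `IndexProjectionMode = string` alias from the earlier hunk: the alias stays extensible while the enum documents the values the service currently understands. Both spellings below type-check:

```ts
import { KnownIndexProjectionMode, type IndexProjectionMode } from "@azure/search-documents";

// Via the Known* enum (refactoring-safe, self-documenting).
const viaEnum: IndexProjectionMode = KnownIndexProjectionMode.SkipIndexingParentDocuments;

// Via a raw string literal; still allowed because the alias is `string`.
const viaLiteral: IndexProjectionMode = "includeIndexingParentDocuments";
```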
@@ -2767,33 +2682,65 @@ export declare enum KnownLexicalAnalyzerName {
     ViMicrosoft = "vi.microsoft",
     /** Standard Lucene analyzer. */
     StandardLucene = "standard.lucene",
-    /** Standard ASCII Folding Lucene analyzer. See https
+    /** Standard ASCII Folding Lucene analyzer. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
     StandardAsciiFoldingLucene = "standardasciifolding.lucene",
-    /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http
+    /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
     Keyword = "keyword",
-    /** Flexibly separates text into terms via a regular expression pattern. See http
+    /** Flexibly separates text into terms via a regular expression pattern. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/PatternAnalyzer.html */
     Pattern = "pattern",
-    /** Divides text at non-letters and converts them to lower case. See http
+    /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/SimpleAnalyzer.html */
     Simple = "simple",
-    /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http
+    /** Divides text at non-letters; Applies the lowercase and stopword token filters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopAnalyzer.html */
     Stop = "stop",
-    /** An analyzer that uses the whitespace tokenizer. See http
+    /** An analyzer that uses the whitespace tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceAnalyzer.html */
     Whitespace = "whitespace"
 }

 /** Known values of {@link LexicalNormalizerName} that the service accepts. */
-
-    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http
+declare enum KnownLexicalNormalizerName {
+    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
     AsciiFolding = "asciifolding",
-    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http
+    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
     Elision = "elision",
-    /** Normalizes token text to lowercase. See https
+    /** Normalizes token text to lowercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
     Lowercase = "lowercase",
-    /** Standard normalizer, which consists of lowercase and asciifolding. See http
+    /** Standard normalizer, which consists of lowercase and asciifolding. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
     Standard = "standard",
-    /** Normalizes token text to uppercase. See https
+    /** Normalizes token text to uppercase. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
     Uppercase = "uppercase"
 }
+export { KnownLexicalNormalizerName }
+export { KnownLexicalNormalizerName as KnownNormalizerNames }
+
+/** Known values of {@link LexicalTokenizerName} that the service accepts. */
+export declare enum KnownLexicalTokenizerName {
+    /** Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html */
+    Classic = "classic",
+    /** Tokenizes the input from an edge into n-grams of the given size(s). See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html */
+    EdgeNGram = "edgeNGram",
+    /** Emits the entire input as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html */
+    Keyword = "keyword_v2",
+    /** Divides text at non-letters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html */
+    Letter = "letter",
+    /** Divides text at non-letters and converts them to lower case. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html */
+    Lowercase = "lowercase",
+    /** Divides text using language-specific rules. */
+    MicrosoftLanguageTokenizer = "microsoft_language_tokenizer",
+    /** Divides text using language-specific rules and reduces words to their base forms. */
+    MicrosoftLanguageStemmingTokenizer = "microsoft_language_stemming_tokenizer",
+    /** Tokenizes the input into n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html */
+    NGram = "nGram",
+    /** Tokenizer for path-like hierarchies. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html */
+    PathHierarchy = "path_hierarchy_v2",
+    /** Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html */
+    Pattern = "pattern",
+    /** Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html */
+    Standard = "standard_v2",
+    /** Tokenizes urls and emails as one token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html */
+    UaxUrlEmail = "uax_url_email",
+    /** Divides text at whitespace. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html */
+    Whitespace = "whitespace"
+}

 /** Known values of {@link LineEnding} that the service accepts. */
 export declare enum KnownLineEnding {
@@ -3159,20 +3106,12 @@ export declare enum KnownPIIDetectionSkillMaskingMode {
     Replace = "replace"
 }

-/** Known values of {@link
-export declare enum
-    /**
-
-    /**
-
-}
-
-/** Known values of {@link QueryCaptionType} that the service accepts. */
-export declare enum KnownQueryCaptionType {
-    /** Do not return captions for the query. */
-    None = "none",
-    /** Extracts captions from the matching documents that contain passages relevant to the search query. */
-    Extractive = "extractive"
+/** Known values of {@link QueryDebugMode} that the service accepts. */
+export declare enum KnownQueryDebugMode {
+    /** No query debugging information will be returned. */
+    Disabled = "disabled",
+    /** Allows the user to further explore their reranked results. */
+    Semantic = "semantic"
 }

 /** Known values of {@link QueryLanguage} that the service accepts. */
@@ -3279,7 +3218,7 @@ export declare enum KnownQueryLanguage {
     LvLv = "lv-lv",
     /** Query language value for Estonian (Estonia). */
     EtEe = "et-ee",
-    /** Query language value for Catalan
+    /** Query language value for Catalan. */
     CaEs = "ca-es",
     /** Query language value for Finnish (Finland). */
     FiFi = "fi-fi",
@@ -3297,9 +3236,9 @@ export declare enum KnownQueryLanguage {
     HyAm = "hy-am",
     /** Query language value for Bengali (India). */
     BnIn = "bn-in",
-    /** Query language value for Basque
+    /** Query language value for Basque. */
     EuEs = "eu-es",
-    /** Query language value for Galician
+    /** Query language value for Galician. */
     GlEs = "gl-es",
     /** Query language value for Gujarati (India). */
     GuIn = "gu-in",
@@ -3385,6 +3324,42 @@ export declare enum KnownSearchIndexerDataSourceType {
     AdlsGen2 = "adlsgen2"
 }

+/** Known values of {@link SemanticErrorMode} that the service accepts. */
+export declare enum KnownSemanticErrorMode {
+    /** If the semantic processing fails, partial results still return. The definition of partial results depends on what semantic step failed and what was the reason for failure. */
+    Partial = "partial",
+    /** If there is an exception during the semantic processing step, the query will fail and return the appropriate HTTP code depending on the error. */
+    Fail = "fail"
+}
+
+/** Known values of {@link SemanticErrorReason} that the service accepts. */
+export declare enum KnownSemanticErrorReason {
+    /** If 'semanticMaxWaitInMilliseconds' was set and the semantic processing duration exceeded that value. Only the base results were returned. */
+    MaxWaitExceeded = "maxWaitExceeded",
+    /** The request was throttled. Only the base results were returned. */
+    CapacityOverloaded = "capacityOverloaded",
+    /** At least one step of the semantic process failed. */
+    Transient = "transient"
+}
+
+/** Known values of {@link SemanticFieldState} that the service accepts. */
+export declare enum KnownSemanticFieldState {
+    /** The field was fully used for semantic enrichment. */
+    Used = "used",
+    /** The field was not used for semantic enrichment. */
+    Unused = "unused",
+    /** The field was partially used for semantic enrichment. */
+    Partial = "partial"
+}
+
+/** Known values of {@link SemanticSearchResultsType} that the service accepts. */
+export declare enum KnownSemanticSearchResultsType {
+    /** Results without any semantic enrichment or reranking. */
+    BaseResults = "baseResults",
+    /** Results have been reranked with the reranker model and will include semantic captions. They will not include any answers, answers highlights or caption highlights. */
+    RerankedResults = "rerankedResults"
+}
+
 /** Known values of {@link SentimentSkillLanguage} that the service accepts. */
 export declare enum KnownSentimentSkillLanguage {
     /** Danish */
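The four Semantic* enums describe how a semantic query can degrade and what kind of results came back. A hypothetical helper mapping a returned reason string onto a log message, using only the values declared above:

```ts
import { KnownSemanticErrorReason } from "@azure/search-documents";

function describeSemanticDegradation(reason: string): string {
  switch (reason) {
    case KnownSemanticErrorReason.MaxWaitExceeded:
      return "semanticMaxWaitInMilliseconds elapsed; only base results were returned";
    case KnownSemanticErrorReason.CapacityOverloaded:
      return "the request was throttled; only base results were returned";
    case KnownSemanticErrorReason.Transient:
      return "at least one step of the semantic process failed";
    default:
      return `unrecognized reason: ${reason}`;
  }
}
```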
@@ -3429,6 +3404,12 @@ export declare enum KnownSpeller {

 /** Known values of {@link SplitSkillLanguage} that the service accepts. */
 export declare enum KnownSplitSkillLanguage {
+    /** Amharic */
+    Am = "am",
+    /** Bosnian */
+    Bs = "bs",
+    /** Czech */
+    Cs = "cs",
     /** Danish */
     Da = "da",
     /** German */
@@ -3437,16 +3418,58 @@ export declare enum KnownSplitSkillLanguage {
     En = "en",
     /** Spanish */
     Es = "es",
+    /** Estonian */
+    Et = "et",
     /** Finnish */
     Fi = "fi",
     /** French */
     Fr = "fr",
+    /** Hebrew */
+    He = "he",
+    /** Hindi */
+    Hi = "hi",
+    /** Croatian */
+    Hr = "hr",
+    /** Hungarian */
+    Hu = "hu",
+    /** Indonesian */
+    Id = "id",
+    /** Icelandic */
+    Is = "is",
     /** Italian */
     It = "it",
+    /** Japanese */
+    Ja = "ja",
     /** Korean */
     Ko = "ko",
-    /**
-
+    /** Latvian */
+    Lv = "lv",
+    /** Norwegian */
+    Nb = "nb",
+    /** Dutch */
+    Nl = "nl",
+    /** Polish */
+    Pl = "pl",
+    /** Portuguese (Portugal) */
+    Pt = "pt",
+    /** Portuguese (Brazil) */
+    PtBr = "pt-br",
+    /** Russian */
+    Ru = "ru",
+    /** Slovak */
+    Sk = "sk",
+    /** Slovenian */
+    Sl = "sl",
+    /** Serbian */
+    Sr = "sr",
+    /** Swedish */
+    Sv = "sv",
+    /** Turkish */
+    Tr = "tr",
+    /** Urdu */
+    Ur = "ur",
+    /** Chinese (Simplified) */
+    Zh = "zh"
 }

 /** Known values of {@link TextSplitMode} that the service accepts. */
@@ -3607,71 +3630,71 @@ export declare enum KnownTextTranslationSkillLanguage {

 /** Known values of {@link TokenFilterName} that the service accepts. */
 export declare enum KnownTokenFilterName {
-    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http
+    /** A token filter that applies the Arabic normalizer to normalize the orthography. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ar\/ArabicNormalizationFilter.html */
     ArabicNormalization = "arabic_normalization",
-    /** Strips all characters after an apostrophe (including the apostrophe itself). See http
+    /** Strips all characters after an apostrophe (including the apostrophe itself). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/tr\/ApostropheFilter.html */
     Apostrophe = "apostrophe",
-    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http
+    /** Converts alphabetic, numeric, and symbolic Unicode characters which are not in the first 127 ASCII characters (the "Basic Latin" Unicode block) into their ASCII equivalents, if such equivalents exist. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ASCIIFoldingFilter.html */
     AsciiFolding = "asciifolding",
-    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http
+    /** Forms bigrams of CJK terms that are generated from the standard tokenizer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKBigramFilter.html */
     CjkBigram = "cjk_bigram",
-    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http
+    /** Normalizes CJK width differences. Folds fullwidth ASCII variants into the equivalent basic Latin, and half-width Katakana variants into the equivalent Kana. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/cjk\/CJKWidthFilter.html */
     CjkWidth = "cjk_width",
-    /** Removes English possessives, and dots from acronyms. See http
+    /** Removes English possessives, and dots from acronyms. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicFilter.html */
     Classic = "classic",
-    /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http
+    /** Construct bigrams for frequently occurring terms while indexing. Single terms are still indexed too, with bigrams overlaid. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/commongrams\/CommonGramsFilter.html */
     CommonGram = "common_grams",
-    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http
+    /** Generates n-grams of the given size(s) starting from the front or the back of an input token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenFilter.html */
     EdgeNGram = "edgeNGram_v2",
-    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http
+    /** Removes elisions. For example, "l'avion" (the plane) will be converted to "avion" (plane). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/util\/ElisionFilter.html */
     Elision = "elision",
-    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http
+    /** Normalizes German characters according to the heuristics of the German2 snowball algorithm. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/de\/GermanNormalizationFilter.html */
     GermanNormalization = "german_normalization",
-    /** Normalizes text in Hindi to remove some differences in spelling variations. See http
+    /** Normalizes text in Hindi to remove some differences in spelling variations. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/hi\/HindiNormalizationFilter.html */
     HindiNormalization = "hindi_normalization",
-    /** Normalizes the Unicode representation of text in Indian languages. See http
+    /** Normalizes the Unicode representation of text in Indian languages. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/in\/IndicNormalizationFilter.html */
     IndicNormalization = "indic_normalization",
-    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http
+    /** Emits each incoming token twice, once as keyword and once as non-keyword. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/KeywordRepeatFilter.html */
     KeywordRepeat = "keyword_repeat",
-    /** A high-performance kstem filter for English. See http
+    /** A high-performance kstem filter for English. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/en\/KStemFilter.html */
     KStem = "kstem",
-    /** Removes words that are too long or too short. See http
+    /** Removes words that are too long or too short. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LengthFilter.html */
     Length = "length",
-    /** Limits the number of tokens while indexing. See http
+    /** Limits the number of tokens while indexing. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/LimitTokenCountFilter.html */
     Limit = "limit",
-    /** Normalizes token text to lower case. See https
+    /** Normalizes token text to lower case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseFilter.html */
     Lowercase = "lowercase",
-    /** Generates n-grams of the given size(s). See http
+    /** Generates n-grams of the given size(s). See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenFilter.html */
     NGram = "nGram_v2",
-    /** Applies normalization for Persian. See http
+    /** Applies normalization for Persian. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/fa\/PersianNormalizationFilter.html */
     PersianNormalization = "persian_normalization",
-    /** Create tokens for phonetic matches. See https
+    /** Create tokens for phonetic matches. See https:\//lucene.apache.org\/core\/4_10_3\/analyzers-phonetic\/org\/apache\/lucene\/analysis\/phonetic\/package-tree.html */
     Phonetic = "phonetic",
-    /** Uses the Porter stemming algorithm to transform the token stream. See http
+    /** Uses the Porter stemming algorithm to transform the token stream. See http:\//tartarus.org\/~martin\/PorterStemmer */
     PorterStem = "porter_stem",
-    /** Reverses the token string. See http
+    /** Reverses the token string. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/reverse\/ReverseStringFilter.html */
     Reverse = "reverse",
-    /** Normalizes use of the interchangeable Scandinavian characters. See http
+    /** Normalizes use of the interchangeable Scandinavian characters. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianNormalizationFilter.html */
     ScandinavianNormalization = "scandinavian_normalization",
-    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http
+    /** Folds Scandinavian characters åÅäæÄÆ->a and öÖøØ->o. It also discriminates against use of double vowels aa, ae, ao, oe and oo, leaving just the first one. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/ScandinavianFoldingFilter.html */
     ScandinavianFoldingNormalization = "scandinavian_folding",
-    /** Creates combinations of tokens as a single token. See http
+    /** Creates combinations of tokens as a single token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/shingle\/ShingleFilter.html */
     Shingle = "shingle",
-    /** A filter that stems words using a Snowball-generated stemmer. See http
+    /** A filter that stems words using a Snowball-generated stemmer. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/snowball\/SnowballFilter.html */
     Snowball = "snowball",
-    /** Normalizes the Unicode representation of Sorani text. See http
+    /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
     SoraniNormalization = "sorani_normalization",
-    /** Language specific stemming filter. See https
+    /** Language specific stemming filter. See https:\//docs.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
     Stemmer = "stemmer",
-    /** Removes stop words from a token stream. See http
+    /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
     Stopwords = "stopwords",
-    /** Trims leading and trailing whitespace from tokens. See http
+    /** Trims leading and trailing whitespace from tokens. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TrimFilter.html */
     Trim = "trim",
-    /** Truncates the terms to a specific length. See http
+    /** Truncates the terms to a specific length. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/TruncateTokenFilter.html */
     Truncate = "truncate",
-    /** Filters out tokens with same text as the previous token. See http
+    /** Filters out tokens with same text as the previous token. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/miscellaneous\/RemoveDuplicatesTokenFilter.html */
     Unique = "unique",
-    /** Normalizes token text to upper case. See https
+    /** Normalizes token text to upper case. See https:\//lucene.apache.org\/core\/6_6_1\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/UpperCaseFilter.html */
     Uppercase = "uppercase",
     /** Splits words into subwords and performs optional transformations on subword groups. */
     WordDelimiter = "word_delimiter"
@@ -3931,6 +3954,34 @@ export declare enum KnownTokenizerNames {
     Whitespace = "whitespace"
 }

+/** Known values of {@link VectorQueryKind} that the service accepts. */
+export declare enum KnownVectorQueryKind {
+    /** Vector query where a raw vector value is provided. */
+    Vector = "vector",
+    /** Vector query where a text value that needs to be vectorized is provided. */
+    $DO_NOT_NORMALIZE$_text = "text"
+}
+
+/** Known values of {@link VectorSearchCompressionKind} that the service accepts. */
+export declare enum KnownVectorSearchCompressionKind {
+    /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
+    ScalarQuantization = "scalarQuantization"
+}
+
+/** Known values of {@link VectorSearchCompressionTargetDataType} that the service accepts. */
+export declare enum KnownVectorSearchCompressionTargetDataType {
+    /** Int8 */
+    Int8 = "int8"
+}
+
+/** Known values of {@link VectorSearchVectorizerKind} that the service accepts. */
+export declare enum KnownVectorSearchVectorizerKind {
+    /** Generate embeddings using an Azure OpenAI resource at query time. */
+    AzureOpenAI = "azureOpenAI",
+    /** Generate embeddings using a custom web endpoint at query time. */
+    CustomWebApi = "customWebApi"
+}
+
 /** Known values of {@link VisualFeature} that the service accepts. */
 export declare enum KnownVisualFeature {
     /** Visual features recognized as adult persons. */
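The `$DO_NOT_NORMALIZE$_text` member name is reproduced here exactly as published; it is a legal TypeScript identifier and still maps to the plain string "text" at runtime. A sketch of branching on the two query kinds:

```ts
import { KnownVectorQueryKind } from "@azure/search-documents";

// Hypothetical guard: raw-vector queries carry an embedding, while "text"
// queries hand the service a string to vectorize at query time.
function isRawVectorQuery(kind: string): boolean {
  return kind === KnownVectorQueryKind.Vector; // compares against "vector"
}

console.log(isRawVectorQuery("vector")); // true
console.log(isRawVectorQuery("text")); // false
```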
@@ -3950,24 +4001,24 @@ export declare enum KnownVisualFeature {
 }

 /** A skill that detects the language of input text and reports a single language code for every document submitted on the request. The language code is paired with a score indicating the confidence of the analysis. */
-export declare type LanguageDetectionSkill = BaseSearchIndexerSkill & {
+export declare interface LanguageDetectionSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Text.LanguageDetectionSkill";
     /** A country code to use as a hint to the language detection model if it cannot disambiguate the language. */
     defaultCountryHint?: string;
     /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
     modelVersion?: string;
-};
+}

 /** Removes words that are too long or too short. This token filter is implemented using Apache Lucene. */
-export declare type LengthTokenFilter = BaseTokenFilter & {
+export declare interface LengthTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.LengthTokenFilter";
     /** The minimum length in characters. Default is 0. Maximum is 300. Must be less than the value of max. */
     minLength?: number;
     /** The maximum length in characters. Default and maximum is 300. */
     maxLength?: number;
-};
+}

 /**
  * Contains the possible cases for Analyzer.
@@ -4098,15 +4149,36 @@ export declare type LexicalNormalizerName = string;
  */
 export declare type LexicalTokenizer = ClassicTokenizer | EdgeNGramTokenizer | KeywordTokenizer | MicrosoftLanguageTokenizer | MicrosoftLanguageStemmingTokenizer | NGramTokenizer | PathHierarchyTokenizer | PatternTokenizer | LuceneStandardTokenizer | UaxUrlEmailTokenizer;

+/**
+ * Defines values for LexicalTokenizerName. \
+ * {@link KnownLexicalTokenizerName} can be used interchangeably with LexicalTokenizerName,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **classic**: Grammar-based tokenizer that is suitable for processing most European-language documents. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/ClassicTokenizer.html \
+ * **edgeNGram**: Tokenizes the input from an edge into n-grams of the given size(s). See https:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/EdgeNGramTokenizer.html \
+ * **keyword_v2**: Emits the entire input as a single token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordTokenizer.html \
+ * **letter**: Divides text at non-letters. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LetterTokenizer.html \
+ * **lowercase**: Divides text at non-letters and converts them to lower case. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/LowerCaseTokenizer.html \
+ * **microsoft_language_tokenizer**: Divides text using language-specific rules. \
+ * **microsoft_language_stemming_tokenizer**: Divides text using language-specific rules and reduces words to their base forms. \
+ * **nGram**: Tokenizes the input into n-grams of the given size(s). See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ngram\/NGramTokenizer.html \
+ * **path_hierarchy_v2**: Tokenizer for path-like hierarchies. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/path\/PathHierarchyTokenizer.html \
+ * **pattern**: Tokenizer that uses regex pattern matching to construct distinct tokens. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/pattern\/PatternTokenizer.html \
+ * **standard_v2**: Standard Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/StandardTokenizer.html \
+ * **uax_url_email**: Tokenizes urls and emails as one token. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/standard\/UAX29URLEmailTokenizer.html \
+ * **whitespace**: Divides text at whitespace. See http:\/\/lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/WhitespaceTokenizer.html
+ */
+export declare type LexicalTokenizerName = string;
+
 /** Limits the number of tokens while indexing. This token filter is implemented using Apache Lucene. */
-export declare type LimitTokenFilter = BaseTokenFilter & {
+export declare interface LimitTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.LimitTokenFilter";
     /** The maximum number of tokens to produce. Default is 1. */
     maxTokenCount?: number;
     /** A value indicating whether all tokens from the input must be consumed even if maxTokenCount is reached. Default is false. */
     consumeAllTokens?: boolean;
-};
+}

 /**
  * Defines values for LineEnding. \
@@ -4162,14 +4234,14 @@ export declare type ListSkillsetsOptions = OperationOptions;
 export declare type ListSynonymMapsOptions = OperationOptions;

 /** Standard Apache Lucene analyzer; Composed of the standard tokenizer, lowercase filter and stop filter. */
-export declare type LuceneStandardAnalyzer = BaseLexicalAnalyzer & {
+export declare interface LuceneStandardAnalyzer extends BaseLexicalAnalyzer {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.StandardAnalyzer";
     /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
     maxTokenLength?: number;
     /** A list of stopwords. */
     stopwords?: string[];
-};
+}

 /**
  * Breaks text following the Unicode Text Segmentation rules. This tokenizer is implemented using
@@ -4194,12 +4266,12 @@ export declare interface LuceneStandardTokenizer {
 }

 /** Defines a function that boosts scores based on the magnitude of a numeric field. */
-export declare type MagnitudeScoringFunction = BaseScoringFunction & {
+export declare interface MagnitudeScoringFunction extends BaseScoringFunction {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     type: "magnitude";
     /** Parameter values for the magnitude scoring function. */
     parameters: MagnitudeScoringParameters;
-};
+}

 /** Provides parameter values to a magnitude scoring function. */
 export declare interface MagnitudeScoringParameters {
@@ -4212,12 +4284,12 @@ export declare interface MagnitudeScoringParameters {
 }

 /** A character filter that applies mappings defined with the mappings option. Matching is greedy (longest pattern matching at a given point wins). Replacement is allowed to be the empty string. This character filter is implemented using Apache Lucene. */
-export declare
+export declare interface MappingCharFilter extends BaseCharFilter {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.MappingCharFilter";
 /** A list of mappings of the following format: "a=>b" (all occurrences of the character "a" will be replaced with character "b"). */
 mappings: string[];
-}
+}

 /**
 * Options for the merge documents operation.
@@ -4230,17 +4302,17 @@ export declare type MergeDocumentsOptions = IndexDocumentsOptions;
 export declare type MergeOrUploadDocumentsOptions = IndexDocumentsOptions;

 /** A skill for merging two or more strings into a single unified string, with an optional user-defined delimiter separating each component part. */
-export declare
+export declare interface MergeSkill extends BaseSearchIndexerSkill {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Skills.Text.MergeSkill";
 /** The tag indicates the start of the merged text. By default, the tag is an empty space. */
 insertPreTag?: string;
 /** The tag indicates the end of the merged text. By default, the tag is an empty space. */
 insertPostTag?: string;
-}
+}

 /** Divides text using language-specific rules and reduces words to their base forms. */
-export declare
+export declare interface MicrosoftLanguageStemmingTokenizer extends BaseLexicalTokenizer {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageStemmingTokenizer";
 /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
@@ -4249,10 +4321,10 @@ export declare type MicrosoftLanguageStemmingTokenizer = BaseLexicalTokenizer &
 isSearchTokenizer?: boolean;
 /** The language to use. The default is English. */
 language?: MicrosoftStemmingTokenizerLanguage;
-}
+}

 /** Divides text using language-specific rules. */
-export declare
+export declare interface MicrosoftLanguageTokenizer extends BaseLexicalTokenizer {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.MicrosoftLanguageTokenizer";
 /** The maximum token length. Tokens longer than the maximum length are split. Maximum token length that can be used is 300 characters. Tokens longer than 300 characters are first split into tokens of length 300 and then each of those tokens is split based on the max token length set. Default is 255. */
@@ -4261,7 +4333,7 @@ export declare type MicrosoftLanguageTokenizer = BaseLexicalTokenizer & {
 isSearchTokenizer?: boolean;
 /** The language to use. The default is English. */
 language?: MicrosoftTokenizerLanguage;
-}
+}

 /** Defines values for MicrosoftStemmingTokenizerLanguage. */
 export declare type MicrosoftStemmingTokenizerLanguage = "arabic" | "bangla" | "bulgarian" | "catalan" | "croatian" | "czech" | "danish" | "dutch" | "english" | "estonian" | "finnish" | "french" | "german" | "greek" | "gujarati" | "hebrew" | "hindi" | "hungarian" | "icelandic" | "indonesian" | "italian" | "kannada" | "latvian" | "lithuanian" | "malay" | "malayalam" | "marathi" | "norwegianBokmaal" | "polish" | "portuguese" | "portugueseBrazilian" | "punjabi" | "romanian" | "russian" | "serbianCyrillic" | "serbianLatin" | "slovak" | "slovenian" | "spanish" | "swedish" | "tamil" | "telugu" | "turkish" | "ukrainian" | "urdu";
@@ -4275,10 +4347,10 @@ export declare type MicrosoftTokenizerLanguage = "bangla" | "bulgarian" | "catal
 export declare type NarrowedModel<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? TModel : (<T>() => T extends TModel ? true : false) extends <T>() => T extends unknown ? true : false ? TModel : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? never : (<T>() => T extends TFields ? true : false) extends <T>() => T extends SelectFields<TModel> ? true : false ? TModel : SearchPick<TModel, TFields>;

 /** Defines a data deletion detection policy utilizing Azure Blob Storage's native soft delete feature for deletion detection. */
-export declare
+export declare interface NativeBlobSoftDeleteDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.NativeBlobSoftDeleteDeletionDetectionPolicy";
-}
+}

 /**
 * Generates n-grams of the given size(s). This token filter is implemented using Apache Lucene.
@@ -4306,7 +4378,7 @@ export declare interface NGramTokenFilter {
 }

 /** Tokenizes the input into n-grams of the given size(s). This tokenizer is implemented using Apache Lucene. */
-export declare
+export declare interface NGramTokenizer extends BaseLexicalTokenizer {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.NGramTokenizer";
 /** The minimum n-gram length. Default is 1. Maximum is 300. Must be less than the value of maxGram. */
@@ -4315,197 +4387,19 @@ export declare type NGramTokenizer = BaseLexicalTokenizer & {
 maxGram?: number;
 /** Character classes to keep in the tokens. */
 tokenChars?: TokenCharacterKind[];
-}
+}

 /** A skill that extracts text from image files. */
-export declare
+export declare interface OcrSkill extends BaseSearchIndexerSkill {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Skills.Vision.OcrSkill";
 /** A value indicating which language code to use. Default is en. */
 defaultLanguageCode?: OcrSkillLanguage;
 /** A value indicating to turn orientation detection on or not. Default is false. */
 shouldDetectOrientation?: boolean;
-
-lineEnding?: LineEnding;
-};
+}

-
-* Defines values for OcrSkillLanguage. \
-* {@link KnownOcrSkillLanguage} can be used interchangeably with OcrSkillLanguage,
-* this enum contains the known values that the service supports.
-* ### Known values supported by the service
-* **af**: Afrikaans \
-* **sq**: Albanian \
-* **anp**: Angika (Devanagiri) \
-* **ar**: Arabic \
-* **ast**: Asturian \
-* **awa**: Awadhi-Hindi (Devanagiri) \
-* **az**: Azerbaijani (Latin) \
-* **bfy**: Bagheli \
-* **eu**: Basque \
-* **be**: Belarusian (Cyrillic and Latin) \
-* **be-cyrl**: Belarusian (Cyrillic) \
-* **be-latn**: Belarusian (Latin) \
-* **bho**: Bhojpuri-Hindi (Devanagiri) \
-* **bi**: Bislama \
-* **brx**: Bodo (Devanagiri) \
-* **bs**: Bosnian Latin \
-* **bra**: Brajbha \
-* **br**: Breton \
-* **bg**: Bulgarian \
-* **bns**: Bundeli \
-* **bua**: Buryat (Cyrillic) \
-* **ca**: Catalan \
-* **ceb**: Cebuano \
-* **rab**: Chamling \
-* **ch**: Chamorro \
-* **hne**: Chhattisgarhi (Devanagiri) \
-* **zh-Hans**: Chinese Simplified \
-* **zh-Hant**: Chinese Traditional \
-* **kw**: Cornish \
-* **co**: Corsican \
-* **crh**: Crimean Tatar (Latin) \
-* **hr**: Croatian \
-* **cs**: Czech \
-* **da**: Danish \
-* **prs**: Dari \
-* **dhi**: Dhimal (Devanagiri) \
-* **doi**: Dogri (Devanagiri) \
-* **nl**: Dutch \
-* **en**: English \
-* **myv**: Erzya (Cyrillic) \
-* **et**: Estonian \
-* **fo**: Faroese \
-* **fj**: Fijian \
-* **fil**: Filipino \
-* **fi**: Finnish \
-* **fr**: French \
-* **fur**: Frulian \
-* **gag**: Gagauz (Latin) \
-* **gl**: Galician \
-* **de**: German \
-* **gil**: Gilbertese \
-* **gon**: Gondi (Devanagiri) \
-* **el**: Greek \
-* **kl**: Greenlandic \
-* **gvr**: Gurung (Devanagiri) \
-* **ht**: Haitian Creole \
-* **hlb**: Halbi (Devanagiri) \
-* **hni**: Hani \
-* **bgc**: Haryanvi \
-* **haw**: Hawaiian \
-* **hi**: Hindi \
-* **mww**: Hmong Daw (Latin) \
-* **hoc**: Ho (Devanagiri) \
-* **hu**: Hungarian \
-* **is**: Icelandic \
-* **smn**: Inari Sami \
-* **id**: Indonesian \
-* **ia**: Interlingua \
-* **iu**: Inuktitut (Latin) \
-* **ga**: Irish \
-* **it**: Italian \
-* **ja**: Japanese \
-* **Jns**: Jaunsari (Devanagiri) \
-* **jv**: Javanese \
-* **kea**: Kabuverdianu \
-* **kac**: Kachin (Latin) \
-* **xnr**: Kangri (Devanagiri) \
-* **krc**: Karachay-Balkar \
-* **kaa-cyrl**: Kara-Kalpak (Cyrillic) \
-* **kaa**: Kara-Kalpak (Latin) \
-* **csb**: Kashubian \
-* **kk-cyrl**: Kazakh (Cyrillic) \
-* **kk-latn**: Kazakh (Latin) \
-* **klr**: Khaling \
-* **kha**: Khasi \
-* **quc**: K'iche' \
-* **ko**: Korean \
-* **kfq**: Korku \
-* **kpy**: Koryak \
-* **kos**: Kosraean \
-* **kum**: Kumyk (Cyrillic) \
-* **ku-arab**: Kurdish (Arabic) \
-* **ku-latn**: Kurdish (Latin) \
-* **kru**: Kurukh (Devanagiri) \
-* **ky**: Kyrgyz (Cyrillic) \
-* **lkt**: Lakota \
-* **la**: Latin \
-* **lt**: Lithuanian \
-* **dsb**: Lower Sorbian \
-* **smj**: Lule Sami \
-* **lb**: Luxembourgish \
-* **bfz**: Mahasu Pahari (Devanagiri) \
-* **ms**: Malay (Latin) \
-* **mt**: Maltese \
-* **kmj**: Malto (Devanagiri) \
-* **gv**: Manx \
-* **mi**: Maori \
-* **mr**: Marathi \
-* **mn**: Mongolian (Cyrillic) \
-* **cnr-cyrl**: Montenegrin (Cyrillic) \
-* **cnr-latn**: Montenegrin (Latin) \
-* **nap**: Neapolitan \
-* **ne**: Nepali \
-* **niu**: Niuean \
-* **nog**: Nogay \
-* **sme**: Northern Sami (Latin) \
-* **nb**: Norwegian \
-* **no**: Norwegian \
-* **oc**: Occitan \
-* **os**: Ossetic \
-* **ps**: Pashto \
-* **fa**: Persian \
-* **pl**: Polish \
-* **pt**: Portuguese \
-* **pa**: Punjabi (Arabic) \
-* **ksh**: Ripuarian \
-* **ro**: Romanian \
-* **rm**: Romansh \
-* **ru**: Russian \
-* **sck**: Sadri (Devanagiri) \
-* **sm**: Samoan (Latin) \
-* **sa**: Sanskrit (Devanagiri) \
-* **sat**: Santali (Devanagiri) \
-* **sco**: Scots \
-* **gd**: Scottish Gaelic \
-* **sr**: Serbian (Latin) \
-* **sr-Cyrl**: Serbian (Cyrillic) \
-* **sr-Latn**: Serbian (Latin) \
-* **xsr**: Sherpa (Devanagiri) \
-* **srx**: Sirmauri (Devanagiri) \
-* **sms**: Skolt Sami \
-* **sk**: Slovak \
-* **sl**: Slovenian \
-* **so**: Somali (Arabic) \
-* **sma**: Southern Sami \
-* **es**: Spanish \
-* **sw**: Swahili (Latin) \
-* **sv**: Swedish \
-* **tg**: Tajik (Cyrillic) \
-* **tt**: Tatar (Latin) \
-* **tet**: Tetum \
-* **thf**: Thangmi \
-* **to**: Tongan \
-* **tr**: Turkish \
-* **tk**: Turkmen (Latin) \
-* **tyv**: Tuvan \
-* **hsb**: Upper Sorbian \
-* **ur**: Urdu \
-* **ug**: Uyghur (Arabic) \
-* **uz-arab**: Uzbek (Arabic) \
-* **uz-cyrl**: Uzbek (Cyrillic) \
-* **uz**: Uzbek (Latin) \
-* **vo**: Volapük \
-* **wae**: Walser \
-* **cy**: Welsh \
-* **fy**: Western Frisian \
-* **yua**: Yucatec Maya \
-* **za**: Zhuang \
-* **zu**: Zulu \
-* **unk**: Unknown (All)
-*/
-export declare type OcrSkillLanguage = string;
+export declare type OcrSkillLanguage = "af" | "sq" | "anp" | "ar" | "ast" | "awa" | "az" | "bfy" | "eu" | "be" | "be-cyrl" | "be-latn" | "bho" | "bi" | "brx" | "bs" | "bra" | "br" | "bg" | "bns" | "bua" | "ca" | "ceb" | "rab" | "ch" | "hne" | "zh-Hans" | "zh-Hant" | "kw" | "co" | "crh" | "hr" | "cs" | "da" | "prs" | "dhi" | "doi" | "nl" | "en" | "myv" | "et" | "fo" | "fj" | "fil" | "fi" | "fr" | "fur" | "gag" | "gl" | "de" | "gil" | "gon" | "el" | "kl" | "gvr" | "ht" | "hlb" | "hni" | "bgc" | "haw" | "hi" | "mww" | "hoc" | "hu" | "is" | "smn" | "id" | "ia" | "iu" | "ga" | "it" | "ja" | "Jns" | "jv" | "kea" | "kac" | "xnr" | "krc" | "kaa-cyrl" | "kaa" | "csb" | "kk-cyrl" | "kk-latn" | "klr" | "kha" | "quc" | "ko" | "kfq" | "kpy" | "kos" | "kum" | "ku-arab" | "ku-latn" | "kru" | "ky" | "lkt" | "la" | "lt" | "dsb" | "smj" | "lb" | "bfz" | "ms" | "mt" | "kmj" | "gv" | "mi" | "mr" | "mn" | "cnr-cyrl" | "cnr-latn" | "nap" | "ne" | "niu" | "nog" | "sme" | "nb" | "no" | "oc" | "os" | "ps" | "fa" | "pl" | "pt" | "pa" | "ksh" | "ro" | "rm" | "ru" | "sck" | "sm" | "sa" | "sat" | "sco" | "gd" | "sr" | "sr-Cyrl" | "sr-Latn" | "xsr" | "srx" | "sms" | "sk" | "sl" | "so" | "sma" | "es" | "sw" | "sv" | "tg" | "tt" | "tet" | "thf" | "to" | "tr" | "tk" | "tyv" | "hsb" | "ur" | "ug" | "uz-arab" | "uz-cyrl" | "uz" | "vo" | "wae" | "cy" | "fy" | "yua" | "za" | "zu" | "unk";

 /**
 * Escapes an odata filter expression to avoid errors with quoting string literals.
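Note the direction of this change: `OcrSkillLanguage` tightens from `string` back to a literal union, so an unsupported language code is now a compile-time error rather than a runtime service error. A quick sketch:

```ts
import type { OcrSkillLanguage } from "@azure/search-documents";

const simplifiedChinese: OcrSkillLanguage = "zh-Hans"; // compiles: member of the union
// @ts-expect-error -- "xx" is not a known OCR language code after this change
const unknownCode: OcrSkillLanguage = "xx";
```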
@@ -4530,7 +4424,7 @@ export declare interface OutputFieldMappingEntry {
 }

 /** Tokenizer for path-like hierarchies. This tokenizer is implemented using Apache Lucene. */
-export declare
+export declare interface PathHierarchyTokenizer extends BaseLexicalTokenizer {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.PathHierarchyTokenizerV2";
 /** The delimiter character to use. Default is "/". */
@@ -4543,7 +4437,7 @@ export declare type PathHierarchyTokenizer = BaseLexicalTokenizer & {
 reverseTokenOrder?: boolean;
 /** The number of initial tokens to skip. Default is 0. */
 numberOfTokensToSkip?: number;
-}
+}

 /**
 * Flexibly separates text into terms via a regular expression pattern. This analyzer is
@@ -4580,34 +4474,34 @@ export declare interface PatternAnalyzer {
 }

 /** Uses Java regexes to emit multiple tokens - one for each capture group in one or more patterns. This token filter is implemented using Apache Lucene. */
-export declare
+export declare interface PatternCaptureTokenFilter extends BaseTokenFilter {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.PatternCaptureTokenFilter";
 /** A list of patterns to match against each token. */
 patterns: string[];
 /** A value indicating whether to return the original token even if one of the patterns matches. Default is true. */
 preserveOriginal?: boolean;
-}
+}

 /** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This character filter is implemented using Apache Lucene. */
-export declare
+export declare interface PatternReplaceCharFilter extends BaseCharFilter {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.PatternReplaceCharFilter";
 /** A regular expression pattern. */
 pattern: string;
 /** The replacement text. */
 replacement: string;
-}
+}

 /** A character filter that replaces characters in the input string. It uses a regular expression to identify character sequences to preserve and a replacement pattern to identify characters to replace. For example, given the input text "aa bb aa bb", pattern "(aa)\s+(bb)", and replacement "$1#$2", the result would be "aa#bb aa#bb". This token filter is implemented using Apache Lucene. */
-export declare
+export declare interface PatternReplaceTokenFilter extends BaseTokenFilter {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.PatternReplaceTokenFilter";
 /** A regular expression pattern. */
 pattern: string;
 /** The replacement text. */
 replacement: string;
-}
+}

 /**
 * Tokenizer that uses regex pattern matching to construct distinct tokens. This tokenizer is
@@ -4646,17 +4540,17 @@ export declare interface PatternTokenizer {
 export declare type PhoneticEncoder = "metaphone" | "doubleMetaphone" | "soundex" | "refinedSoundex" | "caverphone1" | "caverphone2" | "cologne" | "nysiis" | "koelnerPhonetik" | "haasePhonetik" | "beiderMorse";

 /** Create tokens for phonetic matches. This token filter is implemented using Apache Lucene. */
-export declare
+export declare interface PhoneticTokenFilter extends BaseTokenFilter {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.PhoneticTokenFilter";
 /** The phonetic encoder to use. Default is "metaphone". */
 encoder?: PhoneticEncoder;
 /** A value indicating whether encoded tokens should replace original tokens. If false, encoded tokens are added as synonyms. Default is true. */
 replaceOriginalTokens?: boolean;
-}
+}

 /** Using the Text Analytics API, extracts personal information from an input text and gives you the option of masking it. */
-export declare
+export declare interface PIIDetectionSkill extends BaseSearchIndexerSkill {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Skills.Text.PIIDetectionSkill";
 /** A value indicating which language code to use. Default is en. */
@@ -4670,57 +4564,79 @@ export declare type PIIDetectionSkill = BaseSearchIndexerSkill & {
 /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
 modelVersion?: string;
 /** A list of PII entity categories that should be extracted and masked. */
-
+categories?: string[];
 /** If specified, will set the PII domain to include only a subset of the entity categories. Possible values include: 'phi', 'none'. Default is 'none'. */
 domain?: string;
-};
-
-/**
-* Defines values for PIIDetectionSkillMaskingMode. \
-* {@link KnownPIIDetectionSkillMaskingMode} can be used interchangeably with PIIDetectionSkillMaskingMode,
-* this enum contains the known values that the service supports.
-* ### Known values supported by the service
-* **none**: No masking occurs and the maskedText output will not be returned. \
-* **replace**: Replaces the detected entities with the character given in the maskingCharacter parameter. The character will be repeated to the length of the detected entity so that the offsets will correctly correspond to both the input text as well as the output maskedText.
-*/
-export declare type PIIDetectionSkillMaskingMode = string;
-
-/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
-export declare interface PrioritizedFields {
-/** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
-titleField?: SemanticField;
-/** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
-prioritizedContentFields?: SemanticField[];
-/** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
-prioritizedKeywordsFields?: SemanticField[];
 }

-
-* Defines values for QueryAnswerType. \
-* {@link KnownQueryAnswerType} can be used interchangeably with QueryAnswerType,
-* this enum contains the known values that the service supports.
-* ### Known values supported by the service
-* **none**: Do not return answers for the query. \
-* **extractive**: Extracts answer candidates from the contents of the documents returned in response to a query expressed as a question in natural language.
-*/
-export declare type QueryAnswerType = string;
+export declare type PIIDetectionSkillMaskingMode = "none" | "replace";

 /**
-*
-*
-*
-* ### Known values supported by the service
-* **none**: Do not return captions for the query. \
-* **extractive**: Extracts captions from the matching documents that contain passages relevant to the search query.
+* A value that specifies whether answers should be returned as part of the search response.
+* This parameter is only valid if the query type is 'semantic'. If set to `extractive`, the query
+* returns answers extracted from key passages in the highest ranked documents.
 */
-export declare type
+export declare type QueryAnswer = ExtractiveQueryAnswer;
+
+/** An answer is a text passage extracted from the contents of the most relevant documents that matched the query. Answers are extracted from the top search results. Answer candidates are scored and the top answers are selected. */
+export declare interface QueryAnswerResult {
+/** Describes unknown properties. The value of an unknown property can be of "any" type. */
+[property: string]: any;
+/**
+* The score value represents how relevant the answer is to the query relative to other answers returned for the query.
+* NOTE: This property will not be serialized. It can only be populated by the server.
+*/
+readonly score: number;
+/**
+* The key of the document the answer was extracted from.
+* NOTE: This property will not be serialized. It can only be populated by the server.
+*/
+readonly key: string;
+/**
+* The text passage extracted from the document contents as the answer.
+* NOTE: This property will not be serialized. It can only be populated by the server.
+*/
+readonly text: string;
+/**
+* Same text passage as in the Text property with highlighted text phrases most relevant to the query.
+* NOTE: This property will not be serialized. It can only be populated by the server.
+*/
+readonly highlights?: string;
+}

 /**
-*
-*
-*
+* A value that specifies whether captions should be returned as part of the search response.
+* This parameter is only valid if the query type is 'semantic'. If set, the query returns captions
+* extracted from key passages in the highest ranked documents. When Captions is 'extractive',
+* highlighting is enabled by default. Defaults to 'none'.
+*/
+export declare type QueryCaption = ExtractiveQueryCaption;
+
+/** Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.. */
+export declare interface QueryCaptionResult {
+/** Describes unknown properties. The value of an unknown property can be of "any" type. */
+[property: string]: any;
+/**
+* A representative text passage extracted from the document most relevant to the search query.
+* NOTE: This property will not be serialized. It can only be populated by the server.
+*/
+readonly text?: string;
+/**
+* Same text passage as in the Text property with highlighted phrases most relevant to the query.
+* NOTE: This property will not be serialized. It can only be populated by the server.
+*/
+readonly highlights?: string;
+}
+
+/**
+* Defines values for QueryDebugMode. \
+* {@link KnownQueryDebugMode} can be used interchangeably with QueryDebugMode,
+* this enum contains the known values that the service supports.
+* ### Known values supported by the service
+* **disabled**: No query debugging information will be returned. \
+* **semantic**: Allows the user to further explore their reranked results.
 */
-export declare type QueryDebugMode =
+export declare type QueryDebugMode = string;

 /**
 * Defines values for QueryLanguage. \
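`QueryAnswerResult` and `QueryCaptionResult` give the semantic payloads a concrete shape. A minimal sketch of consuming them, assuming both types are exported from the package root (the diff shows only their declarations):

```ts
import type { QueryAnswerResult, QueryCaptionResult } from "@azure/search-documents";

// Log the server-populated semantic extras from a search response.
function logSemanticExtras(
  answers?: QueryAnswerResult[],
  captions?: QueryCaptionResult[]
): void {
  for (const answer of answers ?? []) {
    // score, key and text are always populated by the service; highlights is optional
    console.log(`answer (${answer.score}) from ${answer.key}: ${answer.highlights ?? answer.text}`);
  }
  for (const caption of captions ?? []) {
    console.log(`caption: ${caption.highlights ?? caption.text ?? ""}`);
  }
}
```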
@@ -4778,7 +4694,7 @@ export declare type QueryDebugMode = "disabled" | "semantic";
 * **uk-ua**: Query language value for Ukrainian (Ukraine). \
 * **lv-lv**: Query language value for Latvian (Latvia). \
 * **et-ee**: Query language value for Estonian (Estonia). \
-* **ca-es**: Query language value for Catalan
+* **ca-es**: Query language value for Catalan. \
 * **fi-fi**: Query language value for Finnish (Finland). \
 * **sr-ba**: Query language value for Serbian (Bosnia and Herzegovina). \
 * **sr-me**: Query language value for Serbian (Montenegro). \
@@ -4787,8 +4703,8 @@ export declare type QueryDebugMode = "disabled" | "semantic";
 * **nb-no**: Query language value for Norwegian (Norway). \
 * **hy-am**: Query language value for Armenian (Armenia). \
 * **bn-in**: Query language value for Bengali (India). \
-* **eu-es**: Query language value for Basque
-* **gl-es**: Query language value for Galician
+* **eu-es**: Query language value for Basque. \
+* **gl-es**: Query language value for Galician. \
 * **gu-in**: Query language value for Gujarati (India). \
 * **he-il**: Query language value for Hebrew (Israel). \
 * **ga-ie**: Query language value for Irish (Ireland). \
@@ -4848,29 +4764,7 @@ export declare type QuerySpellerType = string;
 /** Defines values for QueryType. */
 export declare type QueryType = "simple" | "full" | "semantic";

-
-export declare interface RawVectorQuery<TModel extends object> extends BaseVectorQuery<TModel> {
-/** Polymorphic discriminator, which specifies the different types this object can be */
-kind: "vector";
-/** The vector representation of a search query. */
-vector?: number[];
-}
-
-/**
-* Defines values for RegexFlags. \
-* {@link KnownRegexFlags} can be used interchangeably with RegexFlags,
-* this enum contains the known values that the service supports.
-* ### Known values supported by the service
-* **CANON_EQ**: Enables canonical equivalence. \
-* **CASE_INSENSITIVE**: Enables case-insensitive matching. \
-* **COMMENTS**: Permits whitespace and comments in the pattern. \
-* **DOTALL**: Enables dotall mode. \
-* **LITERAL**: Enables literal parsing of the pattern. \
-* **MULTILINE**: Enables multiline mode. \
-* **UNICODE_CASE**: Enables Unicode-aware case folding. \
-* **UNIX_LINES**: Enables Unix lines mode.
-*/
-export declare type RegexFlags = string;
+export declare type RegexFlags = "CANON_EQ" | "CASE_INSENSITIVE" | "COMMENTS" | "DOTALL" | "LITERAL" | "MULTILINE" | "UNICODE_CASE" | "UNIX_LINES";

 /**
 * Options for reset docs operation.
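As with `OcrSkillLanguage`, `RegexFlags` narrows from `string` back to the eight Java/Lucene flag literals, so only documented flags type-check:

```ts
import type { RegexFlags } from "@azure/search-documents";

const caseInsensitive: RegexFlags = "CASE_INSENSITIVE"; // compiles
// @ts-expect-error -- arbitrary strings no longer satisfy RegexFlags
const javascriptStyleFlag: RegexFlags = "GLOBAL";
```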
@@ -4910,6 +4804,20 @@ export declare interface ResourceCounter {
 */
 export declare type RunIndexerOptions = OperationOptions;

+/** Contains configuration options specific to the scalar quantization compression method used during indexing and querying. */
+export declare interface ScalarQuantizationCompressionConfiguration extends BaseVectorSearchCompressionConfiguration {
+/** Polymorphic discriminator, which specifies the different types this object can be */
+kind: "scalarQuantization";
+/** Contains the parameters specific to Scalar Quantization. */
+parameters?: ScalarQuantizationParameters;
+}
+
+/** Contains the parameters specific to Scalar Quantization. */
+export declare interface ScalarQuantizationParameters {
+/** The quantized data type of compressed vector values. */
+quantizedDataType?: VectorSearchCompressionTargetDataType;
+}
+
 /**
 * Contains the possible cases for ScoringFunction.
 */
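The new scalar quantization surface is exercised through the `parameters` property of a configuration whose discriminator is `kind: "scalarQuantization"`. A minimal sketch; note that `"int8"` is an assumption, since the diff names the `VectorSearchCompressionTargetDataType` alias but not its members:

```ts
import type { ScalarQuantizationParameters } from "@azure/search-documents";

// "int8" is an assumed member of VectorSearchCompressionTargetDataType.
const quantization: ScalarQuantizationParameters = {
  quantizedDataType: "int8",
};
```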
@@ -4981,7 +4889,15 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
 * The name of the index
 */
 readonly indexName: string;
-
+/**
+* @hidden
+* A reference to the auto-generated SearchClient
+*/
+private readonly client;
+/**
+* A reference to the internal HTTP pipeline for use with raw requests
+*/
+readonly pipeline: Pipeline;
 /**
 * Creates an instance of SearchClient.
 *
@@ -5190,7 +5106,8 @@ export declare class SearchClient<TModel extends object> implements IndexDocumen
 private convertSearchFields;
 private convertSemanticFields;
 private convertOrderBy;
-private
+private convertQueryAnswers;
+private convertQueryCaptions;
 private convertVectorQuery;
 }

@@ -5273,17 +5190,17 @@ export declare interface SearchDocumentsResultBase {
 * not specified or set to 'none'.
 * NOTE: This property will not be serialized. It can only be populated by the server.
 */
-readonly answers?:
+readonly answers?: QueryAnswerResult[];
 /**
 * Reason that a partial response was returned for a semantic search request.
 * NOTE: This property will not be serialized. It can only be populated by the server.
 */
-readonly
+readonly semanticErrorReason?: SemanticErrorReason;
 /**
 * Type of partial response that was returned for a semantic search request.
 * NOTE: This property will not be serialized. It can only be populated by the server.
 */
-readonly
+readonly semanticSearchResultsType?: SemanticSearchResultsType;
 }

 /**
@@ -5303,12 +5220,13 @@ export declare type SearchFieldArray<TModel extends object = object> = (<T>() =>
 * Possible values include: 'Edm.String', 'Edm.Int32', 'Edm.Int64', 'Edm.Double', 'Edm.Boolean',
 * 'Edm.DateTimeOffset', 'Edm.GeographyPoint', 'Collection(Edm.String)', 'Collection(Edm.Int32)',
 * 'Collection(Edm.Int64)', 'Collection(Edm.Double)', 'Collection(Edm.Boolean)',
-* 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', 'Collection(Edm.Single)'
+* 'Collection(Edm.DateTimeOffset)', 'Collection(Edm.GeographyPoint)', 'Collection(Edm.Single)',
+* 'Collection(Edm.Half)', 'Collection(Edm.Int16)', 'Collection(Edm.SByte)'
 *
 * NB: `Edm.Single` alone is not a valid data type. It must be used as part of a collection type.
 * @readonly
 */
-export declare type SearchFieldDataType = "Edm.String" | "Edm.Int32" | "Edm.Int64" | "Edm.Double" | "Edm.Boolean" | "Edm.DateTimeOffset" | "Edm.GeographyPoint" | "Collection(Edm.String)" | "Collection(Edm.Int32)" | "Collection(Edm.Int64)" | "Collection(Edm.Double)" | "Collection(Edm.Boolean)" | "Collection(Edm.DateTimeOffset)" | "Collection(Edm.GeographyPoint)" | "Collection(Edm.Single)";
+export declare type SearchFieldDataType = "Edm.String" | "Edm.Int32" | "Edm.Int64" | "Edm.Double" | "Edm.Boolean" | "Edm.DateTimeOffset" | "Edm.GeographyPoint" | "Collection(Edm.String)" | "Collection(Edm.Int32)" | "Collection(Edm.Int64)" | "Collection(Edm.Double)" | "Collection(Edm.Boolean)" | "Collection(Edm.DateTimeOffset)" | "Collection(Edm.GeographyPoint)" | "Collection(Edm.Single)" | "Collection(Edm.Half)" | "Collection(Edm.Int16)" | "Collection(Edm.SByte)";

 /**
 * Represents a search index definition, which describes the fields and search behavior of an
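Three narrow-precision collection types join `SearchFieldDataType` alongside the new compression options, so field definitions can opt into smaller vector encodings:

```ts
import type { SearchFieldDataType } from "@azure/search-documents";

// Narrow-precision vector element types trade accuracy for index size.
const halfVector: SearchFieldDataType = "Collection(Edm.Half)";
const int16Vector: SearchFieldDataType = "Collection(Edm.Int16)";
const sbyteVector: SearchFieldDataType = "Collection(Edm.SByte)";
```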
@@ -5381,7 +5299,7 @@ export declare interface SearchIndex {
 /**
 * Defines parameters for a search index that influence semantic capabilities.
 */
-
+semanticSearch?: SemanticSearch;
 /**
 * Contains configuration options related to vector search.
 */
@@ -5416,7 +5334,15 @@ export declare class SearchIndexClient {
 * The endpoint of the search service
 */
 readonly endpoint: string;
-
+/**
+* @hidden
+* A reference to the auto-generated SearchServiceClient
+*/
+private readonly client;
+/**
+* A reference to the internal HTTP pipeline for use with raw requests
+*/
+readonly pipeline: Pipeline;
 /**
 * Used to authenticate requests to the service.
 */
@@ -5703,7 +5629,15 @@ export declare class SearchIndexerClient {
 * The endpoint of the search service
 */
 readonly endpoint: string;
-
+/**
+* @hidden
+* A reference to the auto-generated SearchServiceClient
+*/
+private readonly client;
+/**
+* A reference to the internal HTTP pipeline for use with raw requests
+*/
+readonly pipeline: Pipeline;
 /**
 * Creates an instance of SearchIndexerClient.
 *
@@ -5891,10 +5825,10 @@ export declare interface SearchIndexerDataContainer {
 export declare type SearchIndexerDataIdentity = SearchIndexerDataNoneIdentity | SearchIndexerDataUserAssignedIdentity;

 /** Clears the identity property of a datasource. */
-export declare
+export declare interface SearchIndexerDataNoneIdentity extends BaseSearchIndexerDataIdentity {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.DataNoneIdentity";
-}
+}

 /**
 * Represents a datasource definition, which can be used to configure an indexer.
@@ -5953,27 +5887,15 @@ export declare interface SearchIndexerDataSourceConnection {
 encryptionKey?: SearchResourceEncryptionKey;
 }

-
-* Defines values for SearchIndexerDataSourceType. \
-* {@link KnownSearchIndexerDataSourceType} can be used interchangeably with SearchIndexerDataSourceType,
-* this enum contains the known values that the service supports.
-* ### Known values supported by the service
-* **azuresql**: Indicates an Azure SQL datasource. \
-* **cosmosdb**: Indicates a CosmosDB datasource. \
-* **azureblob**: Indicates an Azure Blob datasource. \
-* **azuretable**: Indicates an Azure Table datasource. \
-* **mysql**: Indicates a MySql datasource. \
-* **adlsgen2**: Indicates an ADLS Gen2 datasource.
-*/
-export declare type SearchIndexerDataSourceType = string;
+export declare type SearchIndexerDataSourceType = "azuresql" | "cosmosdb" | "azureblob" | "azuretable" | "mysql" | "adlsgen2";

 /** Specifies the identity for a datasource to use. */
-export declare
+export declare interface SearchIndexerDataUserAssignedIdentity extends BaseSearchIndexerDataIdentity {
 /** Polymorphic discriminator, which specifies the different types this object can be */
 odatatype: "#Microsoft.Azure.Search.DataUserAssignedIdentity";
 /** The fully qualified Azure resource Id of a user assigned managed identity typically in the form "/subscriptions/12345678-1234-1234-1234-1234567890ab/resourceGroups/rg/providers/Microsoft.ManagedIdentity/userAssignedIdentities/myId" that should have been assigned to the search service. */
 userAssignedIdentity: string;
-}
+}

 /** Represents an item- or document-level indexing error. */
 export declare interface SearchIndexerError {
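`SearchIndexerDataSourceType` likewise collapses from `string` back to the six documented literals, so a mistyped data-source type becomes a compile error:

```ts
import type { SearchIndexerDataSourceType } from "@azure/search-documents";

const blobSource: SearchIndexerDataSourceType = "azureblob"; // compiles
// @ts-expect-error -- only the six documented literals are accepted now
const fileSource: SearchIndexerDataSourceType = "azurefile";
```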
@@ -6060,16 +5982,18 @@ export declare interface SearchIndexerKnowledgeStore {
 }

 /** Abstract class to share properties between concrete selectors. */
-export declare
+export declare interface SearchIndexerKnowledgeStoreBlobProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector {
 /** Blob container to store projections in. */
 storageContainer: string;
-}
+}

 /** Projection definition for what data to store in Azure Files. */
-export declare
+export declare interface SearchIndexerKnowledgeStoreFileProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {
+}

 /** Projection definition for what data to store in Azure Blob. */
-export declare
+export declare interface SearchIndexerKnowledgeStoreObjectProjectionSelector extends SearchIndexerKnowledgeStoreBlobProjectionSelector {
+}

 /** A dictionary of knowledge store-specific configuration properties. Each name is the name of a specific property. Each value must be of a primitive type. */
 export declare interface SearchIndexerKnowledgeStoreParameters {
@@ -6104,10 +6028,10 @@ export declare interface SearchIndexerKnowledgeStoreProjectionSelector {
 }

 /** Description for what data to store in Azure Tables. */
-export declare
+export declare interface SearchIndexerKnowledgeStoreTableProjectionSelector extends SearchIndexerKnowledgeStoreProjectionSelector {
 /** Name of the Azure table to store projected data in. */
 tableName: string;
-}
+}

 export declare interface SearchIndexerLimits {
 /**
@@ -6130,7 +6054,7 @@ export declare interface SearchIndexerLimits {
 /**
 * Contains the possible cases for Skill.
 */
-export declare type SearchIndexerSkill =
+export declare type SearchIndexerSkill = AzureMachineLearningSkill | AzureOpenAIEmbeddingSkill | ConditionalSkill | CustomEntityLookupSkill | DocumentExtractionSkill | EntityLinkingSkill | EntityRecognitionSkill | EntityRecognitionSkillV3 | ImageAnalysisSkill | KeyPhraseExtractionSkill | LanguageDetectionSkill | MergeSkill | OcrSkill | PIIDetectionSkill | SentimentSkill | SentimentSkillV3 | ShaperSkill | SplitSkill | TextTranslationSkill | WebApiSkill;

 /**
 * A list of skills.
@@ -6474,7 +6398,7 @@ export declare interface SearchIndexStatistics {
 * The amount of memory in bytes consumed by vectors in the index.
 * NOTE: This property will not be serialized. It can only be populated by the server.
 */
-readonly vectorIndexSize
+readonly vectorIndexSize: number;
 }

 /**
@@ -6506,586 +6430,275 @@ UnionToIntersection<TFields extends `${infer FieldName}/${infer RestPaths}` ? Fi
 /**
 * Parameters for filtering, sorting, faceting, paging, and other search query behaviors.
 */
-export declare
-
-
-
-* approximation.
-*/
-includeTotalCount?: boolean;
+export declare type SearchRequestOptions<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = BaseSearchRequestOptions<TModel, TFields> & SearchRequestQueryTypeOptions;
+
+export declare type SearchRequestQueryTypeOptions = {
+queryType: "semantic";
 /**
-*
-* field name, optionally followed by a comma-separated list of name:value pairs.
+* Defines options for semantic search queries
 */
-
+semanticSearchOptions: SemanticSearchOptions;
+} | {
+queryType?: "simple" | "full";
+};
+
6445
|
+
/**
|
|
6446
|
+
* A customer-managed encryption key in Azure Key Vault. Keys that you create and manage can be
|
|
6447
|
+
* used to encrypt or decrypt data-at-rest in Azure Cognitive Search, such as indexes and synonym
|
|
6448
|
+
* maps.
|
|
6449
|
+
*/
|
|
6450
|
+
export declare interface SearchResourceEncryptionKey {
|
|
6521
6451
|
/**
|
|
6522
|
-
* The
|
|
6452
|
+
* The name of your Azure Key Vault key to be used to encrypt your data at rest.
|
|
6523
6453
|
*/
|
|
6524
|
-
|
|
6454
|
+
keyName: string;
|
|
6525
6455
|
/**
|
|
6526
|
-
* The
|
|
6527
|
-
* be used for hit highlighting.
|
|
6456
|
+
* The version of your Azure Key Vault key to be used to encrypt your data at rest.
|
|
6528
6457
|
*/
|
|
6529
|
-
|
|
6458
|
+
keyVersion: string;
|
|
6530
6459
|
/**
|
|
6531
|
-
*
|
|
6532
|
-
*
|
|
6460
|
+
* The URI of your Azure Key Vault, also referred to as DNS name, that contains the key to be
|
|
6461
|
+
* used to encrypt your data at rest. An example URI might be
|
|
6462
|
+
* https://my-keyvault-name.vault.azure.net.
|
|
6533
6463
|
*/
|
|
6534
|
-
|
|
6464
|
+
vaultUrl: string;
|
|
6535
6465
|
/**
|
|
6536
|
-
*
|
|
6537
|
-
* is
|
|
6466
|
+
* An AAD Application ID that was granted the required access permissions to the Azure Key Vault
|
|
6467
|
+
* that is to be used when encrypting your data at rest. The Application ID should not be
|
|
6468
|
+
* confused with the Object ID for your AAD Application.
|
|
6538
6469
|
*/
|
|
6539
|
-
|
|
6470
|
+
applicationId?: string;
|
|
6540
6471
|
/**
|
|
6541
|
-
*
|
|
6542
|
-
* search query in order for the query to be reported as a success. This parameter can be useful
|
|
6543
|
-
* for ensuring search availability even for services with only one replica. The default is 100.
|
|
6472
|
+
* The authentication key of the specified AAD application.
|
|
6544
6473
|
*/
|
|
6545
|
-
|
|
6474
|
+
applicationSecret?: string;
|
|
6546
6475
|
/**
|
|
6547
|
-
*
|
|
6548
|
-
*
|
|
6549
|
-
*
|
|
6550
|
-
*
|
|
6551
|
-
* scores of documents. If no $orderby is specified, the default sort order is descending by
|
|
6552
|
-
* document match score. There can be at most 32 $orderby clauses.
|
|
6476
|
+
* An explicit managed identity to use for this encryption key. If not specified and the access
|
|
6477
|
+
* credentials property is null, the system-assigned managed identity is used. On update to the
|
|
6478
|
+
* resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified,
|
|
6479
|
+
* the value of this property is cleared.
|
|
6553
6480
|
*/
|
|
6554
|
-
|
|
6481
|
+
identity?: SearchIndexerDataIdentity;
|
|
6482
|
+
}
|
|
6483
|
+
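The re-typed `SearchResourceEncryptionKey` now spells out all of its members; a minimal literal with placeholder values:

```ts
import type { SearchResourceEncryptionKey } from "@azure/search-documents";

// All values below are placeholders.
const encryptionKey: SearchResourceEncryptionKey = {
  keyName: "my-customer-managed-key",
  keyVersion: "0123456789abcdef0123456789abcdef",
  vaultUrl: "https://my-keyvault-name.vault.azure.net",
  // applicationId/applicationSecret or identity may be supplied for access;
  // all three are optional per the declaration above.
};
```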
+/**
+* Contains a document found by a search query, plus associated metadata.
+*/
+export declare type SearchResult<TModel extends object, TFields extends SelectFields<TModel> = SelectFields<TModel>> = {
 /**
-*
-*
+* The relevance score of the document compared to other documents returned by the query.
+* **NOTE: This property will not be serialized. It can only be populated by the server.**
 */
-
+readonly score: number;
 /**
-*
-*
-* 'local'. Use 'global' to aggregate scoring statistics globally before scoring. Using global
-* scoring statistics can increase latency of search queries. Possible values include: 'Local',
-* 'Global'
+* The relevance score computed by the semantic ranker for the top search results. Search results are sorted by the RerankerScore first and then by the Score. RerankerScore is only returned for queries of type 'semantic'.
+* NOTE: This property will not be serialized. It can only be populated by the server.
 */
-
+readonly rerankerScore?: number;
 /**
-*
-*
-*
-* load balancing of the requests across replicas and adversely affect the performance of the
-* search service. The value used as sessionId cannot start with a '_' character.
+* Text fragments from the document that indicate the matching search terms, organized by each
+* applicable field; null if hit highlighting was not enabled for the query.
+* **NOTE: This property will not be serialized. It can only be populated by the server.**
 */
-
+readonly highlights?: {
+[k in SelectFields<TModel>]?: string[];
+};
 /**
-*
-*
-* defines a function with a parameter called 'mylocation' the parameter string would be
-* "mylocation--122.2,44.8" (without the quotes).
+* Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.
+* NOTE: This property will not be serialized. It can only be populated by the server.
 */
-
+readonly captions?: QueryCaptionResult[];
+document: NarrowedModel<TModel, TFields>;
 /**
-*
-* the
+* Contains debugging information that can be used to further explore your search results.
+* NOTE: This property will not be serialized. It can only be populated by the server.
 */
-
+readonly documentDebugInfo?: DocumentDebugInfo[];
+};
+
+/**
+* Response from a get service statistics request. If successful, it includes service level
+* counters and limits.
+*/
+export declare interface SearchServiceStatistics {
 /**
-*
-* semantic captions and semantic answers. Is useful for scenarios where there is a need to use
-* different queries between the base retrieval and ranking phase, and the L2 semantic phase.
+* Service level resource counters.
 */
-
+counters: ServiceCounters;
 /**
-*
-* type semantic.
+* Service level general limits.
 */
-
+limits: ServiceLimits;
+}
+
+/** Defines how the Suggest API should apply to a group of fields in the index. */
+export declare interface SearchSuggester {
+/** The name of the suggester. */
+name: string;
+/** A value indicating the capabilities of the suggester. */
+searchMode: "analyzingInfixMatching";
+/** The list of field names to which the suggester applies. Each field must be searchable. */
+sourceFields: string[];
+}
+
+/**
+* If `TFields` is never, an untyped string array
+* Otherwise, a narrowed `Fields[]` type to be used elsewhere in the consuming type.
+*/
+export declare type SelectArray<TFields = never> = [string] extends [TFields] ? readonly TFields[] : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? readonly string[] : readonly TFields[];
+
+/**
+* Produces a union of valid Cognitive Search OData $select paths for T
+* using a post-order traversal of the field tree rooted at T.
+*/
+export declare type SelectFields<TModel extends object> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? string : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? string : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? string : TModel extends Array<infer Elem> ? Elem extends object ? SelectFields<Elem> : never : {
+[Key in keyof TModel]: Key extends string ? NonNullable<TModel[Key]> extends object ? NonNullable<TModel[Key]> extends ExcludedODataTypes ? Key : SelectFields<NonNullable<TModel[Key]>> extends infer NextPaths ? (<T>() => T extends NextPaths ? true : false) extends <T>() => T extends never ? true : false ? Key : NextPaths extends string ? Key | `${Key}/${NextPaths}` : Key : never : Key : never;
+}[keyof TModel & string] & string;
+
+/** Defines a specific configuration to be used in the context of semantic capabilities. */
+export declare interface SemanticConfiguration {
+/** The name of the semantic configuration. */
+name: string;
+/** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. */
+prioritizedFields: SemanticPrioritizedFields;
+}
+
|
6566
|
+
/**
|
|
6567
|
+
* Debug options for semantic search queries.
|
|
6568
|
+
*/
|
|
6569
|
+
export declare interface SemanticDebugInfo {
|
|
     /**
-     *
-     *
+     * The title field that was sent to the semantic enrichment process, as well as how it was used
+     * NOTE: This property will not be serialized. It can only be populated by the server.
      */
-
+    readonly titleField?: QueryResultDocumentSemanticField;
     /**
-     *
-     *
+     * The content fields that were sent to the semantic enrichment process, as well as how they were used
+     * NOTE: This property will not be serialized. It can only be populated by the server.
      */
-
+    readonly contentFields?: QueryResultDocumentSemanticField[];
     /**
-     *
+     * The keyword fields that were sent to the semantic enrichment process, as well as how they were used
+     * NOTE: This property will not be serialized. It can only be populated by the server.
      */
-
+    readonly keywordFields?: QueryResultDocumentSemanticField[];
     /**
-     *
+     * The raw concatenated strings that were sent to the semantic enrichment process.
+     * NOTE: This property will not be serialized. It can only be populated by the server.
      */
-
-    /**
-     * The comma-separated list of field names to which to scope the full-text search. When using
-     * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each
-     * fielded search expression take precedence over any field names listed in this parameter.
-     */
-    searchFields?: string;
-    /**
-     * A value that specifies whether any or all of the search terms must be matched in order to
-     * count the document as a match. Possible values include: 'Any', 'All'
-     */
-    searchMode?: SearchMode;
-    /**
-     * A value that specifies the language of the search query.
-     */
-    queryLanguage?: QueryLanguage;
-    /**
-     * A value that specified the type of the speller to use to spell-correct individual search
-     * query terms.
-     */
-    speller?: QuerySpellerType;
-    /**
-     * A value that specifies whether answers should be returned as part of the search response.
-     */
-    answers?: QueryAnswerType;
-    /**
-     * The comma-separated list of fields to retrieve. If unspecified, all fields marked as
-     * retrievable in the schema are included.
-     */
-    select?: string;
-    /**
-     * The number of search results to skip. This value cannot be greater than 100,000. If you need
-     * to scan documents in sequence, but cannot use skip due to this limitation, consider using
-     * orderby on a totally-ordered key and filter with a range query instead.
-     */
-    skip?: number;
-    /**
-     * The number of search results to retrieve. This can be used in conjunction with $skip to
-     * implement client-side paging of search results. If results are truncated due to server-side
-     * paging, the response will include a continuation token that can be used to issue another
-     * Search request for the next page of results.
-     */
-    top?: number;
-    /**
-     * A value that specifies whether captions should be returned as part of the search response.
-     */
-    captions?: QueryCaptionType;
-    /**
-     * The comma-separated list of field names used for semantic search.
-     */
-    semanticFields?: string;
-    /**
-     * The query parameters for vector, hybrid, and multi-vector search queries.
-     */
-    vectorQueries?: VectorQuery<TModel>[];
-    /**
-     * Determines whether or not filters are applied before or after the vector search is performed.
-     * Default is 'preFilter'.
-     */
-    vectorFilterMode?: VectorFilterMode;
+    readonly rerankerInput?: QueryResultDocumentRerankerInput;
 }
 
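A hedged sketch of consuming the debug shapes above. It assumes `DocumentDebugInfo` exposes a `semantic` member carrying this `SemanticDebugInfo`, and that `"semantic"` is a valid `QueryDebugMode` value; the surrounding declarations suggest both but this diff does not show them directly. Endpoint, key, and index names are placeholders:

```ts
import { AzureKeyCredential, SearchClient } from "@azure/search-documents";

interface Hotel {
  hotelId: string;
  description: string;
}

const client = new SearchClient<Hotel>(
  "https://<service>.search.windows.net", // placeholder
  "hotels", // hypothetical index
  new AzureKeyCredential("<api-key>"),
);

async function main(): Promise<void> {
  const results = await client.search("beachfront suite", {
    queryType: "semantic",
    semanticSearchOptions: {
      configurationName: "default",
      debugMode: "semantic", // assumed QueryDebugMode value
    },
  });
  for await (const result of results.results) {
    // Server-populated; assumes DocumentDebugInfo carries a `semantic` member.
    const semanticDebug = result.documentDebugInfo?.[0]?.semantic;
    console.log(semanticDebug?.titleField?.name, semanticDebug?.titleField?.state);
  }
}

main().catch(console.error);
```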
-
-
-
-
-
-
-
-     * approximation.
-     */
-    includeTotalCount?: boolean;
-    /**
-     * The list of facet expressions to apply to the search query. Each facet expression contains a
-     * field name, optionally followed by a comma-separated list of name:value pairs.
-     */
-    facets?: string[];
-    /**
-     * The OData $filter expression to apply to the search query.
-     */
-    filter?: string;
-    /**
-     * The comma-separated list of field names to use for hit highlights. Only searchable fields can
-     * be used for hit highlighting.
-     */
-    highlightFields?: string;
-    /**
-     * A string tag that is appended to hit highlights. Must be set with highlightPreTag. Default is
-     * </em>.
-     */
-    highlightPostTag?: string;
-    /**
-     * A string tag that is prepended to hit highlights. Must be set with highlightPostTag. Default
-     * is <em>.
-     */
-    highlightPreTag?: string;
-    /**
-     * A number between 0 and 100 indicating the percentage of the index that must be covered by a
-     * search query in order for the query to be reported as a success. This parameter can be useful
-     * for ensuring search availability even for services with only one replica. The default is 100.
-     */
-    minimumCoverage?: number;
-    /**
-     * The list of OData $orderby expressions by which to sort the results. Each
-     * expression can be either a field name or a call to either the geo.distance() or the
-     * search.score() functions. Each expression can be followed by asc to indicate ascending, or
-     * desc to indicate descending. The default is ascending order. Ties will be broken by the match
-     * scores of documents. If no $orderby is specified, the default sort order is descending by
-     * document match score. There can be at most 32 $orderby clauses.
-     */
-    orderBy?: string[];
-    /**
-     * A value that specifies the syntax of the search query. The default is 'simple'. Use 'full' if
-     * your query uses the Lucene query syntax. Possible values include: 'simple', 'full', 'semantic'
-     */
-    queryType?: QueryType;
-    /**
-     * The list of parameter values to be used in scoring functions (for example,
-     * referencePointParameter) using the format name-values. For example, if the scoring profile
-     * defines a function with a parameter called 'mylocation' the parameter string would be
-     * "mylocation--122.2,44.8" (without the quotes).
-     */
-    scoringParameters?: string[];
-    /**
-     * The name of a scoring profile to evaluate match scores for matching documents in order to sort
-     * the results.
-     */
-    scoringProfile?: string;
-    /**
-     * Allows setting a separate search query that will be solely used for semantic reranking,
-     * semantic captions and semantic answers. Is useful for scenarios where there is a need to use
-     * different queries between the base retrieval and ranking phase, and the L2 semantic phase.
-     */
-    semanticQuery?: string;
-    /**
-     * The name of a semantic configuration that will be used when processing documents for queries of
-     * type semantic.
-     */
-    semanticConfiguration?: string;
-    /**
-     * Allows the user to choose whether a semantic call should fail completely, or to return
-     * partial results (default).
-     */
-    semanticErrorHandlingMode?: SemanticErrorHandlingMode;
-    /**
-     * Allows the user to set an upper bound on the amount of time it takes for semantic enrichment to finish
-     * processing before the request fails.
-     */
-    semanticMaxWaitInMilliseconds?: number;
-    /**
-     * Enables a debugging tool that can be used to further explore your search results.
-     */
-    debugMode?: QueryDebugMode;
-    /**
-     * The comma-separated list of field names to which to scope the full-text search. When using
-     * fielded search (fieldName:searchExpression) in a full Lucene query, the field names of each
-     * fielded search expression take precedence over any field names listed in this parameter.
-     */
-    searchFields?: SearchFieldArray<TModel>;
-    /**
-     * The language of the query.
-     */
-    queryLanguage?: QueryLanguage;
-    /**
-     * Improve search recall by spell-correcting individual search query terms.
-     */
-    speller?: Speller;
-    /**
-     * This parameter is only valid if the query type is 'semantic'. If set, the query returns answers
-     * extracted from key passages in the highest ranked documents.
-     */
-    answers?: Answers | AnswersOptions;
-    /**
-     * A value that specifies whether any or all of the search terms must be matched in order to
-     * count the document as a match. Possible values include: 'any', 'all'
-     */
-    searchMode?: SearchMode;
-    /**
-     * A value that specifies whether we want to calculate scoring statistics (such as document
-     * frequency) globally for more consistent scoring, or locally, for lower latency. Possible
-     * values include: 'Local', 'Global'
-     */
-    scoringStatistics?: ScoringStatistics;
-    /**
-     * A value to be used to create a sticky session, which can help to get more consistent results.
-     * As long as the same sessionId is used, a best-effort attempt will be made to target the same
-     * replica set. Be wary that reusing the same sessionID values repeatedly can interfere with the
-     * load balancing of the requests across replicas and adversely affect the performance of the
-     * search service. The value used as sessionId cannot start with a '_' character.
-     */
-    sessionId?: string;
-    /**
-     * The list of fields to retrieve. If unspecified, all fields marked as
-     * retrievable in the schema are included.
-     */
-    select?: SelectArray<TFields>;
-    /**
-     * The number of search results to skip. This value cannot be greater than 100,000. If you need
-     * to scan documents in sequence, but cannot use skip due to this limitation, consider using
-     * orderby on a totally-ordered key and filter with a range query instead.
-     */
-    skip?: number;
-    /**
-     * The number of search results to retrieve. This can be used in conjunction with $skip to
-     * implement client-side paging of search results. If results are truncated due to server-side
-     * paging, the response will include a continuation token that can be used to issue another
-     * Search request for the next page of results.
-     */
-    top?: number;
-    /**
-     * This parameter is only valid if the query type is 'semantic'. If set, the query returns captions
-     * extracted from key passages in the highest ranked documents. When Captions is set to 'extractive',
-     * highlighting is enabled by default, and can be configured by appending the pipe character '|'
-     * followed by the 'highlight-true'/'highlight-false' option, such as 'extractive|highlight-true'. Defaults to 'None'.
-     */
-    captions?: Captions;
-    /**
-     * The list of field names used for semantic search.
-     */
-    semanticFields?: string[];
-    /**
-     * The query parameters for vector and hybrid search queries.
-     */
-    vectorQueries?: VectorQuery<TModel>[];
-    /**
-     * Determines whether or not filters are applied before or after the vector search is performed.
-     * Default is 'preFilter'.
-     */
-    vectorFilterMode?: VectorFilterMode;
+export declare type SemanticErrorMode = "partial" | "fail";
+
+export declare type SemanticErrorReason = "maxWaitExceeded" | "capacityOverloaded" | "transient";
+
+/** A field that is used as part of the semantic configuration. */
+export declare interface SemanticField {
+    name: string;
 }
 
 /**
-     *
-     *
-     *
+ * Defines values for SemanticFieldState. \
+ * {@link KnownSemanticFieldState} can be used interchangeably with SemanticFieldState,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **used**: The field was fully used for semantic enrichment. \
+ * **unused**: The field was not used for semantic enrichment. \
+ * **partial**: The field was partially used for semantic enrichment.
  */
-export declare
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-     * confused with the Object ID for your AAD Application.
-     */
-    applicationId?: string;
-    /**
-     * The authentication key of the specified AAD application.
-     */
-    applicationSecret?: string;
-    /**
-     * An explicit managed identity to use for this encryption key. If not specified and the access
-     * credentials property is null, the system-assigned managed identity is used. On update to the
-     * resource, if the explicit identity is unspecified, it remains unchanged. If "none" is specified,
-     * the value of this property is cleared.
-     */
-    identity?: SearchIndexerDataIdentity;
+export declare type SemanticFieldState = string;
+
+/** Describes the title, content, and keywords fields to be used for semantic ranking, captions, highlights, and answers. */
+export declare interface SemanticPrioritizedFields {
+    /** Defines the title field to be used for semantic ranking, captions, highlights, and answers. If you don't have a title field in your index, leave this blank. */
+    titleField?: SemanticField;
+    /** Defines the content fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain text in natural language form. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
+    contentFields?: SemanticField[];
+    /** Defines the keyword fields to be used for semantic ranking, captions, highlights, and answers. For the best result, the selected fields should contain a list of keywords. The order of the fields in the array represents their priority. Fields with lower priority may get truncated if the content is long. */
+    keywordsFields?: SemanticField[];
+}
+
+/** Defines parameters for a search index that influence semantic capabilities. */
+export declare interface SemanticSearch {
+    /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
+    defaultConfigurationName?: string;
+    /** The semantic configurations for the index. */
+    configurations?: SemanticConfiguration[];
 }
 
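A minimal sketch wiring the renamed index-level semantic types together, assuming `SearchIndex` exposes them through a `semanticSearch` property matching the `SemanticSearch` type above; the service endpoint, index, and field names are placeholders:

```ts
import { AzureKeyCredential, SearchIndexClient, SemanticSearch } from "@azure/search-documents";

const semanticSearch: SemanticSearch = {
  defaultConfigurationName: "default",
  configurations: [
    {
      name: "default",
      prioritizedFields: {
        titleField: { name: "hotelName" },
        contentFields: [{ name: "description" }],
        keywordsFields: [{ name: "tags" }],
      },
    },
  ],
};

const indexClient = new SearchIndexClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<api-key>"),
);

async function main(): Promise<void> {
  // Fields elided; a real index would also define hotelName, description, tags.
  await indexClient.createOrUpdateIndex({
    name: "hotels",
    fields: [{ name: "hotelId", type: "Edm.String", key: true }],
    semanticSearch,
  });
}

main().catch(console.error);
```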
 /**
-     *
+ * Defines options for semantic search queries
  */
-export declare
+export declare interface SemanticSearchOptions {
     /**
-     * The
-     *
+     * The name of a semantic configuration that will be used when processing documents for queries of
+     * type semantic.
      */
-
+    configurationName?: string;
     /**
-     *
-     *
+     * Allows the user to choose whether a semantic call should fail completely, or to return partial
+     * results (default).
      */
-
+    errorMode?: SemanticErrorMode;
     /**
-     *
-     *
-     * **NOTE: This property will not be serialized. It can only be populated by the server.**
-     */
-    readonly highlights?: {
-        [k in SelectFields<TModel>]?: string[];
-    };
-    /**
-     * Captions are the most representative passages from the document relatively to the search query. They are often used as document summary. Captions are only returned for queries of type 'semantic'.
-     * NOTE: This property will not be serialized. It can only be populated by the server.
+     * Allows the user to set an upper bound on the amount of time it takes for semantic enrichment
+     * to finish processing before the request fails.
      */
-
-    document: NarrowedModel<TModel, TFields>;
+    maxWaitInMilliseconds?: number;
     /**
-     *
-     * NOTE: This property will not be serialized. It can only be populated by the server.
+     * If set, the query returns answers extracted from key passages in the highest ranked documents.
      */
-
-};
-
-/**
- * Response from a get service statistics request. If successful, it includes service level
- * counters and limits.
- */
-export declare interface SearchServiceStatistics {
+    answers?: QueryAnswer;
     /**
-     *
+     * If set, the query returns captions extracted from key passages in the highest ranked
+     * documents. When Captions is set to 'extractive', highlighting is enabled by default. Defaults
+     * to 'None'.
      */
-
+    captions?: QueryCaption;
     /**
-     *
-
-
-}
-
-/** Defines how the Suggest API should apply to a group of fields in the index. */
-export declare interface SearchSuggester {
-    /** The name of the suggester. */
-    name: string;
-    /** A value indicating the capabilities of the suggester. */
-    searchMode: "analyzingInfixMatching";
-    /** The list of field names to which the suggester applies. Each field must be searchable. */
-    sourceFields: string[];
-}
-
-/**
- * If `TFields` is never, an untyped string array
- * Otherwise, a narrowed `Fields[]` type to be used elsewhere in the consuming type.
- */
-export declare type SelectArray<TFields = never> = [string] extends [TFields] ? readonly TFields[] : (<T>() => T extends TFields ? true : false) extends <T>() => T extends never ? true : false ? readonly string[] : readonly TFields[];
-
-/**
- * Produces a union of valid Cognitive Search OData $select paths for T
- * using a post-order traversal of the field tree rooted at T.
- */
-export declare type SelectFields<TModel extends object> = (<T>() => T extends TModel ? true : false) extends <T>() => T extends never ? true : false ? string : (<T>() => T extends TModel ? true : false) extends <T>() => T extends any ? true : false ? string : (<T>() => T extends TModel ? true : false) extends <T>() => T extends object ? true : false ? string : TModel extends Array<infer Elem> ? Elem extends object ? SelectFields<Elem> : never : {
-    [Key in keyof TModel]: Key extends string ? NonNullable<TModel[Key]> extends object ? NonNullable<TModel[Key]> extends ExcludedODataTypes ? Key : SelectFields<NonNullable<TModel[Key]>> extends infer NextPaths ? (<T>() => T extends NextPaths ? true : false) extends <T>() => T extends never ? true : false ? Key : NextPaths extends string ? Key | `${Key}/${NextPaths}` : Key : never : Key : never;
-}[keyof TModel & string] & string;
-
-/** Defines a specific configuration to be used in the context of semantic capabilities. */
-export declare interface SemanticConfiguration {
-    /** The name of the semantic configuration. */
-    name: string;
-    /** Describes the title, content, and keyword fields to be used for semantic ranking, captions, highlights, and answers. At least one of the three sub properties (titleField, prioritizedKeywordsFields and prioritizedContentFields) need to be set. */
-    prioritizedFields: PrioritizedFields;
-}
-
-/**
- * Debug options for semantic search queries.
- */
-export declare interface SemanticDebugInfo {
-    /**
-     * The title field that was sent to the semantic enrichment process, as well as how it was used
-     * NOTE: This property will not be serialized. It can only be populated by the server.
-     */
-    readonly titleField?: QueryResultDocumentSemanticField;
-    /**
-     * The content fields that were sent to the semantic enrichment process, as well as how they were used
-     * NOTE: This property will not be serialized. It can only be populated by the server.
+     * Allows setting a separate search query that will be solely used for semantic reranking,
+     * semantic captions and semantic answers. Is useful for scenarios where there is a need to use
+     * different queries between the base retrieval and ranking phase, and the L2 semantic phase.
      */
-
+    semanticQuery?: string;
     /**
-     * The
-     * NOTE: This property will not be serialized. It can only be populated by the server.
+     * The list of field names used for semantic search.
      */
-
+    semanticFields?: string[];
     /**
-     *
-     * NOTE: This property will not be serialized. It can only be populated by the server.
+     * Enables a debugging tool that can be used to further explore your search results.
      */
-
-}
-
-/**
- * partial: If the semantic processing fails, partial results still return. The definition of
- * partial results depends on what semantic step failed and what was the reason for failure.
- *
- * fail: If there is an exception during the semantic processing step, the query will fail and
- * return the appropriate HTTP code depending on the error.
- */
-export declare type SemanticErrorHandlingMode = "partial" | "fail";
-
-/** A field that is used as part of the semantic configuration. */
-export declare interface SemanticField {
-    name?: string;
+    debugMode?: QueryDebugMode;
 }
 
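A hedged sketch of the regrouped query options above. It assumes `QueryAnswer` and `QueryCaption` take the structured `answerType`/`captionType` form introduced alongside this interface, and that the bag is passed as `semanticSearchOptions` on a search call; all names and values are illustrative:

```ts
import { SemanticSearchOptions } from "@azure/search-documents";

// The former top-level semantic* query options, regrouped into one bag.
const semanticSearchOptions: SemanticSearchOptions = {
  configurationName: "default",
  errorMode: "partial",
  maxWaitInMilliseconds: 1500,
  answers: { answerType: "extractive", count: 3 }, // assumed QueryAnswer shape
  captions: { captionType: "extractive", highlight: true }, // assumed QueryCaption shape
  semanticQuery: "hotels walkable to live music",
};

// Passed alongside queryType on a search call, e.g.:
// await client.search("live music", { queryType: "semantic", semanticSearchOptions });
```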
-
- * used: The field was fully used for semantic enrichment.
- *
- * unused: The field was not used for semantic enrichment.
- *
- * partial: The field was partially used for semantic enrichment.
- */
-export declare type SemanticFieldState = "used" | "unused" | "partial";
+export declare type SemanticSearchResultsType = "baseResults" | "rerankedResults";
 
 /**
-     *
-     * exceeded that value. Only the base results were returned.
+ * Text analytics positive-negative sentiment analysis, scored as a floating point value in a range of zero to 1.
  *
-     *
-     *
-     * transient: At least one step of the semantic process failed.
+ * @deprecated This skill has been deprecated.
  */
-export declare
-
-/**
- * baseResults: Results without any semantic enrichment or reranking.
- *
- * rerankedResults: Results have been reranked with the reranker model and will include semantic
- * captions. They will not include any answers, answers highlights or caption highlights.
- */
-export declare type SemanticPartialResponseType = "baseResults" | "rerankedResults";
-
-/** Defines parameters for a search index that influence semantic capabilities. */
-export declare interface SemanticSettings {
-    /** Allows you to set the name of a default semantic configuration in your index, making it optional to pass it on as a query parameter every time. */
-    defaultConfiguration?: string;
-    /** The semantic configurations for the index. */
-    configurations?: SemanticConfiguration[];
-}
-
-/**
- * This skill is deprecated. Use the V3.SentimentSkill instead.
- *
- * @deprecated
- */
-export declare type SentimentSkill = BaseSearchIndexerSkill & {
+export declare interface SentimentSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Text.SentimentSkill";
     /** A value indicating which language code to use. Default is en. */
     defaultLanguageCode?: SentimentSkillLanguage;
-};
+}
 
-/**
- * Defines values for SentimentSkillLanguage. \
- * {@link KnownSentimentSkillLanguage} can be used interchangeably with SentimentSkillLanguage,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **da**: Danish \
- * **nl**: Dutch \
- * **en**: English \
- * **fi**: Finnish \
- * **fr**: French \
- * **de**: German \
- * **el**: Greek \
- * **it**: Italian \
- * **no**: Norwegian (Bokmaal) \
- * **pl**: Polish \
- * **pt-PT**: Portuguese (Portugal) \
- * **ru**: Russian \
- * **es**: Spanish \
- * **sv**: Swedish \
- * **tr**: Turkish
- */
-export declare type SentimentSkillLanguage = string;
+export declare type SentimentSkillLanguage = "da" | "nl" | "en" | "fi" | "fr" | "de" | "el" | "it" | "no" | "pl" | "pt-PT" | "ru" | "es" | "sv" | "tr";
 
 /** Using the Text Analytics API, evaluates unstructured text and for each record, provides sentiment labels (such as "negative", "neutral" and "positive") based on the highest confidence score found by the service at a sentence and document-level. */
-export declare type SentimentSkillV3 = BaseSearchIndexerSkill & {
+export declare interface SentimentSkillV3 extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill";
-    /** A value indicating which language code to use. Default is en. */
+    /** A value indicating which language code to use. Default is `en`. */
     defaultLanguageCode?: string;
     /** If set to true, the skill output will include information from Text Analytics for opinion mining, namely targets (nouns or verbs) and their associated assessment (adjective) in the text. Default is false. */
     includeOpinionMining?: boolean;
     /** The version of the model to use when calling the Text Analytics service. It will default to the latest available when not specified. We recommend you do not specify this value unless absolutely necessary. */
     modelVersion?: string;
-};
+}
 
 /** Represents service-level resource counters and quotas. */
 export declare interface ServiceCounters {
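A minimal sketch of using the `SentimentSkillV3` interface above inside a skillset; the input/output mappings and skillset name are hypothetical:

```ts
import { AzureKeyCredential, SearchIndexerClient, SentimentSkillV3 } from "@azure/search-documents";

// Hypothetical mappings over the cracked document content.
const sentimentSkill: SentimentSkillV3 = {
  odatatype: "#Microsoft.Skills.Text.V3.SentimentSkill",
  defaultLanguageCode: "en",
  includeOpinionMining: true,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "sentiment", targetName: "sentiment" }],
};

const indexerClient = new SearchIndexerClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<api-key>"),
);

async function main(): Promise<void> {
  await indexerClient.createSkillset({
    name: "reviews-skillset", // hypothetical
    description: "Sentiment over review text",
    skills: [sentimentSkill],
  });
}

main().catch(console.error);
```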
@@ -7122,13 +6735,13 @@ export declare interface ServiceLimits {
 }
 
 /** A skill for reshaping the outputs. It creates a complex type to support composite fields (also known as multipart fields). */
-export declare type ShaperSkill = BaseSearchIndexerSkill & {
+export declare interface ShaperSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Util.ShaperSkill";
-};
+}
 
 /** Creates combinations of tokens as a single token. This token filter is implemented using Apache Lucene. */
-export declare type ShingleTokenFilter = BaseTokenFilter & {
+export declare interface ShingleTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.ShingleTokenFilter";
     /** The maximum shingle size. Default and minimum value is 2. */
@@ -7143,7 +6756,7 @@ export declare type ShingleTokenFilter = BaseTokenFilter & {
     tokenSeparator?: string;
     /** The string to insert for each position at which there is no token. Default is an underscore ("_"). */
     filterToken?: string;
-};
+}
 
 /** Base type for similarity algorithms. Similarity algorithms are used to calculate scores that tie queries to documents. The higher the score, the more relevant the document is to that specific query. Those scores are used to rank the search results. */
 export declare interface Similarity {
@@ -7182,14 +6795,25 @@ export declare interface SimpleField {
      */
     key?: boolean;
     /**
-     * A value indicating whether the field can be returned in a search result. You can
+     * A value indicating whether the field can be returned in a search result. You can disable this
      * option if you want to use a field (for example, margin) as a filter, sorting, or scoring
-     * mechanism but do not want the field to be visible to the end user. This property must be
-     * for key fields. This property can be changed on existing fields.
-     *
-     *
+     * mechanism but do not want the field to be visible to the end user. This property must be true
+     * for key fields. This property can be changed on existing fields. Enabling this property does
+     * not cause any increase in index storage requirements. Default is true for simple fields and
+     * false for vector fields.
      */
     hidden?: boolean;
+    /**
+     * An immutable value indicating whether the field will be persisted separately on disk to be
+     * returned in a search result. You can disable this option if you don't plan to return the field
+     * contents in a search response to save on storage overhead. This can only be set during index
+     * creation and only for vector fields. This property cannot be changed for existing fields or set
+     * as false for new fields. If this property is set as false, the property `hidden` must be set as
+     * true. This property must be true or unset for key fields, for new fields, and for non-vector
+     * fields, and it must be null for complex fields. Disabling this property will reduce index
+     * storage requirements. The default is true for vector fields.
+     */
+    stored?: boolean;
     /**
      * A value indicating whether the field is full-text searchable. This means it will undergo
      * analysis such as word-breaking during indexing. If you set a searchable field to a value like
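A sketch of a vector field using the new `stored` property documented above; per those docs, `stored: false` requires `hidden: true`. The dimension count, profile name, and assumed `vectorSearchDimensions` member are illustrative:

```ts
import { SearchField } from "@azure/search-documents";

// stored: false keeps the raw vector out of retrievable storage to save space.
const embeddingField: SearchField = {
  name: "descriptionVector",
  type: "Collection(Edm.Single)",
  searchable: true,
  hidden: true,
  stored: false,
  vectorSearchDimensions: 1536, // assumed member for vector fields
  vectorSearchProfileName: "vector-profile",
};
```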
@@ -7269,29 +6893,29 @@ export declare interface SimpleField {
      * The name of the vector search algorithm configuration that specifies the algorithm and
      * optional parameters for searching the vector field.
      */
-
+    vectorSearchProfileName?: string;
 }
 
 /** A filter that stems words using a Snowball-generated stemmer. This token filter is implemented using Apache Lucene. */
-export declare type SnowballTokenFilter = BaseTokenFilter & {
+export declare interface SnowballTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.SnowballTokenFilter";
     /** The language to use. */
     language: SnowballTokenFilterLanguage;
-};
+}
 
 /** Defines values for SnowballTokenFilterLanguage. */
 export declare type SnowballTokenFilterLanguage = "armenian" | "basque" | "catalan" | "danish" | "dutch" | "english" | "finnish" | "french" | "german" | "german2" | "hungarian" | "italian" | "kp" | "lovins" | "norwegian" | "porter" | "portuguese" | "romanian" | "russian" | "spanish" | "swedish" | "turkish";
 
 /** Defines a data deletion detection policy that implements a soft-deletion strategy. It determines whether an item should be deleted based on the value of a designated 'soft delete' column. */
-export declare type SoftDeleteColumnDeletionDetectionPolicy = BaseDataDeletionDetectionPolicy & {
+export declare interface SoftDeleteColumnDeletionDetectionPolicy extends BaseDataDeletionDetectionPolicy {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy";
     /** The name of the column to use for soft-deletion detection. */
     softDeleteColumnName?: string;
     /** The marker value that identifies an item as deleted. */
     softDeleteMarkerValue?: string;
-};
+}
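A minimal sketch of attaching the soft-delete policy above to an indexer data source; the connection string, table, and column values are placeholders:

```ts
import { AzureKeyCredential, SearchIndexerClient } from "@azure/search-documents";

const indexerClient = new SearchIndexerClient(
  "https://<service>.search.windows.net",
  new AzureKeyCredential("<api-key>"),
);

async function main(): Promise<void> {
  // Rows whose IsDeleted column equals "true" are treated as deleted.
  await indexerClient.createDataSourceConnection({
    name: "hotels-sql", // hypothetical
    type: "azuresql",
    connectionString: "<connection-string>",
    container: { name: "Hotels" },
    dataDeletionDetectionPolicy: {
      odatatype: "#Microsoft.Azure.Search.SoftDeleteColumnDeletionDetectionPolicy",
      softDeleteColumnName: "IsDeleted",
      softDeleteMarkerValue: "true",
    },
  });
}

main().catch(console.error);
```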
 
 /**
  * Defines values for Speller. \
@@ -7304,7 +6928,7 @@ export declare type SoftDeleteColumnDeletionDetectionPolicy = BaseDataDeletionDe
 export declare type Speller = string;
 
 /** A skill to split a string into chunks of text. */
-export declare type SplitSkill = BaseSearchIndexerSkill & {
+export declare interface SplitSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Text.SplitSkill";
     /** A value indicating which language code to use. Default is en. */
@@ -7313,67 +6937,48 @@ export declare type SplitSkill = BaseSearchIndexerSkill & {
     textSplitMode?: TextSplitMode;
     /** The desired maximum page length. Default is 10000. */
     maxPageLength?: number;
-
-    pageOverlapLength?: number;
-    /** Only applicable when textSplitMode is set to 'pages'. If specified, the SplitSkill will discontinue splitting after processing the first 'maximumPagesToTake' pages, in order to improve performance when only a few initial pages are needed from each document. */
-    maximumPagesToTake?: number;
-};
+}
 
-/**
- * Defines values for SplitSkillLanguage. \
- * {@link KnownSplitSkillLanguage} can be used interchangeably with SplitSkillLanguage,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **da**: Danish \
- * **de**: German \
- * **en**: English \
- * **es**: Spanish \
- * **fi**: Finnish \
- * **fr**: French \
- * **it**: Italian \
- * **ko**: Korean \
- * **pt**: Portuguese
- */
-export declare type SplitSkillLanguage = string;
+export declare type SplitSkillLanguage = "am" | "bs" | "cs" | "da" | "de" | "en" | "es" | "et" | "fi" | "fr" | "he" | "hi" | "hr" | "hu" | "id" | "is" | "it" | "ja" | "ko" | "lv" | "nb" | "nl" | "pl" | "pt" | "pt-br" | "ru" | "sk" | "sl" | "sr" | "sv" | "tr" | "ur" | "zh";
 
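A minimal sketch of the `SplitSkill` shape after this change (note `pageOverlapLength` and `maximumPagesToTake` are removed in this version); the mappings are hypothetical:

```ts
import { SplitSkill } from "@azure/search-documents";

// Chunk cracked document content into pages of at most 4000 characters.
const splitSkill: SplitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  defaultLanguageCode: "en",
  textSplitMode: "pages",
  maxPageLength: 4000,
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};
```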
 /** Defines a data change detection policy that captures changes using the Integrated Change Tracking feature of Azure SQL Database. */
-export declare type SqlIntegratedChangeTrackingPolicy = BaseDataChangeDetectionPolicy & {
+export declare interface SqlIntegratedChangeTrackingPolicy extends BaseDataChangeDetectionPolicy {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.SqlIntegratedChangeTrackingPolicy";
-};
+}
 
 /** Provides the ability to override other stemming filters with custom dictionary-based stemming. Any dictionary-stemmed terms will be marked as keywords so that they will not be stemmed with stemmers down the chain. Must be placed before any stemming filters. This token filter is implemented using Apache Lucene. */
-export declare type StemmerOverrideTokenFilter = BaseTokenFilter & {
+export declare interface StemmerOverrideTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.StemmerOverrideTokenFilter";
     /** A list of stemming rules in the following format: "word => stem", for example: "ran => run". */
     rules: string[];
-};
+}
 
 /** Language specific stemming filter. This token filter is implemented using Apache Lucene. */
-export declare type StemmerTokenFilter = BaseTokenFilter & {
+export declare interface StemmerTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.StemmerTokenFilter";
     /** The language to use. */
     language: StemmerTokenFilterLanguage;
-};
+}
 
 /** Defines values for StemmerTokenFilterLanguage. */
 export declare type StemmerTokenFilterLanguage = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "dutchKp" | "english" | "lightEnglish" | "minimalEnglish" | "possessiveEnglish" | "porter2" | "lovins" | "finnish" | "lightFinnish" | "french" | "lightFrench" | "minimalFrench" | "galician" | "minimalGalician" | "german" | "german2" | "lightGerman" | "minimalGerman" | "greek" | "hindi" | "hungarian" | "lightHungarian" | "indonesian" | "irish" | "italian" | "lightItalian" | "sorani" | "latvian" | "norwegian" | "lightNorwegian" | "minimalNorwegian" | "lightNynorsk" | "minimalNynorsk" | "portuguese" | "lightPortuguese" | "minimalPortuguese" | "portugueseRslp" | "romanian" | "russian" | "lightRussian" | "spanish" | "lightSpanish" | "swedish" | "lightSwedish" | "turkish";
 
 /** Divides text at non-letters; Applies the lowercase and stopword token filters. This analyzer is implemented using Apache Lucene. */
-export declare type StopAnalyzer = BaseLexicalAnalyzer & {
+export declare interface StopAnalyzer extends BaseLexicalAnalyzer {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.StopAnalyzer";
     /** A list of stopwords. */
     stopwords?: string[];
-};
+}
 
 /** Defines values for StopwordsList. */
 export declare type StopwordsList = "arabic" | "armenian" | "basque" | "brazilian" | "bulgarian" | "catalan" | "czech" | "danish" | "dutch" | "english" | "finnish" | "french" | "galician" | "german" | "greek" | "hindi" | "hungarian" | "indonesian" | "irish" | "italian" | "latvian" | "norwegian" | "persian" | "portuguese" | "romanian" | "russian" | "sorani" | "spanish" | "swedish" | "thai" | "turkish";
 
 /** Removes stop words from a token stream. This token filter is implemented using Apache Lucene. */
-export declare type StopwordsTokenFilter = BaseTokenFilter & {
+export declare interface StopwordsTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.StopwordsTokenFilter";
     /** The list of stopwords. This property and the stopwords list property cannot both be set. */
@@ -7384,7 +6989,7 @@ export declare type StopwordsTokenFilter = BaseTokenFilter & {
     ignoreCase?: boolean;
     /** A value indicating whether to ignore the last search term if it's a stop word. Default is true. */
     removeTrailingStopWords?: boolean;
-};
+}
 
 /**
  * Response containing suggestion query results from an index.
@@ -7512,7 +7117,7 @@ export declare interface SynonymMap {
 }
 
 /** Matches single or multi-word synonyms in a token stream. This token filter is implemented using Apache Lucene. */
-export declare type SynonymTokenFilter = BaseTokenFilter & {
+export declare interface SynonymTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.SynonymTokenFilter";
     /** A list of synonyms in following one of two formats: 1. incredible, unbelievable, fabulous => amazing - all terms on the left side of => symbol will be replaced with all terms on its right side; 2. incredible, unbelievable, fabulous, amazing - comma separated list of equivalent words. Set the expand option to change how this list is interpreted. */
@@ -7521,15 +7126,15 @@ export declare type SynonymTokenFilter = BaseTokenFilter & {
     ignoreCase?: boolean;
     /** A value indicating whether all words in the list of synonyms (if => notation is not used) will map to one another. If true, all words in the list of synonyms (if => notation is not used) will map to one another. The following list: incredible, unbelievable, fabulous, amazing is equivalent to: incredible, unbelievable, fabulous, amazing => incredible, unbelievable, fabulous, amazing. If false, the following list: incredible, unbelievable, fabulous, amazing will be equivalent to: incredible, unbelievable, fabulous, amazing => incredible. Default is true. */
     expand?: boolean;
-};
+}
|
|
7525
7130
|
|
|
7526
7131
|
/** Defines a function that boosts scores of documents with string values matching a given list of tags. */
|
|
7527
|
-
export declare
|
|
7132
|
+
export declare interface TagScoringFunction extends BaseScoringFunction {
|
|
7528
7133
|
/** Polymorphic discriminator, which specifies the different types this object can be */
|
|
7529
7134
|
type: "tag";
|
|
7530
7135
|
/** Parameter values for the tag scoring function. */
|
|
7531
7136
|
parameters: TagScoringParameters;
|
|
7532
|
-
}
|
|
7137
|
+
}
|
|
7533
7138
|
|
|
7534
7139
|
/** Provides parameter values to a tag scoring function. */
|
|
7535
7140
|
export declare interface TagScoringParameters {
|
|
@@ -7537,18 +7142,10 @@ export declare interface TagScoringParameters {
|
|
|
7537
7142
|
tagsParameter: string;
|
|
7538
7143
|
}
|
|
7539
7144
|
|
|
7540
|
-
|
|
7541
|
-
* Defines values for TextSplitMode. \
|
|
7542
|
-
* {@link KnownTextSplitMode} can be used interchangeably with TextSplitMode,
|
|
7543
|
-
* this enum contains the known values that the service supports.
|
|
7544
|
-
* ### Known values supported by the service
|
|
7545
|
-
* **pages**: Split the text into individual pages. \
|
|
7546
|
-
* **sentences**: Split the text into individual sentences.
|
|
7547
|
-
*/
|
|
7548
|
-
export declare type TextSplitMode = string;
|
|
7145
|
+
export declare type TextSplitMode = "pages" | "sentences";
|
|
7549
7146
|
|
|
 /** A skill to translate text from one language to another. */
-export declare type TextTranslationSkill = BaseSearchIndexerSkill & {
+export declare interface TextTranslationSkill extends BaseSearchIndexerSkill {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Skills.Text.TranslationSkill";
     /** The language code to translate documents into for documents that don't specify the to language explicitly. */
@@ -7557,87 +7154,9 @@ export declare type TextTranslationSkill = BaseSearchIndexerSkill & {
     defaultFromLanguageCode?: TextTranslationSkillLanguage;
     /** The language code to translate documents from when neither the fromLanguageCode input nor the defaultFromLanguageCode parameter are provided, and the automatic language detection is unsuccessful. Default is en. */
     suggestedFrom?: TextTranslationSkillLanguage;
-};
+}
 
-/**
- * Defines values for TextTranslationSkillLanguage. \
- * {@link KnownTextTranslationSkillLanguage} can be used interchangeably with TextTranslationSkillLanguage,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **af**: Afrikaans \
- * **ar**: Arabic \
- * **bn**: Bangla \
- * **bs**: Bosnian (Latin) \
- * **bg**: Bulgarian \
- * **yue**: Cantonese (Traditional) \
- * **ca**: Catalan \
- * **zh-Hans**: Chinese Simplified \
- * **zh-Hant**: Chinese Traditional \
- * **hr**: Croatian \
- * **cs**: Czech \
- * **da**: Danish \
- * **nl**: Dutch \
- * **en**: English \
- * **et**: Estonian \
- * **fj**: Fijian \
- * **fil**: Filipino \
- * **fi**: Finnish \
- * **fr**: French \
- * **de**: German \
- * **el**: Greek \
- * **ht**: Haitian Creole \
- * **he**: Hebrew \
- * **hi**: Hindi \
- * **mww**: Hmong Daw \
- * **hu**: Hungarian \
- * **is**: Icelandic \
- * **id**: Indonesian \
- * **it**: Italian \
- * **ja**: Japanese \
- * **sw**: Kiswahili \
- * **tlh**: Klingon \
- * **tlh-Latn**: Klingon (Latin script) \
- * **tlh-Piqd**: Klingon (Klingon script) \
- * **ko**: Korean \
- * **lv**: Latvian \
- * **lt**: Lithuanian \
- * **mg**: Malagasy \
- * **ms**: Malay \
- * **mt**: Maltese \
- * **nb**: Norwegian \
- * **fa**: Persian \
- * **pl**: Polish \
- * **pt**: Portuguese \
- * **pt-br**: Portuguese (Brazil) \
- * **pt-PT**: Portuguese (Portugal) \
- * **otq**: Queretaro Otomi \
- * **ro**: Romanian \
- * **ru**: Russian \
- * **sm**: Samoan \
- * **sr-Cyrl**: Serbian (Cyrillic) \
- * **sr-Latn**: Serbian (Latin) \
- * **sk**: Slovak \
- * **sl**: Slovenian \
- * **es**: Spanish \
- * **sv**: Swedish \
- * **ty**: Tahitian \
- * **ta**: Tamil \
- * **te**: Telugu \
- * **th**: Thai \
- * **to**: Tongan \
- * **tr**: Turkish \
- * **uk**: Ukrainian \
- * **ur**: Urdu \
- * **vi**: Vietnamese \
- * **cy**: Welsh \
- * **yua**: Yucatec Maya \
- * **ga**: Irish \
- * **kn**: Kannada \
- * **mi**: Maori \
- * **ml**: Malayalam \
- * **pa**: Punjabi
- */
-export declare type TextTranslationSkillLanguage = string;
+export declare type TextTranslationSkillLanguage = "af" | "ar" | "bn" | "bs" | "bg" | "yue" | "ca" | "zh-Hans" | "zh-Hant" | "hr" | "cs" | "da" | "nl" | "en" | "et" | "fj" | "fil" | "fi" | "fr" | "de" | "el" | "ht" | "he" | "hi" | "mww" | "hu" | "is" | "id" | "it" | "ja" | "sw" | "tlh" | "tlh-Latn" | "tlh-Piqd" | "ko" | "lv" | "lt" | "mg" | "ms" | "mt" | "nb" | "fa" | "pl" | "pt" | "pt-br" | "pt-PT" | "otq" | "ro" | "ru" | "sm" | "sr-Cyrl" | "sr-Latn" | "sk" | "sl" | "es" | "sv" | "ty" | "ta" | "te" | "th" | "to" | "tr" | "uk" | "ur" | "vi" | "cy" | "yua" | "ga" | "kn" | "mi" | "ml" | "pa";
 
 /** Defines weights on index fields for which matches should boost scoring in search queries. */
 export declare interface TextWeights {
@@ -7698,39 +7217,36 @@ export declare type TokenFilter = AsciiFoldingTokenFilter | CjkBigramTokenFilter
 export declare type TokenFilterName = string;
 
 /** Truncates the terms to a specific length. This token filter is implemented using Apache Lucene. */
-export declare type TruncateTokenFilter = BaseTokenFilter & {
+export declare interface TruncateTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.TruncateTokenFilter";
     /** The length at which terms will be truncated. Default and maximum is 300. */
     length?: number;
-};
+}
 
 /** Tokenizes urls and emails as one token. This tokenizer is implemented using Apache Lucene. */
-export declare type UaxUrlEmailTokenizer = BaseLexicalTokenizer & {
+export declare interface UaxUrlEmailTokenizer extends BaseLexicalTokenizer {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.UaxUrlEmailTokenizer";
     /** The maximum token length. Default is 255. Tokens longer than the maximum length are split. The maximum token length that can be used is 300 characters. */
     maxTokenLength?: number;
-};
+}
 
 export declare type UnionToIntersection<Union> = (Union extends unknown ? (_: Union) => unknown : never) extends (_: infer I) => unknown ? I : never;
 
 /** Filters out tokens with same text as the previous token. This token filter is implemented using Apache Lucene. */
-export declare type UniqueTokenFilter = BaseTokenFilter & {
+export declare interface UniqueTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.UniqueTokenFilter";
     /** A value indicating whether to remove duplicates only at the same position. Default is false. */
     onlyOnSamePosition?: boolean;
-};
+}
 
 /**
  * Options for the upload documents operation.
  */
 export declare type UploadDocumentsOptions = IndexDocumentsOptions;
 
-/**
- * Determines whether or not filters are applied before or after the vector search is performed.
- */
 export declare type VectorFilterMode = "postFilter" | "preFilter";
 
 /** The query parameters to use for vector search when a text value that needs to be vectorized is provided. */
@@ -7741,8 +7257,16 @@ export declare interface VectorizableTextQuery<TModel extends object> extends Ba
     text?: string;
 }
 
+/** The query parameters to use for vector search when a raw vector value is provided. */
+export declare interface VectorizedQuery<TModel extends object> extends BaseVectorQuery<TModel> {
+    /** Polymorphic discriminator, which specifies the different types this object can be */
+    kind: "vector";
+    /** The vector representation of a search query. */
+    vector: number[];
+}
+
 /** The query parameters for vector and hybrid search queries. */
-export declare type VectorQuery<TModel extends object> =
+export declare type VectorQuery<TModel extends object> = VectorizedQuery<TModel> | VectorizableTextQuery<TModel>;
 
 export declare type VectorQueryKind = "vector" | "text";
 
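A minimal sketch of the new `VectorizedQuery` shape; the embedding values are truncated placeholders, the field name is hypothetical, and the `fields`/`kNearestNeighborsCount` members are assumed to come from `BaseVectorQuery`:

```ts
import { VectorizedQuery } from "@azure/search-documents";

interface Hotel {
  hotelId: string;
  descriptionVector: number[];
}

// The embedding would come from an embedding model; values here are stubs.
const vectorQuery: VectorizedQuery<Hotel> = {
  kind: "vector",
  vector: [0.013, -0.024, 0.187], // truncated placeholder embedding
  fields: ["descriptionVector"], // assumed BaseVectorQuery member
  kNearestNeighborsCount: 3, // assumed BaseVectorQuery member
};
```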
@@ -7754,24 +7278,69 @@ export declare interface VectorSearch {
     algorithms?: VectorSearchAlgorithmConfiguration[];
     /** Contains configuration options on how to vectorize text vector queries. */
     vectorizers?: VectorSearchVectorizer[];
+    /**
+     * Contains configuration options specific to the compression method used during indexing or
+     * querying.
+     */
+    compressions?: VectorSearchCompressionConfiguration[];
 }
 
 /** Contains configuration options specific to the algorithm used during indexing and/or querying. */
-export declare type VectorSearchAlgorithmConfiguration =
+export declare type VectorSearchAlgorithmConfiguration = HnswAlgorithmConfiguration | ExhaustiveKnnAlgorithmConfiguration;
 
 export declare type VectorSearchAlgorithmKind = "hnsw" | "exhaustiveKnn";
 
-/** The similarity metric to use for vector comparisons. */
 export declare type VectorSearchAlgorithmMetric = "cosine" | "euclidean" | "dotProduct";
 
+/**
+ * Contains configuration options specific to the compression method used during indexing or
+ * querying.
+ */
+export declare type VectorSearchCompressionConfiguration = ScalarQuantizationCompressionConfiguration;
+
+/**
+ * Defines values for VectorSearchCompressionKind. \
+ * {@link KnownVectorSearchCompressionKind} can be used interchangeably with VectorSearchCompressionKind,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **scalarQuantization**: Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size.
+ */
+export declare type VectorSearchCompressionKind = string;
+
+/**
+ * Defines values for VectorSearchCompressionTargetDataType. \
+ * {@link KnownVectorSearchCompressionTargetDataType} can be used interchangeably with VectorSearchCompressionTargetDataType,
+ * this enum contains the known values that the service supports.
+ * ### Known values supported by the service
+ * **int8**
+ */
+export declare type VectorSearchCompressionTargetDataType = string;
+
+/**
+ * Defines options for vector search queries
+ */
+export declare interface VectorSearchOptions<TModel extends object> {
+    /**
+     * The query parameters for vector, hybrid, and multi-vector search queries.
+     */
+    queries: VectorQuery<TModel>[];
+    /**
+     * Determines whether or not filters are applied before or after the vector search is performed.
+     * Default is 'preFilter'.
+     */
+    filterMode?: VectorFilterMode;
+}
+
7767
7334
|
/** Defines a combination of configurations to use with vector search. */
|
|
7768
7335
|
export declare interface VectorSearchProfile {
|
|
7769
7336
|
/** The name to associate with this particular vector search profile. */
|
|
7770
7337
|
name: string;
|
|
7771
7338
|
/** The name of the vector search algorithm configuration that specifies the algorithm and optional parameters. */
|
|
7772
|
-
|
|
7339
|
+
algorithmConfigurationName: string;
|
|
7773
7340
|
/** The name of the kind of vectorization method being configured for use with vector search. */
|
|
7774
7341
|
vectorizer?: string;
|
|
7342
|
+
/** The name of the compression method configuration that specifies the compression method and optional parameters. */
|
|
7343
|
+
compressionConfigurationName?: string;
|
|
7775
7344
|
}
|
|
7776
7345
|
|
|
7777
7346
|
/** Contains configuration options on how to vectorize text vector queries. */
|
|
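The new compression surface composes with the existing algorithm configuration through a vector search profile. A hedged sketch of wiring the pieces together; all names are illustrative, and it assumes `VectorSearch` also carries a `profiles` array of `VectorSearchProfile` entries (that member sits outside the hunk shown above):

```ts
import type { VectorSearch } from "@azure/search-documents";

const vectorSearch: VectorSearch = {
  algorithms: [{ name: "hnsw-1", kind: "hnsw" }],
  // Scalar quantization is currently the only compression configuration kind.
  compressions: [{ name: "sq-1", kind: "scalarQuantization" }],
  // Assumption: `profiles` exists on VectorSearch (not visible in this hunk).
  profiles: [
    {
      name: "profile-1",
      algorithmConfigurationName: "hnsw-1",
      compressionConfigurationName: "sq-1", // new in this release
    },
  ],
};
```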
@@ -7779,20 +7348,7 @@ export declare type VectorSearchVectorizer = AzureOpenAIVectorizer | CustomVectorizer;
 
 export declare type VectorSearchVectorizerKind = "azureOpenAI" | "customWebApi";
 
-/**
- * Defines values for VisualFeature. \
- * {@link KnownVisualFeature} can be used interchangeably with VisualFeature,
- * this enum contains the known values that the service supports.
- * ### Known values supported by the service
- * **adult**: Visual features recognized as adult persons. \
- * **brands**: Visual features recognized as commercial brands. \
- * **categories**: Categories. \
- * **description**: Description. \
- * **faces**: Visual features recognized as people faces. \
- * **objects**: Visual features recognized as objects. \
- * **tags**: Tags.
- */
-export declare type VisualFeature = string;
+export declare type VisualFeature = "adult" | "brands" | "categories" | "description" | "faces" | "objects" | "tags";
 
 /**
  * A skill that can call a Web API endpoint, allowing you to extend a skillset by having it call
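Tightening `VisualFeature` from `string` to a closed literal union moves validation of feature names from the service to the compiler. A small sketch of the effect (assuming the consuming field, such as a skill's `visualFeatures` array, is typed as `VisualFeature[]`):

```ts
import type { VisualFeature } from "@azure/search-documents";

// Only the seven known feature names type-check now.
const features: VisualFeature[] = ["description", "tags", "objects"];

// A typo becomes a compile-time error instead of a runtime service error:
// const bad: VisualFeature[] = ["descriptions"];
//                               ~~~~~~~~~~~~~~ not assignable to VisualFeature
```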
@@ -7848,7 +7404,7 @@ export declare interface WebApiSkill extends BaseSearchIndexerSkill {
 }
 
 /** Splits words into subwords and performs optional transformations on subword groups. This token filter is implemented using Apache Lucene. */
-export declare type WordDelimiterTokenFilter = BaseTokenFilter & {
+export declare interface WordDelimiterTokenFilter extends BaseTokenFilter {
     /** Polymorphic discriminator, which specifies the different types this object can be */
     odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter";
     /** A value indicating whether to generate part words. If set, causes parts of words to be generated; for example "AzureSearch" becomes "Azure" "Search". Default is true. */
@@ -7871,6 +7427,6 @@ export declare type WordDelimiterTokenFilter = BaseTokenFilter & {
     stemEnglishPossessive?: boolean;
     /** A list of tokens to protect from being delimited. */
     protectedWords?: string[];
-};
+}
 
 export { }
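End to end, the reshaped token filter types register on an index the same way as before. A hedged sketch with placeholder service, key, index, and filter names, using the standard `SearchIndexClient.createIndex` surface and its `tokenFilters` array:

```ts
import {
  AzureKeyCredential,
  SearchIndexClient,
  type WordDelimiterTokenFilter,
} from "@azure/search-documents";

const filter: WordDelimiterTokenFilter = {
  odatatype: "#Microsoft.Azure.Search.WordDelimiterTokenFilter",
  name: "split-subwords",
  generatePartWords: true,         // e.g. "wi-fi" becomes "wi", "fi"
  protectedWords: ["AzureSearch"], // keep this token intact
};

const indexClient = new SearchIndexClient(
  "https://<service>.search.windows.net", // placeholder endpoint
  new AzureKeyCredential("<api-key>")     // placeholder key
);

await indexClient.createIndex({
  name: "docs", // placeholder index, trimmed to a single key field
  fields: [{ name: "id", type: "Edm.String", key: true }],
  tokenFilters: [filter],
});
```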