@azure/search-documents 12.1.0-beta.2 → 12.2.0-beta.1
- package/README.md +1 -1
- package/dist/index.js +405 -258
- package/dist/index.js.map +1 -1
- package/dist-esm/src/base64.browser.js +1 -1
- package/dist-esm/src/base64.browser.js.map +1 -1
- package/dist-esm/src/base64.js +1 -1
- package/dist-esm/src/base64.js.map +1 -1
- package/dist-esm/src/errorModels.js +1 -1
- package/dist-esm/src/errorModels.js.map +1 -1
- package/dist-esm/src/generated/data/models/index.js +17 -17
- package/dist-esm/src/generated/data/models/index.js.map +1 -1
- package/dist-esm/src/generated/data/models/mappers.js +153 -48
- package/dist-esm/src/generated/data/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/data/searchClient.js +1 -1
- package/dist-esm/src/generated/data/searchClient.js.map +1 -1
- package/dist-esm/src/generated/service/models/index.js +43 -23
- package/dist-esm/src/generated/service/models/index.js.map +1 -1
- package/dist-esm/src/generated/service/models/mappers.js +89 -33
- package/dist-esm/src/generated/service/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/service/searchServiceClient.js +1 -1
- package/dist-esm/src/generated/service/searchServiceClient.js.map +1 -1
- package/dist-esm/src/geographyPoint.js +1 -1
- package/dist-esm/src/geographyPoint.js.map +1 -1
- package/dist-esm/src/index.js +3 -3
- package/dist-esm/src/index.js.map +1 -1
- package/dist-esm/src/indexDocumentsBatch.js +1 -1
- package/dist-esm/src/indexDocumentsBatch.js.map +1 -1
- package/dist-esm/src/indexModels.js +1 -1
- package/dist-esm/src/indexModels.js.map +1 -1
- package/dist-esm/src/logger.js +1 -1
- package/dist-esm/src/logger.js.map +1 -1
- package/dist-esm/src/odata.js +1 -1
- package/dist-esm/src/odata.js.map +1 -1
- package/dist-esm/src/odataMetadataPolicy.js +1 -1
- package/dist-esm/src/odataMetadataPolicy.js.map +1 -1
- package/dist-esm/src/searchApiKeyCredentialPolicy.js +1 -1
- package/dist-esm/src/searchApiKeyCredentialPolicy.js.map +1 -1
- package/dist-esm/src/searchAudience.js +1 -1
- package/dist-esm/src/searchAudience.js.map +1 -1
- package/dist-esm/src/searchClient.js +1 -1
- package/dist-esm/src/searchClient.js.map +1 -1
- package/dist-esm/src/searchIndexClient.js +1 -1
- package/dist-esm/src/searchIndexClient.js.map +1 -1
- package/dist-esm/src/searchIndexerClient.js +1 -1
- package/dist-esm/src/searchIndexerClient.js.map +1 -1
- package/dist-esm/src/searchIndexingBufferedSender.js +1 -1
- package/dist-esm/src/searchIndexingBufferedSender.js.map +1 -1
- package/dist-esm/src/serialization.js +1 -1
- package/dist-esm/src/serialization.js.map +1 -1
- package/dist-esm/src/serviceModels.js +1 -1
- package/dist-esm/src/serviceModels.js.map +1 -1
- package/dist-esm/src/serviceUtils.js +58 -98
- package/dist-esm/src/serviceUtils.js.map +1 -1
- package/dist-esm/src/synonymMapHelper.browser.js +1 -1
- package/dist-esm/src/synonymMapHelper.browser.js.map +1 -1
- package/dist-esm/src/synonymMapHelper.js +1 -1
- package/dist-esm/src/synonymMapHelper.js.map +1 -1
- package/dist-esm/src/tracing.js +1 -1
- package/dist-esm/src/tracing.js.map +1 -1
- package/dist-esm/src/walk.js +1 -1
- package/dist-esm/src/walk.js.map +1 -1
- package/package.json +9 -8
- package/types/search-documents.d.ts +380 -267
- package/dist-esm/src/generatedStringLiteralUnions.js +0 -4
- package/dist-esm/src/generatedStringLiteralUnions.js.map +0 -1
@@ -5,12 +5,12 @@
  * Code generated by Microsoft (R) AutoRest Code Generator.
  * Changes may cause incorrect behavior and will be lost if the code is regenerated.
  */
-/** Known values of {@link
-export var
-(function (
-    /** Api Version '2024-
-
-})(
+/** Known values of {@link ApiVersion20240901Preview} that the service accepts. */
+export var KnownApiVersion20240901Preview;
+(function (KnownApiVersion20240901Preview) {
+    /** Api Version '2024-09-01-preview' */
+    KnownApiVersion20240901Preview["TwoThousandTwentyFour0901Preview"] = "2024-09-01-preview";
+})(KnownApiVersion20240901Preview || (KnownApiVersion20240901Preview = {}));
 /** Known values of {@link SearchIndexerDataSourceType} that the service accepts. */
 export var KnownSearchIndexerDataSourceType;
 (function (KnownSearchIndexerDataSourceType) {
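
The new KnownApiVersion20240901Preview value above can be used to pin a client to the 2024-09-01-preview service version. A minimal sketch, assuming the 12.x client options continue to accept a serviceVersion string as in earlier betas:

import { SearchClient, AzureKeyCredential } from "@azure/search-documents";

// Pin the client to the preview API version introduced in this release.
// "2024-09-01-preview" is the literal behind KnownApiVersion20240901Preview.
const client = new SearchClient(
  "https://<service-name>.search.windows.net",
  "<index-name>",
  new AzureKeyCredential("<admin-or-query-key>"),
  { serviceVersion: "2024-09-01-preview" }
);
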
@@ -310,7 +310,7 @@ export var KnownLexicalAnalyzerName;
     KnownLexicalAnalyzerName["ViMicrosoft"] = "vi.microsoft";
     /** Standard Lucene analyzer. */
     KnownLexicalAnalyzerName["StandardLucene"] = "standard.lucene";
-    /** Standard ASCII Folding Lucene analyzer. See https:\//
+    /** Standard ASCII Folding Lucene analyzer. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#Analyzers */
     KnownLexicalAnalyzerName["StandardAsciiFoldingLucene"] = "standardasciifolding.lucene";
     /** Treats the entire content of a field as a single token. This is useful for data like zip codes, ids, and some product names. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/KeywordAnalyzer.html */
     KnownLexicalAnalyzerName["Keyword"] = "keyword";
@@ -368,6 +368,8 @@ export var KnownVectorSearchCompressionKind;
 (function (KnownVectorSearchCompressionKind) {
     /** Scalar Quantization, a type of compression method. In scalar quantization, the original vectors values are compressed to a narrower type by discretizing and representing each component of a vector using a reduced set of quantized values, thereby reducing the overall data size. */
     KnownVectorSearchCompressionKind["ScalarQuantization"] = "scalarQuantization";
+    /** Binary Quantization, a type of compression method. In binary quantization, the original vectors values are compressed to the narrower binary type by discretizing and representing each component of a vector using binary values, thereby reducing the overall data size. */
+    KnownVectorSearchCompressionKind["BinaryQuantization"] = "binaryQuantization";
 })(KnownVectorSearchCompressionKind || (KnownVectorSearchCompressionKind = {}));
 /** Known values of {@link TokenFilterName} that the service accepts. */
 export var KnownTokenFilterName;
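
The new binaryQuantization compression kind is referenced from an index's vector search configuration. A sketch, assuming the compressions/profiles shape and property names (compressionName, algorithmConfigurationName, vectorSearchProfileName) carry over from the scalar quantization support in earlier 12.x previews:

import { SearchIndexClient, AzureKeyCredential } from "@azure/search-documents";

const indexClient = new SearchIndexClient(
  "https://<service-name>.search.windows.net",
  new AzureKeyCredential("<admin-key>")
);

await indexClient.createOrUpdateIndex({
  name: "docs-vector",
  fields: [
    { name: "id", type: "Edm.String", key: true },
    {
      name: "embedding",
      type: "Collection(Edm.Single)",
      searchable: true,
      vectorSearchDimensions: 1536,
      vectorSearchProfileName: "profile-bq",
    },
  ],
  vectorSearch: {
    algorithms: [{ name: "hnsw-1", kind: "hnsw" }],
    // "binaryQuantization" is the literal behind KnownVectorSearchCompressionKind.BinaryQuantization.
    compressions: [{ compressionName: "bq-1", kind: "binaryQuantization" }],
    profiles: [
      { name: "profile-bq", algorithmConfigurationName: "hnsw-1", compressionName: "bq-1" },
    ],
  },
});
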
@@ -426,7 +428,7 @@ export var KnownTokenFilterName;
     KnownTokenFilterName["Snowball"] = "snowball";
     /** Normalizes the Unicode representation of Sorani text. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/ckb\/SoraniNormalizationFilter.html */
     KnownTokenFilterName["SoraniNormalization"] = "sorani_normalization";
-    /** Language specific stemming filter. See https:\//
+    /** Language specific stemming filter. See https:\//learn.microsoft.com\/rest\/api\/searchservice\/Custom-analyzers-in-Azure-Search#TokenFilters */
     KnownTokenFilterName["Stemmer"] = "stemmer";
     /** Removes stop words from a token stream. See http:\//lucene.apache.org\/core\/4_10_3\/analyzers-common\/org\/apache\/lucene\/analysis\/core\/StopFilter.html */
     KnownTokenFilterName["Stopwords"] = "stopwords";
@@ -459,12 +461,12 @@ export var KnownVectorSearchAlgorithmMetric;
     /** Only applicable to bit-packed binary data types. Determines dissimilarity by counting differing positions in binary vectors. The fewer differences, the closer the similarity. */
     KnownVectorSearchAlgorithmMetric["Hamming"] = "hamming";
 })(KnownVectorSearchAlgorithmMetric || (KnownVectorSearchAlgorithmMetric = {}));
-/** Known values of {@link
-export var
-(function (
+/** Known values of {@link VectorSearchCompressionTarget} that the service accepts. */
+export var KnownVectorSearchCompressionTarget;
+(function (KnownVectorSearchCompressionTarget) {
     /** Int8 */
-
-})(
+    KnownVectorSearchCompressionTarget["Int8"] = "int8";
+})(KnownVectorSearchCompressionTarget || (KnownVectorSearchCompressionTarget = {}));
 /** Known values of {@link AzureOpenAIModelName} that the service accepts. */
 export var KnownAzureOpenAIModelName;
 (function (KnownAzureOpenAIModelName) {
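
The VectorSearchCompressionTarget enum above supplies the "int8" value used when scalar quantization narrows vector components. A fragment sketch; the parameters/quantizedDataType property names mirror the REST payload and are assumptions for this SDK surface:

// A scalar quantization entry for vectorSearch.compressions, targeting int8.
const scalarCompression = {
  compressionName: "sq-1",
  kind: "scalarQuantization", // KnownVectorSearchCompressionKind.ScalarQuantization
  parameters: {
    quantizedDataType: "int8", // KnownVectorSearchCompressionTarget.Int8
  },
};
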
@@ -474,8 +476,6 @@ export var KnownAzureOpenAIModelName;
     KnownAzureOpenAIModelName["TextEmbedding3Large"] = "text-embedding-3-large";
     /** TextEmbedding3Small */
     KnownAzureOpenAIModelName["TextEmbedding3Small"] = "text-embedding-3-small";
-    /** Experimental */
-    KnownAzureOpenAIModelName["Experimental"] = "experimental";
 })(KnownAzureOpenAIModelName || (KnownAzureOpenAIModelName = {}));
 /** Known values of {@link AIStudioModelCatalogName} that the service accepts. */
 export var KnownAIStudioModelCatalogName;
@@ -873,18 +873,18 @@ export var KnownOcrSkillLanguage;
     /** Unknown (All) */
     KnownOcrSkillLanguage["Unk"] = "unk";
 })(KnownOcrSkillLanguage || (KnownOcrSkillLanguage = {}));
-/** Known values of {@link
-export var
-(function (
+/** Known values of {@link OcrLineEnding} that the service accepts. */
+export var KnownOcrLineEnding;
+(function (KnownOcrLineEnding) {
     /** Lines are separated by a single space character. */
-
+    KnownOcrLineEnding["Space"] = "space";
     /** Lines are separated by a carriage return ('\r') character. */
-
+    KnownOcrLineEnding["CarriageReturn"] = "carriageReturn";
     /** Lines are separated by a single line feed ('\n') character. */
-
+    KnownOcrLineEnding["LineFeed"] = "lineFeed";
     /** Lines are separated by a carriage return and a line feed ('\r\n') character. */
-
-})(
+    KnownOcrLineEnding["CarriageReturnLineFeed"] = "carriageReturnLineFeed";
+})(KnownOcrLineEnding || (KnownOcrLineEnding = {}));
 /** Known values of {@link ImageAnalysisSkillLanguage} that the service accepts. */
 export var KnownImageAnalysisSkillLanguage;
 (function (KnownImageAnalysisSkillLanguage) {
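
The KnownOcrLineEnding values above control how recognized OCR lines are joined. A sketch of an OCR skill definition; the odatatype discriminator and lineEnding property follow the REST shape for OcrSkill and should be treated as assumptions here:

// OCR skill that joins recognized lines with a single space.
const ocrSkill = {
  odatatype: "#Microsoft.Skills.Vision.OcrSkill",
  context: "/document/normalized_images/*",
  lineEnding: "space", // KnownOcrLineEnding.Space; also carriageReturn, lineFeed, carriageReturnLineFeed
  inputs: [{ name: "image", source: "/document/normalized_images/*" }],
  outputs: [{ name: "text", targetName: "text" }],
};
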
@@ -1207,6 +1207,26 @@ export var KnownTextSplitMode;
     /** Split the text into individual sentences. */
     KnownTextSplitMode["Sentences"] = "sentences";
 })(KnownTextSplitMode || (KnownTextSplitMode = {}));
+/** Known values of {@link SplitSkillUnit} that the service accepts. */
+export var KnownSplitSkillUnit;
+(function (KnownSplitSkillUnit) {
+    /** The length will be measured by character. */
+    KnownSplitSkillUnit["Characters"] = "characters";
+    /** The length will be measured by an AzureOpenAI tokenizer from the tiktoken library. */
+    KnownSplitSkillUnit["AzureOpenAITokens"] = "azureOpenAITokens";
+})(KnownSplitSkillUnit || (KnownSplitSkillUnit = {}));
+/** Known values of {@link SplitSkillEncoderModelName} that the service accepts. */
+export var KnownSplitSkillEncoderModelName;
+(function (KnownSplitSkillEncoderModelName) {
+    /** Refers to a base model trained with a 50,000 token vocabulary, often used in general natural language processing tasks. */
+    KnownSplitSkillEncoderModelName["R50KBase"] = "r50k_base";
+    /** A base model with a 50,000 token vocabulary, optimized for prompt-based tasks. */
+    KnownSplitSkillEncoderModelName["P50KBase"] = "p50k_base";
+    /** Similar to p50k_base but fine-tuned for editing or rephrasing tasks with a 50,000 token vocabulary. */
+    KnownSplitSkillEncoderModelName["P50KEdit"] = "p50k_edit";
+    /** A base model with a 100,000 token vocabulary. */
+    KnownSplitSkillEncoderModelName["CL100KBase"] = "cl100k_base";
+})(KnownSplitSkillEncoderModelName || (KnownSplitSkillEncoderModelName = {}));
 /** Known values of {@link CustomEntityLookupSkillLanguage} that the service accepts. */
 export var KnownCustomEntityLookupSkillLanguage;
 (function (KnownCustomEntityLookupSkillLanguage) {
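
The new KnownSplitSkillUnit and KnownSplitSkillEncoderModelName values above let a split skill measure chunk length in Azure OpenAI tokens instead of characters. A sketch; the unit, azureOpenAITokenizerParameters, and encoderModelName property names follow the 2024-09-01-preview REST payload for SplitSkill and are assumptions for this SDK surface:

// Split skill that chunks text by tokens using the cl100k_base encoder.
const splitSkill = {
  odatatype: "#Microsoft.Skills.Text.SplitSkill",
  context: "/document",
  textSplitMode: "pages", // KnownTextSplitMode.Pages
  maximumPageLength: 512,
  unit: "azureOpenAITokens", // KnownSplitSkillUnit.AzureOpenAITokens
  azureOpenAITokenizerParameters: {
    encoderModelName: "cl100k_base", // KnownSplitSkillEncoderModelName.CL100KBase
  },
  inputs: [{ name: "text", source: "/document/content" }],
  outputs: [{ name: "textItems", targetName: "pages" }],
};
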