@azure/ai-language-text 1.1.0-beta.1 → 1.1.0
This diff compares the contents of the two publicly released package versions as they appear in their public registry and is provided for informational purposes only.
- package/README.md +35 -632
- package/dist/index.js +363 -1256
- package/dist/index.js.map +1 -1
- package/dist-esm/src/constants.js +1 -1
- package/dist-esm/src/constants.js.map +1 -1
- package/dist-esm/src/generated/generatedClient.js +3 -3
- package/dist-esm/src/generated/generatedClient.js.map +1 -1
- package/dist-esm/src/generated/models/index.js +48 -438
- package/dist-esm/src/generated/models/index.js.map +1 -1
- package/dist-esm/src/generated/models/mappers.js +153 -741
- package/dist-esm/src/generated/models/mappers.js.map +1 -1
- package/dist-esm/src/generated/models/parameters.js +1 -1
- package/dist-esm/src/generated/models/parameters.js.map +1 -1
- package/dist-esm/src/index.js +1 -1
- package/dist-esm/src/index.js.map +1 -1
- package/dist-esm/src/lro.js +24 -8
- package/dist-esm/src/lro.js.map +1 -1
- package/dist-esm/src/models.js +0 -9
- package/dist-esm/src/models.js.map +1 -1
- package/dist-esm/src/textAnalysisClient.js +2 -2
- package/dist-esm/src/textAnalysisClient.js.map +1 -1
- package/dist-esm/src/transforms.js +114 -22
- package/dist-esm/src/transforms.js.map +1 -1
- package/dist-esm/src/util.js +15 -0
- package/dist-esm/src/util.js.map +1 -1
- package/package.json +9 -6
- package/types/ai-language-text.d.ts +86 -982
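
For orientation, a minimal consumer of the released client might look like the sketch below; the endpoint and key are placeholders, and the `analyze` call with the `"SentimentAnalysis"` action name follows the package's documented surface.

```ts
import { AzureKeyCredential, TextAnalysisClient } from "@azure/ai-language-text";

// Placeholder endpoint/key; both come from an Azure Language resource.
const client = new TextAnalysisClient(
  "https://<resource-name>.cognitiveservices.azure.com",
  new AzureKeyCredential("<api-key>")
);

async function main(): Promise<void> {
  // "SentimentAnalysis" is one of the exported AnalyzeActionNames.
  const [result] = await client.analyze("SentimentAnalysis", ["The rooms were beautiful."]);
  if (!result.error) {
    console.log(result.sentiment, result.confidenceScores);
  }
}

main().catch(console.error);
```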
package/dist-esm/src/generated/models/parameters.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"parameters.js","sourceRoot":"","sources":["../../../../src/generated/models/parameters.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAOH,OAAO,EACL,aAAa,IAAI,mBAAmB,EACpC,oBAAoB,IAAI,0BAA0B,EACnD,MAAM,mBAAmB,CAAC;AAE3B,MAAM,CAAC,MAAM,WAAW,GAAuB;IAC7C,aAAa,EAAE,CAAC,SAAS,EAAE,aAAa,CAAC;IACzC,MAAM,EAAE;QACN,YAAY,EAAE,kBAAkB;QAChC,UAAU,EAAE,IAAI;QAChB,cAAc,EAAE,cAAc;QAC9B,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,IAAI,GAAuB;IACtC,aAAa,EAAE,MAAM;IACrB,MAAM,EAAE,mBAAmB;CAC5B,CAAC;AAEF,MAAM,CAAC,MAAM,MAAM,GAAuB;IACxC,aAAa,EAAE,QAAQ;IACvB,MAAM,EAAE;QACN,YAAY,EAAE,kBAAkB;QAChC,UAAU,EAAE,IAAI;QAChB,cAAc,EAAE,QAAQ;QACxB,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,QAAQ,GAA0B;IAC7C,aAAa,EAAE,UAAU;IACzB,MAAM,EAAE;QACN,cAAc,EAAE,UAAU;QAC1B,QAAQ,EAAE,IAAI;QACd,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;IACD,YAAY,EAAE,IAAI;CACnB,CAAC;AAEF,MAAM,CAAC,MAAM,UAAU,GAA4B;IACjD,aAAa,EAAE,YAAY;IAC3B,MAAM,EAAE;QACN,YAAY,EAAE,
+
{"version":3,"file":"parameters.js","sourceRoot":"","sources":["../../../../src/generated/models/parameters.ts"],"names":[],"mappings":"AAAA;;;;;;GAMG;AAOH,OAAO,EACL,aAAa,IAAI,mBAAmB,EACpC,oBAAoB,IAAI,0BAA0B,EACnD,MAAM,mBAAmB,CAAC;AAE3B,MAAM,CAAC,MAAM,WAAW,GAAuB;IAC7C,aAAa,EAAE,CAAC,SAAS,EAAE,aAAa,CAAC;IACzC,MAAM,EAAE;QACN,YAAY,EAAE,kBAAkB;QAChC,UAAU,EAAE,IAAI;QAChB,cAAc,EAAE,cAAc;QAC9B,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,IAAI,GAAuB;IACtC,aAAa,EAAE,MAAM;IACrB,MAAM,EAAE,mBAAmB;CAC5B,CAAC;AAEF,MAAM,CAAC,MAAM,MAAM,GAAuB;IACxC,aAAa,EAAE,QAAQ;IACvB,MAAM,EAAE;QACN,YAAY,EAAE,kBAAkB;QAChC,UAAU,EAAE,IAAI;QAChB,cAAc,EAAE,QAAQ;QACxB,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,QAAQ,GAA0B;IAC7C,aAAa,EAAE,UAAU;IACzB,MAAM,EAAE;QACN,cAAc,EAAE,UAAU;QAC1B,QAAQ,EAAE,IAAI;QACd,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;IACD,YAAY,EAAE,IAAI;CACnB,CAAC;AAEF,MAAM,CAAC,MAAM,UAAU,GAA4B;IACjD,aAAa,EAAE,YAAY;IAC3B,MAAM,EAAE;QACN,YAAY,EAAE,YAAY;QAC1B,UAAU,EAAE,IAAI;QAChB,cAAc,EAAE,aAAa;QAC7B,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,iBAAiB,GAA4B;IACxD,aAAa,EAAE,CAAC,SAAS,EAAE,mBAAmB,CAAC;IAC/C,MAAM,EAAE;QACN,cAAc,EAAE,WAAW;QAC3B,IAAI,EAAE;YACJ,IAAI,EAAE,SAAS;SAChB;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,KAAK,GAAuB;IACvC,aAAa,EAAE,MAAM;IACrB,MAAM,EAAE,0BAA0B;CACnC,CAAC;AAEF,MAAM,CAAC,MAAM,KAAK,GAA0B;IAC1C,aAAa,EAAE,OAAO;IACtB,MAAM,EAAE;QACN,cAAc,EAAE,OAAO;QACvB,QAAQ,EAAE,IAAI;QACd,IAAI,EAAE;YACJ,IAAI,EAAE,MAAM;SACb;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,GAAG,GAA4B;IAC1C,aAAa,EAAE,CAAC,SAAS,EAAE,KAAK,CAAC;IACjC,MAAM,EAAE;QACN,cAAc,EAAE,KAAK;QACrB,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC;AAEF,MAAM,CAAC,MAAM,IAAI,GAA4B;IAC3C,aAAa,EAAE,CAAC,SAAS,EAAE,MAAM,CAAC;IAClC,MAAM,EAAE;QACN,cAAc,EAAE,MAAM;QACtB,IAAI,EAAE;YACJ,IAAI,EAAE,QAAQ;SACf;KACF;CACF,CAAC","sourcesContent":["/*\n * Copyright (c) Microsoft Corporation.\n * Licensed under the MIT License.\n *\n * Code generated by Microsoft (R) AutoRest Code Generator.\n * Changes may cause incorrect behavior and will be lost if the code is regenerated.\n */\n\nimport {\n OperationParameter,\n OperationURLParameter,\n OperationQueryParameter\n} from \"@azure/core-client\";\nimport {\n AnalyzeAction as AnalyzeActionMapper,\n AnalyzeTextJobsInput as AnalyzeTextJobsInputMapper\n} from \"../models/mappers\";\n\nexport const contentType: OperationParameter = {\n parameterPath: [\"options\", \"contentType\"],\n mapper: {\n defaultValue: \"application/json\",\n isConstant: true,\n serializedName: \"Content-Type\",\n type: {\n name: \"String\"\n }\n }\n};\n\nexport const body: OperationParameter = {\n parameterPath: \"body\",\n mapper: AnalyzeActionMapper\n};\n\nexport const accept: OperationParameter = {\n parameterPath: \"accept\",\n mapper: {\n defaultValue: \"application/json\",\n isConstant: true,\n serializedName: \"Accept\",\n type: {\n name: \"String\"\n }\n }\n};\n\nexport const endpoint: OperationURLParameter = {\n parameterPath: \"endpoint\",\n mapper: {\n serializedName: \"Endpoint\",\n required: true,\n type: {\n name: \"String\"\n }\n },\n skipEncoding: true\n};\n\nexport const apiVersion: OperationQueryParameter = {\n parameterPath: \"apiVersion\",\n mapper: {\n defaultValue: \"2023-04-01\",\n isConstant: true,\n serializedName: \"api-version\",\n type: {\n name: \"String\"\n }\n }\n};\n\nexport const includeStatistics: OperationQueryParameter = {\n parameterPath: [\"options\", \"includeStatistics\"],\n mapper: {\n serializedName: \"showStats\",\n type: {\n name: \"Boolean\"\n }\n 
}\n};\n\nexport const body1: OperationParameter = {\n parameterPath: \"body\",\n mapper: AnalyzeTextJobsInputMapper\n};\n\nexport const jobId: OperationURLParameter = {\n parameterPath: \"jobId\",\n mapper: {\n serializedName: \"jobId\",\n required: true,\n type: {\n name: \"Uuid\"\n }\n }\n};\n\nexport const top: OperationQueryParameter = {\n parameterPath: [\"options\", \"top\"],\n mapper: {\n serializedName: \"top\",\n type: {\n name: \"Number\"\n }\n }\n};\n\nexport const skip: OperationQueryParameter = {\n parameterPath: [\"options\", \"skip\"],\n mapper: {\n serializedName: \"skip\",\n type: {\n name: \"Number\"\n }\n }\n};\n"]}
package/dist-esm/src/index.js
CHANGED
@@ -14,5 +14,5 @@ export { TextAnalysisClient } from "./textAnalysisClient";
 export * from "./models";
 export {
 /** orphan exports */
-KnownPiiEntityDomain, KnownPiiEntityCategory, KnownStringIndexType, KnownErrorCode, KnownInnerErrorCode,
+KnownPiiEntityDomain, KnownPiiEntityCategory, KnownStringIndexType, KnownErrorCode, KnownInnerErrorCode, KnownRelationType, KnownExtractiveSummarizationOrderingCriteria, KnownHealthcareEntityCategory, } from "./generated/models";
 //# sourceMappingURL=index.js.map
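
The added exports make the generated `Known*` enums for healthcare relations, summary-sentence ordering, and healthcare entity categories importable from the package root. A small sketch of consuming them, assuming the usual generated string-enum shape:

```ts
import {
  KnownExtractiveSummarizationOrderingCriteria,
  KnownHealthcareEntityCategory,
  KnownRelationType,
} from "@azure/ai-language-text";

// Each Known* export is a string enum, so its values can be compared directly
// against the plain strings the service returns in results.
console.log(Object.values(KnownRelationType));
console.log(Object.values(KnownHealthcareEntityCategory));
console.log(Object.values(KnownExtractiveSummarizationOrderingCriteria));
```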
package/dist-esm/src/index.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC;;;;;;;;GAQG;AAEH,OAAO,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AAEtD,OAAO,EAAE,kBAAkB,EAAE,MAAM,sBAAsB,CAAC;AAC1D,cAAc,UAAU,CAAC;AACzB,OAAO;
+
{"version":3,"file":"index.js","sourceRoot":"","sources":["../../src/index.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC;;;;;;;;GAQG;AAEH,OAAO,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AAEtD,OAAO,EAAE,kBAAkB,EAAE,MAAM,sBAAsB,CAAC;AAC1D,cAAc,UAAU,CAAC;AACzB,OAAO;AA6CL,qBAAqB;AACrB,oBAAoB,EACpB,sBAAsB,EACtB,oBAAoB,EACpB,cAAc,EACd,mBAAmB,EAEnB,iBAAiB,EACjB,4CAA4C,EAC5C,6BAA6B,GAC9B,MAAM,oBAAoB,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\n/**\n * [Azure Cognitive Language Services](https://docs.microsoft.com/azure/cognitive-services/language-service/overview)\n * is a suite of natural language processing (NLP) skills built with\n * best-in-class Microsoft machine learning algorithms used to analyze\n * unstructured text for actions such as sentiment analysis, key phrase\n * extraction, and language detection.\n *\n * @packageDocumentation\n */\n\nexport { AzureKeyCredential } from \"@azure/core-auth\";\n\nexport { TextAnalysisClient } from \"./textAnalysisClient\";\nexport * from \"./models\";\nexport {\n AssessmentSentiment,\n SentimentAnalysisAction,\n EntityLinkingAction,\n EntityRecognitionAction,\n KeyPhraseExtractionAction,\n LanguageDetectionAction,\n PiiEntityRecognitionAction,\n StringIndexType,\n LinkedEntity,\n Entity,\n DetectedLanguage,\n PiiEntityCategory,\n PiiEntityDomain,\n SentimentConfidenceScores,\n SentenceSentimentLabel,\n DocumentSentimentLabel,\n TargetConfidenceScores,\n TokenSentimentLabel,\n LanguageDetectionInput,\n TextDocumentInput,\n TextDocumentStatistics,\n DocumentWarning,\n WarningCode,\n Match,\n ActionCommon,\n ActionPrebuilt,\n HealthcareAction,\n CustomEntityRecognitionAction,\n CustomSingleLabelClassificationAction,\n CustomMultiLabelClassificationAction,\n ActionCustom,\n ClassificationCategory,\n HealthcareAssertion,\n HealthcareEntityCategory,\n EntityDataSource,\n RelationType,\n EntityAssociation,\n EntityCertainty,\n EntityConditionality,\n SummaryContext,\n AbstractiveSummary,\n ExtractiveSummarizationAction,\n SummarySentence,\n ExtractiveSummarizationOrderingCriteria,\n /** orphan exports */\n KnownPiiEntityDomain,\n KnownPiiEntityCategory,\n KnownStringIndexType,\n KnownErrorCode,\n KnownInnerErrorCode,\n TextDocumentBatchStatistics,\n KnownRelationType,\n KnownExtractiveSummarizationOrderingCriteria,\n KnownHealthcareEntityCategory,\n} from \"./generated/models\";\n"]}
package/dist-esm/src/lro.js
CHANGED
@@ -22,13 +22,24 @@ const jobStatusOperationSpec = {
 queryParameters: [Parameters.top, Parameters.skip, Parameters.includeStatistics],
 serializer,
 };
+function addOnResponse(options, cb) {
+return Object.assign(Object.assign({}, options), { onResponse: (rawResponse, response, error) => {
+var _a;
+cb(rawResponse, response, error);
+(_a = options.onResponse) === null || _a === void 0 ? void 0 : _a.call(options, rawResponse, response, error);
+} });
+}
+function logWarnHeader(rawResponse) {
+const warnHeader = rawResponse.headers.get("warn-text");
+if (warnHeader) {
+warnHeader.split(";").map((x) => logger.warning(x));
+}
+}
 async function getRawResponse(getResponse, options) {
-const { onResponse } = options || {};
 let rawResponse;
-const flatResponse = await getResponse(
-
-
-} }));
+const flatResponse = await getResponse(addOnResponse(options, (response) => {
+rawResponse = response;
+}));
 return {
 flatResponse,
 rawResponse: {
@@ -51,7 +62,12 @@ function createSendPollRequest(settings) {
 return async (path) => {
 return throwError(sendRequest({
 client,
-opOptions: options,
+opOptions: addOnResponse(options, (_, response) => {
+const castResponse = response;
+if (castResponse.status.toLowerCase() === "partiallysucceeded") {
+castResponse.status = "succeeded";
+}
+}),
 path,
 spanStr,
 spec: jobStatusOperationSpec,
@@ -66,7 +82,7 @@ export function createAnalyzeBatchLro(settings) {
 const { client, commonOptions, documents, initialRequestOptions, pollRequestOptions, tasks, tracing, } = settings;
 return {
 async sendInitialRequest() {
-return tracing.withSpan(`${clientName}.beginAnalyzeBatch`, Object.assign(Object.assign({}, commonOptions), initialRequestOptions), async (finalOptions) => throwError(getRawResponse((paramOptions) => client.analyzeBatch({
+return tracing.withSpan(`${clientName}.beginAnalyzeBatch`, addOnResponse(Object.assign(Object.assign({}, commonOptions), initialRequestOptions), logWarnHeader), async (finalOptions) => throwError(getRawResponse((paramOptions) => client.analyzeBatch({
 tasks,
 analysisInput: {
 documents,
@@ -134,7 +150,7 @@ export function processAnalyzeResult(options) {
 });
 const flatResponse = response.flatResponse;
 return {
-page: transformAnalyzeBatchResults(docIds, flatResponse.tasks.items),
+page: transformAnalyzeBatchResults(docIds, flatResponse.tasks.items, flatResponse.errors),
 nextPageLink: flatResponse.nextLink,
 };
 },
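
The down-leveled JavaScript above corresponds to two small helpers added to src/lro.ts, whose TypeScript source is embedded verbatim in the regenerated source map below: `addOnResponse` chains an extra `onResponse` callback onto caller-supplied operation options (the poll path uses it to normalize a "partiallySucceeded" job status to "succeeded"), and `logWarnHeader` forwards the service's `warn-text` header to the client logger. A self-contained sketch; the logger wiring shown here is an assumption standing in for the package's internal logger module:

```ts
import { FullOperationResponse, OperationOptions } from "@azure/core-client";
import { createClientLogger } from "@azure/logger";

// Assumed stand-in for the package's internal "./logger" module.
const logger = createClientLogger("ai-language-text");

// Wrap the caller's options so an additional onResponse hook runs first,
// then delegate to any onResponse the caller already provided.
function addOnResponse<TOptions extends OperationOptions>(
  options: TOptions,
  cb: (rawResponse: FullOperationResponse, response: unknown, error: unknown) => void
): TOptions {
  return {
    ...options,
    onResponse: (rawResponse, response, error) => {
      cb(rawResponse, response, error);
      options.onResponse?.(rawResponse, response, error);
    },
  };
}

// Surface any "warn-text" header entries through the client logger.
function logWarnHeader(rawResponse: FullOperationResponse): void {
  const warnHeader = rawResponse.headers.get("warn-text");
  if (warnHeader) {
    warnHeader.split(";").map((x) => logger.warning(x));
  }
}
```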
package/dist-esm/src/lro.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"lro.js","sourceRoot":"","sources":["../../src/lro.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,OAAO,KAAK,OAAO,MAAM,4BAA4B,CAAC;AACtD,OAAO,KAAK,UAAU,MAAM,+BAA+B,CAAC;AAc5D,OAAO,EAIL,gBAAgB,GACjB,MAAM,oBAAoB,CAAC;AAE5B,OAAO,EAAe,qBAAqB,EAAE,MAAM,oBAAoB,CAAC;AACxE,OAAO,EAAE,UAAU,EAAE,4BAA4B,EAAE,MAAM,cAAc,CAAC;AAGxE,OAAO,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AACzC,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAElC,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;AAEhE,MAAM,sBAAsB,GAAkB;IAC5C,UAAU,EAAE,KAAK;IACjB,SAAS,EAAE;QACT,GAAG,EAAE;YACH,UAAU,EAAE,OAAO,CAAC,mBAAmB;SACxC;QACD,OAAO,EAAE;YACP,UAAU,EAAE,OAAO,CAAC,aAAa;SAClC;KACF;IACD,gBAAgB,EAAE,CAAC,UAAU,CAAC,MAAM,CAAC;IACrC,eAAe,EAAE,CAAC,UAAU,CAAC,GAAG,EAAE,UAAU,CAAC,IAAI,EAAE,UAAU,CAAC,iBAAiB,CAAC;IAChF,UAAU;CACX,CAAC;AAEF,KAAK,UAAU,cAAc,CAC3B,WAAsD,EACtD,OAAiB;IAEjB,MAAM,EAAE,UAAU,EAAE,GAAG,OAAO,IAAI,EAAE,CAAC;IACrC,IAAI,WAAkC,CAAC;IACvC,MAAM,YAAY,GAAG,MAAM,WAAW,iCACjC,OAAO,KACV,UAAU,EAAE,CAAC,QAAQ,EAAE,iBAAiB,EAAE,EAAE;YAC1C,WAAW,GAAG,QAAQ,CAAC;YACvB,UAAU,aAAV,UAAU,uBAAV,UAAU,CAAG,QAAQ,EAAE,iBAAiB,CAAC,CAAC;QAC5C,CAAC,IACD,CAAC;IACH,OAAO;QACL,YAAY;QACZ,WAAW,EAAE;YACX,UAAU,EAAE,WAAY,CAAC,MAAM;YAC/B,OAAO,EAAE,WAAY,CAAC,OAAO,CAAC,MAAM,EAAE;YACtC,IAAI,EAAE,WAAY,CAAC,UAAU;SAC9B;KACF,CAAC;AACJ,CAAC;AAED,KAAK,UAAU,WAAW,CAAoC,QAQ7D;IACC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,OAAO,EAAE,UAAU,GAAG,KAAK,EAAE,GAAG,QAAQ,CAAC;IACzF,OAAO,OAAO,CAAC,QAAQ,CAAC,OAAO,EAAE,SAAS,EAAE,KAAK,EAAE,YAAsB,EAAE,EAAE,CAC3E,UAAU,CACR,cAAc,CACZ,CAAC,OAAO,EAAE,EAAE,CACV,MAAM,CAAC,oBAAoB,CACzB,EAAE,OAAO,EAAE,kCAEN,IAAI,KACP,IAAI;QACJ,UAAU,IAEb,EACH,YAAY,CACb,CACF,CACF,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,SAAS,qBAAqB,CAAoC,QAKjE;IACC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,EAAE,OAAO,EAAE,GAAG,QAAQ,CAAC;IACvD,OAAO,KAAK,EAAE,IAAY,EAAiC,EAAE;QAC3D,OAAO,UAAU,CACf,WAAW,CAAC;YACV,MAAM;YACN,SAAS,EAAE,OAAO;YAClB,IAAI;YACJ,OAAO;YACP,IAAI,EAAE,sBAAsB;YAC5B,OAAO;SACR,CAAC,CACH,CAAC;IACJ,CAAC,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,qBAAqB,CAAC,QAYrC;IACC,MAAM,EACJ,MAAM,EACN,aAAa,EACb,SAAS,EACT,qBAAqB,EACrB,kBAAkB,EAClB,KAAK,EACL,OAAO,GACR,GAAG,QAAQ,CAAC;IACb,OAAO;QACL,KAAK,CAAC,kBAAkB;YACtB,OAAO,OAAO,CAAC,QAAQ,CACrB,GAAG,UAAU,oBAAoB,kCAE5B,aAAa,GACb,qBAAqB,GAE1B,KAAK,EAAE,YAAY,EAAE,EAAE,CACrB,UAAU,CACR,cAAc,CACZ,CAAC,YAAY,EAAE,EAAE,CACf,MAAM,CAAC,YAAY,CACjB;gBACE,KAAK;gBACL,aAAa,EAAE;oBACb,SAAS;iBACV;gBACD,WAAW,EAAE,qBAAqB,CAAC,WAAW;aAC/C,EACD,YAAY,CACb,EACH,YAAY,CACb,CACF,CACJ,CAAC;QACJ,CAAC;QACD,eAAe,EAAE,qBAAqB,CAAC;YACrC,MAAM;YACN,OAAO,kCAAO,aAAa,GAAK,kBAAkB,CAAE;YACpD,OAAO,EAAE,GAAG,UAAU,oBAAoB;YAC1C,OAAO;SACR,CAAC;KACH,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,kBAAkB,CAAC,eAAuB;IACxD,IAAI;QACF,MAAM,EAAE,MAAM,EAAE,GAAG,IAAI,CAAC,KAAK,CAAC,eAAe,CAAC,CAAC,KAAK,CAAC;QACrD,OAAO,MAAM,CAAC;KACf;IAAC,OAAO,CAAC,EAAE;QACV,MAAM,CAAC,KAAK,CACV,0FAA0F,CAC3F,CAAC;QACF,OAAO,EAAE,CAAC;KACX;AACH,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,iCAAiC,CAAoC,QAIpF;IACC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,EAAE,GAAG,QAAQ,CAAC;IAC9C,OAAO;QACL,KAAK,CAAC,kBAAkB;YACtB,MAAM,IAAI,KAAK,CAAC,mCAAmC,CAAC,CAAC;QACvD,CAAC;QACD,eAAe,EAAE,qBAAqB,CAAC;YACrC,MAAM;YACN,OAAO;YACP,OAAO,EAAE,GAAG,UAAU,oBAAoB;YAC1C,OAAO;SACR,CAAC;KACH,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,oBAAoB,CAAC,OAMpC;IACC,OAAO,GAA4B,EAAE;QACnC,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,OAAO,EAAE,KAAK,EAAE,GAAG,OAAO,CAAC;QAC9D,MAAM,OAAO,GAAG,KAAK,CAAC,iBAAiB,CAAC;QACxC,MAAM,WAAW,GAAsC;YACrD,aAAa,EAAE,OAAO;YACtB,OAAO,EAAE,KAAK,EAAE,QAAgB,EAAE,WAAoB,EAAE,EAAE;gBACxD,MAAM,QAAQ,GAAG,MAAM,WAAW,CAAC;oBACjC,MAAM;oBACN,IAAI,EAAE,sBAAs
B;oBAC5B,OAAO,EAAE,GAAG,UAAU,oBAAoB;oBAC1C,qEAAqE;oBACrE,0BAA0B;oBAC1B,SAAS,EAAE,WAAW,CAAC,CAAC,iCAAM,SAAS,KAAE,GAAG,EAAE,WAAW,IAAG,CAAC,CAAC,SAAS;oBACvE,IAAI,EAAE,QAAQ;oBACd,OAAO;iBACR,CAAC,CAAC;gBACH,MAAM,YAAY,GAAG,QAAQ,CAAC,YAA4C,CAAC;gBAC3E,OAAO;oBACL,IAAI,EAAE,4BAA4B,CAAC,MAAM,EAAE,YAAY,CAAC,KAAK,CAAC,KAAK,CAAC;oBACpE,YAAY,EAAE,YAAY,CAAC,QAAQ;iBACpC,CAAC;YACJ,CAAC;SACF,CAAC;QACF,OAAO,qBAAqB,CAAC,WAAW,CAAC,CAAC;IAC5C,CAAC,CAAC;AACJ,CAAC;AAMD;;GAEG;AACH,MAAM,UAAU,wBAAwB,CAAC,MAAiB;IACxD,OAAO,CAAC,KAAiC,EAAE,YAAyB,EAAQ,EAAE;QAC5E,MAAM,EAAE,SAAS,EAAE,UAAU,EAAE,EAAE,EAAE,WAAW,EAAE,SAAS,EAAE,KAAK,EAAE,kBAAkB,EAAE,GACpF,YAAY,CAAC,YAA6E,CAAC;QAC7F,MAAM,YAAY,GAAG,KAEpB,CAAC;QACF,YAAY,CAAC,SAAS,GAAG,SAAS,CAAC;QACnC,8DAA8D;QAC9D,YAAY,CAAC,UAAU,GAAG,UAAU,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,IAAI,CAAC,kBAAkB,CAAC,CAAC;QACjF,YAAY,CAAC,SAAS,GAAG,SAAS,CAAC;QACnC,YAAY,CAAC,WAAW,GAAG,WAAW,CAAC;QACvC,YAAY,CAAC,EAAE,GAAG,EAAE,CAAC;QACrB,YAAY,CAAC,oBAAoB,GAAG,KAAK,CAAC,SAAS,CAAC;QACpD,YAAY,CAAC,iBAAiB,GAAG,KAAK,CAAC,MAAM,CAAC;QAC9C,YAAY,CAAC,qBAAqB,GAAG,KAAK,CAAC,UAAU,CAAC;QACtD,IAAI,YAAY,CAAC,MAAM,KAAK,SAAS,IAAI,MAAM,KAAK,SAAS,EAAE;YAC7D,YAAY,CAAC,MAAM,GAAG,MAAM,CAAC;SAC9B;IACH,CAAC,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,4BAA4B,CAAC,QAM5C;IACC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,EAAE,EAAE,OAAO,EAAE,GAAG,QAAQ,CAAC;IAC1D,uCACK,MAAM,KACT,uBAAuB,EAAE,KAAK,IAAI,EAAE;YAClC,MAAM,OAAO,CAAC,QAAQ,CAAC,GAAG,UAAU,oBAAoB,EAAE,OAAO,EAAE,KAAK,EAAE,YAAY,EAAE,EAAE,CACxF,UAAU,CACR,cAAc,CACZ,CAAC,YAAY,EAAE,EAAE,CAAC,MAAM,CAAC,WAAW,CAAC,SAAS,CAAC,EAAE,EAAE,YAAY,CAAC,EAChE,YAAY,CACb,CACF,CACF,CAAC;QACJ,CAAC,IACD;AACJ,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport * as Mappers from \"./generated/models/mappers\";\nimport * as Parameters from \"./generated/models/parameters\";\nimport {\n AnalyzeBatchActionUnion,\n AnalyzeTextJobStatusOptionalParams,\n AnalyzeTextJobStatusResponse,\n GeneratedClient,\n TextDocumentInput,\n} from \"./generated\";\nimport {\n AnalyzeBatchOperationState,\n AnalyzeBatchResult,\n PagedAnalyzeBatchResult,\n PollerLike,\n} from \"./models\";\nimport {\n FullOperationResponse,\n OperationOptions,\n OperationSpec,\n createSerializer,\n} from \"@azure/core-client\";\nimport { LongRunningOperation, LroResponse, SimplePollerLike } from \"@azure/core-lro\";\nimport { PagedResult, getPagedAsyncIterator } from \"@azure/core-paging\";\nimport { throwError, transformAnalyzeBatchResults } from \"./transforms\";\nimport { HttpMethods } from \"@azure/core-rest-pipeline\";\nimport { TracingClient } from \"@azure/core-tracing\";\nimport { clientName } from \"./constants\";\nimport { logger } from \"./logger\";\n\nconst serializer = createSerializer(Mappers, /* isXml */ false);\n\nconst jobStatusOperationSpec: OperationSpec = {\n httpMethod: \"GET\",\n responses: {\n 200: {\n bodyMapper: Mappers.AnalyzeTextJobState,\n },\n default: {\n bodyMapper: Mappers.ErrorResponse,\n },\n },\n headerParameters: [Parameters.accept],\n queryParameters: [Parameters.top, Parameters.skip, Parameters.includeStatistics],\n serializer,\n};\n\nasync function getRawResponse<TOptions extends OperationOptions, TResponse>(\n getResponse: (options: TOptions) => Promise<TResponse>,\n options: TOptions\n): Promise<LroResponse<TResponse>> {\n const { onResponse } = options || {};\n let rawResponse: FullOperationResponse;\n const flatResponse = await getResponse({\n ...options,\n onResponse: (response, flatResponseParam) => {\n rawResponse = response;\n 
onResponse?.(response, flatResponseParam);\n },\n });\n return {\n flatResponse,\n rawResponse: {\n statusCode: rawResponse!.status,\n headers: rawResponse!.headers.toJSON(),\n body: rawResponse!.parsedBody,\n },\n };\n}\n\nasync function sendRequest<TOptions extends OperationOptions>(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n spec: OperationSpec;\n spanStr: string;\n opOptions: TOptions;\n path: string;\n httpMethod?: HttpMethods;\n}): Promise<LroResponse<unknown>> {\n const { client, opOptions, path, spanStr, spec, tracing, httpMethod = \"GET\" } = settings;\n return tracing.withSpan(spanStr, opOptions, async (finalOptions: TOptions) =>\n throwError(\n getRawResponse(\n (options) =>\n client.sendOperationRequest(\n { options },\n {\n ...spec,\n path,\n httpMethod,\n }\n ),\n finalOptions\n )\n )\n );\n}\n\n/**\n * @internal\n */\nfunction createSendPollRequest<TOptions extends OperationOptions>(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n options: TOptions;\n spanStr: string;\n}): (path: string) => Promise<LroResponse<unknown>> {\n const { client, options, tracing, spanStr } = settings;\n return async (path: string): Promise<LroResponse<unknown>> => {\n return throwError(\n sendRequest({\n client,\n opOptions: options,\n path,\n spanStr,\n spec: jobStatusOperationSpec,\n tracing,\n })\n );\n };\n}\n\n/**\n * @internal\n */\nexport function createAnalyzeBatchLro(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n commonOptions: OperationOptions;\n initialRequestOptions: {\n displayName?: string;\n };\n pollRequestOptions: {\n includeStatistics?: boolean;\n };\n documents: TextDocumentInput[];\n tasks: AnalyzeBatchActionUnion[];\n}): LongRunningOperation {\n const {\n client,\n commonOptions,\n documents,\n initialRequestOptions,\n pollRequestOptions,\n tasks,\n tracing,\n } = settings;\n return {\n async sendInitialRequest(): Promise<LroResponse<unknown>> {\n return tracing.withSpan(\n `${clientName}.beginAnalyzeBatch`,\n {\n ...commonOptions,\n ...initialRequestOptions,\n },\n async (finalOptions) =>\n throwError(\n getRawResponse(\n (paramOptions) =>\n client.analyzeBatch(\n {\n tasks,\n analysisInput: {\n documents,\n },\n displayName: initialRequestOptions.displayName,\n },\n paramOptions\n ),\n finalOptions\n )\n )\n );\n },\n sendPollRequest: createSendPollRequest({\n client,\n options: { ...commonOptions, ...pollRequestOptions },\n spanStr: `${clientName}.beginAnalyzeBatch`,\n tracing,\n }),\n };\n}\n\n/**\n * @internal\n */\nexport function getDocIDsFromState(serializedState: string): string[] {\n try {\n const { docIds } = JSON.parse(serializedState).state;\n return docIds;\n } catch (e) {\n logger.error(\n `Document IDs are not found in the LRO's state. 
The results may not be ordered correctly.`\n );\n return [];\n }\n}\n\n/**\n * @internal\n */\nexport function createCreateAnalyzeBatchPollerLro<OptionsT extends OperationOptions>(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n options: OptionsT;\n}): LongRunningOperation {\n const { client, options, tracing } = settings;\n return {\n async sendInitialRequest(): Promise<LroResponse<unknown>> {\n throw new Error(`The operation has already started`);\n },\n sendPollRequest: createSendPollRequest({\n client,\n options,\n spanStr: `${clientName}.beginAnalyzeBatch`,\n tracing,\n }),\n };\n}\n\n/**\n * @internal\n */\nexport function processAnalyzeResult(options: {\n client: GeneratedClient;\n tracing: TracingClient;\n docIds: string[];\n opOptions: AnalyzeTextJobStatusOptionalParams;\n state: { continuationToken: string };\n}): (result: unknown, state: AnalyzeBatchOperationState) => PagedAnalyzeBatchResult {\n return (): PagedAnalyzeBatchResult => {\n const { client, docIds, opOptions, tracing, state } = options;\n const pageURL = state.continuationToken;\n const pagedResult: PagedResult<AnalyzeBatchResult[]> = {\n firstPageLink: pageURL,\n getPage: async (pageLink: string, maxPageSize?: number) => {\n const response = await sendRequest({\n client,\n spec: jobStatusOperationSpec,\n spanStr: `${clientName}.beginAnalyzeBatch`,\n // if `top` is set to `undefined`, the default value will not be sent\n // as part of the request.\n opOptions: maxPageSize ? { ...opOptions, top: maxPageSize } : opOptions,\n path: pageLink,\n tracing,\n });\n const flatResponse = response.flatResponse as AnalyzeTextJobStatusResponse;\n return {\n page: transformAnalyzeBatchResults(docIds, flatResponse.tasks.items),\n nextPageLink: flatResponse.nextLink,\n };\n },\n };\n return getPagedAsyncIterator(pagedResult);\n };\n}\n\ntype Writable<T> = {\n -readonly [P in keyof T]: T[P];\n};\n\n/**\n * @internal\n */\nexport function createUpdateAnalyzeState(docIds?: string[]) {\n return (state: AnalyzeBatchOperationState, lastResponse: LroResponse): void => {\n const { createdOn, modifiedOn, id, displayName, expiresOn, tasks, lastUpdateDateTime } =\n lastResponse.flatResponse as AnalyzeTextJobStatusResponse & { lastUpdateDateTime: string };\n const mutableState = state as Writable<AnalyzeBatchOperationState> & {\n docIds?: string[];\n };\n mutableState.createdOn = createdOn;\n // FIXME: remove this mitigation when the service API is fixed\n mutableState.modifiedOn = modifiedOn ? 
modifiedOn : new Date(lastUpdateDateTime);\n mutableState.expiresOn = expiresOn;\n mutableState.displayName = displayName;\n mutableState.id = id;\n mutableState.actionSucceededCount = tasks.completed;\n mutableState.actionFailedCount = tasks.failed;\n mutableState.actionInProgressCount = tasks.inProgress;\n if (mutableState.docIds === undefined && docIds !== undefined) {\n mutableState.docIds = docIds;\n }\n };\n}\n\n/**\n * @internal\n */\nexport function createPollerWithCancellation(settings: {\n poller: SimplePollerLike<AnalyzeBatchOperationState, PagedAnalyzeBatchResult>;\n client: GeneratedClient;\n tracing: TracingClient;\n options: AnalyzeTextJobStatusOptionalParams;\n id: string;\n}): PollerLike<AnalyzeBatchOperationState, PagedAnalyzeBatchResult> {\n const { client, options, poller, id, tracing } = settings;\n return {\n ...poller,\n sendCancellationRequest: async () => {\n await tracing.withSpan(`${clientName}.beginAnalyzeBatch`, options, async (finalOptions) =>\n throwError(\n getRawResponse(\n (paramOptions) => client.analyzeText.cancelJob(id, paramOptions),\n finalOptions\n )\n )\n );\n },\n };\n}\n"]}
+
{"version":3,"file":"lro.js","sourceRoot":"","sources":["../../src/lro.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,OAAO,KAAK,OAAO,MAAM,4BAA4B,CAAC;AACtD,OAAO,KAAK,UAAU,MAAM,+BAA+B,CAAC;AAc5D,OAAO,EAIL,gBAAgB,GACjB,MAAM,oBAAoB,CAAC;AAE5B,OAAO,EAAe,qBAAqB,EAAE,MAAM,oBAAoB,CAAC;AACxE,OAAO,EAAE,UAAU,EAAE,4BAA4B,EAAE,MAAM,cAAc,CAAC;AAGxE,OAAO,EAAE,UAAU,EAAE,MAAM,aAAa,CAAC;AACzC,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAElC,MAAM,UAAU,GAAG,gBAAgB,CAAC,OAAO,EAAE,WAAW,CAAC,KAAK,CAAC,CAAC;AAEhE,MAAM,sBAAsB,GAAkB;IAC5C,UAAU,EAAE,KAAK;IACjB,SAAS,EAAE;QACT,GAAG,EAAE;YACH,UAAU,EAAE,OAAO,CAAC,mBAAmB;SACxC;QACD,OAAO,EAAE;YACP,UAAU,EAAE,OAAO,CAAC,aAAa;SAClC;KACF;IACD,gBAAgB,EAAE,CAAC,UAAU,CAAC,MAAM,CAAC;IACrC,eAAe,EAAE,CAAC,UAAU,CAAC,GAAG,EAAE,UAAU,CAAC,IAAI,EAAE,UAAU,CAAC,iBAAiB,CAAC;IAChF,UAAU;CACX,CAAC;AAEF,SAAS,aAAa,CACpB,OAAiB,EACjB,EAAmF;IAEnF,uCACK,OAAO,KACV,UAAU,EAAE,CAAC,WAAW,EAAE,QAAQ,EAAE,KAAK,EAAE,EAAE;;YAC3C,EAAE,CAAC,WAAW,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC;YACjC,MAAA,OAAO,CAAC,UAAU,wDAAG,WAAW,EAAE,QAAQ,EAAE,KAAK,CAAC,CAAC;QACrD,CAAC,IACD;AACJ,CAAC;AAED,SAAS,aAAa,CAAC,WAAkC;IACvD,MAAM,UAAU,GAAG,WAAW,CAAC,OAAO,CAAC,GAAG,CAAC,WAAW,CAAC,CAAC;IACxD,IAAI,UAAU,EAAE;QACd,UAAU,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,GAAG,CAAC,CAAC,CAAC,EAAE,EAAE,CAAC,MAAM,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC,CAAC;KACrD;AACH,CAAC;AAED,KAAK,UAAU,cAAc,CAC3B,WAAsD,EACtD,OAAiB;IAEjB,IAAI,WAAkC,CAAC;IACvC,MAAM,YAAY,GAAG,MAAM,WAAW,CACpC,aAAa,CAAC,OAAO,EAAE,CAAC,QAAQ,EAAE,EAAE;QAClC,WAAW,GAAG,QAAQ,CAAC;IACzB,CAAC,CAAC,CACH,CAAC;IACF,OAAO;QACL,YAAY;QACZ,WAAW,EAAE;YACX,UAAU,EAAE,WAAY,CAAC,MAAM;YAC/B,OAAO,EAAE,WAAY,CAAC,OAAO,CAAC,MAAM,EAAE;YACtC,IAAI,EAAE,WAAY,CAAC,UAAU;SAC9B;KACF,CAAC;AACJ,CAAC;AAED,KAAK,UAAU,WAAW,CAAoC,QAQ7D;IACC,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,IAAI,EAAE,OAAO,EAAE,IAAI,EAAE,OAAO,EAAE,UAAU,GAAG,KAAK,EAAE,GAAG,QAAQ,CAAC;IACzF,OAAO,OAAO,CAAC,QAAQ,CAAC,OAAO,EAAE,SAAS,EAAE,KAAK,EAAE,YAAsB,EAAE,EAAE,CAC3E,UAAU,CACR,cAAc,CACZ,CAAC,OAAO,EAAE,EAAE,CACV,MAAM,CAAC,oBAAoB,CACzB,EAAE,OAAO,EAAE,kCAEN,IAAI,KACP,IAAI;QACJ,UAAU,IAEb,EACH,YAAY,CACb,CACF,CACF,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,SAAS,qBAAqB,CAAoC,QAKjE;IACC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,OAAO,EAAE,OAAO,EAAE,GAAG,QAAQ,CAAC;IACvD,OAAO,KAAK,EAAE,IAAY,EAAiC,EAAE;QAC3D,OAAO,UAAU,CACf,WAAW,CAAC;YACV,MAAM;YACN,SAAS,EAAE,aAAa,CAAC,OAAO,EAAE,CAAC,CAAC,EAAE,QAAQ,EAAE,EAAE;gBAChD,MAAM,YAAY,GAAG,QAAwC,CAAC;gBAC9D,IAAI,YAAY,CAAC,MAAM,CAAC,WAAW,EAAE,KAAK,oBAAoB,EAAE;oBAC9D,YAAY,CAAC,MAAM,GAAG,WAAW,CAAC;iBACnC;YACH,CAAC,CAAC;YACF,IAAI;YACJ,OAAO;YACP,IAAI,EAAE,sBAAsB;YAC5B,OAAO;SACR,CAAC,CACH,CAAC;IACJ,CAAC,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,qBAAqB,CAAC,QAYrC;IACC,MAAM,EACJ,MAAM,EACN,aAAa,EACb,SAAS,EACT,qBAAqB,EACrB,kBAAkB,EAClB,KAAK,EACL,OAAO,GACR,GAAG,QAAQ,CAAC;IACb,OAAO;QACL,KAAK,CAAC,kBAAkB;YACtB,OAAO,OAAO,CAAC,QAAQ,CACrB,GAAG,UAAU,oBAAoB,EACjC,aAAa,iCAEN,aAAa,GACb,qBAAqB,GAE1B,aAAa,CACd,EACD,KAAK,EAAE,YAAY,EAAE,EAAE,CACrB,UAAU,CACR,cAAc,CACZ,CAAC,YAAY,EAAE,EAAE,CACf,MAAM,CAAC,YAAY,CACjB;gBACE,KAAK;gBACL,aAAa,EAAE;oBACb,SAAS;iBACV;gBACD,WAAW,EAAE,qBAAqB,CAAC,WAAW;aAC/C,EACD,YAAY,CACb,EACH,YAAY,CACb,CACF,CACJ,CAAC;QACJ,CAAC;QACD,eAAe,EAAE,qBAAqB,CAAC;YACrC,MAAM;YACN,OAAO,kCAAO,aAAa,GAAK,kBAAkB,CAAE;YACpD,OAAO,EAAE,GAAG,UAAU,oBAAoB;YAC1C,OAAO;SACR,CAAC;KACH,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,kBAAkB,CAAC,eAAuB;IACxD,IAAI;QACF,MAAM,EAAE,MAAM,EAAE,GAAG,IAAI,CAAC,KAAK,CAAC,eAAe,CAAC,CAAC,KAAK,CAAC;QACrD,OAAO,MAAM,CAAC;KACf;IAAC,OAAO,CAAC,EAAE;QACV,MAAM,CAAC,KAAK,CACV,0FAA0F,CAC3F,CAAC;QACF,OAAO,EAAE,CAAC;KACX;AACH,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,iCAAiC,CAAoC,QAIpF;IACC,MAAM,EAAE,MAAM,EAAE,O
AAO,EAAE,OAAO,EAAE,GAAG,QAAQ,CAAC;IAC9C,OAAO;QACL,KAAK,CAAC,kBAAkB;YACtB,MAAM,IAAI,KAAK,CAAC,mCAAmC,CAAC,CAAC;QACvD,CAAC;QACD,eAAe,EAAE,qBAAqB,CAAC;YACrC,MAAM;YACN,OAAO;YACP,OAAO,EAAE,GAAG,UAAU,oBAAoB;YAC1C,OAAO;SACR,CAAC;KACH,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,oBAAoB,CAAC,OAMpC;IACC,OAAO,GAA4B,EAAE;QACnC,MAAM,EAAE,MAAM,EAAE,MAAM,EAAE,SAAS,EAAE,OAAO,EAAE,KAAK,EAAE,GAAG,OAAO,CAAC;QAC9D,MAAM,OAAO,GAAG,KAAK,CAAC,iBAAiB,CAAC;QACxC,MAAM,WAAW,GAAsC;YACrD,aAAa,EAAE,OAAO;YACtB,OAAO,EAAE,KAAK,EAAE,QAAgB,EAAE,WAAoB,EAAE,EAAE;gBACxD,MAAM,QAAQ,GAAG,MAAM,WAAW,CAAC;oBACjC,MAAM;oBACN,IAAI,EAAE,sBAAsB;oBAC5B,OAAO,EAAE,GAAG,UAAU,oBAAoB;oBAC1C,qEAAqE;oBACrE,0BAA0B;oBAC1B,SAAS,EAAE,WAAW,CAAC,CAAC,iCAAM,SAAS,KAAE,GAAG,EAAE,WAAW,IAAG,CAAC,CAAC,SAAS;oBACvE,IAAI,EAAE,QAAQ;oBACd,OAAO;iBACR,CAAC,CAAC;gBACH,MAAM,YAAY,GAAG,QAAQ,CAAC,YAA4C,CAAC;gBAC3E,OAAO;oBACL,IAAI,EAAE,4BAA4B,CAAC,MAAM,EAAE,YAAY,CAAC,KAAK,CAAC,KAAK,EAAE,YAAY,CAAC,MAAM,CAAC;oBACzF,YAAY,EAAE,YAAY,CAAC,QAAQ;iBACpC,CAAC;YACJ,CAAC;SACF,CAAC;QACF,OAAO,qBAAqB,CAAC,WAAW,CAAC,CAAC;IAC5C,CAAC,CAAC;AACJ,CAAC;AAMD;;GAEG;AACH,MAAM,UAAU,wBAAwB,CAAC,MAAiB;IACxD,OAAO,CAAC,KAAiC,EAAE,YAAyB,EAAQ,EAAE;QAC5E,MAAM,EAAE,SAAS,EAAE,UAAU,EAAE,EAAE,EAAE,WAAW,EAAE,SAAS,EAAE,KAAK,EAAE,kBAAkB,EAAE,GACpF,YAAY,CAAC,YAA6E,CAAC;QAC7F,MAAM,YAAY,GAAG,KAEpB,CAAC;QACF,YAAY,CAAC,SAAS,GAAG,SAAS,CAAC;QACnC,8DAA8D;QAC9D,YAAY,CAAC,UAAU,GAAG,UAAU,CAAC,CAAC,CAAC,UAAU,CAAC,CAAC,CAAC,IAAI,IAAI,CAAC,kBAAkB,CAAC,CAAC;QACjF,YAAY,CAAC,SAAS,GAAG,SAAS,CAAC;QACnC,YAAY,CAAC,WAAW,GAAG,WAAW,CAAC;QACvC,YAAY,CAAC,EAAE,GAAG,EAAE,CAAC;QACrB,YAAY,CAAC,oBAAoB,GAAG,KAAK,CAAC,SAAS,CAAC;QACpD,YAAY,CAAC,iBAAiB,GAAG,KAAK,CAAC,MAAM,CAAC;QAC9C,YAAY,CAAC,qBAAqB,GAAG,KAAK,CAAC,UAAU,CAAC;QACtD,IAAI,YAAY,CAAC,MAAM,KAAK,SAAS,IAAI,MAAM,KAAK,SAAS,EAAE;YAC7D,YAAY,CAAC,MAAM,GAAG,MAAM,CAAC;SAC9B;IACH,CAAC,CAAC;AACJ,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,4BAA4B,CAAC,QAM5C;IACC,MAAM,EAAE,MAAM,EAAE,OAAO,EAAE,MAAM,EAAE,EAAE,EAAE,OAAO,EAAE,GAAG,QAAQ,CAAC;IAC1D,uCACK,MAAM,KACT,uBAAuB,EAAE,KAAK,IAAI,EAAE;YAClC,MAAM,OAAO,CAAC,QAAQ,CAAC,GAAG,UAAU,oBAAoB,EAAE,OAAO,EAAE,KAAK,EAAE,YAAY,EAAE,EAAE,CACxF,UAAU,CACR,cAAc,CACZ,CAAC,YAAY,EAAE,EAAE,CAAC,MAAM,CAAC,WAAW,CAAC,SAAS,CAAC,EAAE,EAAE,YAAY,CAAC,EAChE,YAAY,CACb,CACF,CACF,CAAC;QACJ,CAAC,IACD;AACJ,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport * as Mappers from \"./generated/models/mappers\";\nimport * as Parameters from \"./generated/models/parameters\";\nimport {\n AnalyzeBatchActionUnion,\n AnalyzeTextJobStatusOptionalParams,\n AnalyzeTextJobStatusResponse,\n GeneratedClient,\n TextDocumentInput,\n} from \"./generated\";\nimport {\n AnalyzeBatchOperationState,\n AnalyzeBatchResult,\n PagedAnalyzeBatchResult,\n PollerLike,\n} from \"./models\";\nimport {\n FullOperationResponse,\n OperationOptions,\n OperationSpec,\n createSerializer,\n} from \"@azure/core-client\";\nimport { LongRunningOperation, LroResponse, SimplePollerLike } from \"@azure/core-lro\";\nimport { PagedResult, getPagedAsyncIterator } from \"@azure/core-paging\";\nimport { throwError, transformAnalyzeBatchResults } from \"./transforms\";\nimport { HttpMethods } from \"@azure/core-rest-pipeline\";\nimport { TracingClient } from \"@azure/core-tracing\";\nimport { clientName } from \"./constants\";\nimport { logger } from \"./logger\";\n\nconst serializer = createSerializer(Mappers, /* isXml */ false);\n\nconst jobStatusOperationSpec: OperationSpec = {\n httpMethod: \"GET\",\n responses: {\n 200: {\n bodyMapper: Mappers.AnalyzeTextJobState,\n },\n default: {\n 
bodyMapper: Mappers.ErrorResponse,\n },\n },\n headerParameters: [Parameters.accept],\n queryParameters: [Parameters.top, Parameters.skip, Parameters.includeStatistics],\n serializer,\n};\n\nfunction addOnResponse<TOptions extends OperationOptions>(\n options: TOptions,\n cb: (rawResponse: FullOperationResponse, response: unknown, error: unknown) => void\n): TOptions {\n return {\n ...options,\n onResponse: (rawResponse, response, error) => {\n cb(rawResponse, response, error);\n options.onResponse?.(rawResponse, response, error);\n },\n };\n}\n\nfunction logWarnHeader(rawResponse: FullOperationResponse) {\n const warnHeader = rawResponse.headers.get(\"warn-text\");\n if (warnHeader) {\n warnHeader.split(\";\").map((x) => logger.warning(x));\n }\n}\n\nasync function getRawResponse<TOptions extends OperationOptions, TResponse>(\n getResponse: (options: TOptions) => Promise<TResponse>,\n options: TOptions\n): Promise<LroResponse<TResponse>> {\n let rawResponse: FullOperationResponse;\n const flatResponse = await getResponse(\n addOnResponse(options, (response) => {\n rawResponse = response;\n })\n );\n return {\n flatResponse,\n rawResponse: {\n statusCode: rawResponse!.status,\n headers: rawResponse!.headers.toJSON(),\n body: rawResponse!.parsedBody,\n },\n };\n}\n\nasync function sendRequest<TOptions extends OperationOptions>(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n spec: OperationSpec;\n spanStr: string;\n opOptions: TOptions;\n path: string;\n httpMethod?: HttpMethods;\n}): Promise<LroResponse<unknown>> {\n const { client, opOptions, path, spanStr, spec, tracing, httpMethod = \"GET\" } = settings;\n return tracing.withSpan(spanStr, opOptions, async (finalOptions: TOptions) =>\n throwError(\n getRawResponse(\n (options) =>\n client.sendOperationRequest(\n { options },\n {\n ...spec,\n path,\n httpMethod,\n }\n ),\n finalOptions\n )\n )\n );\n}\n\n/**\n * @internal\n */\nfunction createSendPollRequest<TOptions extends OperationOptions>(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n options: TOptions;\n spanStr: string;\n}): (path: string) => Promise<LroResponse<unknown>> {\n const { client, options, tracing, spanStr } = settings;\n return async (path: string): Promise<LroResponse<unknown>> => {\n return throwError(\n sendRequest({\n client,\n opOptions: addOnResponse(options, (_, response) => {\n const castResponse = response as AnalyzeTextJobStatusResponse;\n if (castResponse.status.toLowerCase() === \"partiallysucceeded\") {\n castResponse.status = \"succeeded\";\n }\n }),\n path,\n spanStr,\n spec: jobStatusOperationSpec,\n tracing,\n })\n );\n };\n}\n\n/**\n * @internal\n */\nexport function createAnalyzeBatchLro(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n commonOptions: OperationOptions;\n initialRequestOptions: {\n displayName?: string;\n };\n pollRequestOptions: {\n includeStatistics?: boolean;\n };\n documents: TextDocumentInput[];\n tasks: AnalyzeBatchActionUnion[];\n}): LongRunningOperation {\n const {\n client,\n commonOptions,\n documents,\n initialRequestOptions,\n pollRequestOptions,\n tasks,\n tracing,\n } = settings;\n return {\n async sendInitialRequest(): Promise<LroResponse<unknown>> {\n return tracing.withSpan(\n `${clientName}.beginAnalyzeBatch`,\n addOnResponse(\n {\n ...commonOptions,\n ...initialRequestOptions,\n },\n logWarnHeader\n ),\n async (finalOptions) =>\n throwError(\n getRawResponse(\n (paramOptions) =>\n client.analyzeBatch(\n {\n tasks,\n analysisInput: {\n documents,\n },\n 
displayName: initialRequestOptions.displayName,\n },\n paramOptions\n ),\n finalOptions\n )\n )\n );\n },\n sendPollRequest: createSendPollRequest({\n client,\n options: { ...commonOptions, ...pollRequestOptions },\n spanStr: `${clientName}.beginAnalyzeBatch`,\n tracing,\n }),\n };\n}\n\n/**\n * @internal\n */\nexport function getDocIDsFromState(serializedState: string): string[] {\n try {\n const { docIds } = JSON.parse(serializedState).state;\n return docIds;\n } catch (e) {\n logger.error(\n `Document IDs are not found in the LRO's state. The results may not be ordered correctly.`\n );\n return [];\n }\n}\n\n/**\n * @internal\n */\nexport function createCreateAnalyzeBatchPollerLro<OptionsT extends OperationOptions>(settings: {\n client: GeneratedClient;\n tracing: TracingClient;\n options: OptionsT;\n}): LongRunningOperation {\n const { client, options, tracing } = settings;\n return {\n async sendInitialRequest(): Promise<LroResponse<unknown>> {\n throw new Error(`The operation has already started`);\n },\n sendPollRequest: createSendPollRequest({\n client,\n options,\n spanStr: `${clientName}.beginAnalyzeBatch`,\n tracing,\n }),\n };\n}\n\n/**\n * @internal\n */\nexport function processAnalyzeResult(options: {\n client: GeneratedClient;\n tracing: TracingClient;\n docIds: string[];\n opOptions: AnalyzeTextJobStatusOptionalParams;\n state: { continuationToken: string };\n}): (result: unknown, state: AnalyzeBatchOperationState) => PagedAnalyzeBatchResult {\n return (): PagedAnalyzeBatchResult => {\n const { client, docIds, opOptions, tracing, state } = options;\n const pageURL = state.continuationToken;\n const pagedResult: PagedResult<AnalyzeBatchResult[]> = {\n firstPageLink: pageURL,\n getPage: async (pageLink: string, maxPageSize?: number) => {\n const response = await sendRequest({\n client,\n spec: jobStatusOperationSpec,\n spanStr: `${clientName}.beginAnalyzeBatch`,\n // if `top` is set to `undefined`, the default value will not be sent\n // as part of the request.\n opOptions: maxPageSize ? { ...opOptions, top: maxPageSize } : opOptions,\n path: pageLink,\n tracing,\n });\n const flatResponse = response.flatResponse as AnalyzeTextJobStatusResponse;\n return {\n page: transformAnalyzeBatchResults(docIds, flatResponse.tasks.items, flatResponse.errors),\n nextPageLink: flatResponse.nextLink,\n };\n },\n };\n return getPagedAsyncIterator(pagedResult);\n };\n}\n\ntype Writable<T> = {\n -readonly [P in keyof T]: T[P];\n};\n\n/**\n * @internal\n */\nexport function createUpdateAnalyzeState(docIds?: string[]) {\n return (state: AnalyzeBatchOperationState, lastResponse: LroResponse): void => {\n const { createdOn, modifiedOn, id, displayName, expiresOn, tasks, lastUpdateDateTime } =\n lastResponse.flatResponse as AnalyzeTextJobStatusResponse & { lastUpdateDateTime: string };\n const mutableState = state as Writable<AnalyzeBatchOperationState> & {\n docIds?: string[];\n };\n mutableState.createdOn = createdOn;\n // FIXME: remove this mitigation when the service API is fixed\n mutableState.modifiedOn = modifiedOn ? 
modifiedOn : new Date(lastUpdateDateTime);\n mutableState.expiresOn = expiresOn;\n mutableState.displayName = displayName;\n mutableState.id = id;\n mutableState.actionSucceededCount = tasks.completed;\n mutableState.actionFailedCount = tasks.failed;\n mutableState.actionInProgressCount = tasks.inProgress;\n if (mutableState.docIds === undefined && docIds !== undefined) {\n mutableState.docIds = docIds;\n }\n };\n}\n\n/**\n * @internal\n */\nexport function createPollerWithCancellation(settings: {\n poller: SimplePollerLike<AnalyzeBatchOperationState, PagedAnalyzeBatchResult>;\n client: GeneratedClient;\n tracing: TracingClient;\n options: AnalyzeTextJobStatusOptionalParams;\n id: string;\n}): PollerLike<AnalyzeBatchOperationState, PagedAnalyzeBatchResult> {\n const { client, options, poller, id, tracing } = settings;\n return {\n ...poller,\n sendCancellationRequest: async () => {\n await tracing.withSpan(`${clientName}.beginAnalyzeBatch`, options, async (finalOptions) =>\n throwError(\n getRawResponse(\n (paramOptions) => client.analyzeText.cancelJob(id, paramOptions),\n finalOptions\n )\n )\n );\n },\n };\n}\n"]}
package/dist-esm/src/models.js
CHANGED
@@ -11,7 +11,6 @@ export const AnalyzeActionNames = {
 PiiEntityRecognition: "PiiEntityRecognition",
 LanguageDetection: "LanguageDetection",
 SentimentAnalysis: "SentimentAnalysis",
-DynamicClassification: "DynamicClassification",
 };
 /**
 * Type of actions supported by the {@link TextAnalysisClient.beginAnalyzeBatch} method.
@@ -29,14 +28,6 @@ export const AnalyzeBatchActionNames = {
 CustomSingleLabelClassification: "CustomSingleLabelClassification",
 CustomMultiLabelClassification: "CustomMultiLabelClassification",
 };
-/**
-* Known values of the {@link HealthcareAction.fhirVersion} parameter.
-*/
-export var KnownFhirVersion;
-(function (KnownFhirVersion) {
-/** 4.0.1 */
-KnownFhirVersion["4.0.1"] = "4.0.1";
-})(KnownFhirVersion || (KnownFhirVersion = {}));
 /**
 * Enum of possible error codes of a {@link TextAnalysisError}.
 */
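
Net effect on the public surface of models.js: `DynamicClassification` is dropped from `AnalyzeActionNames` and the `KnownFhirVersion` enum is removed, so beta code referencing either needs updating. A sketch of the remaining GA action names, assuming code written against the beta is being migrated:

```ts
import { AnalyzeActionNames } from "@azure/ai-language-text";

// AnalyzeActionNames is declared `as const` in the source, so its keys double
// as the string literals accepted by TextAnalysisClient.analyze.
type ActionName = keyof typeof AnalyzeActionNames;

const gaActions: ActionName[] = [
  "EntityLinking",
  "EntityRecognition",
  "KeyPhraseExtraction",
  "PiiEntityRecognition",
  "LanguageDetection",
  "SentimentAnalysis",
];

console.log(gaActions);
```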
package/dist-esm/src/models.js.map
CHANGED
@@ -1 +1 @@
-
{"version":3,"file":"models.js","sourceRoot":"","sources":["../../src/models.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,OAAO,EAsBL,cAAc,EACd,mBAAmB,GAcpB,MAAM,aAAa,CAAC;AA8DrB;;GAEG;AACH,MAAM,CAAC,MAAM,kBAAkB,GAAG;IAChC,aAAa,EAAE,eAAe;IAC9B,iBAAiB,EAAE,mBAAmB;IACtC,mBAAmB,EAAE,qBAAqB;IAC1C,oBAAoB,EAAE,sBAAsB;IAC5C,iBAAiB,EAAE,mBAAmB;IACtC,iBAAiB,EAAE,mBAAmB;IACtC,qBAAqB,EAAE,uBAAuB;CACtC,CAAC;AAEX;;GAEG;AACH,MAAM,CAAC,MAAM,uBAAuB,GAAG;IACrC,iBAAiB,EAAE,mBAAmB;IACtC,iBAAiB,EAAE,mBAAmB;IACtC,oBAAoB,EAAE,sBAAsB;IAC5C,mBAAmB,EAAE,qBAAqB;IAC1C,aAAa,EAAE,eAAe;IAC9B,UAAU,EAAE,YAAY;IACxB,uBAAuB,EAAE,yBAAyB;IAClD,wBAAwB,EAAE,0BAA0B;IACpD,uBAAuB,EAAE,yBAAyB;IAClD,+BAA+B,EAAE,iCAAiC;IAClE,8BAA8B,EAAE,gCAAgC;CACxD,CAAC;AAiCX;;GAEG;AACH,MAAM,CAAN,IAAY,gBAGX;AAHD,WAAY,gBAAgB;IAC1B,YAAY;IACZ,mCAAiB,CAAA;AACnB,CAAC,EAHW,gBAAgB,KAAhB,gBAAgB,QAG3B;AAcD;;GAEG;AACH,MAAM,CAAC,MAAM,0BAA0B,mCAAQ,cAAc,GAAK,mBAAmB,CAAE,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport {\n AbstractiveSummary,\n AssessmentSentiment,\n ClassificationCategory,\n CustomEntityRecognitionAction,\n CustomMultiLabelClassificationAction,\n CustomSingleLabelClassificationAction,\n DetectedLanguage,\n DocumentDetectedLanguage,\n DocumentSentimentLabel,\n DocumentWarning,\n DynamicClassificationAction,\n Entity,\n EntityDataSource,\n EntityLinkingAction,\n EntityRecognitionAction,\n EntityWithResolution,\n ExtractiveSummarizationAction,\n HealthcareAction,\n HealthcareAssertion,\n HealthcareEntityCategory,\n KeyPhraseExtractionAction,\n KnownErrorCode,\n KnownInnerErrorCode,\n LanguageDetectionAction,\n LinkedEntity,\n PiiEntityRecognitionAction,\n RelationType,\n SentenceSentimentLabel,\n SentimentAnalysisAction,\n SentimentConfidenceScores,\n StringIndexType,\n SummarySentence,\n TargetConfidenceScores,\n TextDocumentBatchStatistics,\n TextDocumentStatistics,\n TokenSentimentLabel,\n} from \"./generated\";\nimport { CommonClientOptions, OperationOptions } from \"@azure/core-client\";\nimport { OperationState, SimplePollerLike } from \"@azure/core-lro\";\nimport { PagedAsyncIterableIterator } from \"@azure/core-paging\";\n\n/**\n * Configuration options for {@link TextAnalysisClient}.\n */\nexport interface TextAnalysisClientOptions extends CommonClientOptions {\n /**\n * The default country hint to use. Defaults to \"us\".\n */\n defaultCountryHint?: string;\n\n /**\n * The default language to use. 
Defaults to \"en\".\n */\n defaultLanguage?: string;\n /**\n * The version of the Cognitive Language Service API to use.\n */\n serviceVersion?: string;\n}\n\n/**\n * Options common to all operations.\n */\nexport interface TextAnalysisOperationOptions extends OperationOptions {\n /**\n * If set to true, response will contain input and document level statistics.\n */\n includeStatistics?: boolean;\n}\n\n/**\n * Options for the begin analyze actions operation.\n */\nexport interface BeginAnalyzeBatchOptions extends TextAnalysisOperationOptions {\n /**\n * Time delay between poll requests, in milliseconds.\n */\n updateIntervalInMs?: number;\n /**\n * The operation's display name.\n */\n displayName?: string;\n /**\n * Default language code to use for records requesting automatic language detection\n */\n defaultLanguage?: string;\n}\n\n/**\n * Options for the begin analyze actions operation.\n */\nexport interface RestoreAnalyzeBatchPollerOptions extends TextAnalysisOperationOptions {\n /**\n * Time delay between poll requests, in milliseconds.\n */\n updateIntervalInMs?: number;\n}\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.analyze} method.\n */\nexport const AnalyzeActionNames = {\n EntityLinking: \"EntityLinking\",\n EntityRecognition: \"EntityRecognition\",\n KeyPhraseExtraction: \"KeyPhraseExtraction\",\n PiiEntityRecognition: \"PiiEntityRecognition\",\n LanguageDetection: \"LanguageDetection\",\n SentimentAnalysis: \"SentimentAnalysis\",\n DynamicClassification: \"DynamicClassification\",\n} as const;\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.beginAnalyzeBatch} method.\n */\nexport const AnalyzeBatchActionNames = {\n SentimentAnalysis: \"SentimentAnalysis\",\n EntityRecognition: \"EntityRecognition\",\n PiiEntityRecognition: \"PiiEntityRecognition\",\n KeyPhraseExtraction: \"KeyPhraseExtraction\",\n EntityLinking: \"EntityLinking\",\n Healthcare: \"Healthcare\",\n ExtractiveSummarization: \"ExtractiveSummarization\",\n AbstractiveSummarization: \"AbstractiveSummarization\",\n CustomEntityRecognition: \"CustomEntityRecognition\",\n CustomSingleLabelClassification: \"CustomSingleLabelClassification\",\n CustomMultiLabelClassification: \"CustomMultiLabelClassification\",\n} as const;\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.analyze} method.\n */\nexport type AnalyzeActionName = keyof typeof AnalyzeActionNames;\n\n/**\n * The type of parameters for every action in ${@link AnalyzeActionNames}.\n */\nexport type AnalyzeActionParameters<ActionName extends AnalyzeActionName> = {\n EntityLinking: EntityLinkingAction;\n EntityRecognition: EntityRecognitionAction;\n PiiEntityRecognition: PiiEntityRecognitionAction;\n KeyPhraseExtraction: KeyPhraseExtractionAction;\n SentimentAnalysis: SentimentAnalysisAction;\n DynamicClassification: DynamicClassificationAction;\n LanguageDetection: LanguageDetectionAction;\n}[ActionName];\n\n/**\n * The type of results of every action in ${@link AnalyzeActionNames}.\n */\nexport type AnalyzeResult<ActionName extends AnalyzeActionName> = {\n EntityLinking: EntityLinkingResult[];\n EntityRecognition: EntityRecognitionResult[];\n PiiEntityRecognition: PiiEntityRecognitionResult[];\n KeyPhraseExtraction: KeyPhraseExtractionResult[];\n SentimentAnalysis: SentimentAnalysisResult[];\n DynamicClassification: DynamicClassificationResult[];\n LanguageDetection: LanguageDetectionResult[];\n}[ActionName];\n\n/**\n * Known values of the {@link HealthcareAction.fhirVersion} parameter.\n 
*/\nexport enum KnownFhirVersion {\n /** 4.0.1 */\n \"4.0.1\" = \"4.0.1\",\n}\n\n/** Options for an Abstractive Summarization action. */\nexport interface AbstractiveSummarizationAction {\n /** The max number of sentences to be part of the summary. */\n maxSentenceCount?: number;\n /**\n * Specifies the measurement unit used to calculate the offset and length properties. For a list of possible values, see {@link KnownStringIndexType}.\n *\n * The default is the JavaScript's default which is \"Utf16CodeUnit\".\n */\n stringIndexType?: StringIndexType;\n}\n\n/**\n * Enum of possible error codes of a {@link TextAnalysisError}.\n */\nexport const KnownTextAnalysisErrorCode = { ...KnownErrorCode, ...KnownInnerErrorCode };\n\n/**\n * Type describing an API error.\n */\nexport interface TextAnalysisError {\n /**\n * A code describing the kind of error produced. See {@link KnownTextAnalysisErrorCode}.\n */\n readonly code: string;\n /**\n * A message from the service explaining the error\n */\n readonly message: string;\n /**\n * The target of the particular error (for example, the name of an invalid parameter)\n */\n readonly target?: string;\n}\n\n/**\n * Base type for results of an action corresponding to a single input document.\n */\nexport interface TextAnalysisSuccessResult {\n /**\n * Unique, non-empty document identifier.\n */\n readonly id: string;\n\n /**\n * Statistics about the input document and how it was processed by the service.\n * This property will have a value when includeStatistics is set to true in\n * the client call.\n */\n readonly statistics?: TextDocumentStatistics;\n\n /**\n * An array of warning data corresponding to this document.\n *\n * If no warnings were returned, this array will be empty.\n */\n readonly warnings: DocumentWarning[];\n\n /**\n * Discriminant to determine if this is an error result.\n */\n readonly error?: undefined;\n}\n\n/**\n * Base type for error results of an action corresponding to a single document.\n */\nexport interface TextAnalysisErrorResult {\n /**\n * Unique, non-empty document identifier.\n */\n readonly id: string;\n\n /**\n * The Error for this document result.\n */\n readonly error: TextAnalysisError;\n}\n\n/**\n * The result of an entity recognition action on a single document.\n */\nexport type EntityRecognitionResult = EntityRecognitionSuccessResult | EntityRecognitionErrorResult;\n\n/**\n * The result of an entity recognition action on a single document, containing\n * a collection of {@link Entity} objects identified in that document.\n */\nexport interface EntityRecognitionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: EntityWithResolution[];\n}\n\n/**\n * An error result from an entity recognition action on a single document.\n */\nexport type EntityRecognitionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of an entity recognition action on a single document.\n */\nexport type PiiEntityRecognitionResult =\n | PiiEntityRecognitionSuccessResult\n | PiiEntityRecognitionErrorResult;\n\n/**\n * The result of a pii entity recognition action on a single document,\n * containing the collection of {@link Entity} objects identified in that\n * document.\n */\nexport interface PiiEntityRecognitionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: Entity[];\n /**\n * The text redacted.\n */\n readonly redactedText: 
string;\n}\n\n/**\n * An error result from a pii entity recognition action on a single document.\n */\nexport type PiiEntityRecognitionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of an entity linking action on a single document.\n */\nexport type EntityLinkingResult = EntityLinkingSuccessResult | EntityLinkingErrorResult;\n\n/**\n * The result of a entity linking action on a single document, containing a\n * collection of the {@link LinkedEntity} objects identified in that document.\n */\nexport interface EntityLinkingSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: LinkedEntity[];\n}\n\n/**\n * An error result from an entity linking action on a single document.\n */\nexport type EntityLinkingErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of a language detection action on a single document.\n */\nexport type LanguageDetectionResult = LanguageDetectionSuccessResult | LanguageDetectionErrorResult;\n\n/**\n * The result of a language detection action on a single document,\n * containing a prediction of what language the document is written in.\n */\nexport interface LanguageDetectionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The top detected language by confidence score.\n */\n readonly primaryLanguage: DetectedLanguage;\n}\n\n/**\n * An error result from a language detection action on a single document.\n */\nexport type LanguageDetectionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of a sentiment analysis action on a single document.\n */\nexport type KeyPhraseExtractionResult =\n | KeyPhraseExtractionSuccessResult\n | KeyPhraseExtractionErrorResult;\n\n/**\n * The result of a key phrase extraction action on a single document,\n * containing a collection of the key phrases identified in that document.\n */\nexport interface KeyPhraseExtractionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * A list of representative words or phrases. The number of key phrases\n * returned is proportional to the number of words in the input document.\n */\n readonly keyPhrases: string[];\n}\n\n/**\n * An error result from a key phrase extraction action on a single document.\n */\nexport type KeyPhraseExtractionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of a sentiment analysis action on a single document.\n */\nexport type SentimentAnalysisResult = SentimentAnalysisSuccessResult | SentimentAnalysisErrorResult;\n\n/**\n * The result of a sentiment analysis action on a single document,\n * containing the predicted sentiment for each sentence as well as for the full\n * document.\n */\nexport interface SentimentAnalysisSuccessResult extends TextAnalysisSuccessResult {\n /**\n * Predicted sentiment for document. For a list of possible values, see {@link DocumentSentimentLabel}\n */\n readonly sentiment: DocumentSentimentLabel;\n /**\n * Document level sentiment confidence scores between 0 and 1 for each\n * sentiment class.\n */\n readonly confidenceScores: SentimentConfidenceScores;\n /**\n * The predicted sentiment for each sentence in the corresponding document.\n */\n readonly sentences: SentenceSentiment[];\n}\n\n/**\n * The predicted sentiment for a given span of text. 
For more information\n * regarding text sentiment, see {@link https://docs.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}.\n */\nexport interface SentenceSentiment {\n /**\n * The sentence text.\n */\n readonly text: string;\n /**\n * The predicted Sentiment for the sentence. For a list of possible values,\n * see {@link SentenceSentimentLabel}\n */\n readonly sentiment: SentenceSentimentLabel;\n /**\n * The sentiment confidence score between 0 and 1 for the sentence for all\n * classes.\n */\n readonly confidenceScores: SentimentConfidenceScores;\n /**\n * The sentence text offset from the start of the document.\n */\n readonly offset: number;\n /**\n * The length of the sentence text.\n */\n readonly length: number;\n /**\n * The list of opinions mined from this sentence. For example in \"The food is\n * good, but the service is bad\", the following two opinions will be returned:\n * \"food is good\" and \"service is bad\".\n *\n * It is non-empty only returned if {@link includeOpinionMining} was set to\n * `true`.\n */\n readonly opinions: Opinion[];\n}\n\n/**\n * TargetSentiment contains the predicted sentiment, confidence scores and other\n * information about a target of a product. A target of a product/service is a\n * key component of that product/service. For example in \"The food at Hotel Foo\n * is good\", \"food\" is a target of \"Hotel Foo\".\n */\nexport interface TargetSentiment {\n /**\n * The sentiment confidence score between 0 and 1 for the target for\n * 'positive' and 'negative' labels.\n */\n readonly confidenceScores: TargetConfidenceScores;\n /**\n * The predicted Sentiment for the Target. For a list of possible values,\n * see {@link TokenSentimentLabel}\n */\n readonly sentiment: TokenSentimentLabel;\n /**\n * The target text.\n */\n readonly text: string;\n /**\n * The Target text offset from the start of the sentence.\n */\n readonly offset: number;\n /**\n * The length of the Target text.\n */\n readonly length: number;\n}\n\n/**\n * A mined opinion object represents an opinion we've extracted from a sentence.\n * It consists of both a target that these assessments are about, and the actual\n * assessments themselves.\n */\nexport interface Opinion {\n /**\n * The target of a product/service that this assessment is about.\n */\n readonly target: TargetSentiment;\n /**\n * The actual assessments of the target.\n */\n readonly assessments: AssessmentSentiment[];\n}\n\n/**\n * The result of a language detection action on a single document.\n */\nexport type DynamicClassificationResult =\n | DynamicClassificationSuccessResult\n | DynamicClassificationErrorResult;\n\n/**\n * The result of a language detection action on a single document,\n * containing a prediction of what language the document is written in.\n */\nexport interface DynamicClassificationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of classifications in the input document.\n */\n readonly classifications: ClassificationCategory[];\n}\n\n/**\n * An error result from a language detection action on a single document.\n */\nexport type DynamicClassificationErrorResult = TextAnalysisErrorResult;\n\n/**\n * A healthcare entity represented as a node in a directed graph where the edges are\n * a particular type of relationship between the source and target nodes.\n */\nexport interface HealthcareEntity extends Entity {\n /**\n * Normalized name for the entity. 
For example, the normalized text for \"histologically\" is \"histologic\".\n */\n readonly normalizedText?: string;\n /**\n * Whether the entity is negated.\n */\n readonly assertion?: HealthcareAssertion;\n /**\n * Entity references in known data sources.\n */\n readonly dataSources: EntityDataSource[];\n /**\n * Defines values for HealthcareEntityCategory.\n * {@link KnownHealthcareEntityCategory} can be used interchangeably with HealthcareEntityCategory,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **BODY_STRUCTURE**\n * **AGE**\n * **GENDER**\n * **EXAMINATION_NAME**\n * **DATE**\n * **DIRECTION**\n * **FREQUENCY**\n * **MEASUREMENT_VALUE**\n * **MEASUREMENT_UNIT**\n * **RELATIONAL_OPERATOR**\n * **TIME**\n * **GENE_OR_PROTEIN**\n * **VARIANT**\n * **ADMINISTRATIVE_EVENT**\n * **CARE_ENVIRONMENT**\n * **HEALTHCARE_PROFESSION**\n * **DIAGNOSIS**\n * **SYMPTOM_OR_SIGN**\n * **CONDITION_QUALIFIER**\n * **MEDICATION_CLASS**\n * **MEDICATION_NAME**\n * **DOSAGE**\n * **MEDICATION_FORM**\n * **MEDICATION_ROUTE**\n * **FAMILY_RELATION**\n * **TREATMENT_NAME**\n */\n readonly category: HealthcareEntityCategory;\n}\n\n/**\n * The type of different roles a healthcare entity can play in a relation.\n */\nexport type HealthcareEntityRelationRoleType = string;\n\n/**\n * A healthcare entity that plays a specific role in a relation.\n */\nexport interface HealthcareEntityRelationRole {\n /**\n * A healthcare entity\n */\n readonly entity: HealthcareEntity;\n /**\n * The role of the healthcare entity in a particular relation.\n */\n readonly name: HealthcareEntityRelationRoleType;\n}\n\n/**\n * A relationship between two or more healthcare entities.\n */\nexport interface HealthcareEntityRelation {\n /**\n * The type of the healthcare relation.\n */\n readonly relationType: RelationType;\n /**\n * The list of healthcare entities and their roles in the healthcare relation.\n */\n readonly roles: HealthcareEntityRelationRole[];\n /**\n * The confidence score between 0 and 1 of the extracted relation.\n */\n readonly confidenceScore?: number;\n}\n\n/**\n * The results of a successful healthcare analysis action for a single document.\n */\nexport interface HealthcareSuccessResult extends TextAnalysisSuccessResult {\n /**\n * Healthcare entities.\n */\n readonly entities: HealthcareEntity[];\n /**\n * Relations between healthcare entities.\n */\n readonly entityRelations: HealthcareEntityRelation[];\n /**\n * JSON bundle containing a FHIR compatible object for consumption in other\n * Healthcare tools. 
For additional information see {@link https://www.hl7.org/fhir/overview.html}.\n */\n readonly fhirBundle?: Record<string, any>;\n}\n\n/**\n * An error result from the healthcare analysis action on a single document.\n */\nexport type HealthcareErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the healthcare analysis action on a single document.\n */\nexport type HealthcareResult = HealthcareSuccessResult | HealthcareErrorResult;\n\n/**\n * The result of the extractive summarization action on a single document.\n */\nexport type ExtractiveSummarizationResult =\n | ExtractiveSummarizationSuccessResult\n | ExtractiveSummarizationErrorResult;\n\n/**\n * The result of the extractive summarization action on a single document,\n * containing a collection of the summary identified in that document.\n */\nexport interface ExtractiveSummarizationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * A list of sentences composing a summary of the input document.\n */\n readonly sentences: SummarySentence[];\n}\n\n/**\n * An error result from the extractive summarization action on a single document.\n */\nexport type ExtractiveSummarizationErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the abstractive summarization action on a single document.\n */\nexport type AbstractiveSummarizationResult =\n | AbstractiveSummarizationSuccessResult\n | AbstractiveSummarizationErrorResult;\n\n/**\n * The result of the abstractive summarization action on a single document,\n * containing a collection of the summaries identified for that document.\n */\nexport interface AbstractiveSummarizationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * A list of summaries of the input document.\n */\n readonly summaries: AbstractiveSummary[];\n}\n\n/**\n * An error result from the abstractive summarization action on a single document.\n */\nexport type AbstractiveSummarizationErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the custom entity recognition action on a single document.\n */\nexport type CustomEntityRecognitionResult =\n | CustomEntityRecognitionSuccessResult\n | CustomEntityRecognitionErrorResult;\n\n/**\n * The result of the custom entity recognition action on a single document,\n * containing a collection of the entities identified in that document.\n */\nexport interface CustomEntityRecognitionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: Entity[];\n}\n\n/**\n * An error result from the custom entity recognition action on a single document.\n */\nexport type CustomEntityRecognitionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the single-label classification action on a single document.\n */\nexport type CustomSingleLabelClassificationResult =\n | CustomSingleLabelClassificationSuccessResult\n | CustomSingleLabelClassificationErrorResult;\n\n/**\n * The result of a successful single-label classification action on a single document,\n * containing the result of the classification.\n */\nexport interface CustomSingleLabelClassificationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of classifications in the input document.\n */\n readonly classifications: ClassificationCategory[];\n}\n\n/**\n * An error result from the single-label classification action on a single document.\n */\nexport type CustomSingleLabelClassificationErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the multi-label 
classification action on a multi document.\n */\nexport type CustomMultiLabelClassificationResult =\n | CustomMultiLabelClassificationSuccessResult\n | CustomMultiLabelClassificationErrorResult;\n\n/**\n * The result of a successful multi-label classification action on a multi document,\n * containing the result of the classification.\n */\nexport interface CustomMultiLabelClassificationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of classifications in the input document.\n */\n readonly classifications: ClassificationCategory[];\n}\n\n/**\n * An error result from the multi-label classification action on a multi document.\n */\nexport type CustomMultiLabelClassificationErrorResult = TextAnalysisErrorResult;\n\n/**\n * Options common to all batch actions.\n */\nexport interface AnalyzeBatchActionCommon {\n /**\n * The name of the action.\n */\n actionName?: string;\n}\n\n/** Options for an entity linking batch action. */\nexport interface EntityLinkingBatchAction extends AnalyzeBatchActionCommon, EntityLinkingAction {\n /**\n * The kind of the action.\n */\n kind: \"EntityLinking\";\n}\n\n/** Options for an entity recognition batch action. */\nexport interface EntityRecognitionBatchAction\n extends AnalyzeBatchActionCommon,\n EntityRecognitionAction {\n /**\n * The kind of the action.\n */\n kind: \"EntityRecognition\";\n}\n\n/** Options for an key phrase extraction batch action. */\nexport interface KeyPhraseExtractionBatchAction\n extends AnalyzeBatchActionCommon,\n KeyPhraseExtractionAction {\n /**\n * The kind of the action.\n */\n kind: \"KeyPhraseExtraction\";\n}\n\n/** Options for a pii entity recognition batch action. */\nexport interface PiiEntityRecognitionBatchAction\n extends AnalyzeBatchActionCommon,\n PiiEntityRecognitionAction {\n /**\n * The kind of the action.\n */\n kind: \"PiiEntityRecognition\";\n}\n\n/** Options for a healthcare batch action. */\nexport interface HealthcareBatchAction extends AnalyzeBatchActionCommon, HealthcareAction {\n /**\n * The kind of the action.\n */\n kind: \"Healthcare\";\n}\n\n/** Options for a sentiment analysis batch action. */\nexport interface SentimentAnalysisBatchAction\n extends AnalyzeBatchActionCommon,\n SentimentAnalysisAction {\n /**\n * The kind of the action.\n */\n kind: \"SentimentAnalysis\";\n}\n\n/** Options for an extractive summarization batch action. */\nexport interface ExtractiveSummarizationBatchAction\n extends AnalyzeBatchActionCommon,\n ExtractiveSummarizationAction {\n /**\n * The kind of the action.\n */\n kind: \"ExtractiveSummarization\";\n}\n\n/** Options for an abstractive summarization batch action. */\nexport interface AbstractiveSummarizationBatchAction\n extends AnalyzeBatchActionCommon,\n AbstractiveSummarizationAction {\n /**\n * The kind of the action.\n */\n kind: \"AbstractiveSummarization\";\n}\n\n/** Options for a custom entity recognition batch action. */\nexport interface CustomEntityRecognitionBatchAction\n extends AnalyzeBatchActionCommon,\n CustomEntityRecognitionAction {\n /**\n * The kind of the action.\n */\n kind: \"CustomEntityRecognition\";\n}\n\n/** Options for a custom single-label classification batch action. */\nexport interface CustomSingleLabelClassificationBatchAction\n extends AnalyzeBatchActionCommon,\n CustomSingleLabelClassificationAction {\n /**\n * The kind of the action.\n */\n kind: \"CustomSingleLabelClassification\";\n}\n\n/** Options for a custom multi-label classification batch action. 
*/\nexport interface CustomMultiLabelClassificationBatchAction\n extends AnalyzeBatchActionCommon,\n CustomMultiLabelClassificationAction {\n /**\n * The kind of the action.\n */\n kind: \"CustomMultiLabelClassification\";\n}\n\n/**\n * Batch of actions.\n */\nexport type AnalyzeBatchAction =\n | EntityLinkingBatchAction\n | EntityRecognitionBatchAction\n | KeyPhraseExtractionBatchAction\n | PiiEntityRecognitionBatchAction\n | HealthcareBatchAction\n | SentimentAnalysisBatchAction\n | ExtractiveSummarizationBatchAction\n | AbstractiveSummarizationBatchAction\n | CustomEntityRecognitionBatchAction\n | CustomSingleLabelClassificationBatchAction\n | CustomMultiLabelClassificationBatchAction;\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.beginAnalyzeBatch} method.\n */\nexport type AnalyzeBatchActionName = keyof typeof AnalyzeBatchActionNames;\n\n/** The State of a batched action */\nexport interface BatchActionState<Kind extends AnalyzeBatchActionName> {\n /**\n * The kind of the action results.\n */\n readonly kind: Kind;\n /**\n * The name of the action.\n */\n readonly actionName?: string;\n /**\n * Action statistics.\n */\n readonly statistics?: TextDocumentBatchStatistics;\n}\n\n/**\n * Action metadata.\n */\nexport interface ActionMetadata {\n /**\n * The model version used to perform the action.\n */\n readonly modelVersion: string;\n}\n\n/**\n * Custom action metadata.\n */\nexport interface CustomActionMetadata {\n /**\n * The name of the project used to perform the action.\n */\n readonly projectName: string;\n /**\n * The name of the deployment used to perform the action.\n */\n readonly deploymentName: string;\n}\n\n/**\n * Document results with potentially automatically detected language.\n */\nexport type WithDetectedLanguage<T> = T &\n DocumentDetectedLanguage & {\n /** Indicates whether the default language hint was used */\n isLanguageDefaulted?: boolean;\n };\n\n/**\n * The state of a succeeded batched action.\n */\nexport interface BatchActionSuccessResult<T, Kind extends AnalyzeBatchActionName>\n extends BatchActionState<Kind> {\n /**\n * The list of document results.\n */\n readonly results: WithDetectedLanguage<T>[];\n /**\n * When this action was completed by the service.\n */\n readonly completedOn: Date;\n /**\n * Discriminant to determine if that this is an error result.\n */\n readonly error?: undefined;\n}\n\n/**\n * The error of an analyze batch action.\n */\nexport interface BatchActionErrorResult<Kind extends AnalyzeBatchActionName>\n extends BatchActionState<Kind> {\n /**\n * When this action was completed by the service.\n */\n readonly failedOn: Date;\n /**\n * The Error for this action result.\n */\n readonly error: TextAnalysisError;\n}\n\n/**\n * The result of a batched action.\n */\nexport type BatchActionResult<T, Kind extends AnalyzeBatchActionName> =\n | BatchActionSuccessResult<T, Kind>\n | BatchActionErrorResult<Kind>;\n\n/**\n * The result of an entity linking batch action.\n */\nexport type EntityLinkingBatchResult = ActionMetadata &\n BatchActionResult<EntityLinkingResult, \"EntityLinking\">;\n\n/**\n * The result of an entity recognition batch action.\n */\nexport type EntityRecognitionBatchResult = ActionMetadata &\n BatchActionResult<EntityRecognitionResult, \"EntityRecognition\">;\n\n/**\n * The result of a key phrase extraction batch action.\n */\nexport type KeyPhraseExtractionBatchResult = ActionMetadata &\n BatchActionResult<KeyPhraseExtractionResult, \"KeyPhraseExtraction\">;\n\n/**\n * The result of a pii entity 
recognition batch action.\n */\nexport type PiiEntityRecognitionBatchResult = ActionMetadata &\n BatchActionResult<PiiEntityRecognitionResult, \"PiiEntityRecognition\">;\n\n/**\n * The result of a sentiment analysis batch action.\n */\nexport type SentimentAnalysisBatchResult = ActionMetadata &\n BatchActionResult<SentimentAnalysisResult, \"SentimentAnalysis\">;\n\n/**\n * The result of a healthcare batch action.\n */\nexport type HealthcareBatchResult = ActionMetadata &\n BatchActionResult<HealthcareResult, \"Healthcare\">;\n\n/**\n * The result of an extractive summarization batch action.\n */\nexport type ExtractiveSummarizationBatchResult = ActionMetadata &\n BatchActionResult<ExtractiveSummarizationResult, \"ExtractiveSummarization\">;\n\n/**\n * The result of an abstractive summarization batch action.\n */\nexport type AbstractiveSummarizationBatchResult = ActionMetadata &\n BatchActionResult<AbstractiveSummarizationResult, \"AbstractiveSummarization\">;\n\n/**\n * The result of a custom entity recognition batch action.\n */\nexport type CustomEntityRecognitionBatchResult = CustomActionMetadata &\n BatchActionResult<CustomEntityRecognitionResult, \"CustomEntityRecognition\">;\n\n/**\n * The result of a custom single-label classification batch action.\n */\nexport type CustomSingleLabelClassificationBatchResult = CustomActionMetadata &\n BatchActionResult<CustomSingleLabelClassificationResult, \"CustomSingleLabelClassification\">;\n\n/**\n * The result of a custom multi-label classification batch action.\n */\nexport type CustomMultiLabelClassificationBatchResult = CustomActionMetadata &\n BatchActionResult<CustomMultiLabelClassificationResult, \"CustomMultiLabelClassification\">;\n/**\n * Results of a batch of actions.\n */\nexport type AnalyzeBatchResult =\n | EntityLinkingBatchResult\n | EntityRecognitionBatchResult\n | KeyPhraseExtractionBatchResult\n | PiiEntityRecognitionBatchResult\n | SentimentAnalysisBatchResult\n | HealthcareBatchResult\n | ExtractiveSummarizationBatchResult\n | AbstractiveSummarizationBatchResult\n | CustomEntityRecognitionBatchResult\n | CustomSingleLabelClassificationBatchResult\n | CustomMultiLabelClassificationBatchResult;\n\n/**\n * An error result from a sentiment analysis action on a single document.\n */\nexport type SentimentAnalysisErrorResult = TextAnalysisErrorResult;\n\n/**\n * Paged results of the {@link TextAnalysisClient.beginAnalyzeBatch} operation.\n */\nexport type PagedAnalyzeBatchResult = PagedAsyncIterableIterator<AnalyzeBatchResult>;\n\n/**\n * A poller that polls long-running operations started by {@link TextAnalysisClient.beginAnalyzeBatch}.\n */\nexport type AnalyzeBatchPoller = PollerLike<AnalyzeBatchOperationState, PagedAnalyzeBatchResult>;\n\n/**\n * The metadata for long-running operations started by {@link TextAnalysisClient.beginAnalyzeBatch}.\n */\nexport interface AnalyzeBatchOperationMetadata {\n /**\n * The date and time the operation was created.\n */\n readonly createdOn: Date;\n /**\n * The date and time when the operation results will expire on the server.\n */\n readonly expiresOn?: Date;\n /**\n * The operation id.\n */\n readonly id: string;\n /**\n * The time the operation status was last updated.\n */\n readonly modifiedOn: Date;\n /**\n * Number of successfully completed actions.\n */\n readonly actionSucceededCount: number;\n /**\n * Number of failed actions.\n */\n readonly actionFailedCount: number;\n /**\n * Number of actions still in progress.\n */\n readonly actionInProgressCount: number;\n /**\n * The 
operation's display name.\n */\n readonly displayName?: string;\n}\n\n/**\n * The state of the begin analyze polling operation.\n */\nexport interface AnalyzeBatchOperationState\n extends OperationState<PagedAnalyzeBatchResult>,\n AnalyzeBatchOperationMetadata {}\n\n/**\n * Abstract representation of a poller, intended to expose just the minimal API that the user needs to work with.\n */\nexport interface PollerLike<TState extends OperationState<TResult>, TResult>\n extends SimplePollerLike<TState, TResult> {\n /**\n * sends a cancellation request.\n */\n sendCancellationRequest: () => Promise<void>;\n}\n"]}
1 +
{"version":3,"file":"models.js","sourceRoot":"","sources":["../../src/models.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAElC,OAAO,EAmBL,cAAc,EACd,mBAAmB,GAcpB,MAAM,aAAa,CAAC;AA0DrB;;GAEG;AACH,MAAM,CAAC,MAAM,kBAAkB,GAAG;IAChC,aAAa,EAAE,eAAe;IAC9B,iBAAiB,EAAE,mBAAmB;IACtC,mBAAmB,EAAE,qBAAqB;IAC1C,oBAAoB,EAAE,sBAAsB;IAC5C,iBAAiB,EAAE,mBAAmB;IACtC,iBAAiB,EAAE,mBAAmB;CAC9B,CAAC;AAEX;;GAEG;AACH,MAAM,CAAC,MAAM,uBAAuB,GAAG;IACrC,iBAAiB,EAAE,mBAAmB;IACtC,iBAAiB,EAAE,mBAAmB;IACtC,oBAAoB,EAAE,sBAAsB;IAC5C,mBAAmB,EAAE,qBAAqB;IAC1C,aAAa,EAAE,eAAe;IAC9B,UAAU,EAAE,YAAY;IACxB,uBAAuB,EAAE,yBAAyB;IAClD,wBAAwB,EAAE,0BAA0B;IACpD,uBAAuB,EAAE,yBAAyB;IAClD,+BAA+B,EAAE,iCAAiC;IAClE,8BAA8B,EAAE,gCAAgC;CACxD,CAAC;AA2CX;;GAEG;AACH,MAAM,CAAC,MAAM,0BAA0B,mCAAQ,cAAc,GAAK,mBAAmB,CAAE,CAAC","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport {\n AbstractiveSummary,\n AssessmentSentiment,\n ClassificationCategory,\n CustomEntityRecognitionAction,\n CustomMultiLabelClassificationAction,\n CustomSingleLabelClassificationAction,\n DetectedLanguage,\n DocumentSentimentLabel,\n DocumentWarning,\n Entity,\n EntityDataSource,\n EntityLinkingAction,\n EntityRecognitionAction,\n ExtractiveSummarizationAction,\n HealthcareAction,\n HealthcareAssertion,\n HealthcareEntityCategory,\n KeyPhraseExtractionAction,\n KnownErrorCode,\n KnownInnerErrorCode,\n LanguageDetectionAction,\n LinkedEntity,\n PiiEntityRecognitionAction,\n RelationType,\n SentenceSentimentLabel,\n SentimentAnalysisAction,\n SentimentConfidenceScores,\n StringIndexType,\n SummarySentence,\n TargetConfidenceScores,\n TextDocumentBatchStatistics,\n TextDocumentStatistics,\n TokenSentimentLabel,\n} from \"./generated\";\nimport { CommonClientOptions, OperationOptions } from \"@azure/core-client\";\nimport { OperationState, SimplePollerLike } from \"@azure/core-lro\";\nimport { PagedAsyncIterableIterator } from \"@azure/core-paging\";\n\n/**\n * Configuration options for {@link TextAnalysisClient}.\n */\nexport interface TextAnalysisClientOptions extends CommonClientOptions {\n /**\n * The default country hint to use. Defaults to \"us\".\n */\n defaultCountryHint?: string;\n\n /**\n * The default language to use. 
Defaults to \"en\".\n */\n defaultLanguage?: string;\n /**\n * The version of the Cognitive Language Service API to use.\n */\n serviceVersion?: string;\n}\n\n/**\n * Options common to all operations.\n */\nexport interface TextAnalysisOperationOptions extends OperationOptions {\n /**\n * If set to true, response will contain input and document level statistics.\n */\n includeStatistics?: boolean;\n}\n\n/**\n * Options for the begin analyze actions operation.\n */\nexport interface BeginAnalyzeBatchOptions extends TextAnalysisOperationOptions {\n /**\n * Time delay between poll requests, in milliseconds.\n */\n updateIntervalInMs?: number;\n /**\n * The operation's display name.\n */\n displayName?: string;\n}\n\n/**\n * Options for the begin analyze actions operation.\n */\nexport interface RestoreAnalyzeBatchPollerOptions extends TextAnalysisOperationOptions {\n /**\n * Time delay between poll requests, in milliseconds.\n */\n updateIntervalInMs?: number;\n}\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.analyze} method.\n */\nexport const AnalyzeActionNames = {\n EntityLinking: \"EntityLinking\",\n EntityRecognition: \"EntityRecognition\",\n KeyPhraseExtraction: \"KeyPhraseExtraction\",\n PiiEntityRecognition: \"PiiEntityRecognition\",\n LanguageDetection: \"LanguageDetection\",\n SentimentAnalysis: \"SentimentAnalysis\",\n} as const;\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.beginAnalyzeBatch} method.\n */\nexport const AnalyzeBatchActionNames = {\n SentimentAnalysis: \"SentimentAnalysis\",\n EntityRecognition: \"EntityRecognition\",\n PiiEntityRecognition: \"PiiEntityRecognition\",\n KeyPhraseExtraction: \"KeyPhraseExtraction\",\n EntityLinking: \"EntityLinking\",\n Healthcare: \"Healthcare\",\n ExtractiveSummarization: \"ExtractiveSummarization\",\n AbstractiveSummarization: \"AbstractiveSummarization\",\n CustomEntityRecognition: \"CustomEntityRecognition\",\n CustomSingleLabelClassification: \"CustomSingleLabelClassification\",\n CustomMultiLabelClassification: \"CustomMultiLabelClassification\",\n} as const;\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.analyze} method.\n */\nexport type AnalyzeActionName = keyof typeof AnalyzeActionNames;\n\n/**\n * The type of parameters for every action in ${@link AnalyzeActionNames}.\n */\nexport type AnalyzeActionParameters<ActionName extends AnalyzeActionName> = {\n EntityLinking: EntityLinkingAction;\n EntityRecognition: EntityRecognitionAction;\n PiiEntityRecognition: PiiEntityRecognitionAction;\n KeyPhraseExtraction: KeyPhraseExtractionAction;\n SentimentAnalysis: SentimentAnalysisAction;\n LanguageDetection: LanguageDetectionAction;\n}[ActionName];\n\n/**\n * The type of results of every action in ${@link AnalyzeActionNames}.\n */\nexport type AnalyzeResult<ActionName extends AnalyzeActionName> = {\n EntityLinking: EntityLinkingResult[];\n EntityRecognition: EntityRecognitionResult[];\n PiiEntityRecognition: PiiEntityRecognitionResult[];\n KeyPhraseExtraction: KeyPhraseExtractionResult[];\n SentimentAnalysis: SentimentAnalysisResult[];\n LanguageDetection: LanguageDetectionResult[];\n}[ActionName];\n\n/** Options for an Abstractive Summarization action. */\nexport interface AbstractiveSummarizationAction {\n /** The approximate number of sentences to be part of the summary. */\n sentenceCount?: number;\n /**\n * Specifies the measurement unit used to calculate the offset and length properties. 
For a list of possible values, see {@link KnownStringIndexType}.\n *\n * The default is the JavaScript's default which is \"Utf16CodeUnit\".\n */\n stringIndexType?: StringIndexType;\n}\n\n/**\n * Enum of possible error codes of a {@link TextAnalysisError}.\n */\nexport const KnownTextAnalysisErrorCode = { ...KnownErrorCode, ...KnownInnerErrorCode };\n\n/**\n * Type describing an API error.\n */\nexport interface TextAnalysisError {\n /**\n * A code describing the kind of error produced. See {@link KnownTextAnalysisErrorCode}.\n */\n readonly code: string;\n /**\n * A message from the service explaining the error\n */\n readonly message: string;\n /**\n * The target of the particular error (for example, the name of an invalid parameter)\n */\n readonly target?: string;\n}\n\n/**\n * Base type for results of an action corresponding to a single input document.\n */\nexport interface TextAnalysisSuccessResult {\n /**\n * Unique, non-empty document identifier.\n */\n readonly id: string;\n\n /**\n * Statistics about the input document and how it was processed by the service.\n * This property will have a value when includeStatistics is set to true in\n * the client call.\n */\n readonly statistics?: TextDocumentStatistics;\n\n /**\n * An array of warning data corresponding to this document.\n *\n * If no warnings were returned, this array will be empty.\n */\n readonly warnings: DocumentWarning[];\n\n /**\n * Discriminant to determine if this is an error result.\n */\n readonly error?: undefined;\n}\n\n/**\n * Base type for error results of an action corresponding to a single document.\n */\nexport interface TextAnalysisErrorResult {\n /**\n * Unique, non-empty document identifier.\n */\n readonly id: string;\n\n /**\n * The Error for this document result.\n */\n readonly error: TextAnalysisError;\n}\n\n/**\n * The result of an entity recognition action on a single document.\n */\nexport type EntityRecognitionResult = EntityRecognitionSuccessResult | EntityRecognitionErrorResult;\n\n/**\n * The result of an entity recognition action on a single document, containing\n * a collection of {@link Entity} objects identified in that document.\n */\nexport interface EntityRecognitionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: Entity[];\n}\n\n/**\n * An error result from an entity recognition action on a single document.\n */\nexport type EntityRecognitionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of an entity recognition action on a single document.\n */\nexport type PiiEntityRecognitionResult =\n | PiiEntityRecognitionSuccessResult\n | PiiEntityRecognitionErrorResult;\n\n/**\n * The result of a pii entity recognition action on a single document,\n * containing the collection of {@link Entity} objects identified in that\n * document.\n */\nexport interface PiiEntityRecognitionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: Entity[];\n /**\n * The text redacted.\n */\n readonly redactedText: string;\n}\n\n/**\n * An error result from a pii entity recognition action on a single document.\n */\nexport type PiiEntityRecognitionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of an entity linking action on a single document.\n */\nexport type EntityLinkingResult = EntityLinkingSuccessResult | EntityLinkingErrorResult;\n\n/**\n * The result of a entity linking action on a single 
document, containing a\n * collection of the {@link LinkedEntity} objects identified in that document.\n */\nexport interface EntityLinkingSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: LinkedEntity[];\n}\n\n/**\n * An error result from an entity linking action on a single document.\n */\nexport type EntityLinkingErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of a language detection action on a single document.\n */\nexport type LanguageDetectionResult = LanguageDetectionSuccessResult | LanguageDetectionErrorResult;\n\n/**\n * The result of a language detection action on a single document,\n * containing a prediction of what language the document is written in.\n */\nexport interface LanguageDetectionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The top detected language by confidence score.\n */\n readonly primaryLanguage: DetectedLanguage;\n}\n\n/**\n * An error result from a language detection action on a single document.\n */\nexport type LanguageDetectionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of a sentiment analysis action on a single document.\n */\nexport type KeyPhraseExtractionResult =\n | KeyPhraseExtractionSuccessResult\n | KeyPhraseExtractionErrorResult;\n\n/**\n * The result of a key phrase extraction action on a single document,\n * containing a collection of the key phrases identified in that document.\n */\nexport interface KeyPhraseExtractionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * A list of representative words or phrases. The number of key phrases\n * returned is proportional to the number of words in the input document.\n */\n readonly keyPhrases: string[];\n}\n\n/**\n * An error result from a key phrase extraction action on a single document.\n */\nexport type KeyPhraseExtractionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of a sentiment analysis action on a single document.\n */\nexport type SentimentAnalysisResult = SentimentAnalysisSuccessResult | SentimentAnalysisErrorResult;\n\n/**\n * The result of a sentiment analysis action on a single document,\n * containing the predicted sentiment for each sentence as well as for the full\n * document.\n */\nexport interface SentimentAnalysisSuccessResult extends TextAnalysisSuccessResult {\n /**\n * Predicted sentiment for document. For a list of possible values, see {@link DocumentSentimentLabel}\n */\n readonly sentiment: DocumentSentimentLabel;\n /**\n * Document level sentiment confidence scores between 0 and 1 for each\n * sentiment class.\n */\n readonly confidenceScores: SentimentConfidenceScores;\n /**\n * The predicted sentiment for each sentence in the corresponding document.\n */\n readonly sentences: SentenceSentiment[];\n}\n\n/**\n * The predicted sentiment for a given span of text. For more information\n * regarding text sentiment, see {@link https://docs.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}.\n */\nexport interface SentenceSentiment {\n /**\n * The sentence text.\n */\n readonly text: string;\n /**\n * The predicted Sentiment for the sentence. 
For a list of possible values,\n * see {@link SentenceSentimentLabel}\n */\n readonly sentiment: SentenceSentimentLabel;\n /**\n * The sentiment confidence score between 0 and 1 for the sentence for all\n * classes.\n */\n readonly confidenceScores: SentimentConfidenceScores;\n /**\n * The sentence text offset from the start of the document.\n */\n readonly offset: number;\n /**\n * The length of the sentence text.\n */\n readonly length: number;\n /**\n * The list of opinions mined from this sentence. For example in \"The food is\n * good, but the service is bad\", the following two opinions will be returned:\n * \"food is good\" and \"service is bad\".\n *\n * It is non-empty only returned if {@link includeOpinionMining} was set to\n * `true`.\n */\n readonly opinions: Opinion[];\n}\n\n/**\n * TargetSentiment contains the predicted sentiment, confidence scores and other\n * information about a target of a product. A target of a product/service is a\n * key component of that product/service. For example in \"The food at Hotel Foo\n * is good\", \"food\" is a target of \"Hotel Foo\".\n */\nexport interface TargetSentiment {\n /**\n * The sentiment confidence score between 0 and 1 for the target for\n * 'positive' and 'negative' labels.\n */\n readonly confidenceScores: TargetConfidenceScores;\n /**\n * The predicted Sentiment for the Target. For a list of possible values,\n * see {@link TokenSentimentLabel}\n */\n readonly sentiment: TokenSentimentLabel;\n /**\n * The target text.\n */\n readonly text: string;\n /**\n * The Target text offset from the start of the sentence.\n */\n readonly offset: number;\n /**\n * The length of the Target text.\n */\n readonly length: number;\n}\n\n/**\n * A mined opinion object represents an opinion we've extracted from a sentence.\n * It consists of both a target that these assessments are about, and the actual\n * assessments themselves.\n */\nexport interface Opinion {\n /**\n * The target of a product/service that this assessment is about.\n */\n readonly target: TargetSentiment;\n /**\n * The actual assessments of the target.\n */\n readonly assessments: AssessmentSentiment[];\n}\n\n/**\n * A healthcare entity represented as a node in a directed graph where the edges are\n * a particular type of relationship between the source and target nodes.\n */\nexport interface HealthcareEntity extends Entity {\n /**\n * Normalized name for the entity. 
For example, the normalized text for \"histologically\" is \"histologic\".\n */\n readonly normalizedText?: string;\n /**\n * Whether the entity is negated.\n */\n readonly assertion?: HealthcareAssertion;\n /**\n * Entity references in known data sources.\n */\n readonly dataSources: EntityDataSource[];\n /**\n * Defines values for HealthcareEntityCategory.\n * {@link KnownHealthcareEntityCategory} can be used interchangeably with HealthcareEntityCategory,\n * this enum contains the known values that the service supports.\n * ### Known values supported by the service\n * **BODY_STRUCTURE**\n * **AGE**\n * **GENDER**\n * **EXAMINATION_NAME**\n * **DATE**\n * **DIRECTION**\n * **FREQUENCY**\n * **MEASUREMENT_VALUE**\n * **MEASUREMENT_UNIT**\n * **RELATIONAL_OPERATOR**\n * **TIME**\n * **GENE_OR_PROTEIN**\n * **VARIANT**\n * **ADMINISTRATIVE_EVENT**\n * **CARE_ENVIRONMENT**\n * **HEALTHCARE_PROFESSION**\n * **DIAGNOSIS**\n * **SYMPTOM_OR_SIGN**\n * **CONDITION_QUALIFIER**\n * **MEDICATION_CLASS**\n * **MEDICATION_NAME**\n * **DOSAGE**\n * **MEDICATION_FORM**\n * **MEDICATION_ROUTE**\n * **FAMILY_RELATION**\n * **TREATMENT_NAME**\n */\n readonly category: HealthcareEntityCategory;\n}\n\n/**\n * The type of different roles a healthcare entity can play in a relation.\n */\nexport type HealthcareEntityRelationRoleType = string;\n\n/**\n * A healthcare entity that plays a specific role in a relation.\n */\nexport interface HealthcareEntityRelationRole {\n /**\n * A healthcare entity\n */\n readonly entity: HealthcareEntity;\n /**\n * The role of the healthcare entity in a particular relation.\n */\n readonly name: HealthcareEntityRelationRoleType;\n}\n\n/**\n * A relationship between two or more healthcare entities.\n */\nexport interface HealthcareEntityRelation {\n /**\n * The type of the healthcare relation.\n */\n readonly relationType: RelationType;\n /**\n * The list of healthcare entities and their roles in the healthcare relation.\n */\n readonly roles: HealthcareEntityRelationRole[];\n /**\n * The confidence score between 0 and 1 of the extracted relation.\n */\n readonly confidenceScore?: number;\n}\n\n/**\n * The results of a successful healthcare analysis action for a single document.\n */\nexport interface HealthcareSuccessResult extends TextAnalysisSuccessResult {\n /**\n * Healthcare entities.\n */\n readonly entities: HealthcareEntity[];\n /**\n * Relations between healthcare entities.\n */\n readonly entityRelations: HealthcareEntityRelation[];\n}\n\n/**\n * An error result from the healthcare analysis action on a single document.\n */\nexport type HealthcareErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the healthcare analysis action on a single document.\n */\nexport type HealthcareResult = HealthcareSuccessResult | HealthcareErrorResult;\n\n/**\n * The result of the extractive summarization action on a single document.\n */\nexport type ExtractiveSummarizationResult =\n | ExtractiveSummarizationSuccessResult\n | ExtractiveSummarizationErrorResult;\n\n/**\n * The result of the extractive summarization action on a single document,\n * containing a collection of the summary identified in that document.\n */\nexport interface ExtractiveSummarizationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * A list of sentences composing a summary of the input document.\n */\n readonly sentences: SummarySentence[];\n}\n\n/**\n * An error result from the extractive summarization action on a single document.\n */\nexport type ExtractiveSummarizationErrorResult = 
TextAnalysisErrorResult;\n\n/**\n * The result of the abstractive summarization action on a single document.\n */\nexport type AbstractiveSummarizationResult =\n | AbstractiveSummarizationSuccessResult\n | AbstractiveSummarizationErrorResult;\n\n/**\n * The result of the abstractive summarization action on a single document,\n * containing a collection of the summaries identified for that document.\n */\nexport interface AbstractiveSummarizationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * A list of summaries of the input document.\n */\n readonly summaries: AbstractiveSummary[];\n}\n\n/**\n * An error result from the abstractive summarization action on a single document.\n */\nexport type AbstractiveSummarizationErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the custom entity recognition action on a single document.\n */\nexport type CustomEntityRecognitionResult =\n | CustomEntityRecognitionSuccessResult\n | CustomEntityRecognitionErrorResult;\n\n/**\n * The result of the custom entity recognition action on a single document,\n * containing a collection of the entities identified in that document.\n */\nexport interface CustomEntityRecognitionSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of entities identified in the input document.\n */\n readonly entities: Entity[];\n}\n\n/**\n * An error result from the custom entity recognition action on a single document.\n */\nexport type CustomEntityRecognitionErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the single-label classification action on a single document.\n */\nexport type CustomSingleLabelClassificationResult =\n | CustomSingleLabelClassificationSuccessResult\n | CustomSingleLabelClassificationErrorResult;\n\n/**\n * The result of a successful single-label classification action on a single document,\n * containing the result of the classification.\n */\nexport interface CustomSingleLabelClassificationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of classifications in the input document.\n */\n readonly classifications: ClassificationCategory[];\n}\n\n/**\n * An error result from the single-label classification action on a single document.\n */\nexport type CustomSingleLabelClassificationErrorResult = TextAnalysisErrorResult;\n\n/**\n * The result of the multi-label classification action on a multi document.\n */\nexport type CustomMultiLabelClassificationResult =\n | CustomMultiLabelClassificationSuccessResult\n | CustomMultiLabelClassificationErrorResult;\n\n/**\n * The result of a successful multi-label classification action on a multi document,\n * containing the result of the classification.\n */\nexport interface CustomMultiLabelClassificationSuccessResult extends TextAnalysisSuccessResult {\n /**\n * The collection of classifications in the input document.\n */\n readonly classifications: ClassificationCategory[];\n}\n\n/**\n * An error result from the multi-label classification action on a multi document.\n */\nexport type CustomMultiLabelClassificationErrorResult = TextAnalysisErrorResult;\n\n/**\n * Options common to all batch actions.\n */\nexport interface AnalyzeBatchActionCommon {\n /**\n * The name of the action.\n */\n actionName?: string;\n}\n\n/** Options for an entity linking batch action. */\nexport interface EntityLinkingBatchAction extends AnalyzeBatchActionCommon, EntityLinkingAction {\n /**\n * The kind of the action.\n */\n kind: \"EntityLinking\";\n}\n\n/** Options for an entity recognition batch action. 
*/\nexport interface EntityRecognitionBatchAction\n extends AnalyzeBatchActionCommon,\n EntityRecognitionAction {\n /**\n * The kind of the action.\n */\n kind: \"EntityRecognition\";\n}\n\n/** Options for an key phrase extraction batch action. */\nexport interface KeyPhraseExtractionBatchAction\n extends AnalyzeBatchActionCommon,\n KeyPhraseExtractionAction {\n /**\n * The kind of the action.\n */\n kind: \"KeyPhraseExtraction\";\n}\n\n/** Options for a pii entity recognition batch action. */\nexport interface PiiEntityRecognitionBatchAction\n extends AnalyzeBatchActionCommon,\n PiiEntityRecognitionAction {\n /**\n * The kind of the action.\n */\n kind: \"PiiEntityRecognition\";\n}\n\n/** Options for a healthcare batch action. */\nexport interface HealthcareBatchAction extends AnalyzeBatchActionCommon, HealthcareAction {\n /**\n * The kind of the action.\n */\n kind: \"Healthcare\";\n}\n\n/** Options for a sentiment analysis batch action. */\nexport interface SentimentAnalysisBatchAction\n extends AnalyzeBatchActionCommon,\n SentimentAnalysisAction {\n /**\n * The kind of the action.\n */\n kind: \"SentimentAnalysis\";\n}\n\n/** Options for an extractive summarization batch action. */\nexport interface ExtractiveSummarizationBatchAction\n extends AnalyzeBatchActionCommon,\n ExtractiveSummarizationAction {\n /**\n * The kind of the action.\n */\n kind: \"ExtractiveSummarization\";\n}\n\n/** Options for an abstractive summarization batch action. */\nexport interface AbstractiveSummarizationBatchAction\n extends AnalyzeBatchActionCommon,\n AbstractiveSummarizationAction {\n /**\n * The kind of the action.\n */\n kind: \"AbstractiveSummarization\";\n}\n\n/** Options for a custom entity recognition batch action. */\nexport interface CustomEntityRecognitionBatchAction\n extends AnalyzeBatchActionCommon,\n CustomEntityRecognitionAction {\n /**\n * The kind of the action.\n */\n kind: \"CustomEntityRecognition\";\n}\n\n/** Options for a custom single-label classification batch action. */\nexport interface CustomSingleLabelClassificationBatchAction\n extends AnalyzeBatchActionCommon,\n CustomSingleLabelClassificationAction {\n /**\n * The kind of the action.\n */\n kind: \"CustomSingleLabelClassification\";\n}\n\n/** Options for a custom multi-label classification batch action. 
*/\nexport interface CustomMultiLabelClassificationBatchAction\n extends AnalyzeBatchActionCommon,\n CustomMultiLabelClassificationAction {\n /**\n * The kind of the action.\n */\n kind: \"CustomMultiLabelClassification\";\n}\n\n/**\n * Batch of actions.\n */\nexport type AnalyzeBatchAction =\n | EntityLinkingBatchAction\n | EntityRecognitionBatchAction\n | KeyPhraseExtractionBatchAction\n | PiiEntityRecognitionBatchAction\n | HealthcareBatchAction\n | SentimentAnalysisBatchAction\n | ExtractiveSummarizationBatchAction\n | AbstractiveSummarizationBatchAction\n | CustomEntityRecognitionBatchAction\n | CustomSingleLabelClassificationBatchAction\n | CustomMultiLabelClassificationBatchAction;\n\n/**\n * Type of actions supported by the {@link TextAnalysisClient.beginAnalyzeBatch} method.\n */\nexport type AnalyzeBatchActionName = keyof typeof AnalyzeBatchActionNames;\n\n/** The State of a batched action */\nexport interface BatchActionState<Kind extends AnalyzeBatchActionName> {\n /**\n * The kind of the action results.\n */\n readonly kind: Kind;\n /**\n * The name of the action.\n */\n readonly actionName?: string;\n /**\n * Action statistics.\n */\n readonly statistics?: TextDocumentBatchStatistics;\n}\n\n/**\n * Action metadata.\n */\nexport interface ActionMetadata {\n /**\n * The model version used to perform the action.\n */\n readonly modelVersion: string;\n}\n\n/**\n * Custom action metadata.\n */\nexport interface CustomActionMetadata {\n /**\n * The name of the project used to perform the action.\n */\n readonly projectName: string;\n /**\n * The name of the deployment used to perform the action.\n */\n readonly deploymentName: string;\n}\n\n/**\n * The state of a succeeded batched action.\n */\nexport interface BatchActionSuccessResult<T, Kind extends AnalyzeBatchActionName>\n extends BatchActionState<Kind> {\n /**\n * The list of document results.\n */\n readonly results: T[];\n /**\n * When this action was completed by the service.\n */\n readonly completedOn: Date;\n /**\n * Discriminant to determine if that this is an error result.\n */\n readonly error?: undefined;\n}\n\n/**\n * The error of an analyze batch action.\n */\nexport interface BatchActionErrorResult<Kind extends AnalyzeBatchActionName>\n extends BatchActionState<Kind> {\n /**\n * When this action was completed by the service.\n */\n readonly failedOn: Date;\n /**\n * The Error for this action result.\n */\n readonly error: TextAnalysisError;\n}\n\n/**\n * The result of a batched action.\n */\nexport type BatchActionResult<T, Kind extends AnalyzeBatchActionName> =\n | BatchActionSuccessResult<T, Kind>\n | BatchActionErrorResult<Kind>;\n\n/**\n * The result of an entity linking batch action.\n */\nexport type EntityLinkingBatchResult = ActionMetadata &\n BatchActionResult<EntityLinkingResult, \"EntityLinking\">;\n\n/**\n * The result of an entity recognition batch action.\n */\nexport type EntityRecognitionBatchResult = ActionMetadata &\n BatchActionResult<EntityRecognitionResult, \"EntityRecognition\">;\n\n/**\n * The result of a key phrase extraction batch action.\n */\nexport type KeyPhraseExtractionBatchResult = ActionMetadata &\n BatchActionResult<KeyPhraseExtractionResult, \"KeyPhraseExtraction\">;\n\n/**\n * The result of a pii entity recognition batch action.\n */\nexport type PiiEntityRecognitionBatchResult = ActionMetadata &\n BatchActionResult<PiiEntityRecognitionResult, \"PiiEntityRecognition\">;\n\n/**\n * The result of a sentiment analysis batch action.\n */\nexport type SentimentAnalysisBatchResult = 
ActionMetadata &\n BatchActionResult<SentimentAnalysisResult, \"SentimentAnalysis\">;\n\n/**\n * The result of a healthcare batch action.\n */\nexport type HealthcareBatchResult = ActionMetadata &\n BatchActionResult<HealthcareResult, \"Healthcare\">;\n\n/**\n * The result of an extractive summarization batch action.\n */\nexport type ExtractiveSummarizationBatchResult = ActionMetadata &\n BatchActionResult<ExtractiveSummarizationResult, \"ExtractiveSummarization\">;\n\n/**\n * The result of an abstractive summarization batch action.\n */\nexport type AbstractiveSummarizationBatchResult = ActionMetadata &\n BatchActionResult<AbstractiveSummarizationResult, \"AbstractiveSummarization\">;\n\n/**\n * The result of a custom entity recognition batch action.\n */\nexport type CustomEntityRecognitionBatchResult = CustomActionMetadata &\n BatchActionResult<CustomEntityRecognitionResult, \"CustomEntityRecognition\">;\n\n/**\n * The result of a custom single-label classification batch action.\n */\nexport type CustomSingleLabelClassificationBatchResult = CustomActionMetadata &\n BatchActionResult<CustomSingleLabelClassificationResult, \"CustomSingleLabelClassification\">;\n\n/**\n * The result of a custom multi-label classification batch action.\n */\nexport type CustomMultiLabelClassificationBatchResult = CustomActionMetadata &\n BatchActionResult<CustomMultiLabelClassificationResult, \"CustomMultiLabelClassification\">;\n/**\n * Results of a batch of actions.\n */\nexport type AnalyzeBatchResult =\n | EntityLinkingBatchResult\n | EntityRecognitionBatchResult\n | KeyPhraseExtractionBatchResult\n | PiiEntityRecognitionBatchResult\n | SentimentAnalysisBatchResult\n | HealthcareBatchResult\n | ExtractiveSummarizationBatchResult\n | AbstractiveSummarizationBatchResult\n | CustomEntityRecognitionBatchResult\n | CustomSingleLabelClassificationBatchResult\n | CustomMultiLabelClassificationBatchResult;\n\n/**\n * An error result from a sentiment analysis action on a single document.\n */\nexport type SentimentAnalysisErrorResult = TextAnalysisErrorResult;\n\n/**\n * Paged results of the {@link TextAnalysisClient.beginAnalyzeBatch} operation.\n */\nexport type PagedAnalyzeBatchResult = PagedAsyncIterableIterator<AnalyzeBatchResult>;\n\n/**\n * A poller that polls long-running operations started by {@link TextAnalysisClient.beginAnalyzeBatch}.\n */\nexport type AnalyzeBatchPoller = PollerLike<AnalyzeBatchOperationState, PagedAnalyzeBatchResult>;\n\n/**\n * The metadata for long-running operations started by {@link TextAnalysisClient.beginAnalyzeBatch}.\n */\nexport interface AnalyzeBatchOperationMetadata {\n /**\n * The date and time the operation was created.\n */\n readonly createdOn: Date;\n /**\n * The date and time when the operation results will expire on the server.\n */\n readonly expiresOn?: Date;\n /**\n * The operation id.\n */\n readonly id: string;\n /**\n * The time the operation status was last updated.\n */\n readonly modifiedOn: Date;\n /**\n * Number of successfully completed actions.\n */\n readonly actionSucceededCount: number;\n /**\n * Number of failed actions.\n */\n readonly actionFailedCount: number;\n /**\n * Number of actions still in progress.\n */\n readonly actionInProgressCount: number;\n /**\n * The operation's display name.\n */\n readonly displayName?: string;\n}\n\n/**\n * The state of the begin analyze polling operation.\n */\nexport interface AnalyzeBatchOperationState\n extends OperationState<PagedAnalyzeBatchResult>,\n AnalyzeBatchOperationMetadata {}\n\n/**\n * 
Abstract representation of a poller, intended to expose just the minimal API that the user needs to work with.\n */\nexport interface PollerLike<TState extends OperationState<TResult>, TResult>\n extends SimplePollerLike<TState, TResult> {\n /**\n * sends a cancellation request.\n */\n sendCancellationRequest: () => Promise<void>;\n}\n"]}
|
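The sentiment shapes documented in both blobs above (`SentimentAnalysisSuccessResult`, `SentenceSentiment`, `TargetSentiment`, and `Opinion`) are easiest to read through a small consumer. The following is a minimal sketch, not a sample shipped with the package, assuming a Language resource endpoint and key (the placeholders below are illustrative) and using the string-array overload of `analyze` with opinion mining turned on:

```ts
import { AzureKeyCredential, TextAnalysisClient } from "@azure/ai-language-text";

async function main(): Promise<void> {
  // Placeholder endpoint and key; substitute values from your own Language resource.
  const client = new TextAnalysisClient(
    "https://<resource name>.cognitiveservices.azure.com",
    new AzureKeyCredential("<api key>")
  );

  const documents = ["The food is good, but the service is bad."];

  // `includeOpinionMining` is what populates `SentenceSentiment.opinions`.
  const results = await client.analyze("SentimentAnalysis", documents, "en", {
    includeOpinionMining: true,
  });

  for (const result of results) {
    if (result.error) {
      // Error results carry only `id` and `error` (see TextAnalysisErrorResult).
      console.error(result.id, result.error.code, result.error.message);
      continue;
    }
    console.log(result.id, result.sentiment, result.confidenceScores);
    for (const sentence of result.sentences) {
      for (const opinion of sentence.opinions) {
        const assessments = opinion.assessments.map((a) => a.text).join(", ");
        console.log(`  target "${opinion.target.text}" -> ${assessments}`);
      }
    }
  }
}

main().catch(console.error);
```

Note that `opinions` is only populated when `includeOpinionMining` is set, per the doc comment on `SentenceSentiment.opinions` above.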
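The batch-action types (`AnalyzeBatchAction`, `BatchActionSuccessResult`, `BatchActionErrorResult`, and the `AnalyzeBatchResult` union) are driven through `TextAnalysisClient.beginAnalyzeBatch`. A sketch of how the `kind` and `error` discriminants are intended to be consumed, assuming a configured `client` like the one in the previous snippet:

```ts
import type { AnalyzeBatchAction, TextAnalysisClient } from "@azure/ai-language-text";

async function runBatch(client: TextAnalysisClient): Promise<void> {
  const documents = ["I had a wonderful trip to Seattle last week."];
  const actions: AnalyzeBatchAction[] = [
    { kind: "SentimentAnalysis" },
    { kind: "KeyPhraseExtraction", actionName: "key-phrases" },
  ];

  // Long-running operation: the poller pages through one AnalyzeBatchResult per action.
  const poller = await client.beginAnalyzeBatch(actions, documents, "en");
  const actionResults = await poller.pollUntilDone();

  for await (const actionResult of actionResults) {
    if (actionResult.error) {
      // BatchActionErrorResult: the whole action failed.
      console.error(`${actionResult.kind} failed on ${actionResult.failedOn}: ${actionResult.error.message}`);
      continue;
    }
    // BatchActionSuccessResult: per-document results, each of which may itself be an error.
    console.log(actionResult.kind, "completed on", actionResult.completedOn);
    for (const docResult of actionResult.results) {
      console.log(" ", docResult.id, docResult.error ? docResult.error.code : "ok");
    }
  }
}
```

The optional `actionName` set on an action is echoed on the corresponding result, which is handy when several actions of the same kind are submitted together.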
@@ -116,8 +116,8 @@ export class TextAnalysisClient {
116 116     throw new Error("'documents' must be a non-empty array");
117 117     }
118 118     if (isStringArray(documents)) {
119     -       const
120     -       realInputs = convertToTextDocumentInput(documents,
    119 +       const languageCode = (_a = languageOrOptions) !== null && _a !== void 0 ? _a : this.defaultLanguage;
    120 +       realInputs = convertToTextDocumentInput(documents, languageCode);
121 121     realOptions = options;
122 122     }
123 123     else {
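The two added lines compute `languageCode` as `languageOrOptions ?? this.defaultLanguage` and pass it to `convertToTextDocumentInput`, so plain string inputs pick up the client-level `defaultLanguage` option (documented in models.ts above, defaulting to "en") when no language argument is given; the removed lines are truncated by the diff viewer, so only the new behaviour is fully visible. The parameter name suggests this is the string overload of `beginAnalyzeBatch`. A hedged sketch of that documented behaviour, with placeholder resource values and an illustrative `defaultLanguage` of "fr":

```ts
import { AzureKeyCredential, TextAnalysisClient } from "@azure/ai-language-text";

async function demo(): Promise<void> {
  // Placeholder endpoint/key; `defaultLanguage` is set to "fr" purely for illustration.
  const client = new TextAnalysisClient(
    "https://<resource name>.cognitiveservices.azure.com",
    new AzureKeyCredential("<api key>"),
    { defaultLanguage: "fr" }
  );

  const documents = ["C'est un document rédigé en français."];

  // No language argument: plain strings fall back to the client's defaultLanguage ("fr").
  const poller = await client.beginAnalyzeBatch([{ kind: "KeyPhraseExtraction" }], documents);

  // An explicit language code takes precedence over the client default.
  const pollerEnglish = await client.beginAnalyzeBatch(
    [{ kind: "KeyPhraseExtraction" }],
    ["This one is explicitly tagged as English."],
    "en"
  );

  await Promise.all([poller.pollUntilDone(), pollerEnglish.pollUntilDone()]);
}

demo().catch(console.error);
```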
@@ -1 +1 @@
1 -
{"version":3,"file":"textAnalysisClient.js","sourceRoot":"","sources":["../../src/textAnalysisClient.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AAmBlC,OAAO,EAAE,uBAAuB,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AACnE,OAAO,EAAkC,iBAAiB,EAAE,MAAM,kBAAkB,CAAC;AACrF,OAAO,EAAiB,mBAAmB,EAAE,MAAM,qBAAqB,CAAC;AACzE,OAAO,EACL,+BAA+B,EAC/B,0BAA0B,EAC1B,mBAAmB,EACnB,aAAa,GACd,MAAM,QAAQ,CAAC;AAChB,OAAO,EACL,qBAAqB,EACrB,iCAAiC,EACjC,4BAA4B,EAC5B,wBAAwB,EACxB,kBAAkB,EAClB,oBAAoB,GACrB,MAAM,OAAO,CAAC;AACf,OAAO,EAAE,UAAU,EAAE,qBAAqB,EAAE,MAAM,cAAc,CAAC;AACjE,OAAO,EAAE,eAAe,EAAE,MAAM,6BAA6B,CAAC;AAC9D,OAAO,EAAE,+BAA+B,EAAE,MAAM,2BAA2B,CAAC;AAC5E,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAClC,OAAO,EAAE,qCAAqC,EAAE,MAAM,4BAA4B,CAAC;AAEnF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAoCG;AACH,MAAM,OAAO,kBAAkB;IA8D7B,YACE,WAAmB,EACnB,UAA2C,EAC3C,UAAqC,EAAE;QAEvC,MAAM,EACJ,kBAAkB,GAAG,IAAI,EACzB,eAAe,GAAG,IAAI,EACtB,cAAc,KAEZ,OAAO,EADN,eAAe,UAChB,OAAO,EALL,2DAKL,CAAU,CAAC;QACZ,IAAI,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;QAC7C,IAAI,CAAC,eAAe,GAAG,eAAe,CAAC;QAEvC,MAAM,uBAAuB,iDACxB,eAAe,GACf;YACD,cAAc,EAAE;gBACd,MAAM,EAAE,MAAM,CAAC,IAAI;gBACnB,4BAA4B,EAAE,CAAC,6BAA6B,EAAE,iBAAiB,CAAC;aACjF;SACF,KACD,UAAU,EAAE,cAAc,GAC3B,CAAC;QAEF,IAAI,CAAC,OAAO,GAAG,IAAI,eAAe,CAAC,WAAW,EAAE,uBAAuB,CAAC,CAAC;QAEzE,MAAM,UAAU,GAAG,iBAAiB,CAAC,UAAU,CAAC;YAC9C,CAAC,CAAC,+BAA+B,CAAC,EAAE,UAAU,EAAE,MAAM,EAAE,uBAAuB,EAAE,CAAC;YAClF,CAAC,CAAC,qCAAqC,CAAC,UAAU,CAAC,CAAC;QAEtD,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;QAC5C,IAAI,CAAC,QAAQ,GAAG,mBAAmB,CAAC;YAClC,WAAW,EAAE,yBAAyB;YACtC,cAAc,EAAE,WAAW;YAC3B,SAAS,EAAE,6BAA6B;SACzC,CAAC,CAAC;IACL,CAAC;IAsRD,iBAAiB;IACV,KAAK,CAAC,OAAO,CAClB,UAAsB,EACtB,SAAoE,EACpE,8BAEwE,EACxE,OAA4E;QAE5E,IAAI,WAA+E,CAAC;QAEpF,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE;YAC1B,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;SAC1D;QAED,IAAI,UAA0D,CAAC;QAC/D,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE;YAC5B,IAAI,UAAU,KAAK,mBAAmB,EAAE;gBACtC,UAAU,GAAG,+BAA+B,CAC1C,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,kBAAkB,CAC5B,CAAC;aACH;iBAAM;gBACL,UAAU,GAAG,0BAA0B,CACrC,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,eAAe,CACzB,CAAC;aACH;YACD,WAAW,GAAG,OAAO,IAAK,EAAU,CAAC;SACtC;aAAM;YACL,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW;gBACR,8BAC8B,IAAI,EAAE,CAAC;SACzC;QACD,MAAM,EAAE,OAAO,EAAE,gBAAgB,EAAE,IAAI,EAAE,MAAM,EAAE,GAAG,mBAAmB,CAAC,WAAW,CAAC,CAAC;QACrF,OAAO,IAAI,CAAC,QAAQ,CAAC,QAAQ,CAC3B,4BAA4B,EAC5B,gBAAgB,EAChB,KAAK,EAAE,cAA4C,EAAE,EAAE,CACrD,UAAU,CACR,IAAI,CAAC,OAAO;aACT,OAAO,CACN;YACE,IAAI,EAAE,UAAU;YAChB,aAAa,EAAE;gBACb,SAAS,EAAE,UAAU;aACtB;YACD,UAAU,EAAE,MAAM;SACZ,EACR,cAAc,CACf;aACA,IAAI,CACH,CAAC,MAAM,EAAE,EAAE,CACT,qBAAqB,CACnB,UAAU,EACV,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,EAC9B,MAAM,CACsB,CACjC,CACJ,CACJ,CAAC;IACJ,CAAC;IA2HD,iBAAiB;IACjB,KAAK,CAAC,iBAAiB,CACrB,OAA6B,EAC7B,SAAyC,EACzC,iBAAqD,EACrD,UAAoC,EAAE;;QAEtC,IAAI,WAAqC,CAAC;QAC1C,IAAI,UAA+B,CAAC;QAEpC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE;YACvD,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;SAC1D;QAED,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE;YAC5B,MAAM,YAAY,GAAG,MAAC,iBAA4B,mCAAI,IAAI,CAAC,eAAe,CAAC;YAC3E,UAAU,GAAG,0BAA0B,CAAC,SAAS,EAAE,YAAY,CAAC,CAAC;YACjE,WAAW,GAAG,OAAO,CAAC;SACvB;aAAM;YACL,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW,GAAG,iBAA6C,CAAC;SAC7D;QACD,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,CAC7B,CAAC,EAA6B,EAAqD,EAAE;gBAApF,EAAE,IAAI,EAAE,UAAU,OAAW,EAAN,IAAI,cAA3B,sBAA6B,CAAF;YAA0D,OAAA,CAAC;gBACrF,IAAI;gBACJ,UAAU;gBACV,UAAU,EAAE,IAAI;aACjB,CAAC,CAAA;SAAA,CACH,
CAAC;QACF,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,WAAW,KAAc,WAAW,EAApB,IAAI,UAAK,WAAW,EAA7E,0DAA+D,CAAc,CAAC;QACpF,MAAM,GAAG,GAAG,qBAAqB,CAAC;YAChC,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,aAAa,EAAE,IAAI;YACnB,SAAS,EAAE,UAAU;YACrB,qBAAqB,EAAE,EAAE,WAAW,EAAE;YACtC,kBAAkB,EAAE,EAAE,iBAAiB,EAAE;YACzC,KAAK,EAAE,WAAW;YAClB,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,MAAM,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC;QAE9C,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,kCAAO,IAAI,KAAE,iBAAiB,GAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,CAAC,MAAM,CAAC;YAC7C,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;IA0BD,iBAAiB;IACjB,KAAK,CAAC,yBAAyB,CAC7B,eAAuB,EACvB,UAA4C,EAAE;QAE9C,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,KAAc,OAAO,EAAhB,IAAI,UAAK,OAAO,EAA5D,2CAAkD,CAAU,CAAC;QACnE,MAAM,MAAM,GAAG,kBAAkB,CAAC,eAAe,CAAC,CAAC;QACnD,MAAM,GAAG,GAAG,iCAAiC,CAAC;YAC5C,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO,kCAAO,IAAI,KAAE,iBAAiB,GAAE;YACvC,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,WAAW,EAAE,eAAe;YAC5B,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,kCAAO,IAAI,KAAE,iBAAiB,GAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,EAAE;YACvC,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;CACF","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport {\n AnalyzeActionName,\n AnalyzeActionParameters,\n AnalyzeBatchAction,\n AnalyzeBatchPoller,\n AnalyzeResult,\n BeginAnalyzeBatchOptions,\n RestoreAnalyzeBatchPollerOptions,\n TextAnalysisClientOptions,\n TextAnalysisOperationOptions,\n} from \"./models\";\nimport {\n AnalyzeBatchActionUnion,\n GeneratedClientOptionalParams,\n LanguageDetectionInput,\n TextDocumentInput,\n} from \"./generated/models\";\nimport { DEFAULT_COGNITIVE_SCOPE, SDK_VERSION } from \"./constants\";\nimport { KeyCredential, TokenCredential, isTokenCredential } from \"@azure/core-auth\";\nimport { TracingClient, createTracingClient } from \"@azure/core-tracing\";\nimport {\n convertToLanguageDetectionInput,\n convertToTextDocumentInput,\n getOperationOptions,\n isStringArray,\n} from \"./util\";\nimport {\n createAnalyzeBatchLro,\n createCreateAnalyzeBatchPollerLro,\n createPollerWithCancellation,\n createUpdateAnalyzeState,\n getDocIDsFromState,\n processAnalyzeResult,\n} from \"./lro\";\nimport { throwError, transformActionResult } from \"./transforms\";\nimport { GeneratedClient } from \"./generated/generatedClient\";\nimport { bearerTokenAuthenticationPolicy } from \"@azure/core-rest-pipeline\";\nimport { createHttpPoller } from \"@azure/core-lro\";\nimport { logger } from \"./logger\";\nimport { textAnalyticsAzureKeyCredentialPolicy } from 
\"./azureKeyCredentialPolicy\";\n\n/**\n * A client for interacting with the text analysis features in Azure Cognitive\n * Language Service.\n *\n * The client needs the endpoint of a Language resource and an authentication\n * method such as an API key or AAD. The API key and endpoint can be found in\n * the Language resource page in the Azure portal. They will be located in the\n * resource's Keys and Endpoint page, under Resource Management.\n *\n * ### Examples for authentication:\n *\n * #### API Key\n *\n * ```js\n * import { TextAnalysisClient, AzureKeyCredential } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * #### Azure Active Directory\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```js\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n */\nexport class TextAnalysisClient {\n private readonly _client: GeneratedClient;\n private readonly _tracing: TracingClient;\n private readonly defaultCountryHint: string;\n private readonly defaultLanguage: string;\n\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * ```js\n * import { TextAnalysisClient, AzureKeyCredential } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Key credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(endpointUrl: string, credential: KeyCredential, options?: TextAnalysisClientOptions);\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. 
They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```js\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Token credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(\n endpointUrl: string,\n credential: TokenCredential,\n options?: TextAnalysisClientOptions\n );\n constructor(\n endpointUrl: string,\n credential: TokenCredential | KeyCredential,\n options: TextAnalysisClientOptions = {}\n ) {\n const {\n defaultCountryHint = \"us\",\n defaultLanguage = \"en\",\n serviceVersion,\n ...pipelineOptions\n } = options;\n this.defaultCountryHint = defaultCountryHint;\n this.defaultLanguage = defaultLanguage;\n\n const internalPipelineOptions: GeneratedClientOptionalParams = {\n ...pipelineOptions,\n ...{\n loggingOptions: {\n logger: logger.info,\n additionalAllowedHeaderNames: [\"x-ms-correlation-request-id\", \"x-ms-request-id\"],\n },\n },\n apiVersion: serviceVersion,\n };\n\n this._client = new GeneratedClient(endpointUrl, internalPipelineOptions);\n\n const authPolicy = isTokenCredential(credential)\n ? bearerTokenAuthenticationPolicy({ credential, scopes: DEFAULT_COGNITIVE_SCOPE })\n : textAnalyticsAzureKeyCredentialPolicy(credential);\n\n this._client.pipeline.addPolicy(authPolicy);\n this._tracing = createTracingClient({\n packageName: \"@azure/ai-language-text\",\n packageVersion: SDK_VERSION,\n namespace: \"Microsoft.CognitiveServices\",\n });\n }\n\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 
120 languages are supported.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```js\n * const documents = [<input strings>];\n * const countryHint = \"us\";\n * const results = await client.analyze(\"LanguageDetection\", documents, countryHint);\n *\n * for (let i = 0; i < results.length; i++) {\n * const result = results[i];\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { name, confidenceScore, iso6391Name } = result.primaryLanguage;\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: LanguageDetectionInput[],\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 120 languages are supported.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```js\n * const documents = [<input strings>];\n * const countryHint = \"us\";\n * const results = await client.analyze(\"LanguageDetection\", documents, countryHint);\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { name, confidenceScore, iso6391Name } = result.primaryLanguage;\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param countryHint - Indicates the country of origin for all of\n * the input strings to assist the model in predicting the language they are\n * written in. If unspecified, this value will be set to the default\n * country hint in `TextAnalysisClientOptions`. If set to an empty string,\n * or the string \"none\", the service will apply a model where the country is\n * explicitly unset. 
The same country hint is applied to all strings in the\n * input collection.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: string[],\n countryHint?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to perform the action of choice on the input\n * documents. See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```js\n * const documents = [{\n * id: \"1\",\n * text: \"The food and service aren't the best\",\n * language: \"en\"\n * }];\n * const results = await client.analyze(\"SentimentAnalysis\", documents, {\n * includeOpinionMining: true,\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { sentiment, confidenceScores, sentences } = result;\n * for (const { sentiment, confidenceScores, opinions } of sentences) {\n * for (const { target, assessments } of opinions) {\n * const { text, sentiment, confidenceScores } = target;\n * for (const { text, sentiment } of assessments) {\n * // Do something\n * }\n * }\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```js\n * const documents = [<input documents>];\n * const categoriesFilter = [KnownPiiCategory.USSocialSecurityNumber];\n * const domainFilter = KnownPiiDomain.Phi;\n * const results = await client.analyze(\"PiiEntityRecognition\", documents, {\n * domainFilter, categoriesFilter\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { entities, redactedText } = result;\n * for (const { text, category, confidenceScore, length, offset } of entities) {\n * // Do something\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: TextDocumentInput[],\n options?: AnalyzeActionParameters<ActionName> & 
TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n\n /**\n * Runs a predictive model to perform the action of choice on the input\n * strings. See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```js\n * const documents = [\"The food and service aren't the best\"];\n * const results = await client.analyze(\"SentimentAnalysis\", documents, {\n * includeOpinionMining: true,\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { sentiment, confidenceScores, sentences } = result;\n * for (const { sentiment, confidenceScores, opinions } of sentences) {\n * for (const { target, assessments } of opinions) {\n * const { text, sentiment, confidenceScores } = target;\n * for (const { text, sentiment } of assessments) {\n * // Do something\n * }\n * }\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```js\n * const documents = [<input strings>];\n * const languageHint = \"en\";\n * const categoriesFilter = [KnownPiiCategory.USSocialSecurityNumber];\n * const domainFilter = KnownPiiDomain.Phi;\n * const results = await client.analyze(\"PiiEntityRecognition\", documents, languageHint, {\n * domainFilter, categoriesFilter\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { entities, redactedText } = result;\n * for (const { text, category, confidenceScore, length, offset } of entities) {\n * // Do something\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://docs.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text. 
If that process fails, the value in the `defaultLanguage`\n * option will be used.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[],\n languageCode?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n // implementation\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[] | LanguageDetectionInput[] | TextDocumentInput[],\n languageOrCountryHintOrOptions?:\n | string\n | (AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions),\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>> {\n let realOptions: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions;\n\n if (documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n let realInputs: LanguageDetectionInput[] | TextDocumentInput[];\n if (isStringArray(documents)) {\n if (actionName === \"LanguageDetection\") {\n realInputs = convertToLanguageDetectionInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultCountryHint\n );\n } else {\n realInputs = convertToTextDocumentInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultLanguage\n );\n }\n realOptions = options || ({} as any);\n } else {\n realInputs = documents;\n realOptions =\n (languageOrCountryHintOrOptions as AnalyzeActionParameters<ActionName> &\n TextAnalysisOperationOptions) || {};\n }\n const { options: operationOptions, rest: action } = getOperationOptions(realOptions);\n return this._tracing.withSpan(\n \"TextAnalysisClient.analyze\",\n operationOptions,\n async (updatedOptions: TextAnalysisOperationOptions) =>\n throwError(\n this._client\n .analyze(\n {\n kind: actionName,\n analysisInput: {\n documents: realInputs,\n },\n parameters: action,\n } as any,\n updatedOptions\n )\n .then(\n (result) =>\n transformActionResult(\n actionName,\n realInputs.map(({ id }) => id),\n result\n ) as AnalyzeResult<ActionName>\n )\n )\n );\n }\n\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. 
In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Key phrase extraction and Pii entity recognition\n *\n * ```js\n * const poller = await client.beginAnalyzeBatch(\n * [{ kind: \"KeyPhraseExtraction\" }, { kind: \"PiiEntityRecognition\" }],\n * documents\n * );\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * throw new Error(`Unexpected error`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://docs.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text. If that process fails, the value in the `defaultLanguage`\n * option will be used.\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: string[],\n languageCode?: string,\n options?: BeginAnalyzeBatchOptions\n ): Promise<AnalyzeBatchPoller>;\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. 
In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Keyphrase extraction and Pii entity recognition\n *\n * ```js\n * const poller = await client.beginAnalyzeBatch(\n * [{ kind: \"KeyPhraseExtraction\" }, { kind: \"PiiEntityRecognition\" }],\n * documents\n * );\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * throw new Error(`Unexpected error`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[],\n options?: BeginAnalyzeBatchOptions\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[] | string[],\n languageOrOptions?: BeginAnalyzeBatchOptions | string,\n options: BeginAnalyzeBatchOptions = {}\n ): Promise<AnalyzeBatchPoller> {\n let realOptions: BeginAnalyzeBatchOptions;\n let realInputs: TextDocumentInput[];\n\n if (!Array.isArray(documents) || documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n if (isStringArray(documents)) {\n const languageHint = (languageOrOptions as string) ?? 
this.defaultLanguage;\n realInputs = convertToTextDocumentInput(documents, languageHint);\n realOptions = options;\n } else {\n realInputs = documents;\n realOptions = languageOrOptions as BeginAnalyzeBatchOptions;\n }\n const realActions = actions.map(\n ({ kind, actionName, ...rest }): AnalyzeBatchActionUnion & { parameters: unknown } => ({\n kind,\n actionName,\n parameters: rest,\n })\n );\n const { includeStatistics, updateIntervalInMs, displayName, ...rest } = realOptions;\n const lro = createAnalyzeBatchLro({\n client: this._client,\n commonOptions: rest,\n documents: realInputs,\n initialRequestOptions: { displayName },\n pollRequestOptions: { includeStatistics },\n tasks: realActions,\n tracing: this._tracing,\n });\n\n const docIds = realInputs.map(({ id }) => id);\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(docIds),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n\n /**\n * Creates a poller from the serialized state of another poller. This can be\n * useful when you want to create pollers on a different host or a poller\n * needs to be constructed after the original one is not in scope.\n *\n * @param serializedState - the serialized state of another poller. It is the\n * result of `poller.toString()`\n * @param options - optional settings for the operation\n *\n * # Example\n *\n * `client.beginAnalyzeBatch` returns a promise that will resolve to a poller.\n * The state of the poller can be serialized and used to create another as follows:\n *\n * ```js\n * const serializedState = poller.toString();\n * const rehydratedPoller = await client.createAnalyzeBatchPoller(serializedState);\n * const actionResults = await rehydratedPoller.pollUntilDone();\n * ```\n */\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options?: RestoreAnalyzeBatchPollerOptions\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options: RestoreAnalyzeBatchPollerOptions = {}\n ): Promise<AnalyzeBatchPoller> {\n const { includeStatistics, updateIntervalInMs, ...rest } = options;\n const docIds = getDocIDsFromState(serializedState);\n const lro = createCreateAnalyzeBatchPollerLro({\n client: this._client,\n options: { ...rest, includeStatistics },\n tracing: this._tracing,\n });\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n restoreFrom: serializedState,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n}\n"]}
|
1
|
+
{"version":3,"file":"textAnalysisClient.js","sourceRoot":"","sources":["../../src/textAnalysisClient.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AAmBlC,OAAO,EAAE,uBAAuB,EAAE,WAAW,EAAE,MAAM,aAAa,CAAC;AACnE,OAAO,EAAkC,iBAAiB,EAAE,MAAM,kBAAkB,CAAC;AACrF,OAAO,EAAiB,mBAAmB,EAAE,MAAM,qBAAqB,CAAC;AACzE,OAAO,EACL,+BAA+B,EAC/B,0BAA0B,EAC1B,mBAAmB,EACnB,aAAa,GACd,MAAM,QAAQ,CAAC;AAChB,OAAO,EACL,qBAAqB,EACrB,iCAAiC,EACjC,4BAA4B,EAC5B,wBAAwB,EACxB,kBAAkB,EAClB,oBAAoB,GACrB,MAAM,OAAO,CAAC;AACf,OAAO,EAAE,UAAU,EAAE,qBAAqB,EAAE,MAAM,cAAc,CAAC;AACjE,OAAO,EAAE,eAAe,EAAE,MAAM,6BAA6B,CAAC;AAC9D,OAAO,EAAE,+BAA+B,EAAE,MAAM,2BAA2B,CAAC;AAC5E,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,MAAM,EAAE,MAAM,UAAU,CAAC;AAClC,OAAO,EAAE,qCAAqC,EAAE,MAAM,4BAA4B,CAAC;AAEnF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAoCG;AACH,MAAM,OAAO,kBAAkB;IA8D7B,YACE,WAAmB,EACnB,UAA2C,EAC3C,UAAqC,EAAE;QAEvC,MAAM,EACJ,kBAAkB,GAAG,IAAI,EACzB,eAAe,GAAG,IAAI,EACtB,cAAc,KAEZ,OAAO,EADN,eAAe,UAChB,OAAO,EALL,2DAKL,CAAU,CAAC;QACZ,IAAI,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;QAC7C,IAAI,CAAC,eAAe,GAAG,eAAe,CAAC;QAEvC,MAAM,uBAAuB,iDACxB,eAAe,GACf;YACD,cAAc,EAAE;gBACd,MAAM,EAAE,MAAM,CAAC,IAAI;gBACnB,4BAA4B,EAAE,CAAC,6BAA6B,EAAE,iBAAiB,CAAC;aACjF;SACF,KACD,UAAU,EAAE,cAAc,GAC3B,CAAC;QAEF,IAAI,CAAC,OAAO,GAAG,IAAI,eAAe,CAAC,WAAW,EAAE,uBAAuB,CAAC,CAAC;QAEzE,MAAM,UAAU,GAAG,iBAAiB,CAAC,UAAU,CAAC;YAC9C,CAAC,CAAC,+BAA+B,CAAC,EAAE,UAAU,EAAE,MAAM,EAAE,uBAAuB,EAAE,CAAC;YAClF,CAAC,CAAC,qCAAqC,CAAC,UAAU,CAAC,CAAC;QAEtD,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;QAC5C,IAAI,CAAC,QAAQ,GAAG,mBAAmB,CAAC;YAClC,WAAW,EAAE,yBAAyB;YACtC,cAAc,EAAE,WAAW;YAC3B,SAAS,EAAE,6BAA6B;SACzC,CAAC,CAAC;IACL,CAAC;IAqRD,iBAAiB;IACV,KAAK,CAAC,OAAO,CAClB,UAAsB,EACtB,SAAoE,EACpE,8BAEwE,EACxE,OAA4E;QAE5E,IAAI,WAA+E,CAAC;QAEpF,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE;YAC1B,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;SAC1D;QAED,IAAI,UAA0D,CAAC;QAC/D,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE;YAC5B,IAAI,UAAU,KAAK,mBAAmB,EAAE;gBACtC,UAAU,GAAG,+BAA+B,CAC1C,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,kBAAkB,CAC5B,CAAC;aACH;iBAAM;gBACL,UAAU,GAAG,0BAA0B,CACrC,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,eAAe,CACzB,CAAC;aACH;YACD,WAAW,GAAG,OAAO,IAAK,EAAU,CAAC;SACtC;aAAM;YACL,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW;gBACR,8BAC8B,IAAI,EAAE,CAAC;SACzC;QACD,MAAM,EAAE,OAAO,EAAE,gBAAgB,EAAE,IAAI,EAAE,MAAM,EAAE,GAAG,mBAAmB,CAAC,WAAW,CAAC,CAAC;QACrF,OAAO,IAAI,CAAC,QAAQ,CAAC,QAAQ,CAC3B,4BAA4B,EAC5B,gBAAgB,EAChB,KAAK,EAAE,cAA4C,EAAE,EAAE,CACrD,UAAU,CACR,IAAI,CAAC,OAAO;aACT,OAAO,CACN;YACE,IAAI,EAAE,UAAU;YAChB,aAAa,EAAE;gBACb,SAAS,EAAE,UAAU;aACtB;YACD,UAAU,EAAE,MAAM;SACZ,EACR,cAAc,CACf;aACA,IAAI,CACH,CAAC,MAAM,EAAE,EAAE,CACT,qBAAqB,CACnB,UAAU,EACV,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,EAC9B,MAAM,CACsB,CACjC,CACJ,CACJ,CAAC;IACJ,CAAC;IA0HD,iBAAiB;IACjB,KAAK,CAAC,iBAAiB,CACrB,OAA6B,EAC7B,SAAyC,EACzC,iBAAqD,EACrD,UAAoC,EAAE;;QAEtC,IAAI,WAAqC,CAAC;QAC1C,IAAI,UAA+B,CAAC;QAEpC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE;YACvD,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;SAC1D;QAED,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE;YAC5B,MAAM,YAAY,GAAG,MAAC,iBAA4B,mCAAI,IAAI,CAAC,eAAe,CAAC;YAC3E,UAAU,GAAG,0BAA0B,CAAC,SAAS,EAAE,YAAY,CAAC,CAAC;YACjE,WAAW,GAAG,OAAO,CAAC;SACvB;aAAM;YACL,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW,GAAG,iBAA6C,CAAC;SAC7D;QACD,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,CAC7B,CAAC,EAA6B,EAAqD,EAAE;gBAApF,EAAE,IAAI,EAAE,UAAU,OAAW,EAAN,IAAI,cAA3B,sBAA6B,CAAF;YAA0D,OAAA,CAAC;gBACrF,IAAI;gBACJ,UAAU;gBACV,UAAU,EAAE,IAAI;aACjB,CAAC,CAAA;SAAA,CACH,
CAAC;QACF,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,WAAW,KAAc,WAAW,EAApB,IAAI,UAAK,WAAW,EAA7E,0DAA+D,CAAc,CAAC;QACpF,MAAM,GAAG,GAAG,qBAAqB,CAAC;YAChC,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,aAAa,EAAE,IAAI;YACnB,SAAS,EAAE,UAAU;YACrB,qBAAqB,EAAE,EAAE,WAAW,EAAE;YACtC,kBAAkB,EAAE,EAAE,iBAAiB,EAAE;YACzC,KAAK,EAAE,WAAW;YAClB,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,MAAM,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC;QAE9C,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,kCAAO,IAAI,KAAE,iBAAiB,GAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,CAAC,MAAM,CAAC;YAC7C,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;IA0BD,iBAAiB;IACjB,KAAK,CAAC,yBAAyB,CAC7B,eAAuB,EACvB,UAA4C,EAAE;QAE9C,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,KAAc,OAAO,EAAhB,IAAI,UAAK,OAAO,EAA5D,2CAAkD,CAAU,CAAC;QACnE,MAAM,MAAM,GAAG,kBAAkB,CAAC,eAAe,CAAC,CAAC;QACnD,MAAM,GAAG,GAAG,iCAAiC,CAAC;YAC5C,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO,kCAAO,IAAI,KAAE,iBAAiB,GAAE;YACvC,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,WAAW,EAAE,eAAe;YAC5B,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,kCAAO,IAAI,KAAE,iBAAiB,GAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,EAAE;YACvC,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;CACF","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT license.\n\nimport {\n AnalyzeActionName,\n AnalyzeActionParameters,\n AnalyzeBatchAction,\n AnalyzeBatchPoller,\n AnalyzeResult,\n BeginAnalyzeBatchOptions,\n RestoreAnalyzeBatchPollerOptions,\n TextAnalysisClientOptions,\n TextAnalysisOperationOptions,\n} from \"./models\";\nimport {\n AnalyzeBatchActionUnion,\n GeneratedClientOptionalParams,\n LanguageDetectionInput,\n TextDocumentInput,\n} from \"./generated/models\";\nimport { DEFAULT_COGNITIVE_SCOPE, SDK_VERSION } from \"./constants\";\nimport { KeyCredential, TokenCredential, isTokenCredential } from \"@azure/core-auth\";\nimport { TracingClient, createTracingClient } from \"@azure/core-tracing\";\nimport {\n convertToLanguageDetectionInput,\n convertToTextDocumentInput,\n getOperationOptions,\n isStringArray,\n} from \"./util\";\nimport {\n createAnalyzeBatchLro,\n createCreateAnalyzeBatchPollerLro,\n createPollerWithCancellation,\n createUpdateAnalyzeState,\n getDocIDsFromState,\n processAnalyzeResult,\n} from \"./lro\";\nimport { throwError, transformActionResult } from \"./transforms\";\nimport { GeneratedClient } from \"./generated/generatedClient\";\nimport { bearerTokenAuthenticationPolicy } from \"@azure/core-rest-pipeline\";\nimport { createHttpPoller } from \"@azure/core-lro\";\nimport { logger } from \"./logger\";\nimport { textAnalyticsAzureKeyCredentialPolicy } from 
\"./azureKeyCredentialPolicy\";\n\n/**\n * A client for interacting with the text analysis features in Azure Cognitive\n * Language Service.\n *\n * The client needs the endpoint of a Language resource and an authentication\n * method such as an API key or AAD. The API key and endpoint can be found in\n * the Language resource page in the Azure portal. They will be located in the\n * resource's Keys and Endpoint page, under Resource Management.\n *\n * ### Examples for authentication:\n *\n * #### API Key\n *\n * ```js\n * import { TextAnalysisClient, AzureKeyCredential } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * #### Azure Active Directory\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```js\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n */\nexport class TextAnalysisClient {\n private readonly _client: GeneratedClient;\n private readonly _tracing: TracingClient;\n private readonly defaultCountryHint: string;\n private readonly defaultLanguage: string;\n\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * ```js\n * import { TextAnalysisClient, AzureKeyCredential } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Key credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(endpointUrl: string, credential: KeyCredential, options?: TextAnalysisClientOptions);\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. 
They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```js\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n *\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Token credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(\n endpointUrl: string,\n credential: TokenCredential,\n options?: TextAnalysisClientOptions\n );\n constructor(\n endpointUrl: string,\n credential: TokenCredential | KeyCredential,\n options: TextAnalysisClientOptions = {}\n ) {\n const {\n defaultCountryHint = \"us\",\n defaultLanguage = \"en\",\n serviceVersion,\n ...pipelineOptions\n } = options;\n this.defaultCountryHint = defaultCountryHint;\n this.defaultLanguage = defaultLanguage;\n\n const internalPipelineOptions: GeneratedClientOptionalParams = {\n ...pipelineOptions,\n ...{\n loggingOptions: {\n logger: logger.info,\n additionalAllowedHeaderNames: [\"x-ms-correlation-request-id\", \"x-ms-request-id\"],\n },\n },\n apiVersion: serviceVersion,\n };\n\n this._client = new GeneratedClient(endpointUrl, internalPipelineOptions);\n\n const authPolicy = isTokenCredential(credential)\n ? bearerTokenAuthenticationPolicy({ credential, scopes: DEFAULT_COGNITIVE_SCOPE })\n : textAnalyticsAzureKeyCredentialPolicy(credential);\n\n this._client.pipeline.addPolicy(authPolicy);\n this._tracing = createTracingClient({\n packageName: \"@azure/ai-language-text\",\n packageVersion: SDK_VERSION,\n namespace: \"Microsoft.CognitiveServices\",\n });\n }\n\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 
120 languages are supported.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```js\n * const documents = [<input strings>];\n * const countryHint = \"us\";\n * const results = await client.analyze(\"LanguageDetection\", documents, countryHint);\n *\n * for (let i = 0; i < results.length; i++) {\n * const result = results[i];\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { name, confidenceScore, iso6391Name } = result.primaryLanguage;\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: LanguageDetectionInput[],\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 120 languages are supported.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```js\n * const documents = [<input strings>];\n * const countryHint = \"us\";\n * const results = await client.analyze(\"LanguageDetection\", documents, countryHint);\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { name, confidenceScore, iso6391Name } = result.primaryLanguage;\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param countryHint - Indicates the country of origin for all of\n * the input strings to assist the model in predicting the language they are\n * written in. If unspecified, this value will be set to the default\n * country hint in `TextAnalysisClientOptions`. If set to an empty string,\n * or the string \"none\", the service will apply a model where the country is\n * explicitly unset. 
The same country hint is applied to all strings in the\n * input collection.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: string[],\n countryHint?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to perform the action of choice on the input\n * documents. See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```js\n * const documents = [{\n * id: \"1\",\n * text: \"The food and service aren't the best\",\n * language: \"en\"\n * }];\n * const results = await client.analyze(\"SentimentAnalysis\", documents, {\n * includeOpinionMining: true,\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { sentiment, confidenceScores, sentences } = result;\n * for (const { sentiment, confidenceScores, opinions } of sentences) {\n * for (const { target, assessments } of opinions) {\n * const { text, sentiment, confidenceScores } = target;\n * for (const { text, sentiment } of assessments) {\n * // Do something\n * }\n * }\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```js\n * const documents = [<input documents>];\n * const categoriesFilter = [KnownPiiCategory.USSocialSecurityNumber];\n * const domainFilter = KnownPiiDomain.Phi;\n * const results = await client.analyze(\"PiiEntityRecognition\", documents, {\n * domainFilter, categoriesFilter\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { entities, redactedText } = result;\n * for (const { text, category, confidenceScore, length, offset } of entities) {\n * // Do something\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: TextDocumentInput[],\n options?: AnalyzeActionParameters<ActionName> & 
TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n\n /**\n * Runs a predictive model to perform the action of choice on the input\n * strings. See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```js\n * const documents = [\"The food and service aren't the best\"];\n * const results = await client.analyze(\"SentimentAnalysis\", documents, {\n * includeOpinionMining: true,\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { sentiment, confidenceScores, sentences } = result;\n * for (const { sentiment, confidenceScores, opinions } of sentences) {\n * for (const { target, assessments } of opinions) {\n * const { text, sentiment, confidenceScores } = target;\n * for (const { text, sentiment } of assessments) {\n * // Do something\n * }\n * }\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```js\n * const documents = [<input strings>];\n * const languageCode = \"en\";\n * const categoriesFilter = [KnownPiiCategory.USSocialSecurityNumber];\n * const domainFilter = KnownPiiDomain.Phi;\n * const results = await client.analyze(\"PiiEntityRecognition\", documents, languageCode, {\n * domainFilter, categoriesFilter\n * });\n *\n * for (const result of results) {\n * if (result.error) {\n * // a document has an error instead of results\n * } else {\n * const { entities, redactedText } = result;\n * for (const { text, category, confidenceScore, length, offset } of entities) {\n * // Do something\n * }\n * }\n * }\n * ```\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". 
Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://docs.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[],\n languageCode?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>>;\n // implementation\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[] | LanguageDetectionInput[] | TextDocumentInput[],\n languageOrCountryHintOrOptions?:\n | string\n | (AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions),\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions\n ): Promise<AnalyzeResult<ActionName>> {\n let realOptions: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions;\n\n if (documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n let realInputs: LanguageDetectionInput[] | TextDocumentInput[];\n if (isStringArray(documents)) {\n if (actionName === \"LanguageDetection\") {\n realInputs = convertToLanguageDetectionInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultCountryHint\n );\n } else {\n realInputs = convertToTextDocumentInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultLanguage\n );\n }\n realOptions = options || ({} as any);\n } else {\n realInputs = documents;\n realOptions =\n (languageOrCountryHintOrOptions as AnalyzeActionParameters<ActionName> &\n TextAnalysisOperationOptions) || {};\n }\n const { options: operationOptions, rest: action } = getOperationOptions(realOptions);\n return this._tracing.withSpan(\n \"TextAnalysisClient.analyze\",\n operationOptions,\n async (updatedOptions: TextAnalysisOperationOptions) =>\n throwError(\n this._client\n .analyze(\n {\n kind: actionName,\n analysisInput: {\n documents: realInputs,\n },\n parameters: action,\n } as any,\n updatedOptions\n )\n .then(\n (result) =>\n transformActionResult(\n actionName,\n realInputs.map(({ id }) => id),\n result\n ) as AnalyzeResult<ActionName>\n )\n )\n );\n }\n\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. 
In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Key phrase extraction and Pii entity recognition\n *\n * ```js\n * const poller = await client.beginAnalyzeBatch(\n * [{ kind: \"KeyPhraseExtraction\" }, { kind: \"PiiEntityRecognition\" }],\n * documents\n * );\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * throw new Error(`Unexpected error`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://docs.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text.\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: string[],\n languageCode?: string,\n options?: BeginAnalyzeBatchOptions\n ): Promise<AnalyzeBatchPoller>;\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. 
In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://docs.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Keyphrase extraction and Pii entity recognition\n *\n * ```js\n * const poller = await client.beginAnalyzeBatch(\n * [{ kind: \"KeyPhraseExtraction\" }, { kind: \"PiiEntityRecognition\" }],\n * documents\n * );\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * throw new Error(`Unexpected error`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for (const doc of actionResult.results) {\n * // do something\n * }\n * break;\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[],\n options?: BeginAnalyzeBatchOptions\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[] | string[],\n languageOrOptions?: BeginAnalyzeBatchOptions | string,\n options: BeginAnalyzeBatchOptions = {}\n ): Promise<AnalyzeBatchPoller> {\n let realOptions: BeginAnalyzeBatchOptions;\n let realInputs: TextDocumentInput[];\n\n if (!Array.isArray(documents) || documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n if (isStringArray(documents)) {\n const languageCode = (languageOrOptions as string) ?? 
this.defaultLanguage;\n realInputs = convertToTextDocumentInput(documents, languageCode);\n realOptions = options;\n } else {\n realInputs = documents;\n realOptions = languageOrOptions as BeginAnalyzeBatchOptions;\n }\n const realActions = actions.map(\n ({ kind, actionName, ...rest }): AnalyzeBatchActionUnion & { parameters: unknown } => ({\n kind,\n actionName,\n parameters: rest,\n })\n );\n const { includeStatistics, updateIntervalInMs, displayName, ...rest } = realOptions;\n const lro = createAnalyzeBatchLro({\n client: this._client,\n commonOptions: rest,\n documents: realInputs,\n initialRequestOptions: { displayName },\n pollRequestOptions: { includeStatistics },\n tasks: realActions,\n tracing: this._tracing,\n });\n\n const docIds = realInputs.map(({ id }) => id);\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(docIds),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n\n /**\n * Creates a poller from the serialized state of another poller. This can be\n * useful when you want to create pollers on a different host or a poller\n * needs to be constructed after the original one is not in scope.\n *\n * @param serializedState - the serialized state of another poller. It is the\n * result of `poller.toString()`\n * @param options - optional settings for the operation\n *\n * # Example\n *\n * `client.beginAnalyzeBatch` returns a promise that will resolve to a poller.\n * The state of the poller can be serialized and used to create another as follows:\n *\n * ```js\n * const serializedState = poller.toString();\n * const rehydratedPoller = await client.createAnalyzeBatchPoller(serializedState);\n * const actionResults = await rehydratedPoller.pollUntilDone();\n * ```\n */\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options?: RestoreAnalyzeBatchPollerOptions\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options: RestoreAnalyzeBatchPollerOptions = {}\n ): Promise<AnalyzeBatchPoller> {\n const { includeStatistics, updateIntervalInMs, ...rest } = options;\n const docIds = getDocIDsFromState(serializedState);\n const lro = createCreateAnalyzeBatchPollerLro({\n client: this._client,\n options: { ...rest, includeStatistics },\n tracing: this._tracing,\n });\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n restoreFrom: serializedState,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n}\n"]}
|
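
Aside from the regenerated mappings, the visible difference between the two embedded `sourcesContent` blocks above appears to be doc- and naming-level: the PII snippet and the `beginAnalyzeBatch` implementation now use the name `languageCode` where the previous version used `languageHint`, and the sentence about falling back to the `defaultLanguage` option when `"auto"` language detection fails has been dropped from the `analyze` and `beginAnalyzeBatch` doc comments. For orientation, below is a minimal sketch of the batch workflow that the embedded JSDoc describes, assuming a Language resource endpoint and key supplied through environment variables (the variable names are illustrative, not part of the package). Note that the JSDoc example refers to `client.createAnalyzeBatchPoller`, while the method actually declared in this file is `restoreAnalyzeBatchPoller`; the sketch uses the declared name.

```js
// A minimal sketch of the batch workflow described in the embedded JSDoc.
// LANGUAGE_ENDPOINT and LANGUAGE_API_KEY are illustrative environment
// variable names, not part of the package.
import { TextAnalysisClient, AzureKeyCredential } from "@azure/ai-language-text";

const endpoint = process.env.LANGUAGE_ENDPOINT; // e.g. "https://<resource name>.cognitiveservices.azure.com"
const credential = new AzureKeyCredential(process.env.LANGUAGE_API_KEY);
const client = new TextAnalysisClient(endpoint, credential);

const documents = ["I had a wonderful trip to Seattle last week."];

// Start a long-running batch with two actions; "en" is the languageCode
// parameter from the string[] overload of beginAnalyzeBatch.
const poller = await client.beginAnalyzeBatch(
  [{ kind: "KeyPhraseExtraction" }, { kind: "PiiEntityRecognition" }],
  documents,
  "en"
);

// The poller state can be serialized and rehydrated later, possibly in a
// different process, via restoreAnalyzeBatchPoller.
const serializedState = poller.toString();
const restoredPoller = await client.restoreAnalyzeBatchPoller(serializedState);

const actionResults = await restoredPoller.pollUntilDone();
for await (const actionResult of actionResults) {
  if (actionResult.error) {
    throw new Error("Unexpected error");
  }
  for (const doc of actionResult.results) {
    if (!doc.error) {
      console.log(actionResult.kind, doc);
    }
  }
}
```

The same client instance also serves the synchronous-style `analyze` overloads shown in the doc comments (for example `client.analyze("PiiEntityRecognition", documents, "en", { domainFilter, categoriesFilter })`), so the poller path is only needed for the batch actions.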