@azure/ai-language-text 1.1.0-alpha.20250618.1 → 1.1.0-alpha.20250722.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (66)
  1. package/dist/browser/generated/generatedClient.js +10 -3
  2. package/dist/browser/generated/generatedClient.js.map +1 -1
  3. package/dist/browser/generated/models/mappers.js +407 -161
  4. package/dist/browser/generated/models/mappers.js.map +1 -1
  5. package/dist/browser/generated/operations/analyzeText.js +1 -0
  6. package/dist/browser/generated/operations/analyzeText.js.map +1 -1
  7. package/dist/browser/lro.js +22 -11
  8. package/dist/browser/lro.js.map +1 -1
  9. package/dist/browser/models.js +1 -1
  10. package/dist/browser/models.js.map +1 -1
  11. package/dist/browser/textAnalysisClient.js +25 -22
  12. package/dist/browser/textAnalysisClient.js.map +1 -1
  13. package/dist/browser/transforms.js +127 -47
  14. package/dist/browser/transforms.js.map +1 -1
  15. package/dist/browser/util.js +1 -2
  16. package/dist/browser/util.js.map +1 -1
  17. package/dist/commonjs/generated/generatedClient.js +10 -3
  18. package/dist/commonjs/generated/generatedClient.js.map +1 -1
  19. package/dist/commonjs/generated/models/mappers.js +407 -161
  20. package/dist/commonjs/generated/models/mappers.js.map +1 -1
  21. package/dist/commonjs/generated/operations/analyzeText.js +1 -0
  22. package/dist/commonjs/generated/operations/analyzeText.js.map +1 -1
  23. package/dist/commonjs/lro.js +22 -11
  24. package/dist/commonjs/lro.js.map +1 -1
  25. package/dist/commonjs/models.js +1 -1
  26. package/dist/commonjs/models.js.map +1 -1
  27. package/dist/commonjs/textAnalysisClient.js +25 -22
  28. package/dist/commonjs/textAnalysisClient.js.map +1 -1
  29. package/dist/commonjs/transforms.js +127 -47
  30. package/dist/commonjs/transforms.js.map +1 -1
  31. package/dist/commonjs/tsdoc-metadata.json +11 -11
  32. package/dist/commonjs/util.js +1 -2
  33. package/dist/commonjs/util.js.map +1 -1
  34. package/dist/esm/generated/generatedClient.js +10 -3
  35. package/dist/esm/generated/generatedClient.js.map +1 -1
  36. package/dist/esm/generated/models/mappers.js +407 -161
  37. package/dist/esm/generated/models/mappers.js.map +1 -1
  38. package/dist/esm/generated/operations/analyzeText.js +1 -0
  39. package/dist/esm/generated/operations/analyzeText.js.map +1 -1
  40. package/dist/esm/lro.js +22 -11
  41. package/dist/esm/lro.js.map +1 -1
  42. package/dist/esm/models.js +1 -1
  43. package/dist/esm/models.js.map +1 -1
  44. package/dist/esm/textAnalysisClient.js +25 -22
  45. package/dist/esm/textAnalysisClient.js.map +1 -1
  46. package/dist/esm/transforms.js +127 -47
  47. package/dist/esm/transforms.js.map +1 -1
  48. package/dist/esm/util.js +1 -2
  49. package/dist/esm/util.js.map +1 -1
  50. package/dist/react-native/generated/generatedClient.js +10 -3
  51. package/dist/react-native/generated/generatedClient.js.map +1 -1
  52. package/dist/react-native/generated/models/mappers.js +407 -161
  53. package/dist/react-native/generated/models/mappers.js.map +1 -1
  54. package/dist/react-native/generated/operations/analyzeText.js +1 -0
  55. package/dist/react-native/generated/operations/analyzeText.js.map +1 -1
  56. package/dist/react-native/lro.js +22 -11
  57. package/dist/react-native/lro.js.map +1 -1
  58. package/dist/react-native/models.js +1 -1
  59. package/dist/react-native/models.js.map +1 -1
  60. package/dist/react-native/textAnalysisClient.js +25 -22
  61. package/dist/react-native/textAnalysisClient.js.map +1 -1
  62. package/dist/react-native/transforms.js +127 -47
  63. package/dist/react-native/transforms.js.map +1 -1
  64. package/dist/react-native/util.js +1 -2
  65. package/dist/react-native/util.js.map +1 -1
  66. package/package.json +2 -2
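Most of the churn is concentrated in the compiled `textAnalysisClient`, the LRO helpers (`lro.js`), `transforms`, and the generated `mappers`, repeated once per build target (browser, commonjs, esm, react-native); the remaining files are source maps (+1/-1 each) and small metadata files such as `package.json` and `tsdoc-metadata.json`. In the source-map hunk reproduced below (one of the `textAnalysisClient.js.map` changes), the embedded TypeScript source appears unchanged between the two versions in the portion shown, so the client usage documented there still applies. A minimal usage sketch based on the samples embedded in that source, assuming a Language resource endpoint and API key (placeholders below):

```ts
import { AzureKeyCredential, TextAnalysisClient } from "@azure/ai-language-text";

// Placeholders: substitute the endpoint and API key of your Language resource.
const endpoint = "https://<resource name>.cognitiveservices.azure.com";
const credential = new AzureKeyCredential("<api key>");
const client = new TextAnalysisClient(endpoint, credential);

// Run a single pre-built action over a batch of input strings.
const documents = ["I had the best day of my life."];
const results = await client.analyze("SentimentAnalysis", documents);

for (const result of results) {
  if (!result.error) {
    console.log(`Document ${result.id}: overall sentiment ${result.sentiment}`);
  }
}
```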
@@ -1 +1 @@
- {"version":3,"file":"textAnalysisClient.js","sourceRoot":"","sources":["../../src/textAnalysisClient.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;;AAmBlC,OAAO,EAAE,uBAAuB,EAAE,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAEtE,OAAO,EAAE,iBAAiB,EAAE,MAAM,kBAAkB,CAAC;AAErD,OAAO,EAAE,mBAAmB,EAAE,MAAM,qBAAqB,CAAC;AAC1D,OAAO,EACL,+BAA+B,EAC/B,0BAA0B,EAC1B,mBAAmB,EACnB,aAAa,GACd,MAAM,WAAW,CAAC;AACnB,OAAO,EACL,qBAAqB,EACrB,iCAAiC,EACjC,4BAA4B,EAC5B,wBAAwB,EACxB,kBAAkB,EAClB,oBAAoB,GACrB,MAAM,UAAU,CAAC;AAClB,OAAO,EAAE,UAAU,EAAE,qBAAqB,EAAE,MAAM,iBAAiB,CAAC;AACpE,OAAO,EAAE,eAAe,EAAE,MAAM,gCAAgC,CAAC;AACjE,OAAO,EAAE,+BAA+B,EAAE,MAAM,2BAA2B,CAAC;AAC5E,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,qCAAqC,EAAE,MAAM,+BAA+B,CAAC;AAEtF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAkCG;AACH,MAAM,OAAO,kBAAkB;IA4D7B,YACE,WAAmB,EACnB,UAA2C,EAC3C,UAAqC,EAAE;QAEvC,MAAM,EACJ,kBAAkB,GAAG,IAAI,EACzB,eAAe,GAAG,IAAI,EACtB,cAAc,KAEZ,OAAO,EADN,eAAe,UAChB,OAAO,EALL,2DAKL,CAAU,CAAC;QACZ,IAAI,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;QAC7C,IAAI,CAAC,eAAe,GAAG,eAAe,CAAC;QAEvC,MAAM,uBAAuB,iDACxB,eAAe,GACf;YACD,cAAc,EAAE;gBACd,MAAM,EAAE,MAAM,CAAC,IAAI;gBACnB,4BAA4B,EAAE,CAAC,6BAA6B,EAAE,iBAAiB,CAAC;aACjF;SACF,KACD,UAAU,EAAE,cAAc,GAC3B,CAAC;QAEF,IAAI,CAAC,OAAO,GAAG,IAAI,eAAe,CAAC,WAAW,EAAE,uBAAuB,CAAC,CAAC;QAEzE,MAAM,UAAU,GAAG,iBAAiB,CAAC,UAAU,CAAC;YAC9C,CAAC,CAAC,+BAA+B,CAAC,EAAE,UAAU,EAAE,MAAM,EAAE,uBAAuB,EAAE,CAAC;YAClF,CAAC,CAAC,qCAAqC,CAAC,UAAU,CAAC,CAAC;QAEtD,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;QAC5C,IAAI,CAAC,QAAQ,GAAG,mBAAmB,CAAC;YAClC,WAAW,EAAE,yBAAyB;YACtC,cAAc,EAAE,WAAW;YAC3B,SAAS,EAAE,6BAA6B;SACzC,CAAC,CAAC;IACL,CAAC;IA6UD,iBAAiB;IACV,KAAK,CAAC,OAAO,CAClB,UAAsB,EACtB,SAAoE,EACpE,8BAEwE,EACxE,OAA4E;QAE5E,IAAI,WAA+E,CAAC;QAEpF,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC3B,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;QAC3D,CAAC;QAED,IAAI,UAA0D,CAAC;QAC/D,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE,CAAC;YAC7B,IAAI,UAAU,KAAK,mBAAmB,EAAE,CAAC;gBACvC,UAAU,GAAG,+BAA+B,CAC1C,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,kBAAkB,CAC5B,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACN,UAAU,GAAG,0BAA0B,CACrC,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,eAAe,CACzB,CAAC;YACJ,CAAC;YACD,WAAW,GAAG,OAAO,IAAK,EAAU,CAAC;QACvC,CAAC;aAAM,CAAC;YACN,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW;gBACR,8BAC8B,IAAI,EAAE,CAAC;QAC1C,CAAC;QACD,MAAM,EAAE,OAAO,EAAE,gBAAgB,EAAE,IAAI,EAAE,MAAM,EAAE,GAAG,mBAAmB,CAAC,WAAW,CAAC,CAAC;QACrF,OAAO,IAAI,CAAC,QAAQ,CAAC,QAAQ,CAC3B,4BAA4B,EAC5B,gBAAgB,EAChB,KAAK,EAAE,cAA4C,EAAE,EAAE,CACrD,UAAU,CACR,IAAI,CAAC,OAAO;aACT,OAAO,CACN;YACE,IAAI,EAAE,UAAU;YAChB,aAAa,EAAE;gBACb,SAAS,EAAE,UAAU;aACtB;YACD,UAAU,EAAE,MAAM;SACZ,EACR,cAAc,CACf;aACA,IAAI,CACH,CAAC,MAAM,EAAE,EAAE,CACT,qBAAqB,CACnB,UAAU,EACV,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,EAC9B,MAAM,CACsB,CACjC,CACJ,CACJ,CAAC;IACJ,CAAC;IAoQD,iBAAiB;IACjB,KAAK,CAAC,iBAAiB,CACrB,OAA6B,EAC7B,SAAyC,EACzC,iBAAqD,EACrD,UAAoC,EAAE;;QAEtC,IAAI,WAAqC,CAAC;QAC1C,IAAI,UAA+B,CAAC;QAEpC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACxD,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;QAC3D,CAAC;QAED,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE,CAAC;YAC7B,MAAM,YAAY,GAAG,MAAC,iBAA4B,mCAAI,IAAI,CAAC,eAAe,CAAC;YAC3E,UAAU,GAAG,0BAA0B,CAAC,SAAS,EAAE,YAAY,CAAC,CAAC;YACjE,WAAW,GAAG,OAAO,CAAC;QACxB,CAAC;aAAM,CAAC;YACN,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW,GAAG,iBAA6C,CAAC;QAC9D,CAAC;QACD,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,CAC7B,CAAC,EAA6B,EAAqD,EAAE;gBAApF,EAAE,IAAI,EAAE,UAAU,OAAW,EAAN,IAAI,cAA3B,sBAA6B,CAAF;YAA
0D,OAAA,CAAC;gBACrF,IAAI;gBACJ,UAAU;gBACV,UAAU,EAAE,IAAI;aACjB,CAAC,CAAA;SAAA,CACH,CAAC;QACF,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,WAAW,KAAc,WAAW,EAApB,IAAI,UAAK,WAAW,EAA7E,0DAA+D,CAAc,CAAC;QACpF,MAAM,GAAG,GAAG,qBAAqB,CAAC;YAChC,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,aAAa,EAAE,IAAI;YACnB,SAAS,EAAE,UAAU;YACrB,qBAAqB,EAAE,EAAE,WAAW,EAAE;YACtC,kBAAkB,EAAE,EAAE,iBAAiB,EAAE;YACzC,KAAK,EAAE,WAAW;YAClB,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,MAAM,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC;QAE9C,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,kCAAO,IAAI,KAAE,iBAAiB,GAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,CAAC,MAAM,CAAC;YAC7C,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;IAgBD,iBAAiB;IACjB,KAAK,CAAC,yBAAyB,CAC7B,eAAuB,EACvB,UAA4C,EAAE;QAE9C,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,KAAc,OAAO,EAAhB,IAAI,UAAK,OAAO,EAA5D,2CAAkD,CAAU,CAAC;QACnE,MAAM,MAAM,GAAG,kBAAkB,CAAC,eAAe,CAAC,CAAC;QACnD,MAAM,GAAG,GAAG,iCAAiC,CAAC;YAC5C,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO,kCAAO,IAAI,KAAE,iBAAiB,GAAE;YACvC,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,WAAW,EAAE,eAAe;YAC5B,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,kCAAO,IAAI,KAAE,iBAAiB,GAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,EAAE;YACvC,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;CACF","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type {\n AnalyzeActionName,\n AnalyzeActionParameters,\n AnalyzeBatchAction,\n AnalyzeBatchPoller,\n AnalyzeResult,\n BeginAnalyzeBatchOptions,\n RestoreAnalyzeBatchPollerOptions,\n TextAnalysisClientOptions,\n TextAnalysisOperationOptions,\n} from \"./models.js\";\nimport type {\n AnalyzeBatchActionUnion,\n GeneratedClientOptionalParams,\n LanguageDetectionInput,\n TextDocumentInput,\n} from \"./generated/models/index.js\";\nimport { DEFAULT_COGNITIVE_SCOPE, SDK_VERSION } from \"./constants.js\";\nimport type { KeyCredential, TokenCredential } from \"@azure/core-auth\";\nimport { isTokenCredential } from \"@azure/core-auth\";\nimport type { TracingClient } from \"@azure/core-tracing\";\nimport { createTracingClient } from \"@azure/core-tracing\";\nimport {\n convertToLanguageDetectionInput,\n convertToTextDocumentInput,\n getOperationOptions,\n isStringArray,\n} from \"./util.js\";\nimport {\n createAnalyzeBatchLro,\n createCreateAnalyzeBatchPollerLro,\n createPollerWithCancellation,\n createUpdateAnalyzeState,\n getDocIDsFromState,\n processAnalyzeResult,\n} from \"./lro.js\";\nimport { throwError, transformActionResult } from \"./transforms.js\";\nimport { GeneratedClient } from \"./generated/generatedClient.js\";\nimport { 
bearerTokenAuthenticationPolicy } from \"@azure/core-rest-pipeline\";\nimport { createHttpPoller } from \"@azure/core-lro\";\nimport { logger } from \"./logger.js\";\nimport { textAnalyticsAzureKeyCredentialPolicy } from \"./azureKeyCredentialPolicy.js\";\n\n/**\n * A client for interacting with the text analysis features in Azure Cognitive\n * Language Service.\n *\n * The client needs the endpoint of a Language resource and an authentication\n * method such as an API key or AAD. The API key and endpoint can be found in\n * the Language resource page in the Azure portal. They will be located in the\n * resource's Keys and Endpoint page, under Resource Management.\n *\n * ### Examples for authentication:\n *\n * #### API Key\n *\n * ```ts snippet:ReadmeSampleCreateClient_Key\n * import { AzureKeyCredential, TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * #### Azure Active Directory\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```ts snippet:ReadmeSampleCreateClient_ActiveDirectory\n * import { DefaultAzureCredential } from \"@azure/identity\";\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n */\nexport class TextAnalysisClient {\n private readonly _client: GeneratedClient;\n private readonly _tracing: TracingClient;\n private readonly defaultCountryHint: string;\n private readonly defaultLanguage: string;\n\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * ```ts snippet:ReadmeSampleCreateClient_Key\n * import { AzureKeyCredential, TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Key credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(endpointUrl: string, credential: KeyCredential, options?: TextAnalysisClientOptions);\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. 
They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```ts snippet:ReadmeSampleCreateClient_ActiveDirectory\n * import { DefaultAzureCredential } from \"@azure/identity\";\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Token credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(\n endpointUrl: string,\n credential: TokenCredential,\n options?: TextAnalysisClientOptions,\n );\n constructor(\n endpointUrl: string,\n credential: TokenCredential | KeyCredential,\n options: TextAnalysisClientOptions = {},\n ) {\n const {\n defaultCountryHint = \"us\",\n defaultLanguage = \"en\",\n serviceVersion,\n ...pipelineOptions\n } = options;\n this.defaultCountryHint = defaultCountryHint;\n this.defaultLanguage = defaultLanguage;\n\n const internalPipelineOptions: GeneratedClientOptionalParams = {\n ...pipelineOptions,\n ...{\n loggingOptions: {\n logger: logger.info,\n additionalAllowedHeaderNames: [\"x-ms-correlation-request-id\", \"x-ms-request-id\"],\n },\n },\n apiVersion: serviceVersion,\n };\n\n this._client = new GeneratedClient(endpointUrl, internalPipelineOptions);\n\n const authPolicy = isTokenCredential(credential)\n ? bearerTokenAuthenticationPolicy({ credential, scopes: DEFAULT_COGNITIVE_SCOPE })\n : textAnalyticsAzureKeyCredentialPolicy(credential);\n\n this._client.pipeline.addPolicy(authPolicy);\n this._tracing = createTracingClient({\n packageName: \"@azure/ai-language-text\",\n packageVersion: SDK_VERSION,\n namespace: \"Microsoft.CognitiveServices\",\n });\n }\n\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 
120 languages are supported.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```ts snippet:Sample_LanguageDetection\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"This document is written in English.\",\n * \"Este es un document escrito en Español.\",\n * \"这是一个用中文写的文件\",\n * \"Dies ist ein Dokument in deutsche Sprache.\",\n * \"Detta är ett dokument skrivet på engelska.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const result = await client.analyze(\"LanguageDetection\", documents, \"us\", {\n * modelVersion: \"2022-04-10-preview\",\n * });\n *\n * for (const doc of result) {\n * if (!doc.error) {\n * console.log(\n * `Primary language: ${doc.primaryLanguage.name} (iso6391 name: ${doc.primaryLanguage.iso6391Name})`,\n * );\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: LanguageDetectionInput[],\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 
120 languages are supported.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```ts snippet:Sample_LanguageDetection\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"This document is written in English.\",\n * \"Este es un document escrito en Español.\",\n * \"这是一个用中文写的文件\",\n * \"Dies ist ein Dokument in deutsche Sprache.\",\n * \"Detta är ett dokument skrivet på engelska.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const result = await client.analyze(\"LanguageDetection\", documents, \"us\", {\n * modelVersion: \"2022-04-10-preview\",\n * });\n *\n * for (const doc of result) {\n * if (!doc.error) {\n * console.log(\n * `Primary language: ${doc.primaryLanguage.name} (iso6391 name: ${doc.primaryLanguage.iso6391Name})`,\n * );\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param countryHint - Indicates the country of origin for all of\n * the input strings to assist the model in predicting the language they are\n * written in. If unspecified, this value will be set to the default\n * country hint in `TextAnalysisClientOptions`. If set to an empty string,\n * or the string \"none\", the service will apply a model where the country is\n * explicitly unset. The same country hint is applied to all strings in the\n * input collection.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: string[],\n countryHint?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to perform the action of choice on the input\n * documents. See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```ts snippet:Sample_SentimentAnalysis\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"I had the best day of my life.\",\n * \"This was a waste of my time. 
The speaker put me to sleep.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const results = await client.analyze(\"SentimentAnalysis\", documents);\n *\n * for (let i = 0; i < results.length; i++) {\n * const result = results[i];\n * console.log(`- Document ${result.id}`);\n * if (!result.error) {\n * console.log(`\\tDocument text: ${documents[i]}`);\n * console.log(`\\tOverall Sentiment: ${result.sentiment}`);\n * console.log(\"\\tSentiment confidence scores: \", result.confidenceScores);\n * console.log(\"\\tSentences\");\n * for (const { sentiment, confidenceScores, text } of result.sentences) {\n * console.log(`\\t- Sentence text: ${text}`);\n * console.log(`\\t Sentence sentiment: ${sentiment}`);\n * console.log(\"\\t Confidence scores:\", confidenceScores);\n * }\n * } else {\n * console.error(` Error: ${result.error}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```ts snippet:Sample_PIIEntityRecognition\n * import {\n * TextAnalysisClient,\n * KnownPiiEntityDomain,\n * KnownPiiEntityCategory,\n * } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const documents = [\"My phone number is 555-5555\"];\n *\n * const [result] = await client.analyze(\"PiiEntityRecognition\", documents, \"en\", {\n * domainFilter: KnownPiiEntityDomain.Phi,\n * categoriesFilter: [\n * KnownPiiEntityCategory.PhoneNumber,\n * KnownPiiEntityCategory.USSocialSecurityNumber,\n * ],\n * });\n *\n * if (!result.error) {\n * console.log(`Redacted text: \"${result.redactedText}\"`);\n * console.log(\"Pii Entities: \");\n * for (const entity of result.entities) {\n * console.log(`\\t- \"${entity.text}\" of type ${entity.category}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: TextDocumentInput[],\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n\n /**\n * Runs a predictive model to perform the action of choice on the input\n * strings. 
See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```ts snippet:Sample_SentimentAnalysis\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"I had the best day of my life.\",\n * \"This was a waste of my time. The speaker put me to sleep.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const results = await client.analyze(\"SentimentAnalysis\", documents);\n *\n * for (let i = 0; i < results.length; i++) {\n * const result = results[i];\n * console.log(`- Document ${result.id}`);\n * if (!result.error) {\n * console.log(`\\tDocument text: ${documents[i]}`);\n * console.log(`\\tOverall Sentiment: ${result.sentiment}`);\n * console.log(\"\\tSentiment confidence scores: \", result.confidenceScores);\n * console.log(\"\\tSentences\");\n * for (const { sentiment, confidenceScores, text } of result.sentences) {\n * console.log(`\\t- Sentence text: ${text}`);\n * console.log(`\\t Sentence sentiment: ${sentiment}`);\n * console.log(\"\\t Confidence scores:\", confidenceScores);\n * }\n * } else {\n * console.error(` Error: ${result.error}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```ts snippet:Sample_PIIEntityRecognition\n * import {\n * TextAnalysisClient,\n * KnownPiiEntityDomain,\n * KnownPiiEntityCategory,\n * } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const documents = [\"My phone number is 555-5555\"];\n *\n * const [result] = await client.analyze(\"PiiEntityRecognition\", documents, \"en\", {\n * domainFilter: KnownPiiEntityDomain.Phi,\n * categoriesFilter: [\n * KnownPiiEntityCategory.PhoneNumber,\n * KnownPiiEntityCategory.USSocialSecurityNumber,\n * ],\n * });\n *\n * if (!result.error) {\n * console.log(`Redacted text: \"${result.redactedText}\"`);\n * console.log(\"Pii Entities: \");\n * for (const entity of result.entities) {\n * console.log(`\\t- \"${entity.text}\" of type ${entity.category}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. 
If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://learn.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[],\n languageCode?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n // implementation\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[] | LanguageDetectionInput[] | TextDocumentInput[],\n languageOrCountryHintOrOptions?:\n | string\n | (AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions),\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>> {\n let realOptions: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions;\n\n if (documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n let realInputs: LanguageDetectionInput[] | TextDocumentInput[];\n if (isStringArray(documents)) {\n if (actionName === \"LanguageDetection\") {\n realInputs = convertToLanguageDetectionInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultCountryHint,\n );\n } else {\n realInputs = convertToTextDocumentInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultLanguage,\n );\n }\n realOptions = options || ({} as any);\n } else {\n realInputs = documents;\n realOptions =\n (languageOrCountryHintOrOptions as AnalyzeActionParameters<ActionName> &\n TextAnalysisOperationOptions) || {};\n }\n const { options: operationOptions, rest: action } = getOperationOptions(realOptions);\n return this._tracing.withSpan(\n \"TextAnalysisClient.analyze\",\n operationOptions,\n async (updatedOptions: TextAnalysisOperationOptions) =>\n throwError(\n this._client\n .analyze(\n {\n kind: actionName,\n analysisInput: {\n documents: realInputs,\n },\n parameters: action,\n } as any,\n updatedOptions,\n )\n .then(\n (result) =>\n transformActionResult(\n actionName,\n realInputs.map(({ id }) => id),\n result,\n ) as AnalyzeResult<ActionName>,\n ),\n ),\n );\n }\n\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. 
In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Key phrase extraction and Pii entity recognition\n *\n * ```ts snippet:Sample_ActionBatching\n * import { TextAnalysisClient, AnalyzeBatchAction } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"Microsoft was founded by Bill Gates and Paul Allen.\",\n * \"Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.\",\n * \"I need to take my cat to the veterinarian.\",\n * \"The employee's SSN is 555-55-5555.\",\n * \"We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their online menu at www.contososteakhouse.com, call 312-555-0176 or send email to order@contososteakhouse.com! The only complaint I have is the food didn't come fast enough. Overall I highly recommend it!\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const actions: AnalyzeBatchAction[] = [\n * {\n * kind: \"EntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"PiiEntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"KeyPhraseExtraction\",\n * modelVersion: \"latest\",\n * },\n * ];\n * const poller = await client.beginAnalyzeBatch(actions, documents, \"en\");\n *\n * poller.onProgress(() => {\n * console.log(\n * `Number of actions still in progress: ${poller.getOperationState().actionInProgressCount}`,\n * );\n * });\n *\n * console.log(`The operation was created on ${poller.getOperationState().createdOn}`);\n *\n * console.log(`The operation results will expire on ${poller.getOperationState().expiresOn}`);\n *\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * const { code, message } = actionResult.error;\n * throw new Error(`Unexpected error (${code}): ${message}`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tKey phrases:\");\n * for (const phrase of doc.keyPhrases) {\n * console.log(`\\t- ${phrase}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"EntityRecognition\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tEntities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for 
(const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tPii Entities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * default: {\n * throw new Error(`Unexpected action results: ${actionResult.kind}`);\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://learn.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text.\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: string[],\n languageCode?: string,\n options?: BeginAnalyzeBatchOptions,\n ): Promise<AnalyzeBatchPoller>;\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Keyphrase extraction and Pii entity recognition\n *\n * ```ts snippet:Sample_ActionBatching\n * import { TextAnalysisClient, AnalyzeBatchAction } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"Microsoft was founded by Bill Gates and Paul Allen.\",\n * \"Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.\",\n * \"I need to take my cat to the veterinarian.\",\n * \"The employee's SSN is 555-55-5555.\",\n * \"We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their online menu at www.contososteakhouse.com, call 312-555-0176 or send email to order@contososteakhouse.com! The only complaint I have is the food didn't come fast enough. 
Overall I highly recommend it!\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const actions: AnalyzeBatchAction[] = [\n * {\n * kind: \"EntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"PiiEntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"KeyPhraseExtraction\",\n * modelVersion: \"latest\",\n * },\n * ];\n * const poller = await client.beginAnalyzeBatch(actions, documents, \"en\");\n *\n * poller.onProgress(() => {\n * console.log(\n * `Number of actions still in progress: ${poller.getOperationState().actionInProgressCount}`,\n * );\n * });\n *\n * console.log(`The operation was created on ${poller.getOperationState().createdOn}`);\n *\n * console.log(`The operation results will expire on ${poller.getOperationState().expiresOn}`);\n *\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * const { code, message } = actionResult.error;\n * throw new Error(`Unexpected error (${code}): ${message}`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tKey phrases:\");\n * for (const phrase of doc.keyPhrases) {\n * console.log(`\\t- ${phrase}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"EntityRecognition\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tEntities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tPii Entities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * default: {\n * throw new Error(`Unexpected action results: ${actionResult.kind}`);\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[],\n options?: BeginAnalyzeBatchOptions,\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[] | string[],\n languageOrOptions?: BeginAnalyzeBatchOptions | string,\n options: BeginAnalyzeBatchOptions = {},\n ): Promise<AnalyzeBatchPoller> {\n let realOptions: BeginAnalyzeBatchOptions;\n let realInputs: TextDocumentInput[];\n\n if (!Array.isArray(documents) || documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n if (isStringArray(documents)) {\n const languageCode = (languageOrOptions as string) ?? 
this.defaultLanguage;\n realInputs = convertToTextDocumentInput(documents, languageCode);\n realOptions = options;\n } else {\n realInputs = documents;\n realOptions = languageOrOptions as BeginAnalyzeBatchOptions;\n }\n const realActions = actions.map(\n ({ kind, actionName, ...rest }): AnalyzeBatchActionUnion & { parameters: unknown } => ({\n kind,\n actionName,\n parameters: rest,\n }),\n );\n const { includeStatistics, updateIntervalInMs, displayName, ...rest } = realOptions;\n const lro = createAnalyzeBatchLro({\n client: this._client,\n commonOptions: rest,\n documents: realInputs,\n initialRequestOptions: { displayName },\n pollRequestOptions: { includeStatistics },\n tasks: realActions,\n tracing: this._tracing,\n });\n\n const docIds = realInputs.map(({ id }) => id);\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(docIds),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n\n /**\n * Creates a poller from the serialized state of another poller. This can be\n * useful when you want to create pollers on a different host or a poller\n * needs to be constructed after the original one is not in scope.\n *\n * @param serializedState - the serialized state of another poller. It is the\n * result of `poller.toString()`\n * @param options - optional settings for the operation\n *\n */\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options?: RestoreAnalyzeBatchPollerOptions,\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options: RestoreAnalyzeBatchPollerOptions = {},\n ): Promise<AnalyzeBatchPoller> {\n const { includeStatistics, updateIntervalInMs, ...rest } = options;\n const docIds = getDocIDsFromState(serializedState);\n const lro = createCreateAnalyzeBatchPollerLro({\n client: this._client,\n options: { ...rest, includeStatistics },\n tracing: this._tracing,\n });\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n restoreFrom: serializedState,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n}\n"]}
+ {"version":3,"file":"textAnalysisClient.js","sourceRoot":"","sources":["../../src/textAnalysisClient.ts"],"names":[],"mappings":"AAAA,uCAAuC;AACvC,kCAAkC;AAmBlC,OAAO,EAAE,uBAAuB,EAAE,WAAW,EAAE,MAAM,gBAAgB,CAAC;AAEtE,OAAO,EAAE,iBAAiB,EAAE,MAAM,kBAAkB,CAAC;AAErD,OAAO,EAAE,mBAAmB,EAAE,MAAM,qBAAqB,CAAC;AAC1D,OAAO,EACL,+BAA+B,EAC/B,0BAA0B,EAC1B,mBAAmB,EACnB,aAAa,GACd,MAAM,WAAW,CAAC;AACnB,OAAO,EACL,qBAAqB,EACrB,iCAAiC,EACjC,4BAA4B,EAC5B,wBAAwB,EACxB,kBAAkB,EAClB,oBAAoB,GACrB,MAAM,UAAU,CAAC;AAClB,OAAO,EAAE,UAAU,EAAE,qBAAqB,EAAE,MAAM,iBAAiB,CAAC;AACpE,OAAO,EAAE,eAAe,EAAE,MAAM,gCAAgC,CAAC;AACjE,OAAO,EAAE,+BAA+B,EAAE,MAAM,2BAA2B,CAAC;AAC5E,OAAO,EAAE,gBAAgB,EAAE,MAAM,iBAAiB,CAAC;AACnD,OAAO,EAAE,MAAM,EAAE,MAAM,aAAa,CAAC;AACrC,OAAO,EAAE,qCAAqC,EAAE,MAAM,+BAA+B,CAAC;AAEtF;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;GAkCG;AACH,MAAM,OAAO,kBAAkB;IACZ,OAAO,CAAkB;IACzB,QAAQ,CAAgB;IACxB,kBAAkB,CAAS;IAC3B,eAAe,CAAS;IAwDzC,YACE,WAAmB,EACnB,UAA2C,EAC3C,UAAqC,EAAE;QAEvC,MAAM,EACJ,kBAAkB,GAAG,IAAI,EACzB,eAAe,GAAG,IAAI,EACtB,cAAc,EACd,GAAG,eAAe,EACnB,GAAG,OAAO,CAAC;QACZ,IAAI,CAAC,kBAAkB,GAAG,kBAAkB,CAAC;QAC7C,IAAI,CAAC,eAAe,GAAG,eAAe,CAAC;QAEvC,MAAM,uBAAuB,GAAkC;YAC7D,GAAG,eAAe;YAClB,GAAG;gBACD,cAAc,EAAE;oBACd,MAAM,EAAE,MAAM,CAAC,IAAI;oBACnB,4BAA4B,EAAE,CAAC,6BAA6B,EAAE,iBAAiB,CAAC;iBACjF;aACF;YACD,UAAU,EAAE,cAAc;SAC3B,CAAC;QAEF,IAAI,CAAC,OAAO,GAAG,IAAI,eAAe,CAAC,WAAW,EAAE,uBAAuB,CAAC,CAAC;QAEzE,MAAM,UAAU,GAAG,iBAAiB,CAAC,UAAU,CAAC;YAC9C,CAAC,CAAC,+BAA+B,CAAC,EAAE,UAAU,EAAE,MAAM,EAAE,uBAAuB,EAAE,CAAC;YAClF,CAAC,CAAC,qCAAqC,CAAC,UAAU,CAAC,CAAC;QAEtD,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,SAAS,CAAC,UAAU,CAAC,CAAC;QAC5C,IAAI,CAAC,QAAQ,GAAG,mBAAmB,CAAC;YAClC,WAAW,EAAE,yBAAyB;YACtC,cAAc,EAAE,WAAW;YAC3B,SAAS,EAAE,6BAA6B;SACzC,CAAC,CAAC;IACL,CAAC;IA6UD,iBAAiB;IACV,KAAK,CAAC,OAAO,CAClB,UAAsB,EACtB,SAAoE,EACpE,8BAEwE,EACxE,OAA4E;QAE5E,IAAI,WAA+E,CAAC;QAEpF,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YAC3B,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;QAC3D,CAAC;QAED,IAAI,UAA0D,CAAC;QAC/D,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE,CAAC;YAC7B,IAAI,UAAU,KAAK,mBAAmB,EAAE,CAAC;gBACvC,UAAU,GAAG,+BAA+B,CAC1C,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,kBAAkB,CAC5B,CAAC;YACJ,CAAC;iBAAM,CAAC;gBACN,UAAU,GAAG,0BAA0B,CACrC,SAAS,EACT,OAAO,8BAA8B,KAAK,QAAQ;oBAChD,CAAC,CAAC,8BAA8B;oBAChC,CAAC,CAAC,IAAI,CAAC,eAAe,CACzB,CAAC;YACJ,CAAC;YACD,WAAW,GAAG,OAAO,IAAK,EAAU,CAAC;QACvC,CAAC;aAAM,CAAC;YACN,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW;gBACR,8BAC8B,IAAI,EAAE,CAAC;QAC1C,CAAC;QACD,MAAM,EAAE,OAAO,EAAE,gBAAgB,EAAE,IAAI,EAAE,MAAM,EAAE,GAAG,mBAAmB,CAAC,WAAW,CAAC,CAAC;QACrF,OAAO,IAAI,CAAC,QAAQ,CAAC,QAAQ,CAC3B,4BAA4B,EAC5B,gBAAgB,EAChB,KAAK,EAAE,cAA4C,EAAE,EAAE,CACrD,UAAU,CACR,IAAI,CAAC,OAAO;aACT,OAAO,CACN;YACE,IAAI,EAAE,UAAU;YAChB,aAAa,EAAE;gBACb,SAAS,EAAE,UAAU;aACtB;YACD,UAAU,EAAE,MAAM;SACZ,EACR,cAAc,CACf;aACA,IAAI,CACH,CAAC,MAAM,EAAE,EAAE,CACT,qBAAqB,CACnB,UAAU,EACV,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,EAC9B,MAAM,CACsB,CACjC,CACJ,CACJ,CAAC;IACJ,CAAC;IAoQD,iBAAiB;IACjB,KAAK,CAAC,iBAAiB,CACrB,OAA6B,EAC7B,SAAyC,EACzC,iBAAqD,EACrD,UAAoC,EAAE;QAEtC,IAAI,WAAqC,CAAC;QAC1C,IAAI,UAA+B,CAAC;QAEpC,IAAI,CAAC,KAAK,CAAC,OAAO,CAAC,SAAS,CAAC,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACxD,MAAM,IAAI,KAAK,CAAC,uCAAuC,CAAC,CAAC;QAC3D,CAAC;QAED,IAAI,aAAa,CAAC,SAAS,CAAC,EAAE,CAAC;YAC7B,MAAM,YAAY,GAAI,iBAA4B,IAAI,IAAI,CAAC,eAAe,CAAC;YAC3E,UAAU,GAAG,0BAA0B,CAAC,SAAS,EAAE,YAAY,CAAC,CAAC;YACjE,WAAW,GAAG,OAAO,CAAC;QACxB,CAAC;aAAM,CAAC;YACN,UAAU,GAAG,SAAS,CAAC;YACvB,WAAW,GAAG,iBAA6C,CAAC;QAC9D,CAAC;QACD,MAAM,WAAW,GAAG,OAAO,CAAC,GAAG,CAC7B,CAAC,EAAE,IAAI,EAAE,UAAU
,EAAE,GAAG,IAAI,EAAE,EAAqD,EAAE,CAAC,CAAC;YACrF,IAAI;YACJ,UAAU;YACV,UAAU,EAAE,IAAI;SACjB,CAAC,CACH,CAAC;QACF,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,WAAW,EAAE,GAAG,IAAI,EAAE,GAAG,WAAW,CAAC;QACpF,MAAM,GAAG,GAAG,qBAAqB,CAAC;YAChC,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,aAAa,EAAE,IAAI;YACnB,SAAS,EAAE,UAAU;YACrB,qBAAqB,EAAE,EAAE,WAAW,EAAE;YACtC,kBAAkB,EAAE,EAAE,iBAAiB,EAAE;YACzC,KAAK,EAAE,WAAW;YAClB,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,MAAM,GAAG,UAAU,CAAC,GAAG,CAAC,CAAC,EAAE,EAAE,EAAE,EAAE,EAAE,CAAC,EAAE,CAAC,CAAC;QAE9C,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,EAAE,EAAE,GAAG,IAAI,EAAE,iBAAiB,EAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,CAAC,MAAM,CAAC;YAC7C,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;IAgBD,iBAAiB;IACjB,KAAK,CAAC,yBAAyB,CAC7B,eAAuB,EACvB,UAA4C,EAAE;QAE9C,MAAM,EAAE,iBAAiB,EAAE,kBAAkB,EAAE,GAAG,IAAI,EAAE,GAAG,OAAO,CAAC;QACnE,MAAM,MAAM,GAAG,kBAAkB,CAAC,eAAe,CAAC,CAAC;QACnD,MAAM,GAAG,GAAG,iCAAiC,CAAC;YAC5C,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO,EAAE,EAAE,GAAG,IAAI,EAAE,iBAAiB,EAAE;YACvC,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;QAEH,MAAM,KAAK,GAAG,EAAE,iBAAiB,EAAE,EAAE,EAAE,CAAC;QAExC,MAAM,MAAM,GAAG,MAAM,gBAAgB,CAAC,GAAG,EAAE;YACzC,YAAY,EAAE,kBAAkB;YAChC,WAAW,EAAE,eAAe;YAC5B,aAAa,EAAE,oBAAoB,CAAC;gBAClC,MAAM,EAAE,IAAI,CAAC,OAAO;gBACpB,OAAO,EAAE,IAAI,CAAC,QAAQ;gBACtB,MAAM;gBACN,SAAS,EAAE,EAAE,GAAG,IAAI,EAAE,iBAAiB,EAAE;gBACzC,KAAK;aACN,CAAC;YACF,WAAW,EAAE,wBAAwB,EAAE;YACvC,qBAAqB,CAAC,iBAAyB;gBAC7C,KAAK,CAAC,iBAAiB,GAAG,iBAAiB,CAAC;YAC9C,CAAC;SACF,CAAC,CAAC;QAEH,MAAM,MAAM,CAAC,IAAI,EAAE,CAAC;QACpB,MAAM,EAAE,GAAG,MAAM,CAAC,iBAAiB,EAAE,CAAC,EAAE,CAAC;QACzC,OAAO,4BAA4B,CAAC;YAClC,EAAE;YACF,MAAM,EAAE,IAAI,CAAC,OAAO;YACpB,OAAO;YACP,MAAM;YACN,OAAO,EAAE,IAAI,CAAC,QAAQ;SACvB,CAAC,CAAC;IACL,CAAC;CACF","sourcesContent":["// Copyright (c) Microsoft Corporation.\n// Licensed under the MIT License.\n\nimport type {\n AnalyzeActionName,\n AnalyzeActionParameters,\n AnalyzeBatchAction,\n AnalyzeBatchPoller,\n AnalyzeResult,\n BeginAnalyzeBatchOptions,\n RestoreAnalyzeBatchPollerOptions,\n TextAnalysisClientOptions,\n TextAnalysisOperationOptions,\n} from \"./models.js\";\nimport type {\n AnalyzeBatchActionUnion,\n GeneratedClientOptionalParams,\n LanguageDetectionInput,\n TextDocumentInput,\n} from \"./generated/models/index.js\";\nimport { DEFAULT_COGNITIVE_SCOPE, SDK_VERSION } from \"./constants.js\";\nimport type { KeyCredential, TokenCredential } from \"@azure/core-auth\";\nimport { isTokenCredential } from \"@azure/core-auth\";\nimport type { TracingClient } from \"@azure/core-tracing\";\nimport { createTracingClient } from \"@azure/core-tracing\";\nimport {\n convertToLanguageDetectionInput,\n convertToTextDocumentInput,\n getOperationOptions,\n isStringArray,\n} from \"./util.js\";\nimport {\n createAnalyzeBatchLro,\n createCreateAnalyzeBatchPollerLro,\n createPollerWithCancellation,\n createUpdateAnalyzeState,\n getDocIDsFromState,\n processAnalyzeResult,\n} from \"./lro.js\";\nimport { throwError, transformActionResult } from \"./transforms.js\";\nimport { GeneratedClient } from \"./generated/generatedClient.js\";\nimport { 
bearerTokenAuthenticationPolicy } from \"@azure/core-rest-pipeline\";\nimport { createHttpPoller } from \"@azure/core-lro\";\nimport { logger } from \"./logger.js\";\nimport { textAnalyticsAzureKeyCredentialPolicy } from \"./azureKeyCredentialPolicy.js\";\n\n/**\n * A client for interacting with the text analysis features in Azure Cognitive\n * Language Service.\n *\n * The client needs the endpoint of a Language resource and an authentication\n * method such as an API key or AAD. The API key and endpoint can be found in\n * the Language resource page in the Azure portal. They will be located in the\n * resource's Keys and Endpoint page, under Resource Management.\n *\n * ### Examples for authentication:\n *\n * #### API Key\n *\n * ```ts snippet:ReadmeSampleCreateClient_Key\n * import { AzureKeyCredential, TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * #### Azure Active Directory\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```ts snippet:ReadmeSampleCreateClient_ActiveDirectory\n * import { DefaultAzureCredential } from \"@azure/identity\";\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n */\nexport class TextAnalysisClient {\n private readonly _client: GeneratedClient;\n private readonly _tracing: TracingClient;\n private readonly defaultCountryHint: string;\n private readonly defaultLanguage: string;\n\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * ```ts snippet:ReadmeSampleCreateClient_Key\n * import { AzureKeyCredential, TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new AzureKeyCredential(\"<api key>\");\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Key credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(endpointUrl: string, credential: KeyCredential, options?: TextAnalysisClientOptions);\n /**\n * Creates an instance of TextAnalysisClient with the endpoint of a Language\n * resource and an authentication method such as an API key or AAD.\n *\n * The API key and endpoint can be found in the Language resource page in the\n * Azure portal. 
They will be located in the resource's Keys and Endpoint page,\n * under Resource Management.\n *\n * ### Example\n *\n * See the [`@azure/identity`](https://npmjs.com/package/\\@azure/identity)\n * package for more information about authenticating with Azure Active Directory.\n *\n * ```ts snippet:ReadmeSampleCreateClient_ActiveDirectory\n * import { DefaultAzureCredential } from \"@azure/identity\";\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n *\n * const endpoint = \"https://<resource name>.cognitiveservices.azure.com\";\n * const credential = new DefaultAzureCredential();\n * const client = new TextAnalysisClient(endpoint, credential);\n * ```\n *\n * @param endpointUrl - The URL to the endpoint of a Cognitive Language Service resource\n * @param credential - Token credential to be used to authenticate requests to the service.\n * @param options - Used to configure the TextAnalytics client.\n */\n constructor(\n endpointUrl: string,\n credential: TokenCredential,\n options?: TextAnalysisClientOptions,\n );\n constructor(\n endpointUrl: string,\n credential: TokenCredential | KeyCredential,\n options: TextAnalysisClientOptions = {},\n ) {\n const {\n defaultCountryHint = \"us\",\n defaultLanguage = \"en\",\n serviceVersion,\n ...pipelineOptions\n } = options;\n this.defaultCountryHint = defaultCountryHint;\n this.defaultLanguage = defaultLanguage;\n\n const internalPipelineOptions: GeneratedClientOptionalParams = {\n ...pipelineOptions,\n ...{\n loggingOptions: {\n logger: logger.info,\n additionalAllowedHeaderNames: [\"x-ms-correlation-request-id\", \"x-ms-request-id\"],\n },\n },\n apiVersion: serviceVersion,\n };\n\n this._client = new GeneratedClient(endpointUrl, internalPipelineOptions);\n\n const authPolicy = isTokenCredential(credential)\n ? bearerTokenAuthenticationPolicy({ credential, scopes: DEFAULT_COGNITIVE_SCOPE })\n : textAnalyticsAzureKeyCredentialPolicy(credential);\n\n this._client.pipeline.addPolicy(authPolicy);\n this._tracing = createTracingClient({\n packageName: \"@azure/ai-language-text\",\n packageVersion: SDK_VERSION,\n namespace: \"Microsoft.CognitiveServices\",\n });\n }\n\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 
120 languages are supported.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```ts snippet:Sample_LanguageDetection\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"This document is written in English.\",\n * \"Este es un document escrito en Español.\",\n * \"这是一个用中文写的文件\",\n * \"Dies ist ein Dokument in deutsche Sprache.\",\n * \"Detta är ett dokument skrivet på engelska.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const result = await client.analyze(\"LanguageDetection\", documents, \"us\", {\n * modelVersion: \"2022-04-10-preview\",\n * });\n *\n * for (const doc of result) {\n * if (!doc.error) {\n * console.log(\n * `Primary language: ${doc.primaryLanguage.name} (iso6391 name: ${doc.primaryLanguage.iso6391Name})`,\n * );\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: LanguageDetectionInput[],\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to determine the language that the passed-in\n * input strings are written in, and returns, for each one, the detected\n * language as well as a score indicating the model's confidence that the\n * inferred language is correct. Scores close to 1 indicate high certainty in\n * the result. 
120 languages are supported.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Language detection\n *\n * ```ts snippet:Sample_LanguageDetection\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"This document is written in English.\",\n * \"Este es un document escrito en Español.\",\n * \"这是一个用中文写的文件\",\n * \"Dies ist ein Dokument in deutsche Sprache.\",\n * \"Detta är ett dokument skrivet på engelska.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const result = await client.analyze(\"LanguageDetection\", documents, \"us\", {\n * modelVersion: \"2022-04-10-preview\",\n * });\n *\n * for (const doc of result) {\n * if (!doc.error) {\n * console.log(\n * `Primary language: ${doc.primaryLanguage.name} (iso6391 name: ${doc.primaryLanguage.iso6391Name})`,\n * );\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/language-detection/overview}\n * for more information on language detection.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param countryHint - Indicates the country of origin for all of\n * the input strings to assist the model in predicting the language they are\n * written in. If unspecified, this value will be set to the default\n * country hint in `TextAnalysisClientOptions`. If set to an empty string,\n * or the string \"none\", the service will apply a model where the country is\n * explicitly unset. The same country hint is applied to all strings in the\n * input collection.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results where each element contains the primary language\n * for the corresponding input document.\n */\n public async analyze<ActionName extends \"LanguageDetection\">(\n actionName: ActionName,\n documents: string[],\n countryHint?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n /**\n * Runs a predictive model to perform the action of choice on the input\n * documents. See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```ts snippet:Sample_SentimentAnalysis\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"I had the best day of my life.\",\n * \"This was a waste of my time. 
The speaker put me to sleep.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const results = await client.analyze(\"SentimentAnalysis\", documents);\n *\n * for (let i = 0; i < results.length; i++) {\n * const result = results[i];\n * console.log(`- Document ${result.id}`);\n * if (!result.error) {\n * console.log(`\\tDocument text: ${documents[i]}`);\n * console.log(`\\tOverall Sentiment: ${result.sentiment}`);\n * console.log(\"\\tSentiment confidence scores: \", result.confidenceScores);\n * console.log(\"\\tSentences\");\n * for (const { sentiment, confidenceScores, text } of result.sentences) {\n * console.log(`\\t- Sentence text: ${text}`);\n * console.log(`\\t Sentence sentiment: ${sentiment}`);\n * console.log(\"\\t Confidence scores:\", confidenceScores);\n * }\n * } else {\n * console.error(` Error: ${result.error}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```ts snippet:Sample_PIIEntityRecognition\n * import {\n * TextAnalysisClient,\n * KnownPiiEntityDomain,\n * KnownPiiEntityCategory,\n * } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const documents = [\"My phone number is 555-5555\"];\n *\n * const [result] = await client.analyze(\"PiiEntityRecognition\", documents, \"en\", {\n * domainFilter: KnownPiiEntityDomain.Phi,\n * categoriesFilter: [\n * KnownPiiEntityCategory.PhoneNumber,\n * KnownPiiEntityCategory.USSocialSecurityNumber,\n * ],\n * });\n *\n * if (!result.error) {\n * console.log(`Redacted text: \"${result.redactedText}\"`);\n * console.log(\"Pii Entities: \");\n * for (const entity of result.entities) {\n * console.log(`\\t- \"${entity.text}\" of type ${entity.category}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: TextDocumentInput[],\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n\n /**\n * Runs a predictive model to perform the action of choice on the input\n * strings. 
See ${@link AnalyzeActionName} for a list of supported\n * actions.\n *\n * The layout of each item in the results array depends on the action chosen.\n * For example, each PIIEntityRecognition document result consists of both\n * `entities` and `redactedText` where the former is a list of all Pii entities\n * in the text and the latter is the original text after all such Pii entities\n * have been redacted from it.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Opinion mining\n *\n * ```ts snippet:Sample_SentimentAnalysis\n * import { TextAnalysisClient } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"I had the best day of my life.\",\n * \"This was a waste of my time. The speaker put me to sleep.\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const results = await client.analyze(\"SentimentAnalysis\", documents);\n *\n * for (let i = 0; i < results.length; i++) {\n * const result = results[i];\n * console.log(`- Document ${result.id}`);\n * if (!result.error) {\n * console.log(`\\tDocument text: ${documents[i]}`);\n * console.log(`\\tOverall Sentiment: ${result.sentiment}`);\n * console.log(\"\\tSentiment confidence scores: \", result.confidenceScores);\n * console.log(\"\\tSentences\");\n * for (const { sentiment, confidenceScores, text } of result.sentences) {\n * console.log(`\\t- Sentence text: ${text}`);\n * console.log(`\\t Sentence sentiment: ${sentiment}`);\n * console.log(\"\\t Confidence scores:\", confidenceScores);\n * }\n * } else {\n * console.error(` Error: ${result.error}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/sentiment-opinion-mining/overview}\n * for more information on opinion mining.\n *\n * #### Personally identifiable information\n *\n * ```ts snippet:Sample_PIIEntityRecognition\n * import {\n * TextAnalysisClient,\n * KnownPiiEntityDomain,\n * KnownPiiEntityCategory,\n * } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const documents = [\"My phone number is 555-5555\"];\n *\n * const [result] = await client.analyze(\"PiiEntityRecognition\", documents, \"en\", {\n * domainFilter: KnownPiiEntityDomain.Phi,\n * categoriesFilter: [\n * KnownPiiEntityCategory.PhoneNumber,\n * KnownPiiEntityCategory.USSocialSecurityNumber,\n * ],\n * });\n *\n * if (!result.error) {\n * console.log(`Redacted text: \"${result.redactedText}\"`);\n * console.log(\"Pii Entities: \");\n * for (const entity of result.entities) {\n * console.log(`\\t- \"${entity.text}\" of type ${entity.category}`);\n * }\n * }\n * ```\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/personally-identifiable-information/overview}\n * for more information on personally identifiable information.\n *\n * @param actionName - the name of the action to be performed on the input\n * documents, see ${@link AnalyzeActionName}\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. 
If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://learn.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text.\n * @param options - optional action parameters and settings for the operation\n *\n * @returns an array of results corresponding to the input documents\n */\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[],\n languageCode?: string,\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>>;\n // implementation\n public async analyze<ActionName extends AnalyzeActionName = AnalyzeActionName>(\n actionName: ActionName,\n documents: string[] | LanguageDetectionInput[] | TextDocumentInput[],\n languageOrCountryHintOrOptions?:\n | string\n | (AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions),\n options?: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions,\n ): Promise<AnalyzeResult<ActionName>> {\n let realOptions: AnalyzeActionParameters<ActionName> & TextAnalysisOperationOptions;\n\n if (documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n let realInputs: LanguageDetectionInput[] | TextDocumentInput[];\n if (isStringArray(documents)) {\n if (actionName === \"LanguageDetection\") {\n realInputs = convertToLanguageDetectionInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultCountryHint,\n );\n } else {\n realInputs = convertToTextDocumentInput(\n documents,\n typeof languageOrCountryHintOrOptions === \"string\"\n ? languageOrCountryHintOrOptions\n : this.defaultLanguage,\n );\n }\n realOptions = options || ({} as any);\n } else {\n realInputs = documents;\n realOptions =\n (languageOrCountryHintOrOptions as AnalyzeActionParameters<ActionName> &\n TextAnalysisOperationOptions) || {};\n }\n const { options: operationOptions, rest: action } = getOperationOptions(realOptions);\n return this._tracing.withSpan(\n \"TextAnalysisClient.analyze\",\n operationOptions,\n async (updatedOptions: TextAnalysisOperationOptions) =>\n throwError(\n this._client\n .analyze(\n {\n kind: actionName,\n analysisInput: {\n documents: realInputs,\n },\n parameters: action,\n } as any,\n updatedOptions,\n )\n .then(\n (result) =>\n transformActionResult(\n actionName,\n realInputs.map(({ id }) => id),\n result,\n ) as AnalyzeResult<ActionName>,\n ),\n ),\n );\n }\n\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. 
In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Key phrase extraction and Pii entity recognition\n *\n * ```ts snippet:Sample_ActionBatching\n * import { TextAnalysisClient, AnalyzeBatchAction } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"Microsoft was founded by Bill Gates and Paul Allen.\",\n * \"Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.\",\n * \"I need to take my cat to the veterinarian.\",\n * \"The employee's SSN is 555-55-5555.\",\n * \"We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their online menu at www.contososteakhouse.com, call 312-555-0176 or send email to order@contososteakhouse.com! The only complaint I have is the food didn't come fast enough. Overall I highly recommend it!\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const actions: AnalyzeBatchAction[] = [\n * {\n * kind: \"EntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"PiiEntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"KeyPhraseExtraction\",\n * modelVersion: \"latest\",\n * },\n * ];\n * const poller = await client.beginAnalyzeBatch(actions, documents, \"en\");\n *\n * poller.onProgress(() => {\n * console.log(\n * `Number of actions still in progress: ${poller.getOperationState().actionInProgressCount}`,\n * );\n * });\n *\n * console.log(`The operation was created on ${poller.getOperationState().createdOn}`);\n *\n * console.log(`The operation results will expire on ${poller.getOperationState().expiresOn}`);\n *\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * const { code, message } = actionResult.error;\n * throw new Error(`Unexpected error (${code}): ${message}`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tKey phrases:\");\n * for (const phrase of doc.keyPhrases) {\n * console.log(`\\t- ${phrase}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"EntityRecognition\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tEntities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for 
(const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tPii Entities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * default: {\n * throw new Error(`Unexpected action results: ${actionResult.kind}`);\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param languageCode - the code of the language that all the input strings are\n * written in. If unspecified, this value will be set to the default\n * language in `TextAnalysisClientOptions`. If set to an empty string,\n * the service will apply a model where the language is explicitly set to\n * \"None\". Language support varies per action, for example, more information\n * about the languages supported for Entity Recognition actions can be\n * found in {@link https://learn.microsoft.com//azure/cognitive-services/language-service/named-entity-recognition/language-support}.\n * If set to \"auto\", the service will automatically infer the language from\n * the input text.\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: string[],\n languageCode?: string,\n options?: BeginAnalyzeBatchOptions,\n ): Promise<AnalyzeBatchPoller>;\n /**\n * Performs an array (batch) of actions on the input documents. Each action has\n * a `kind` field that specifies the nature of the action. See ${@link AnalyzeBatchActionNames}\n * for a list of supported actions. In addition to `kind`, actions could also\n * have other parameters such as `disableServiceLogs` and `modelVersion`.\n *\n * The results array contains the results for those input actions where each\n * item also has a `kind` field that specifies the type of the results.\n *\n * See {@link https://learn.microsoft.com//azure/cognitive-services/language-service/concepts/data-limits}\n * for data limits.\n *\n * ### Examples\n *\n * #### Keyphrase extraction and Pii entity recognition\n *\n * ```ts snippet:Sample_ActionBatching\n * import { TextAnalysisClient, AnalyzeBatchAction } from \"@azure/ai-language-text\";\n * import { DefaultAzureCredential } from \"@azure/identity\";\n *\n * const documents = [\n * \"Microsoft was founded by Bill Gates and Paul Allen.\",\n * \"Redmond is a city in King County, Washington, United States, located 15 miles east of Seattle.\",\n * \"I need to take my cat to the veterinarian.\",\n * \"The employee's SSN is 555-55-5555.\",\n * \"We went to Contoso Steakhouse located at midtown NYC last week for a dinner party, and we adore the spot! They provide marvelous food and they have a great menu. The chief cook happens to be the owner (I think his name is John Doe) and he is super nice, coming out of the kitchen and greeted us all. We enjoyed very much dining in the place! The Sirloin steak I ordered was tender and juicy, and the place was impeccably clean. You can even pre-order from their online menu at www.contososteakhouse.com, call 312-555-0176 or send email to order@contososteakhouse.com! The only complaint I have is the food didn't come fast enough. 
Overall I highly recommend it!\",\n * ];\n *\n * const client = new TextAnalysisClient(\"<endpoint>\", new DefaultAzureCredential());\n *\n * const actions: AnalyzeBatchAction[] = [\n * {\n * kind: \"EntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"PiiEntityRecognition\",\n * modelVersion: \"latest\",\n * },\n * {\n * kind: \"KeyPhraseExtraction\",\n * modelVersion: \"latest\",\n * },\n * ];\n * const poller = await client.beginAnalyzeBatch(actions, documents, \"en\");\n *\n * poller.onProgress(() => {\n * console.log(\n * `Number of actions still in progress: ${poller.getOperationState().actionInProgressCount}`,\n * );\n * });\n *\n * console.log(`The operation was created on ${poller.getOperationState().createdOn}`);\n *\n * console.log(`The operation results will expire on ${poller.getOperationState().expiresOn}`);\n *\n * const actionResults = await poller.pollUntilDone();\n *\n * for await (const actionResult of actionResults) {\n * if (actionResult.error) {\n * const { code, message } = actionResult.error;\n * throw new Error(`Unexpected error (${code}): ${message}`);\n * }\n * switch (actionResult.kind) {\n * case \"KeyPhraseExtraction\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tKey phrases:\");\n * for (const phrase of doc.keyPhrases) {\n * console.log(`\\t- ${phrase}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"EntityRecognition\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tEntities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * case \"PiiEntityRecognition\": {\n * for (const doc of actionResult.results) {\n * console.log(`- Document ${doc.id}`);\n * if (!doc.error) {\n * console.log(\"\\tPii Entities:\");\n * for (const entity of doc.entities) {\n * console.log(`\\t- Entity ${entity.text} of type ${entity.category}`);\n * }\n * } else {\n * console.error(\"\\tError:\", doc.error);\n * }\n * }\n * break;\n * }\n * default: {\n * throw new Error(`Unexpected action results: ${actionResult.kind}`);\n * }\n * }\n * }\n * ```\n *\n * @param actions - an array of actions that will be run on the input documents\n * @param documents - the input documents to be analyzed\n * @param options - optional settings for the operation\n *\n * @returns an array of results corresponding to the input actions\n */\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[],\n options?: BeginAnalyzeBatchOptions,\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async beginAnalyzeBatch(\n actions: AnalyzeBatchAction[],\n documents: TextDocumentInput[] | string[],\n languageOrOptions?: BeginAnalyzeBatchOptions | string,\n options: BeginAnalyzeBatchOptions = {},\n ): Promise<AnalyzeBatchPoller> {\n let realOptions: BeginAnalyzeBatchOptions;\n let realInputs: TextDocumentInput[];\n\n if (!Array.isArray(documents) || documents.length === 0) {\n throw new Error(\"'documents' must be a non-empty array\");\n }\n\n if (isStringArray(documents)) {\n const languageCode = (languageOrOptions as string) ?? 
this.defaultLanguage;\n realInputs = convertToTextDocumentInput(documents, languageCode);\n realOptions = options;\n } else {\n realInputs = documents;\n realOptions = languageOrOptions as BeginAnalyzeBatchOptions;\n }\n const realActions = actions.map(\n ({ kind, actionName, ...rest }): AnalyzeBatchActionUnion & { parameters: unknown } => ({\n kind,\n actionName,\n parameters: rest,\n }),\n );\n const { includeStatistics, updateIntervalInMs, displayName, ...rest } = realOptions;\n const lro = createAnalyzeBatchLro({\n client: this._client,\n commonOptions: rest,\n documents: realInputs,\n initialRequestOptions: { displayName },\n pollRequestOptions: { includeStatistics },\n tasks: realActions,\n tracing: this._tracing,\n });\n\n const docIds = realInputs.map(({ id }) => id);\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(docIds),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n\n /**\n * Creates a poller from the serialized state of another poller. This can be\n * useful when you want to create pollers on a different host or a poller\n * needs to be constructed after the original one is not in scope.\n *\n * @param serializedState - the serialized state of another poller. It is the\n * result of `poller.toString()`\n * @param options - optional settings for the operation\n *\n */\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options?: RestoreAnalyzeBatchPollerOptions,\n ): Promise<AnalyzeBatchPoller>;\n // implementation\n async restoreAnalyzeBatchPoller(\n serializedState: string,\n options: RestoreAnalyzeBatchPollerOptions = {},\n ): Promise<AnalyzeBatchPoller> {\n const { includeStatistics, updateIntervalInMs, ...rest } = options;\n const docIds = getDocIDsFromState(serializedState);\n const lro = createCreateAnalyzeBatchPollerLro({\n client: this._client,\n options: { ...rest, includeStatistics },\n tracing: this._tracing,\n });\n\n const state = { continuationToken: \"\" };\n\n const poller = await createHttpPoller(lro, {\n intervalInMs: updateIntervalInMs,\n restoreFrom: serializedState,\n processResult: processAnalyzeResult({\n client: this._client,\n tracing: this._tracing,\n docIds,\n opOptions: { ...rest, includeStatistics },\n state,\n }),\n updateState: createUpdateAnalyzeState(),\n withOperationLocation(operationLocation: string) {\n state.continuationToken = operationLocation;\n },\n });\n\n await poller.poll();\n const id = poller.getOperationState().id;\n return createPollerWithCancellation({\n id,\n client: this._client,\n options,\n poller,\n tracing: this._tracing,\n });\n }\n}\n"]}
@@ -1,6 +1,5 @@
  // Copyright (c) Microsoft Corporation.
  // Licensed under the MIT License.
- import { __rest } from "tslib";
  import { extractErrorPointerIndex, parseAssessmentIndex, parseHealthcareEntityIndex, sortResponseIdObjects, } from "./util.js";
  import { RestError } from "@azure/core-rest-pipeline";
  /**
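The hunk above drops the `import { __rest } from "tslib"` line because the rebuilt output no longer relies on tslib's down-level helpers; the remaining hunks swap `__rest`/`Object.assign` call chains for native object rest/spread. A minimal before/after sketch of that equivalence, using illustrative `shapeOld`/`shapeNew` functions that are not part of the package (and assuming `tslib` is installed):

```ts
import { __rest } from "tslib";

// Down-leveled form, as the previous build emitted it:
function shapeOld(doc: { detectedLanguage: string; id: string }) {
  const { detectedLanguage } = doc,
    rest = __rest(doc, ["detectedLanguage"]);
  return Object.assign({ primaryLanguage: detectedLanguage }, rest);
}

// Native object rest/spread, matching the updated output:
function shapeNew({ detectedLanguage, ...rest }: { detectedLanguage: string; id: string }) {
  return { primaryLanguage: detectedLanguage, ...rest };
}
```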
@@ -12,7 +11,9 @@ function toTextAnalysisError(errorModel) {
  if (errorModel.innererror !== undefined) {
  return toTextAnalysisError(errorModel.innererror);
  }
- return Object.assign({}, errorModel);
+ return {
+ ...errorModel,
+ };
  }
  function makeTextAnalysisErrorResult(id, error) {
  return {
@@ -37,10 +38,10 @@ function transformDocumentResults(ids, response, options) {
  }
  function toLanguageDetectionResult(docIds, results) {
  return transformDocumentResults(docIds, results, {
- processSuccess: (_a) => {
- var { detectedLanguage } = _a, rest = __rest(_a, ["detectedLanguage"]);
- return (Object.assign({ primaryLanguage: detectedLanguage }, rest));
- },
+ processSuccess: ({ detectedLanguage, ...rest }) => ({
+ primaryLanguage: detectedLanguage,
+ ...rest,
+ }),
  });
  }
  function toPiiEntityRecognitionResult(docIds, results) {
@@ -48,10 +49,10 @@ function toPiiEntityRecognitionResult(docIds, results) {
  }
  function toSentimentAnalysisResult(docIds, results) {
  return transformDocumentResults(docIds, results, {
- processSuccess: (_a) => {
- var { sentences } = _a, rest = __rest(_a, ["sentences"]);
- return (Object.assign(Object.assign({}, rest), { sentences: sentences.map((sentence) => convertGeneratedSentenceSentiment(sentence, sentences)) }));
- },
+ processSuccess: ({ sentences, ...rest }) => ({
+ ...rest,
+ sentences: sentences.map((sentence) => convertGeneratedSentenceSentiment(sentence, sentences)),
+ }),
  });
  }
  /**
@@ -63,20 +64,18 @@ function toSentimentAnalysisResult(docIds, results) {
  * @returns The user-friendly sentence sentiment object.
  * @internal
  */
- function convertGeneratedSentenceSentiment(_a, sentences) {
- var _b;
- var { targets, assessments: _ } = _a, rest = __rest(_a, ["targets", "assessments"]);
- return Object.assign(Object.assign({}, rest), { opinions: (_b = targets === null || targets === void 0 ? void 0 : targets.map(
+ function convertGeneratedSentenceSentiment({ targets, assessments: _, ...rest }, sentences) {
+ return {
+ ...rest,
+ opinions: targets?.map(
  // eslint-disable-next-line @typescript-eslint/no-shadow
- (_a) => {
- var { relations } = _a, rest = __rest(_a, ["relations"]);
- return ({
- target: rest,
- assessments: relations
- .filter((relation) => relation.relationType === "assessment")
- .map((relation) => convertTargetRelationToAssessmentSentiment(relation, sentences)),
- });
- })) !== null && _b !== void 0 ? _b : [] });
+ ({ relations, ...rest }) => ({
+ target: rest,
+ assessments: relations
+ .filter((relation) => relation.relationType === "assessment")
+ .map((relation) => convertTargetRelationToAssessmentSentiment(relation, sentences)),
+ })) ?? [],
+ };
  }
  /**
  * Converts a target relation object returned by the service to an assessment
@@ -89,10 +88,9 @@ function convertGeneratedSentenceSentiment(_a, sentences) {
  * @internal
  */
  function convertTargetRelationToAssessmentSentiment(targetRelation, sentences) {
- var _a;
  const assessmentPtr = targetRelation.ref;
  const assessmentIndex = parseAssessmentIndex(assessmentPtr);
- const assessment = (_a = sentences === null || sentences === void 0 ? void 0 : sentences[assessmentIndex.sentence].assessments) === null || _a === void 0 ? void 0 : _a[assessmentIndex.assessment];
+ const assessment = sentences?.[assessmentIndex.sentence].assessments?.[assessmentIndex.assessment];
  if (assessment !== undefined) {
  return assessment;
  }
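The two hunks above also collapse the hand-expanded `=== null || === void 0` guards into optional chaining and nullish coalescing. A small self-contained sketch of the equivalence, with illustrative `Sentence` and `idx` names that are not taken from the package:

```ts
interface Sentence {
  assessments?: string[];
}

const sentences: Sentence[] | null | undefined = [{ assessments: ["great service", "slow delivery"] }];
const idx = { sentence: 0, assessment: 1 };

// Down-leveled form spelled out with a temporary, as in the old output:
let _a;
const before =
  (_a = sentences === null || sentences === void 0 ? void 0 : sentences[idx.sentence].assessments) === null ||
  _a === void 0
    ? void 0
    : _a[idx.assessment];

// Native optional chaining, as in the new output; both evaluate to "slow delivery" here:
const after = sentences?.[idx.sentence].assessments?.[idx.assessment];
console.log(before, after);
```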
@@ -151,12 +149,11 @@ function appendReadableErrorMessage(currentMessage, innerMessage) {
  * @param error - the incoming error
  */
  function transformError(errorResponse) {
- var _a;
  const strongErrorResponse = errorResponse;
  if (!strongErrorResponse.response) {
  throw errorResponse;
  }
- const topLevelError = (_a = strongErrorResponse.response.parsedBody) === null || _a === void 0 ? void 0 : _a.error;
+ const topLevelError = strongErrorResponse.response.parsedBody?.error;
  if (!topLevelError)
  return errorResponse;
  let errorMessage = topLevelError.message;
@@ -190,8 +187,11 @@ export async function throwError(p) {
  }
  function toHealthcareResult(docIds, results) {
  function makeHealthcareEntity(entity) {
- const { dataSources } = entity, rest = __rest(entity, ["dataSources"]);
- return Object.assign({ dataSources: dataSources !== null && dataSources !== void 0 ? dataSources : [] }, rest);
+ const { dataSources, ...rest } = entity;
+ return {
+ dataSources: dataSources ?? [],
+ ...rest,
+ };
  }
  function makeHealthcareRelation(entities) {
  return ({ entities: generatedEntities, relationType, confidenceScore, }) => ({
@@ -204,10 +204,13 @@ function toHealthcareResult(docIds, results) {
  });
  }
  return transformDocumentResults(docIds, results, {
- processSuccess: (_a) => {
- var { entities, relations } = _a, rest = __rest(_a, ["entities", "relations"]);
+ processSuccess: ({ entities, relations, ...rest }) => {
  const newEntities = entities.map(makeHealthcareEntity);
- return Object.assign({ entities: newEntities, entityRelations: relations.map(makeHealthcareRelation(newEntities)) }, rest);
+ return {
+ entities: newEntities,
+ entityRelations: relations.map(makeHealthcareRelation(newEntities)),
+ ...rest,
+ };
  },
  });
  }
@@ -227,7 +230,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: toSentimentAnalysisResult(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind,
+ results: toSentimentAnalysisResult(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "EntityRecognitionLROResults": {
  const kind = "EntityRecognition";
@@ -236,7 +246,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind: "EntityRecognition", results: toEntityRecognitionResult(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind: "EntityRecognition",
+ results: toEntityRecognitionResult(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "PiiEntityRecognitionLROResults": {
  const kind = "PiiEntityRecognition";
@@ -245,7 +262,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: toPiiEntityRecognitionResult(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind,
+ results: toPiiEntityRecognitionResult(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "KeyPhraseExtractionLROResults": {
  const kind = "KeyPhraseExtraction";
@@ -254,7 +278,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: toKeyPhraseExtractionResult(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind,
+ results: toKeyPhraseExtractionResult(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "EntityLinkingLROResults": {
  const kind = "EntityLinking";
@@ -263,7 +294,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: toEntityLinkingResult(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind,
+ results: toEntityLinkingResult(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "HealthcareLROResults": {
  const kind = "Healthcare";
@@ -272,7 +310,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: toHealthcareResult(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind,
+ results: toHealthcareResult(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "CustomEntityRecognitionLROResults": {
  const kind = "CustomEntityRecognition";
@@ -281,8 +326,15 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { deploymentName, projectName, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: transformDocumentResults(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { deploymentName,
- projectName });
+ return {
+ kind,
+ results: transformDocumentResults(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ deploymentName,
+ projectName,
+ };
  }
  case "CustomSingleLabelClassificationLROResults": {
  const kind = "CustomSingleLabelClassification";
@@ -291,8 +343,15 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { deploymentName, projectName, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: transformDocumentResults(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { deploymentName,
- projectName });
+ return {
+ kind,
+ results: transformDocumentResults(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ deploymentName,
+ projectName,
+ };
  }
  case "CustomMultiLabelClassificationLROResults": {
  const kind = "CustomMultiLabelClassification";
@@ -301,8 +360,15 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { deploymentName, projectName, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind, results: transformDocumentResults(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { deploymentName,
- projectName });
+ return {
+ kind,
+ results: transformDocumentResults(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ deploymentName,
+ projectName,
+ };
  }
  case "ExtractiveSummarizationLROResults": {
  const kind = "ExtractiveSummarization";
@@ -311,7 +377,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind: "ExtractiveSummarization", results: transformDocumentResults(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind: "ExtractiveSummarization",
+ results: transformDocumentResults(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  case "AbstractiveSummarizationLROResults": {
  const kind = "AbstractiveSummarization";
@@ -320,7 +393,14 @@ export function transformAnalyzeBatchResults(docIds, response = [], errors = [])
  }
  const { results } = actionData;
  const { modelVersion, statistics } = results;
- return Object.assign(Object.assign(Object.assign({ kind: "AbstractiveSummarization", results: transformDocumentResults(docIds, results), completedOn }, (actionName ? { actionName } : {})), (statistics ? { statistics } : {})), { modelVersion });
+ return {
+ kind: "AbstractiveSummarization",
+ results: transformDocumentResults(docIds, results),
+ completedOn,
+ ...(actionName ? { actionName } : {}),
+ ...(statistics ? { statistics } : {}),
+ modelVersion,
+ };
  }
  default: {
  throw new Error(`Unsupported results kind: ${resultKind}`);
@@ -336,7 +416,7 @@ function toIndexErrorMap(errors) {
  const errorMap = new Map();
  for (const error of errors) {
  const position = extractErrorPointerIndex(error);
- const { target } = error, errorWithoutTarget = __rest(error, ["target"]);
+ const { target, ...errorWithoutTarget } = error;
  errorMap.set(position, toTextAnalysisError(errorWithoutTarget));
  }
  return errorMap;
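The `transformAnalyzeBatchResults` hunks above also rely on conditional spreads so that optional fields are only present when they have a value, matching what the old nested `Object.assign(..., actionName ? { actionName } : {})` chains did. A hedged sketch of that pattern, with an illustrative `buildResult` helper and invented field types that are not taken from the package:

```ts
interface BatchResultBase {
  kind: string;
  completedOn: Date;
  actionName?: string;
  statistics?: { transactionCount: number };
  modelVersion: string;
}

function buildResult(
  kind: string,
  completedOn: Date,
  modelVersion: string,
  actionName?: string,
  statistics?: { transactionCount: number },
): BatchResultBase {
  return {
    kind,
    completedOn,
    // Spread an empty object when the optional value is absent so the key is omitted entirely,
    // rather than being set to undefined.
    ...(actionName ? { actionName } : {}),
    ...(statistics ? { statistics } : {}),
    modelVersion,
  };
}

// Usage: the `statistics` key is simply absent when the argument is undefined.
const r = buildResult("KeyPhraseExtraction", new Date(), "latest");
console.log("statistics" in r); // false
```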