azure-ai-textanalytics 5.3.0b2__py3-none-any.whl → 6.0.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of azure-ai-textanalytics might be problematic.

Files changed (128)
  1. azure/ai/textanalytics/__init__.py +26 -193
  2. azure/ai/textanalytics/_client.py +111 -0
  3. azure/ai/textanalytics/_configuration.py +73 -0
  4. azure/ai/textanalytics/{_generated/v2022_05_01/operations → _operations}/__init__.py +13 -8
  5. azure/ai/textanalytics/_operations/_operations.py +716 -0
  6. azure/ai/textanalytics/{_generated/v2022_05_01/models → _operations}/_patch.py +8 -6
  7. azure/ai/textanalytics/_patch.py +350 -0
  8. azure/ai/textanalytics/{_generated/aio → _utils}/__init__.py +1 -5
  9. azure/ai/textanalytics/_utils/model_base.py +1237 -0
  10. azure/ai/textanalytics/{_generated/_serialization.py → _utils/serialization.py} +640 -616
  11. azure/ai/textanalytics/{_generated/v2022_05_01/aio/_vendor.py → _utils/utils.py} +10 -12
  12. azure/ai/textanalytics/_version.py +8 -7
  13. azure/ai/textanalytics/aio/__init__.py +25 -14
  14. azure/ai/textanalytics/aio/_client.py +115 -0
  15. azure/ai/textanalytics/aio/_configuration.py +75 -0
  16. azure/ai/textanalytics/{_generated/v2022_10_01_preview/aio/operations → aio/_operations}/__init__.py +13 -8
  17. azure/ai/textanalytics/aio/_operations/_operations.py +623 -0
  18. azure/ai/textanalytics/{_generated/v2022_05_01 → aio/_operations}/_patch.py +8 -6
  19. azure/ai/textanalytics/aio/_patch.py +344 -0
  20. azure/ai/textanalytics/models/__init__.py +402 -0
  21. azure/ai/textanalytics/models/_enums.py +1979 -0
  22. azure/ai/textanalytics/models/_models.py +6641 -0
  23. azure/ai/textanalytics/{_generated/v2022_05_01/aio → models}/_patch.py +8 -6
  24. azure/ai/textanalytics/py.typed +1 -0
  25. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/METADATA +668 -403
  26. azure_ai_textanalytics-6.0.0b1.dist-info/RECORD +29 -0
  27. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/WHEEL +1 -1
  28. azure/ai/textanalytics/_base_client.py +0 -113
  29. azure/ai/textanalytics/_check.py +0 -22
  30. azure/ai/textanalytics/_dict_mixin.py +0 -57
  31. azure/ai/textanalytics/_generated/__init__.py +0 -16
  32. azure/ai/textanalytics/_generated/_configuration.py +0 -70
  33. azure/ai/textanalytics/_generated/_operations_mixin.py +0 -795
  34. azure/ai/textanalytics/_generated/_text_analytics_client.py +0 -126
  35. azure/ai/textanalytics/_generated/_version.py +0 -8
  36. azure/ai/textanalytics/_generated/aio/_configuration.py +0 -66
  37. azure/ai/textanalytics/_generated/aio/_operations_mixin.py +0 -776
  38. azure/ai/textanalytics/_generated/aio/_text_analytics_client.py +0 -124
  39. azure/ai/textanalytics/_generated/models.py +0 -8
  40. azure/ai/textanalytics/_generated/v2022_05_01/__init__.py +0 -20
  41. azure/ai/textanalytics/_generated/v2022_05_01/_configuration.py +0 -72
  42. azure/ai/textanalytics/_generated/v2022_05_01/_text_analytics_client.py +0 -100
  43. azure/ai/textanalytics/_generated/v2022_05_01/_vendor.py +0 -45
  44. azure/ai/textanalytics/_generated/v2022_05_01/aio/__init__.py +0 -20
  45. azure/ai/textanalytics/_generated/v2022_05_01/aio/_configuration.py +0 -71
  46. azure/ai/textanalytics/_generated/v2022_05_01/aio/_text_analytics_client.py +0 -97
  47. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/__init__.py +0 -18
  48. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_patch.py +0 -121
  49. azure/ai/textanalytics/_generated/v2022_05_01/aio/operations/_text_analytics_client_operations.py +0 -603
  50. azure/ai/textanalytics/_generated/v2022_05_01/models/__init__.py +0 -281
  51. azure/ai/textanalytics/_generated/v2022_05_01/models/_models_py3.py +0 -5722
  52. azure/ai/textanalytics/_generated/v2022_05_01/models/_text_analytics_client_enums.py +0 -439
  53. azure/ai/textanalytics/_generated/v2022_05_01/operations/_patch.py +0 -120
  54. azure/ai/textanalytics/_generated/v2022_05_01/operations/_text_analytics_client_operations.py +0 -744
  55. azure/ai/textanalytics/_generated/v2022_10_01_preview/__init__.py +0 -20
  56. azure/ai/textanalytics/_generated/v2022_10_01_preview/_configuration.py +0 -72
  57. azure/ai/textanalytics/_generated/v2022_10_01_preview/_patch.py +0 -19
  58. azure/ai/textanalytics/_generated/v2022_10_01_preview/_text_analytics_client.py +0 -100
  59. azure/ai/textanalytics/_generated/v2022_10_01_preview/_vendor.py +0 -45
  60. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/__init__.py +0 -20
  61. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_configuration.py +0 -71
  62. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_patch.py +0 -19
  63. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_text_analytics_client.py +0 -97
  64. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/_vendor.py +0 -27
  65. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_patch.py +0 -121
  66. azure/ai/textanalytics/_generated/v2022_10_01_preview/aio/operations/_text_analytics_client_operations.py +0 -603
  67. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/__init__.py +0 -405
  68. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_models_py3.py +0 -8420
  69. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_patch.py +0 -486
  70. azure/ai/textanalytics/_generated/v2022_10_01_preview/models/_text_analytics_client_enums.py +0 -729
  71. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/__init__.py +0 -18
  72. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_patch.py +0 -120
  73. azure/ai/textanalytics/_generated/v2022_10_01_preview/operations/_text_analytics_client_operations.py +0 -744
  74. azure/ai/textanalytics/_generated/v3_0/__init__.py +0 -20
  75. azure/ai/textanalytics/_generated/v3_0/_configuration.py +0 -66
  76. azure/ai/textanalytics/_generated/v3_0/_patch.py +0 -31
  77. azure/ai/textanalytics/_generated/v3_0/_text_analytics_client.py +0 -96
  78. azure/ai/textanalytics/_generated/v3_0/_vendor.py +0 -33
  79. azure/ai/textanalytics/_generated/v3_0/aio/__init__.py +0 -20
  80. azure/ai/textanalytics/_generated/v3_0/aio/_configuration.py +0 -65
  81. azure/ai/textanalytics/_generated/v3_0/aio/_patch.py +0 -31
  82. azure/ai/textanalytics/_generated/v3_0/aio/_text_analytics_client.py +0 -93
  83. azure/ai/textanalytics/_generated/v3_0/aio/_vendor.py +0 -27
  84. azure/ai/textanalytics/_generated/v3_0/aio/operations/__init__.py +0 -18
  85. azure/ai/textanalytics/_generated/v3_0/aio/operations/_patch.py +0 -19
  86. azure/ai/textanalytics/_generated/v3_0/aio/operations/_text_analytics_client_operations.py +0 -428
  87. azure/ai/textanalytics/_generated/v3_0/models/__init__.py +0 -81
  88. azure/ai/textanalytics/_generated/v3_0/models/_models_py3.py +0 -1467
  89. azure/ai/textanalytics/_generated/v3_0/models/_patch.py +0 -19
  90. azure/ai/textanalytics/_generated/v3_0/models/_text_analytics_client_enums.py +0 -58
  91. azure/ai/textanalytics/_generated/v3_0/operations/__init__.py +0 -18
  92. azure/ai/textanalytics/_generated/v3_0/operations/_patch.py +0 -19
  93. azure/ai/textanalytics/_generated/v3_0/operations/_text_analytics_client_operations.py +0 -604
  94. azure/ai/textanalytics/_generated/v3_1/__init__.py +0 -20
  95. azure/ai/textanalytics/_generated/v3_1/_configuration.py +0 -66
  96. azure/ai/textanalytics/_generated/v3_1/_patch.py +0 -31
  97. azure/ai/textanalytics/_generated/v3_1/_text_analytics_client.py +0 -98
  98. azure/ai/textanalytics/_generated/v3_1/_vendor.py +0 -45
  99. azure/ai/textanalytics/_generated/v3_1/aio/__init__.py +0 -20
  100. azure/ai/textanalytics/_generated/v3_1/aio/_configuration.py +0 -65
  101. azure/ai/textanalytics/_generated/v3_1/aio/_patch.py +0 -31
  102. azure/ai/textanalytics/_generated/v3_1/aio/_text_analytics_client.py +0 -95
  103. azure/ai/textanalytics/_generated/v3_1/aio/_vendor.py +0 -27
  104. azure/ai/textanalytics/_generated/v3_1/aio/operations/__init__.py +0 -18
  105. azure/ai/textanalytics/_generated/v3_1/aio/operations/_patch.py +0 -19
  106. azure/ai/textanalytics/_generated/v3_1/aio/operations/_text_analytics_client_operations.py +0 -1291
  107. azure/ai/textanalytics/_generated/v3_1/models/__init__.py +0 -205
  108. azure/ai/textanalytics/_generated/v3_1/models/_models_py3.py +0 -3976
  109. azure/ai/textanalytics/_generated/v3_1/models/_patch.py +0 -19
  110. azure/ai/textanalytics/_generated/v3_1/models/_text_analytics_client_enums.py +0 -367
  111. azure/ai/textanalytics/_generated/v3_1/operations/__init__.py +0 -18
  112. azure/ai/textanalytics/_generated/v3_1/operations/_patch.py +0 -19
  113. azure/ai/textanalytics/_generated/v3_1/operations/_text_analytics_client_operations.py +0 -1709
  114. azure/ai/textanalytics/_lro.py +0 -553
  115. azure/ai/textanalytics/_models.py +0 -3158
  116. azure/ai/textanalytics/_policies.py +0 -66
  117. azure/ai/textanalytics/_request_handlers.py +0 -104
  118. azure/ai/textanalytics/_response_handlers.py +0 -583
  119. azure/ai/textanalytics/_text_analytics_client.py +0 -2081
  120. azure/ai/textanalytics/_user_agent.py +0 -8
  121. azure/ai/textanalytics/_validate.py +0 -113
  122. azure/ai/textanalytics/aio/_base_client_async.py +0 -98
  123. azure/ai/textanalytics/aio/_lro_async.py +0 -503
  124. azure/ai/textanalytics/aio/_response_handlers_async.py +0 -94
  125. azure/ai/textanalytics/aio/_text_analytics_client_async.py +0 -2077
  126. azure_ai_textanalytics-5.3.0b2.dist-info/RECORD +0 -115
  127. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info/licenses}/LICENSE +0 -0
  128. {azure_ai_textanalytics-5.3.0b2.dist-info → azure_ai_textanalytics-6.0.0b1.dist-info}/top_level.txt +0 -0
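For context on what the removed hand-written layer (for example azure/ai/textanalytics/_models.py and _text_analytics_client.py in the list above) exposed, here is a minimal, illustrative sketch of the 5.x usage pattern built on those modules. The endpoint and key are placeholders, and the replacement surface introduced by the new _client.py and models/ packages is not shown here:

    from azure.core.credentials import AzureKeyCredential
    from azure.ai.textanalytics import TextAnalyticsClient

    # Placeholder endpoint/key for a Language resource.
    client = TextAnalyticsClient(
        "https://<resource-name>.cognitiveservices.azure.com/",
        AzureKeyCredential("<api-key>"),
    )

    documents = ["Microsoft was founded by Bill Gates and Paul Allen."]
    for result in client.recognize_entities(documents):
        if not result.is_error:  # RecognizeEntitiesResult, defined in the removed _models.py
            for entity in result.entities:  # CategorizedEntity instances
                print(entity.text, entity.category, entity.confidence_score)

The result and entity types used above are defined in the removed azure/ai/textanalytics/_models.py whose diff follows, so code written against the 5.x surface is affected by this release.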
@@ -1,3158 +0,0 @@
- # pylint: disable=too-many-lines
- # ------------------------------------
- # Copyright (c) Microsoft Corporation.
- # Licensed under the MIT License.
- # ------------------------------------
- # pylint: disable=unused-argument
- import re
- from enum import Enum
- from typing import Optional, List, Dict, Any, Union
- from typing_extensions import Literal
- from azure.core import CaseInsensitiveEnumMeta
- from ._generated.models import (
-     LanguageInput,
-     MultiLanguageInput,
-     AgeResolution,
-     AreaResolution,
-     CurrencyResolution,
-     DateTimeResolution,
-     InformationResolution,
-     LengthResolution,
-     NumberResolution,
-     NumericRangeResolution,
-     OrdinalResolution,
-     SpeedResolution,
-     TemperatureResolution,
-     TemporalSpanResolution,
-     VolumeResolution,
-     WeightResolution,
-     HealthcareDocumentType,
- )
- from ._generated.v3_0 import models as _v3_0_models
- from ._generated.v3_1 import models as _v3_1_models
- from ._generated.v2022_10_01_preview import models as _v2022_10_01_preview_models
- from ._check import is_language_api, string_index_type_compatibility
- from ._dict_mixin import DictMixin
-
- STRING_INDEX_TYPE_DEFAULT = "UnicodeCodePoint"
-
- def _get_indices(relation):
-     return [int(s) for s in re.findall(r"\d+", relation)]
-
-
- class TextAnalysisKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Enumeration of supported Text Analysis kinds.
-
-     .. versionadded:: 2022-05-01
-         The *TextAnalysisKind* enum.
-     """
-
-     SENTIMENT_ANALYSIS = "SentimentAnalysis"
-     ENTITY_RECOGNITION = "EntityRecognition"
-     PII_ENTITY_RECOGNITION = "PiiEntityRecognition"
-     KEY_PHRASE_EXTRACTION = "KeyPhraseExtraction"
-     ENTITY_LINKING = "EntityLinking"
-     HEALTHCARE = "Healthcare"
-     CUSTOM_ENTITY_RECOGNITION = "CustomEntityRecognition"
-     CUSTOM_DOCUMENT_CLASSIFICATION = "CustomDocumentClassification"
-     LANGUAGE_DETECTION = "LanguageDetection"
-     EXTRACTIVE_SUMMARIZATION = "ExtractiveSummarization"
-     ABSTRACTIVE_SUMMARIZATION = "AbstractiveSummarization"
-     DYNAMIC_CLASSIFICATION = "DynamicClassification"
-
-
- class EntityAssociation(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Describes if the entity is the subject of the text or if it describes someone else."""
-
-     SUBJECT = "subject"
-     OTHER = "other"
-
-
- class EntityCertainty(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Describes the entities certainty and polarity."""
-
-     POSITIVE = "positive"
-     POSITIVE_POSSIBLE = "positivePossible"
-     NEUTRAL_POSSIBLE = "neutralPossible"
-     NEGATIVE_POSSIBLE = "negativePossible"
-     NEGATIVE = "negative"
-
-
- class EntityConditionality(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Describes any conditionality on the entity."""
-
-     HYPOTHETICAL = "hypothetical"
-     CONDITIONAL = "conditional"
-
-
- class HealthcareEntityRelation(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Type of relation. Examples include: 'DosageOfMedication' or 'FrequencyOfMedication', etc."""
-
-     ABBREVIATION = "Abbreviation"
-     DIRECTION_OF_BODY_STRUCTURE = "DirectionOfBodyStructure"
-     DIRECTION_OF_CONDITION = "DirectionOfCondition"
-     DIRECTION_OF_EXAMINATION = "DirectionOfExamination"
-     DIRECTION_OF_TREATMENT = "DirectionOfTreatment"
-     DOSAGE_OF_MEDICATION = "DosageOfMedication"
-     FORM_OF_MEDICATION = "FormOfMedication"
-     FREQUENCY_OF_MEDICATION = "FrequencyOfMedication"
-     FREQUENCY_OF_TREATMENT = "FrequencyOfTreatment"
-     QUALIFIER_OF_CONDITION = "QualifierOfCondition"
-     RELATION_OF_EXAMINATION = "RelationOfExamination"
-     ROUTE_OF_MEDICATION = "RouteOfMedication"
-     TIME_OF_CONDITION = "TimeOfCondition"
-     TIME_OF_EVENT = "TimeOfEvent"
-     TIME_OF_EXAMINATION = "TimeOfExamination"
-     TIME_OF_MEDICATION = "TimeOfMedication"
-     TIME_OF_TREATMENT = "TimeOfTreatment"
-     UNIT_OF_CONDITION = "UnitOfCondition"
-     UNIT_OF_EXAMINATION = "UnitOfExamination"
-     VALUE_OF_CONDITION = "ValueOfCondition"
-     VALUE_OF_EXAMINATION = "ValueOfExamination"
-
-
- class PiiEntityCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Categories of Personally Identifiable Information (PII)."""
-
-     ABA_ROUTING_NUMBER = "ABARoutingNumber"
-     AR_NATIONAL_IDENTITY_NUMBER = "ARNationalIdentityNumber"
-     AU_BANK_ACCOUNT_NUMBER = "AUBankAccountNumber"
-     AU_DRIVERS_LICENSE_NUMBER = "AUDriversLicenseNumber"
-     AU_MEDICAL_ACCOUNT_NUMBER = "AUMedicalAccountNumber"
-     AU_PASSPORT_NUMBER = "AUPassportNumber"
-     AU_TAX_FILE_NUMBER = "AUTaxFileNumber"
-     AU_BUSINESS_NUMBER = "AUBusinessNumber"
-     AU_COMPANY_NUMBER = "AUCompanyNumber"
-     AT_IDENTITY_CARD = "ATIdentityCard"
-     AT_TAX_IDENTIFICATION_NUMBER = "ATTaxIdentificationNumber"
-     AT_VALUE_ADDED_TAX_NUMBER = "ATValueAddedTaxNumber"
-     AZURE_DOCUMENT_DB_AUTH_KEY = "AzureDocumentDBAuthKey"
-     AZURE_IAAS_DATABASE_CONNECTION_AND_SQL_STRING = (
-         "AzureIAASDatabaseConnectionAndSQLString"
-     )
-     AZURE_IO_T_CONNECTION_STRING = "AzureIoTConnectionString"
-     AZURE_PUBLISH_SETTING_PASSWORD = "AzurePublishSettingPassword"
-     AZURE_REDIS_CACHE_STRING = "AzureRedisCacheString"
-     AZURE_SAS = "AzureSAS"
-     AZURE_SERVICE_BUS_STRING = "AzureServiceBusString"
-     AZURE_STORAGE_ACCOUNT_KEY = "AzureStorageAccountKey"
-     AZURE_STORAGE_ACCOUNT_GENERIC = "AzureStorageAccountGeneric"
-     BE_NATIONAL_NUMBER = "BENationalNumber"
-     BE_NATIONAL_NUMBER_V2 = "BENationalNumberV2"
-     BE_VALUE_ADDED_TAX_NUMBER = "BEValueAddedTaxNumber"
-     BRCPF_NUMBER = "BRCPFNumber"
-     BR_LEGAL_ENTITY_NUMBER = "BRLegalEntityNumber"
-     BR_NATIONAL_IDRG = "BRNationalIDRG"
-     BG_UNIFORM_CIVIL_NUMBER = "BGUniformCivilNumber"
-     CA_BANK_ACCOUNT_NUMBER = "CABankAccountNumber"
-     CA_DRIVERS_LICENSE_NUMBER = "CADriversLicenseNumber"
-     CA_HEALTH_SERVICE_NUMBER = "CAHealthServiceNumber"
-     CA_PASSPORT_NUMBER = "CAPassportNumber"
-     CA_PERSONAL_HEALTH_IDENTIFICATION = "CAPersonalHealthIdentification"
-     CA_SOCIAL_INSURANCE_NUMBER = "CASocialInsuranceNumber"
-     CL_IDENTITY_CARD_NUMBER = "CLIdentityCardNumber"
-     CN_RESIDENT_IDENTITY_CARD_NUMBER = "CNResidentIdentityCardNumber"
-     CREDIT_CARD_NUMBER = "CreditCardNumber"
-     HR_IDENTITY_CARD_NUMBER = "HRIdentityCardNumber"
-     HR_NATIONAL_ID_NUMBER = "HRNationalIDNumber"
-     HR_PERSONAL_IDENTIFICATION_NUMBER = "HRPersonalIdentificationNumber"
-     HR_PERSONAL_IDENTIFICATION_OIB_NUMBER_V2 = "HRPersonalIdentificationOIBNumberV2"
-     CY_IDENTITY_CARD = "CYIdentityCard"
-     CY_TAX_IDENTIFICATION_NUMBER = "CYTaxIdentificationNumber"
-     CZ_PERSONAL_IDENTITY_NUMBER = "CZPersonalIdentityNumber"
-     CZ_PERSONAL_IDENTITY_V2 = "CZPersonalIdentityV2"
-     DK_PERSONAL_IDENTIFICATION_NUMBER = "DKPersonalIdentificationNumber"
-     DK_PERSONAL_IDENTIFICATION_V2 = "DKPersonalIdentificationV2"
-     DRUG_ENFORCEMENT_AGENCY_NUMBER = "DrugEnforcementAgencyNumber"
-     EE_PERSONAL_IDENTIFICATION_CODE = "EEPersonalIdentificationCode"
-     EU_DEBIT_CARD_NUMBER = "EUDebitCardNumber"
-     EU_DRIVERS_LICENSE_NUMBER = "EUDriversLicenseNumber"
-     EUGPS_COORDINATES = "EUGPSCoordinates"
-     EU_NATIONAL_IDENTIFICATION_NUMBER = "EUNationalIdentificationNumber"
-     EU_PASSPORT_NUMBER = "EUPassportNumber"
-     EU_SOCIAL_SECURITY_NUMBER = "EUSocialSecurityNumber"
-     EU_TAX_IDENTIFICATION_NUMBER = "EUTaxIdentificationNumber"
-     FI_EUROPEAN_HEALTH_NUMBER = "FIEuropeanHealthNumber"
-     FI_NATIONAL_ID = "FINationalID"
-     FI_NATIONAL_IDV2 = "FINationalIDV2"
-     FI_PASSPORT_NUMBER = "FIPassportNumber"
-     FR_DRIVERS_LICENSE_NUMBER = "FRDriversLicenseNumber"
-     FR_HEALTH_INSURANCE_NUMBER = "FRHealthInsuranceNumber"
-     FR_NATIONAL_ID = "FRNationalID"
-     FR_PASSPORT_NUMBER = "FRPassportNumber"
-     FR_SOCIAL_SECURITY_NUMBER = "FRSocialSecurityNumber"
-     FR_TAX_IDENTIFICATION_NUMBER = "FRTaxIdentificationNumber"
-     FR_VALUE_ADDED_TAX_NUMBER = "FRValueAddedTaxNumber"
-     DE_DRIVERS_LICENSE_NUMBER = "DEDriversLicenseNumber"
-     DE_PASSPORT_NUMBER = "DEPassportNumber"
-     DE_IDENTITY_CARD_NUMBER = "DEIdentityCardNumber"
-     DE_TAX_IDENTIFICATION_NUMBER = "DETaxIdentificationNumber"
-     DE_VALUE_ADDED_NUMBER = "DEValueAddedNumber"
-     GR_NATIONAL_ID_CARD = "GRNationalIDCard"
-     GR_NATIONAL_IDV2 = "GRNationalIDV2"
-     GR_TAX_IDENTIFICATION_NUMBER = "GRTaxIdentificationNumber"
-     HK_IDENTITY_CARD_NUMBER = "HKIdentityCardNumber"
-     HU_VALUE_ADDED_NUMBER = "HUValueAddedNumber"
-     HU_PERSONAL_IDENTIFICATION_NUMBER = "HUPersonalIdentificationNumber"
-     HU_TAX_IDENTIFICATION_NUMBER = "HUTaxIdentificationNumber"
-     IN_PERMANENT_ACCOUNT = "INPermanentAccount"
-     IN_UNIQUE_IDENTIFICATION_NUMBER = "INUniqueIdentificationNumber"
-     ID_IDENTITY_CARD_NUMBER = "IDIdentityCardNumber"
-     INTERNATIONAL_BANKING_ACCOUNT_NUMBER = "InternationalBankingAccountNumber"
-     IE_PERSONAL_PUBLIC_SERVICE_NUMBER = "IEPersonalPublicServiceNumber"
-     IE_PERSONAL_PUBLIC_SERVICE_NUMBER_V2 = "IEPersonalPublicServiceNumberV2"
-     IL_BANK_ACCOUNT_NUMBER = "ILBankAccountNumber"
-     IL_NATIONAL_ID = "ILNationalID"
-     IT_DRIVERS_LICENSE_NUMBER = "ITDriversLicenseNumber"
-     IT_FISCAL_CODE = "ITFiscalCode"
-     IT_VALUE_ADDED_TAX_NUMBER = "ITValueAddedTaxNumber"
-     JP_BANK_ACCOUNT_NUMBER = "JPBankAccountNumber"
-     JP_DRIVERS_LICENSE_NUMBER = "JPDriversLicenseNumber"
-     JP_PASSPORT_NUMBER = "JPPassportNumber"
-     JP_RESIDENT_REGISTRATION_NUMBER = "JPResidentRegistrationNumber"
-     JP_SOCIAL_INSURANCE_NUMBER = "JPSocialInsuranceNumber"
-     JP_MY_NUMBER_CORPORATE = "JPMyNumberCorporate"
-     JP_MY_NUMBER_PERSONAL = "JPMyNumberPersonal"
-     JP_RESIDENCE_CARD_NUMBER = "JPResidenceCardNumber"
-     LV_PERSONAL_CODE = "LVPersonalCode"
-     LT_PERSONAL_CODE = "LTPersonalCode"
-     LU_NATIONAL_IDENTIFICATION_NUMBER_NATURAL = "LUNationalIdentificationNumberNatural"
-     LU_NATIONAL_IDENTIFICATION_NUMBER_NON_NATURAL = (
-         "LUNationalIdentificationNumberNonNatural"
-     )
-     MY_IDENTITY_CARD_NUMBER = "MYIdentityCardNumber"
-     MT_IDENTITY_CARD_NUMBER = "MTIdentityCardNumber"
-     MT_TAX_ID_NUMBER = "MTTaxIDNumber"
-     NL_CITIZENS_SERVICE_NUMBER = "NLCitizensServiceNumber"
-     NL_CITIZENS_SERVICE_NUMBER_V2 = "NLCitizensServiceNumberV2"
-     NL_TAX_IDENTIFICATION_NUMBER = "NLTaxIdentificationNumber"
-     NL_VALUE_ADDED_TAX_NUMBER = "NLValueAddedTaxNumber"
-     NZ_BANK_ACCOUNT_NUMBER = "NZBankAccountNumber"
-     NZ_DRIVERS_LICENSE_NUMBER = "NZDriversLicenseNumber"
-     NZ_INLAND_REVENUE_NUMBER = "NZInlandRevenueNumber"
-     NZ_MINISTRY_OF_HEALTH_NUMBER = "NZMinistryOfHealthNumber"
-     NZ_SOCIAL_WELFARE_NUMBER = "NZSocialWelfareNumber"
-     NO_IDENTITY_NUMBER = "NOIdentityNumber"
-     PH_UNIFIED_MULTI_PURPOSE_ID_NUMBER = "PHUnifiedMultiPurposeIDNumber"
-     PL_IDENTITY_CARD = "PLIdentityCard"
-     PL_NATIONAL_ID = "PLNationalID"
-     PL_NATIONAL_IDV2 = "PLNationalIDV2"
-     PL_PASSPORT_NUMBER = "PLPassportNumber"
-     PL_TAX_IDENTIFICATION_NUMBER = "PLTaxIdentificationNumber"
-     PLREGON_NUMBER = "PLREGONNumber"
-     PT_CITIZEN_CARD_NUMBER = "PTCitizenCardNumber"
-     PT_CITIZEN_CARD_NUMBER_V2 = "PTCitizenCardNumberV2"
-     PT_TAX_IDENTIFICATION_NUMBER = "PTTaxIdentificationNumber"
-     RO_PERSONAL_NUMERICAL_CODE = "ROPersonalNumericalCode"
-     RU_PASSPORT_NUMBER_DOMESTIC = "RUPassportNumberDomestic"
-     RU_PASSPORT_NUMBER_INTERNATIONAL = "RUPassportNumberInternational"
-     SA_NATIONAL_ID = "SANationalID"
-     SG_NATIONAL_REGISTRATION_IDENTITY_CARD_NUMBER = (
-         "SGNationalRegistrationIdentityCardNumber"
-     )
-     SK_PERSONAL_NUMBER = "SKPersonalNumber"
-     SI_TAX_IDENTIFICATION_NUMBER = "SITaxIdentificationNumber"
-     SI_UNIQUE_MASTER_CITIZEN_NUMBER = "SIUniqueMasterCitizenNumber"
-     ZA_IDENTIFICATION_NUMBER = "ZAIdentificationNumber"
-     KR_RESIDENT_REGISTRATION_NUMBER = "KRResidentRegistrationNumber"
-     ESDNI = "ESDNI"
-     ES_SOCIAL_SECURITY_NUMBER = "ESSocialSecurityNumber"
-     ES_TAX_IDENTIFICATION_NUMBER = "ESTaxIdentificationNumber"
-     SQL_SERVER_CONNECTION_STRING = "SQLServerConnectionString"
-     SE_NATIONAL_ID = "SENationalID"
-     SE_NATIONAL_IDV2 = "SENationalIDV2"
-     SE_PASSPORT_NUMBER = "SEPassportNumber"
-     SE_TAX_IDENTIFICATION_NUMBER = "SETaxIdentificationNumber"
-     SWIFT_CODE = "SWIFTCode"
-     CH_SOCIAL_SECURITY_NUMBER = "CHSocialSecurityNumber"
-     TW_NATIONAL_ID = "TWNationalID"
-     TW_PASSPORT_NUMBER = "TWPassportNumber"
-     TW_RESIDENT_CERTIFICATE = "TWResidentCertificate"
-     TH_POPULATION_IDENTIFICATION_CODE = "THPopulationIdentificationCode"
-     TR_NATIONAL_IDENTIFICATION_NUMBER = "TRNationalIdentificationNumber"
-     UK_DRIVERS_LICENSE_NUMBER = "UKDriversLicenseNumber"
-     UK_ELECTORAL_ROLL_NUMBER = "UKElectoralRollNumber"
-     UK_NATIONAL_HEALTH_NUMBER = "UKNationalHealthNumber"
-     UK_NATIONAL_INSURANCE_NUMBER = "UKNationalInsuranceNumber"
-     UK_UNIQUE_TAXPAYER_NUMBER = "UKUniqueTaxpayerNumber"
-     USUK_PASSPORT_NUMBER = "USUKPassportNumber"
-     US_BANK_ACCOUNT_NUMBER = "USBankAccountNumber"
-     US_DRIVERS_LICENSE_NUMBER = "USDriversLicenseNumber"
-     US_INDIVIDUAL_TAXPAYER_IDENTIFICATION = "USIndividualTaxpayerIdentification"
-     US_SOCIAL_SECURITY_NUMBER = "USSocialSecurityNumber"
-     UA_PASSPORT_NUMBER_DOMESTIC = "UAPassportNumberDomestic"
-     UA_PASSPORT_NUMBER_INTERNATIONAL = "UAPassportNumberInternational"
-     ORGANIZATION = "Organization"
-     EMAIL = "Email"
-     URL = "URL"
-     AGE = "Age"
-     PHONE_NUMBER = "PhoneNumber"
-     IP_ADDRESS = "IPAddress"
-     DATE = "Date"
-     PERSON = "Person"
-     ADDRESS = "Address"
-     ALL = "All"
-     DEFAULT = "Default"
-
-
- class HealthcareEntityCategory(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """Healthcare Entity Category."""
-
-     BODY_STRUCTURE = "BodyStructure"
-     AGE = "Age"
-     GENDER = "Gender"
-     EXAMINATION_NAME = "ExaminationName"
-     DATE = "Date"
-     DIRECTION = "Direction"
-     FREQUENCY = "Frequency"
-     MEASUREMENT_VALUE = "MeasurementValue"
-     MEASUREMENT_UNIT = "MeasurementUnit"
-     RELATIONAL_OPERATOR = "RelationalOperator"
-     TIME = "Time"
-     GENE_OR_PROTEIN = "GeneOrProtein"
-     VARIANT = "Variant"
-     ADMINISTRATIVE_EVENT = "AdministrativeEvent"
-     CARE_ENVIRONMENT = "CareEnvironment"
-     HEALTHCARE_PROFESSION = "HealthcareProfession"
-     DIAGNOSIS = "Diagnosis"
-     SYMPTOM_OR_SIGN = "SymptomOrSign"
-     CONDITION_QUALIFIER = "ConditionQualifier"
-     MEDICATION_CLASS = "MedicationClass"
-     MEDICATION_NAME = "MedicationName"
-     DOSAGE = "Dosage"
-     MEDICATION_FORM = "MedicationForm"
-     MEDICATION_ROUTE = "MedicationRoute"
-     FAMILY_RELATION = "FamilyRelation"
-     TREATMENT_NAME = "TreatmentName"
-
-
- class PiiEntityDomain(str, Enum, metaclass=CaseInsensitiveEnumMeta):
-     """The different domains of PII entities that users can filter by"""
-
-     PROTECTED_HEALTH_INFORMATION = (
-         "phi"  # See https://aka.ms/azsdk/language/pii for more information.
-     )
-
-
- class DetectedLanguage(DictMixin):
-     """DetectedLanguage contains the predicted language found in text,
-     its confidence score, and its ISO 639-1 representation.
-
-     .. versionadded:: 2022-10-01-preview
-         The *script* property.
-     """
-
-     name: str
-     """Long name of a detected language (e.g. English,
-     French)."""
-     iso6391_name: str
-     """A two letter representation of the detected
-     language according to the ISO 639-1 standard (e.g. en, fr)."""
-     confidence_score: float
-     """A confidence score between 0 and 1. Scores close
-     to 1 indicate 100% certainty that the identified language is true."""
-     script: Optional[str] = None
-     """Identifies the script of the input document. Possible value is 'Latin'."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.name = kwargs.get("name", None)
-         self.iso6391_name = kwargs.get("iso6391_name", None)
-         self.confidence_score = kwargs.get("confidence_score", None)
-         self.script = kwargs.get("script", None)
-
-     @classmethod
-     def _from_generated(cls, language):
-         script = language.script if hasattr(language, "script") else None
-         return cls(
-             name=language.name,
-             iso6391_name=language.iso6391_name,
-             confidence_score=language.confidence_score,
-             script=script
-         )
-
-     def __repr__(self) -> str:
-         return (
-             f"DetectedLanguage(name={self.name}, iso6391_name={self.iso6391_name}, "
-             f"confidence_score={self.confidence_score}, script={self.script})"[:1024]
-         )
-
-
- class RecognizeEntitiesResult(DictMixin):
-     """RecognizeEntitiesResult is a result object which contains
-     the recognized entities from a particular document.
-
-     .. versionadded:: 2022-10-01-preview
-         The *detected_language* property.
-     """
-
-     id: str  # pylint: disable=redefined-builtin
-     """Unique, non-empty document identifier that matches the
-     document id that was passed in with the request. If not specified
-     in the request, an id is assigned for the document."""
-     entities: List["CategorizedEntity"]
-     """Recognized entities in the document."""
-     warnings: List["TextAnalyticsWarning"]
-     """Warnings encountered while processing document. Results will still be returned
-     if there are warnings, but they may not be fully accurate."""
-     statistics: Optional["TextDocumentStatistics"] = None
-     """If `show_stats=True` was specified in the request this
-     field will contain information about the document payload."""
-     detected_language: Optional[DetectedLanguage] = None
-     """If 'language' is set to 'auto' for the document in the request this
-     field will contain the DetectedLanguage for the document."""
-     is_error: Literal[False] = False
-     """Boolean check for error item when iterating over list of
-     results. Always False for an instance of a RecognizeEntitiesResult."""
-     kind: Literal["EntityRecognition"] = "EntityRecognition"
-     """The text analysis kind - "EntityRecognition"."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.id = kwargs.get("id", None)
-         self.entities = kwargs.get("entities", None)
-         self.warnings = kwargs.get("warnings", [])
-         self.statistics = kwargs.get("statistics", None)
-         self.detected_language = kwargs.get("detected_language", None)
-         self.is_error: Literal[False] = False
-         self.kind: Literal["EntityRecognition"] = "EntityRecognition"
-
-     def __repr__(self) -> str:
-         return (
-             f"RecognizeEntitiesResult(id={self.id}, entities={repr(self.entities)}, "
-             f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
-             f"detected_language={repr(self.detected_language)}, is_error={self.is_error}, "
-             f"kind={self.kind})"[:1024]
-         )
-
-
- class RecognizePiiEntitiesResult(DictMixin):
-     """RecognizePiiEntitiesResult is a result object which contains
-     the recognized Personally Identifiable Information (PII) entities
-     from a particular document.
-
-     .. versionadded:: 2022-10-01-preview
-         The *detected_language* property.
-     """
-
-     id: str  # pylint: disable=redefined-builtin
-     """Unique, non-empty document identifier that matches the
-     document id that was passed in with the request. If not specified
-     in the request, an id is assigned for the document."""
-     entities: List["PiiEntity"]
-     """Recognized PII entities in the document."""
-     redacted_text: str
-     """Returns the text of the input document with all of the PII information
-     redacted out."""
-     warnings: List["TextAnalyticsWarning"]
-     """Warnings encountered while processing document. Results will still be returned
-     if there are warnings, but they may not be fully accurate."""
-     statistics: Optional["TextDocumentStatistics"] = None
-     """If `show_stats=True` was specified in the request this
-     field will contain information about the document payload."""
-     detected_language: Optional[DetectedLanguage] = None
-     """If 'language' is set to 'auto' for the document in the request this
-     field will contain the DetectedLanguage for the document."""
-     is_error: Literal[False] = False
-     """Boolean check for error item when iterating over list of
-     results. Always False for an instance of a RecognizePiiEntitiesResult."""
-     kind: Literal["PiiEntityRecognition"] = "PiiEntityRecognition"
-     """The text analysis kind - "PiiEntityRecognition"."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.id = kwargs.get("id", None)
-         self.entities = kwargs.get("entities", None)
-         self.redacted_text = kwargs.get("redacted_text", None)
-         self.warnings = kwargs.get("warnings", [])
-         self.statistics = kwargs.get("statistics", None)
-         self.detected_language = kwargs.get('detected_language', None)
-         self.is_error: Literal[False] = False
-         self.kind: Literal["PiiEntityRecognition"] = "PiiEntityRecognition"
-
-     def __repr__(self) -> str:
-         return (
-             f"RecognizePiiEntitiesResult(id={self.id}, entities={repr(self.entities)}, "
-             f"redacted_text={self.redacted_text}, warnings={repr(self.warnings)}, "
-             f"statistics={repr(self.statistics)}, detected_language={repr(self.detected_language)}, "
-             f"is_error={self.is_error}, kind={self.kind})"[:1024]
-         )
-
-
- class AnalyzeHealthcareEntitiesResult(DictMixin):
-     """
-     AnalyzeHealthcareEntitiesResult contains the Healthcare entities from a
-     particular document.
-
-     .. versionadded:: 2022-10-01-preview
-         The *fhir_bundle* and *detected_language* properties.
-     """
-
-     id: str  # pylint: disable=redefined-builtin
-     """Unique, non-empty document identifier that matches the
-     document id that was passed in with the request. If not specified
-     in the request, an id is assigned for the document."""
-     entities: List["HealthcareEntity"]
-     """Identified Healthcare entities in the document, i.e. in
-     the document "The subject took ibuprofen", "ibuprofen" is an identified entity
-     from the document."""
-     entity_relations: List["HealthcareRelation"]
-     """Identified Healthcare relations between entities. For example, in the
-     document "The subject took 100mg of ibuprofen", we would identify the relationship
-     between the dosage of 100mg and the medication ibuprofen."""
-     warnings: List["TextAnalyticsWarning"]
-     """Warnings encountered while processing document. Results will still be returned
-     if there are warnings, but they may not be fully accurate."""
-     statistics: Optional["TextDocumentStatistics"] = None
-     """If show_stats=true was specified in the request this
-     field will contain information about the document payload."""
-     fhir_bundle: Optional[Dict[str, Any]] = None
-     """If `fhir_version` is passed, this will contain a
-     FHIR compatible object for consumption in other Healthcare tools. For additional
-     information see https://www.hl7.org/fhir/overview.html."""
-     detected_language: Optional[str] = None
-     """If 'language' is set to 'auto' for the document in the request this
-     field will contain the detected language for the document."""
-     is_error: Literal[False] = False
-     """Boolean check for error item when iterating over list of
-     results. Always False for an instance of a AnalyzeHealthcareEntitiesResult."""
-     kind: Literal["Healthcare"] = "Healthcare"
-     """The text analysis kind - "Healthcare"."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.id = kwargs.get("id", None)
-         self.entities = kwargs.get("entities", None)
-         self.entity_relations = kwargs.get("entity_relations", None)
-         self.warnings = kwargs.get("warnings", [])
-         self.statistics = kwargs.get("statistics", None)
-         self.fhir_bundle = kwargs.get("fhir_bundle", None)
-         self.detected_language = kwargs.get('detected_language', None)
-         self.is_error: Literal[False] = False
-         self.kind: Literal["Healthcare"] = "Healthcare"
-
-     @classmethod
-     def _from_generated(cls, healthcare_result):
-         entities = [
-             HealthcareEntity._from_generated(e)  # pylint: disable=protected-access
-             for e in healthcare_result.entities
-         ]
-         relations = [
-             HealthcareRelation._from_generated(  # pylint: disable=protected-access
-                 r, entities
-             )
-             for r in healthcare_result.relations
-         ]
-         fhir_bundle = healthcare_result.fhir_bundle if hasattr(healthcare_result, "fhir_bundle") else None
-         detected_language = healthcare_result.detected_language \
-             if hasattr(healthcare_result, "detected_language") else None
-         return cls(
-             id=healthcare_result.id,
-             entities=entities,
-             entity_relations=relations,
-             warnings=[
-                 TextAnalyticsWarning._from_generated(  # pylint: disable=protected-access
-                     w
-                 )
-                 for w in healthcare_result.warnings
-             ],
-             statistics=TextDocumentStatistics._from_generated(  # pylint: disable=protected-access
-                 healthcare_result.statistics
-             ),
-             fhir_bundle=fhir_bundle,
-             detected_language=detected_language  # https://github.com/Azure/azure-sdk-for-python/issues/27171
-             # detected_language=DetectedLanguage._from_generated(  # pylint: disable=protected-access
-             #     healthcare_result.detected_language
-             # ) if hasattr(healthcare_result, "detected_language") and healthcare_result.detected_language else None
-         )
-
-     def __repr__(self) -> str:
-         return (
-             f"AnalyzeHealthcareEntitiesResult(id={self.id}, entities={repr(self.entities)}, "
-             f"entity_relations={repr(self.entity_relations)}, warnings={repr(self.warnings)}, "
-             f"statistics={repr(self.statistics)}, fhir_bundle={self.fhir_bundle}, "
-             f"detected_language={self.detected_language}, is_error={self.is_error}, kind={self.kind})"[:1024]
-         )
-
-
- class HealthcareRelation(DictMixin):
-     """HealthcareRelation is a result object which represents a relation detected in a document.
-
-     Every HealthcareRelation is an entity graph of a certain relation type,
-     where all entities are connected and have specific roles within the relation context.
-
-     .. versionadded:: 2022-10-01-preview
-         The *confidence_score* property.
-     """
-
-     relation_type: str
-     """The type of relation, i.e. the relationship between "100mg" and
-     "ibuprofen" in the document "The subject took 100 mg of ibuprofen" is "DosageOfMedication".
-     Possible values found in :class:`~azure.ai.textanalytics.HealthcareEntityRelation`"""
-     roles: List["HealthcareRelationRole"]
-     """The roles present in this relation. I.e., in the document
-     "The subject took 100 mg of ibuprofen", the present roles are "Dosage" and "Medication"."""
-     confidence_score: Optional[float] = None
-     """Confidence score between 0 and 1 of the extracted relation."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.relation_type = kwargs.get("relation_type", None)
-         self.roles = kwargs.get("roles", None)
-         self.confidence_score = kwargs.get("confidence_score", None)
-
-     @classmethod
-     def _from_generated(cls, healthcare_relation_result, entities):
-         roles = [
-             HealthcareRelationRole._from_generated(  # pylint: disable=protected-access
-                 r, entities
-             )
-             for r in healthcare_relation_result.entities
-         ]
-         confidence_score = healthcare_relation_result.confidence_score \
-             if hasattr(healthcare_relation_result, "confidence_score") else None
-         return cls(
-             relation_type=healthcare_relation_result.relation_type,
-             roles=roles,
-             confidence_score=confidence_score,
-         )
-
-     def __repr__(self) -> str:
-         return f"HealthcareRelation(relation_type={self.relation_type}, roles={repr(self.roles)}, " \
-             f"confidence_score={self.confidence_score})"[:1024]
-
-
- class HealthcareRelationRole(DictMixin):
-     """A model representing a role in a relation.
-
-     For example, in "The subject took 100 mg of ibuprofen",
-     "100 mg" is a dosage entity fulfilling the role "Dosage"
-     in the extracted relation "DosageOfMedication".
-     """
-
-     name: str
-     """The role of the entity in the relationship. I.e., in the relation
-     "The subject took 100 mg of ibuprofen", the dosage entity "100 mg" has role
-     "Dosage"."""
-     entity: "HealthcareEntity"
-     """The entity that is present in the relationship. For example, in
-     "The subject took 100 mg of ibuprofen", this property holds the dosage entity
-     of "100 mg"."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.name = kwargs.get("name", None)
-         self.entity = kwargs.get("entity", None)
-
-     @staticmethod
-     def _get_entity(healthcare_role_result, entities):
-         numbers = _get_indices(healthcare_role_result.ref)
-         entity_index = numbers[
-             1
-         ]  # first number parsed from index is document #, second is entity index
-         return entities[entity_index]
-
-     @classmethod
-     def _from_generated(cls, healthcare_role_result, entities):
-         return cls(
-             name=healthcare_role_result.role,
-             entity=HealthcareRelationRole._get_entity(healthcare_role_result, entities),
-         )
-
-     def __repr__(self) -> str:
-         return f"HealthcareRelationRole(name={self.name}, entity={repr(self.entity)})"[:1024]
-
-
- class DetectLanguageResult(DictMixin):
-     """DetectLanguageResult is a result object which contains
-     the detected language of a particular document.
-     """
-
-     id: str  # pylint: disable=redefined-builtin
-     """Unique, non-empty document identifier that matches the
-     document id that was passed in with the request. If not specified
-     in the request, an id is assigned for the document."""
-     primary_language: DetectedLanguage
-     """The primary language detected in the document."""
-     warnings: List["TextAnalyticsWarning"]
-     """Warnings encountered while processing document. Results will still be returned
-     if there are warnings, but they may not be fully accurate."""
-     statistics: Optional["TextDocumentStatistics"] = None
-     """If `show_stats=True` was specified in the request this
-     field will contain information about the document payload."""
-     is_error: Literal[False] = False
-     """Boolean check for error item when iterating over list of
-     results. Always False for an instance of a DetectLanguageResult."""
-     kind: Literal["LanguageDetection"] = "LanguageDetection"
-     """The text analysis kind - "LanguageDetection"."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.id = kwargs.get("id", None)
-         self.primary_language = kwargs.get("primary_language", None)
-         self.warnings = kwargs.get("warnings", [])
-         self.statistics = kwargs.get("statistics", None)
-         self.is_error: Literal[False] = False
-         self.kind: Literal["LanguageDetection"] = "LanguageDetection"
-
-     def __repr__(self) -> str:
-         return (
-             f"DetectLanguageResult(id={self.id}, primary_language={repr(self.primary_language)}, "
-             f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, is_error={self.is_error}, "
-             f"kind={self.kind})"[:1024]
-         )
-
-
- class CategorizedEntity(DictMixin):
-     """CategorizedEntity contains information about a particular
-     entity found in text.
-
-     .. versionadded:: v3.1
-         The *offset* and *length* properties.
-     .. versionadded:: 2022-10-01-preview
-         The *resolutions* property.
-     """
-
-     text: str
-     """Entity text as appears in the request."""
-     category: str
-     """Entity category, such as Person/Location/Org/SSN etc"""
-     length: int
-     """The entity text length. This value depends on the value of the
-     `string_index_type` parameter set in the original request, which is UnicodeCodePoints
-     by default."""
-     offset: int
-     """The entity text offset from the start of the document.
-     The value depends on the value of the `string_index_type` parameter
-     set in the original request, which is UnicodeCodePoints by default."""
-     confidence_score: float
-     """Confidence score between 0 and 1 of the extracted
-     entity."""
-     subcategory: Optional[str] = None
-     """Entity subcategory, such as Age/Year/TimeRange etc"""
-     resolutions: List[
-         Union[
-             AgeResolution,
-             AreaResolution,
-             CurrencyResolution,
-             DateTimeResolution,
-             InformationResolution,
-             LengthResolution,
-             NumberResolution,
-             NumericRangeResolution,
-             OrdinalResolution,
-             SpeedResolution,
-             TemperatureResolution,
-             TemporalSpanResolution,
-             VolumeResolution,
-             WeightResolution,
-         ]
-     ]
-     """The collection of entity resolution objects. More information can be found here:
-     https://aka.ms/azsdk/language/ner-resolutions"""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.text = kwargs.get("text", None)
-         self.category = kwargs.get("category", None)
-         self.subcategory = kwargs.get("subcategory", None)
-         self.length = kwargs.get("length", None)
-         self.offset = kwargs.get("offset", None)
-         self.confidence_score = kwargs.get("confidence_score", None)
-         self.resolutions = kwargs.get("resolutions", None)
-
-     @classmethod
-     def _from_generated(cls, entity):
-         offset = entity.offset
-         length = entity.length
-         if isinstance(entity, _v3_0_models.Entity):
-             # we do not return offset for v3.0 since
-             # the correct encoding was not introduced for v3.0
-             offset = None
-             length = None
-         entity_resolutions = entity.resolutions if hasattr(entity, "resolutions") else None
-         return cls(
-             text=entity.text,
-             category=entity.category,
-             subcategory=entity.subcategory,
-             length=length,
-             offset=offset,
-             confidence_score=entity.confidence_score,
-             resolutions=entity_resolutions or []
-         )
-
-     def __repr__(self) -> str:
-         return (
-             f"CategorizedEntity(text={self.text}, category={self.category}, subcategory={self.subcategory}, "
-             f"length={self.length}, offset={self.offset}, confidence_score={self.confidence_score}, "
-             f"resolutions={repr(self.resolutions)})"[:1024]
-         )
-
-
- class PiiEntity(DictMixin):
-     """PiiEntity contains information about a Personally Identifiable
-     Information (PII) entity found in text.
-     """
-
-     text: str
-     """Entity text as appears in the request."""
-     category: str
-     """Entity category, such as Financial Account
-     Identification/Social Security Number/Phone Number, etc."""
-     length: int
-     """The PII entity text length. This value depends on the value
-     of the `string_index_type` parameter specified in the original request, which
-     is UnicodeCodePoints by default."""
-     offset: int
-     """The PII entity text offset from the start of the document.
-     This value depends on the value of the `string_index_type` parameter specified
-     in the original request, which is UnicodeCodePoints by default."""
-     confidence_score: float
-     """Confidence score between 0 and 1 of the extracted entity."""
-     subcategory: Optional[str] = None
-     """Entity subcategory, such as Credit Card/EU
-     Phone number/ABA Routing Numbers, etc."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.text = kwargs.get("text", None)
-         self.category = kwargs.get("category", None)
-         self.subcategory = kwargs.get("subcategory", None)
-         self.length = kwargs.get("length", None)
-         self.offset = kwargs.get("offset", None)
-         self.confidence_score = kwargs.get("confidence_score", None)
-
-     @classmethod
-     def _from_generated(cls, entity):
-         return cls(
-             text=entity.text,
-             category=entity.category,
-             subcategory=entity.subcategory,
-             length=entity.length,
-             offset=entity.offset,
-             confidence_score=entity.confidence_score,
-         )
-
-     def __repr__(self) -> str:
-         return (
-             f"PiiEntity(text={self.text}, category={self.category}, subcategory={self.subcategory}, "
-             f"length={self.length}, offset={self.offset}, confidence_score={self.confidence_score})"[:1024]
-         )
-
-
- class HealthcareEntity(DictMixin):
-     """HealthcareEntity contains information about a Healthcare entity found in text.
-     """
-
-     text: str
-     """Entity text as appears in the document."""
-     category: str
-     """Entity category, see the :class:`~azure.ai.textanalytics.HealthcareEntityCategory`
-     type for possible healthcare entity categories."""
-     length: int
-     """The entity text length. This value depends on the value
-     of the `string_index_type` parameter specified in the original request, which is
-     UnicodeCodePoints by default."""
-     offset: int
-     """The entity text offset from the start of the document.
-     This value depends on the value of the `string_index_type` parameter specified
-     in the original request, which is UnicodeCodePoints by default."""
-     confidence_score: float
-     """Confidence score between 0 and 1 of the extracted entity."""
-     subcategory: Optional[str] = None
-     """Entity subcategory."""
-     assertion: Optional["HealthcareEntityAssertion"] = None
-     """Contains various assertions about this entity. For example, if
-     an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
-     Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
-     with another diagnosis?"""
-     normalized_text: Optional[str] = None
-     """Normalized version of the raw `text` we extract
-     from the document. Not all `text` will have a normalized version."""
-     data_sources: Optional[List["HealthcareEntityDataSource"]]
-     """A collection of entity references in known data sources."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.text = kwargs.get("text", None)
-         self.normalized_text = kwargs.get("normalized_text", None)
-         self.category = kwargs.get("category", None)
-         self.subcategory = kwargs.get("subcategory", None)
-         self.assertion = kwargs.get("assertion", None)
-         self.length = kwargs.get("length", None)
-         self.offset = kwargs.get("offset", None)
-         self.confidence_score = kwargs.get("confidence_score", None)
-         self.data_sources = kwargs.get("data_sources", [])
-
-     @classmethod
-     def _from_generated(cls, healthcare_entity):
-         assertion = None
-         try:
-             if healthcare_entity.assertion:
-                 assertion = HealthcareEntityAssertion._from_generated(  # pylint: disable=protected-access
-                     healthcare_entity.assertion
-                 )
-         except AttributeError:
-             assertion = None
-
-         return cls(
-             text=healthcare_entity.text,
-             normalized_text=healthcare_entity.name,
-             category=healthcare_entity.category,
-             subcategory=healthcare_entity.subcategory,
-             assertion=assertion,
-             length=healthcare_entity.length,
-             offset=healthcare_entity.offset,
-             confidence_score=healthcare_entity.confidence_score,
-             data_sources=[
-                 HealthcareEntityDataSource(entity_id=l.id, name=l.data_source)
-                 for l in healthcare_entity.links
-             ]
-             if healthcare_entity.links
-             else None,
-         )
-
-     def __hash__(self) -> int:
-         return hash(repr(self))
-
-     def __repr__(self) -> str:
-         return (
-             f"HealthcareEntity(text={self.text}, normalized_text={self.normalized_text}, "
-             f"category={self.category}, subcategory={self.subcategory}, assertion={repr(self.assertion)}, "
-             f"length={self.length}, offset={self.offset}, confidence_score={self.confidence_score}, "
-             f"data_sources={repr(self.data_sources)})"[:1024]
-         )
-
-
- class HealthcareEntityAssertion(DictMixin):
-     """Contains various assertions about a `HealthcareEntity`.
-
-     For example, if an entity is a diagnosis, is this diagnosis 'conditional' on a symptom?
-     Are the doctors 'certain' about this diagnosis? Is this diagnosis 'associated'
-     with another diagnosis?
-     """
-
-     conditionality: Optional[str] = None
-     """Describes whether the healthcare entity it's on is conditional
-     on another entity. For example, "If the patient has a fever, he has pneumonia", the diagnosis of pneumonia
-     is 'conditional' on whether the patient has a fever. Possible values are "hypothetical" and
-     "conditional"."""
-     certainty: Optional[str] = None
-     """Describes how certain the healthcare entity it's on is. For example,
-     in "The patient may have a fever", the fever entity is not 100% certain, but is instead
-     "positivePossible". Possible values are "positive", "positivePossible", "neutralPossible",
-     "negativePossible", and "negative"."""
-     association: Optional[str] = None
-     """Describes whether the healthcare entity it's on is the subject of the document, or
-     if this entity describes someone else in the document. For example, in "The subject's mother has
-     a fever", the "fever" entity is not associated with the subject themselves, but with the subject's
-     mother. Possible values are "subject" and "other"."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.conditionality = kwargs.get("conditionality", None)
-         self.certainty = kwargs.get("certainty", None)
-         self.association = kwargs.get("association", None)
-
-     @classmethod
-     def _from_generated(cls, healthcare_assertion):
-         return cls(
-             conditionality=healthcare_assertion.conditionality,
-             certainty=healthcare_assertion.certainty,
-             association=healthcare_assertion.association,
-         )
-
-     def __repr__(self) -> str:
-         return f"HealthcareEntityAssertion(conditionality={self.conditionality}, certainty={self.certainty}, " \
-             f"association={self.association})"[:1024]
-
-
- class HealthcareEntityDataSource(DictMixin):
-     """
-     HealthcareEntityDataSource contains information representing an entity reference in a known data source.
-     """
-
-     entity_id: str
-     """ID of the entity in the given source catalog."""
-     name: str
-     """The name of the entity catalog from where the entity was identified, such as UMLS, CHV, MSH, etc."""
-
-     def __init__(self, **kwargs: Any) -> None:
-         self.entity_id = kwargs.get("entity_id", None)
-         self.name = kwargs.get("name", None)
-
-     def __repr__(self) -> str:
-         return (
-             f"HealthcareEntityDataSource(entity_id={self.entity_id}, name={self.name})"[:1024]
-         )
-
-
979
- class TextAnalyticsError(DictMixin):
980
- """TextAnalyticsError contains the error code, message, and
981
- other details that explain why the batch or individual document
982
- failed to be processed by the service.
983
- """
984
-
985
- code: str
986
- """Error code. Possible values include
987
- 'invalidRequest', 'invalidArgument', 'internalServerError',
988
- 'serviceUnavailable', 'invalidParameterValue', 'invalidRequestBodyFormat',
989
- 'emptyRequest', 'missingInputRecords', 'invalidDocument', 'modelVersionIncorrect',
990
- 'invalidDocumentBatch', 'unsupportedLanguageCode', 'invalidCountryHint'"""
991
- message: str
992
- """Error message."""
993
- target: Optional[str] = None
994
- """Error target."""
995
-
996
- def __init__(self, **kwargs: Any) -> None:
997
- self.code = kwargs.get("code", None)
998
- self.message = kwargs.get("message", None)
999
- self.target = kwargs.get("target", None)
1000
-
1001
- @classmethod
1002
- def _from_generated(cls, err):
1003
- if err.innererror:
1004
- return cls(
1005
- code=err.innererror.code,
1006
- message=err.innererror.message,
1007
- target=err.innererror.target,
1008
- )
1009
- return cls(code=err.code, message=err.message, target=err.target)
1010
-
1011
- def __repr__(self) -> str:
1012
- return f"TextAnalyticsError(code={self.code}, message={self.message}, target={self.target})"[:1024]
1013
-
1014
-
1015
- class TextAnalyticsWarning(DictMixin):
1016
- """TextAnalyticsWarning contains the warning code and message that explains why
1017
- the response has a warning.
1018
- """
1019
-
1020
- code: str
1021
- """Warning code. Possible values include 'LongWordsInDocument',
1022
- 'DocumentTruncated'."""
1023
- message: str
1024
- """Warning message."""
1025
-
1026
- def __init__(self, **kwargs: Any) -> None:
1027
- self.code = kwargs.get("code", None)
1028
- self.message = kwargs.get("message", None)
1029
-
1030
- @classmethod
1031
- def _from_generated(cls, warning):
1032
- return cls(
1033
- code=warning.code,
1034
- message=warning.message,
1035
- )
1036
-
1037
- def __repr__(self) -> str:
1038
- return f"TextAnalyticsWarning(code={self.code}, message={self.message})"[:1024]
1039
-
1040
-
1041
- class ExtractKeyPhrasesResult(DictMixin):
1042
- """ExtractKeyPhrasesResult is a result object which contains
1043
- the key phrases found in a particular document.
1044
-
1045
- .. versionadded:: 2022-10-01-preview
1046
- The *detected_language* property.
1047
- """
1048
-
1049
- id: str # pylint: disable=redefined-builtin
1050
- """Unique, non-empty document identifier that matches the
1051
- document id that was passed in with the request. If not specified
1052
- in the request, an id is assigned for the document."""
1053
- key_phrases: List[str]
1054
- """A list of representative words or phrases.
1055
- The number of key phrases returned is proportional to the number of words
1056
- in the input document."""
1057
- warnings: List[TextAnalyticsWarning]
1058
- """Warnings encountered while processing document. Results will still be returned
1059
- if there are warnings, but they may not be fully accurate."""
1060
- statistics: Optional["TextDocumentStatistics"] = None
1061
- """If `show_stats=True` was specified in the request this
1062
- field will contain information about the document payload."""
1063
- detected_language: Optional[DetectedLanguage] = None
1064
- """If 'language' is set to 'auto' for the document in the request this
1065
- field will contain the DetectedLanguage for the document."""
1066
- is_error: Literal[False] = False
1067
- """Boolean check for error item when iterating over list of
1068
- results. Always False for an instance of an ExtractKeyPhrasesResult."""
1069
- kind: Literal["KeyPhraseExtraction"] = "KeyPhraseExtraction"
1070
- """The text analysis kind - "KeyPhraseExtraction"."""
1071
-
1072
- def __init__(self, **kwargs: Any) -> None:
1073
- self.id = kwargs.get("id", None)
1074
- self.key_phrases = kwargs.get("key_phrases", None)
1075
- self.warnings = kwargs.get("warnings", [])
1076
- self.statistics = kwargs.get("statistics", None)
1077
- self.detected_language = kwargs.get('detected_language', None)
1078
- self.is_error: Literal[False] = False
1079
- self.kind: Literal["KeyPhraseExtraction"] = "KeyPhraseExtraction"
1080
-
1081
- def __repr__(self) -> str:
1082
- return (
1083
- f"ExtractKeyPhrasesResult(id={self.id}, key_phrases={self.key_phrases}, "
1084
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
1085
- f"detected_language={repr(self.detected_language)}, is_error={self.is_error}, kind={self.kind})"[:1024]
1086
- )
1087
-
1088
-
1089
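A minimal sketch of reading the removed key-phrase result model above (illustrative only; the instance is constructed by hand here, whereas the service normally builds it for you):

    from azure.ai.textanalytics import ExtractKeyPhrasesResult

    result = ExtractKeyPhrasesResult(id="1", key_phrases=["text analytics", "key phrase extraction"], warnings=[])
    if not result.is_error:  # always False for this type
        print(result.id, result.kind, result.key_phrases)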
- class RecognizeLinkedEntitiesResult(DictMixin):
1090
- """RecognizeLinkedEntitiesResult is a result object which contains
1091
- links to a well-known knowledge base, such as Wikipedia or Bing.
1092
-
1093
- .. versionadded:: 2022-10-01-preview
1094
- The *detected_language* property.
1095
- """
1096
-
1097
- id: str # pylint: disable=redefined-builtin
1098
- """Unique, non-empty document identifier that matches the
1099
- document id that was passed in with the request. If not specified
1100
- in the request, an id is assigned for the document."""
1101
- entities: List["LinkedEntity"]
1102
- """Recognized well-known entities in the document."""
1103
- warnings: List[TextAnalyticsWarning]
1104
- """Warnings encountered while processing document. Results will still be returned
1105
- if there are warnings, but they may not be fully accurate."""
1106
- statistics: Optional["TextDocumentStatistics"] = None
1107
- """If `show_stats=True` was specified in the request this
1108
- field will contain information about the document payload."""
1109
- detected_language: Optional[DetectedLanguage] = None
1110
- """If 'language' is set to 'auto' for the document in the request this
1111
- field will contain the DetectedLanguage for the document."""
1112
- is_error: Literal[False] = False
1113
- """Boolean check for error item when iterating over list of
1114
- results. Always False for an instance of a RecognizeLinkedEntitiesResult."""
1115
- kind: Literal["EntityLinking"] = "EntityLinking"
1116
- """The text analysis kind - "EntityLinking"."""
1117
-
1118
- def __init__(self, **kwargs: Any) -> None:
1119
- self.id = kwargs.get("id", None)
1120
- self.entities = kwargs.get("entities", None)
1121
- self.warnings = kwargs.get("warnings", [])
1122
- self.statistics = kwargs.get("statistics", None)
1123
- self.detected_language = kwargs.get('detected_language', None)
1124
- self.is_error: Literal[False] = False
1125
- self.kind: Literal["EntityLinking"] = "EntityLinking"
1126
-
1127
- def __repr__(self) -> str:
1128
- return (
1129
- f"RecognizeLinkedEntitiesResult(id={self.id}, entities={repr(self.entities)}, "
1130
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
1131
- f"detected_language={repr(self.detected_language)}, is_error={self.is_error}, kind={self.kind})"[:1024]
1132
- )
1133
-
1134
-
1135
- class AnalyzeSentimentResult(DictMixin):
1136
- """AnalyzeSentimentResult is a result object which contains
1137
- the overall predicted sentiment and confidence scores for your document
1138
- and a per-sentence sentiment prediction with scores.
1139
-
1140
- .. versionadded:: 2022-10-01-preview
1141
- The *detected_language* property.
1142
- """
1143
-
1144
- id: str # pylint: disable=redefined-builtin
1145
- """Unique, non-empty document identifier that matches the
1146
- document id that was passed in with the request. If not specified
1147
- in the request, an id is assigned for the document."""
1148
- sentiment: str
1149
- """Predicted sentiment for document (Negative,
1150
- Neutral, Positive, or Mixed). Possible values include 'positive',
1151
- 'neutral', 'negative', 'mixed'"""
1152
- confidence_scores: "SentimentConfidenceScores"
1153
- """Document level sentiment confidence
1154
- scores between 0 and 1 for each sentiment label."""
1155
- sentences: List["SentenceSentiment"]
1156
- """Sentence level sentiment analysis."""
1157
- warnings: List[TextAnalyticsWarning]
1158
- """Warnings encountered while processing document. Results will still be returned
1159
- if there are warnings, but they may not be fully accurate."""
1160
- statistics: Optional["TextDocumentStatistics"] = None
1161
- """If `show_stats=True` was specified in the request this
1162
- field will contain information about the document payload."""
1163
- detected_language: Optional[DetectedLanguage] = None
1164
- """If 'language' is set to 'auto' for the document in the request this
1165
- field will contain the DetectedLanguage for the document."""
1166
- is_error: Literal[False] = False
1167
- """Boolean check for error item when iterating over list of
1168
- results. Always False for an instance of an AnalyzeSentimentResult."""
1169
- kind: Literal["SentimentAnalysis"] = "SentimentAnalysis"
1170
- """The text analysis kind - "SentimentAnalysis"."""
1171
-
1172
- def __init__(self, **kwargs: Any) -> None:
1173
- self.id = kwargs.get("id", None)
1174
- self.sentiment = kwargs.get("sentiment", None)
1175
- self.warnings = kwargs.get("warnings", [])
1176
- self.statistics = kwargs.get("statistics", None)
1177
- self.confidence_scores = kwargs.get("confidence_scores", None)
1178
- self.sentences = kwargs.get("sentences", None)
1179
- self.detected_language = kwargs.get('detected_language', None)
1180
- self.is_error: Literal[False] = False
1181
- self.kind: Literal["SentimentAnalysis"] = "SentimentAnalysis"
1182
-
1183
- def __repr__(self) -> str:
1184
- return (
1185
- f"AnalyzeSentimentResult(id={self.id}, sentiment={self.sentiment}, warnings={repr(self.warnings)}, "
1186
- f"statistics={repr(self.statistics)}, confidence_scores={repr(self.confidence_scores)}, "
1187
- f"sentences={repr(self.sentences)}, detected_language={repr(self.detected_language)}, "
1188
- f"is_error={self.is_error}, kind={self.kind})"[:1024]
1189
- )
1190
-
1191
-
1192
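A minimal sketch of consuming the removed sentiment result above (illustrative only; result stands for a hypothetical AnalyzeSentimentResult returned by the 5.x analyze_sentiment call, and only attributes defined above are used):

    # overall document sentiment plus its confidence scores
    print(result.sentiment, result.confidence_scores.positive, result.confidence_scores.negative)
    # per-sentence breakdown
    for sentence in result.sentences:
        print(sentence.text, sentence.sentiment)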
- class TextDocumentStatistics(DictMixin):
1193
- """TextDocumentStatistics contains information about
1194
- the document payload.
1195
- """
1196
-
1197
- character_count: int
1198
- """Number of text elements recognized in
1199
- the document."""
1200
- transaction_count: int
1201
- """Number of transactions for the document."""
1202
-
1203
- def __init__(self, **kwargs: Any) -> None:
1204
- self.character_count = kwargs.get("character_count", None)
1205
- self.transaction_count = kwargs.get("transaction_count", None)
1206
-
1207
- @classmethod
1208
- def _from_generated(cls, stats):
1209
- if stats is None:
1210
- return None
1211
- return cls(
1212
- character_count=stats.characters_count,
1213
- transaction_count=stats.transactions_count,
1214
- )
1215
-
1216
- def __repr__(self) -> str:
1217
- return f"TextDocumentStatistics(character_count={self.character_count}, " \
1218
- f"transaction_count={self.transaction_count})"[:1024]
1219
-
1220
-
1221
- class DocumentError(DictMixin):
1222
- """DocumentError is an error object which represents an error on
1223
- the individual document.
1224
- """
1225
-
1226
- id: str # pylint: disable=redefined-builtin
1227
- """Unique, non-empty document identifier that matches the
1228
- document id that was passed in with the request. If not specified
1229
- in the request, an id is assigned for the document."""
1230
- error: TextAnalyticsError
1231
- """The document error."""
1232
- is_error: Literal[True] = True
1233
- """Boolean check for error item when iterating over list of
1234
- results. Always True for an instance of a DocumentError."""
1235
- kind: Literal["DocumentError"] = "DocumentError"
1236
- """Error kind - "DocumentError"."""
1237
-
1238
- def __init__(self, **kwargs: Any) -> None:
1239
- self.id = kwargs.get("id", None)
1240
- self.error = kwargs.get("error", None)
1241
- self.is_error: Literal[True] = True
1242
- self.kind: Literal["DocumentError"] = "DocumentError"
1243
-
1244
- def __getattr__(self, attr: str) -> Any:
1245
- result_set = set()
1246
- result_set.update(
1247
- RecognizeEntitiesResult().keys() # type: ignore[operator]
1248
- + RecognizePiiEntitiesResult().keys()
1249
- + DetectLanguageResult().keys()
1250
- + RecognizeLinkedEntitiesResult().keys()
1251
- + AnalyzeSentimentResult().keys()
1252
- + ExtractKeyPhrasesResult().keys()
1253
- + AnalyzeHealthcareEntitiesResult().keys()
1254
- + RecognizeCustomEntitiesResult().keys()
1255
- + ClassifyDocumentResult().keys()
1256
- + ExtractSummaryResult().keys()
1257
- + AbstractiveSummaryResult().keys()
1258
- + DynamicClassificationResult().keys()
1259
- )
1260
- result_attrs = result_set.difference(DocumentError().keys())
1261
- if attr in result_attrs:
1262
- raise AttributeError(
1263
- "'DocumentError' object has no attribute '{}'. The service was unable to process this document:\n"
1264
- "Document Id: {}\nError: {} - {}\n".format(
1265
- attr, self.id, self.error.code, self.error.message
1266
- )
1267
- )
1268
- raise AttributeError(
1269
- f"'DocumentError' object has no attribute '{attr}'"
1270
- )
1271
-
1272
- @classmethod
1273
- def _from_generated(cls, doc_err):
1274
- return cls(
1275
- id=doc_err.id,
1276
- error=TextAnalyticsError._from_generated( # pylint: disable=protected-access
1277
- doc_err.error
1278
- ),
1279
- )
1280
-
1281
- def __repr__(self) -> str:
1282
- return f"DocumentError(id={self.id}, error={repr(self.error)}, " \
1283
- f"is_error={self.is_error}, kind={self.kind})"[:1024]
1284
-
1285
-
1286
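A minimal sketch of the is_error filtering pattern that the removed DocumentError model above supports (illustrative only; doc_results stands for a hypothetical per-document result list mixing result objects and DocumentError items):

    for doc in doc_results:
        if doc.is_error:  # DocumentError
            print(f"Document {doc.id} failed: {doc.error.code} - {doc.error.message}")
        else:  # one of the *Result models, e.g. ExtractKeyPhrasesResult
            print(f"Document {doc.id} succeeded with kind {doc.kind}")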
- class DetectLanguageInput(LanguageInput):
1287
- """The input document to be analyzed for detecting language.
1288
-
1289
- :keyword str id: Required. Unique, non-empty document identifier.
1290
- :keyword str text: Required. The input text to process.
1291
- :keyword Optional[str] country_hint: A country hint to help better detect
1292
- the language of the text. Accepts two letter country codes
1293
- specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
1294
- in the string "none" to not use a country_hint.
1295
- """
1296
-
1297
- id: str # pylint: disable=redefined-builtin
1298
- """Required. Unique, non-empty document identifier."""
1299
- text: str
1300
- """Required. The input text to process."""
1301
- country_hint: Optional[str] = None
1302
- """A country hint to help better detect
1303
- the language of the text. Accepts two letter country codes
1304
- specified by ISO 3166-1 alpha-2. Defaults to "US". Pass
1305
- in the string "none" to not use a country_hint."""
1306
-
1307
- def __init__(
1308
- self,
1309
- *,
1310
- id: str, # pylint: disable=redefined-builtin
1311
- text: str,
1312
- country_hint: Optional[str] = None,
1313
- **kwargs: Any # pylint: disable=unused-argument
1314
- ) -> None:
1315
- super().__init__(id=id, text=text, country_hint=country_hint)
1316
- self.id = id
1317
- self.text = text
1318
- self.country_hint = country_hint
1319
-
1320
- def __repr__(self) -> str:
1321
- return f"DetectLanguageInput(id={self.id}, text={self.text}, country_hint={self.country_hint})"[:1024]
1322
-
1323
-
1324
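A minimal sketch of building the removed language-detection input above (illustrative only; it assumes DetectLanguageInput is importable from azure.ai.textanalytics in the 5.x line):

    from azure.ai.textanalytics import DetectLanguageInput

    documents = [
        DetectLanguageInput(id="1", text="Hello world", country_hint="US"),
        DetectLanguageInput(id="2", text="Bonjour tout le monde", country_hint="none"),  # "none" disables the hint
    ]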
- class LinkedEntity(DictMixin):
1325
- """LinkedEntity contains a link to the well-known recognized
1326
- entity in text. The link comes from a data source like Wikipedia
1327
- or Bing. It additionally includes all of the matches of this
1328
- entity found in the document.
1329
-
1330
- .. versionadded:: v3.1
1331
- The *bing_entity_search_api_id* property.
1332
- """
1333
-
1334
- name: str
1335
- """Entity Linking formal name."""
1336
- matches: List["LinkedEntityMatch"]
1337
- """List of instances this entity appears in the text."""
1338
- language: str
1339
- """Language used in the data source."""
1340
- url: str
1341
- """URL to the entity's page from the data source."""
1342
- data_source: str
1343
- """Data source used to extract entity linking,
1344
- such as Wikipedia or Bing."""
1345
- data_source_entity_id: Optional[str] = None
1346
- """Unique identifier of the recognized entity from the data
1347
- source."""
1348
- bing_entity_search_api_id: Optional[str] = None
1349
- """Bing Entity Search unique identifier of the recognized entity.
1350
- Use in conjunction with the Bing Entity Search SDK to fetch additional relevant information."""
1351
-
1352
- def __init__(self, **kwargs: Any) -> None:
1353
- self.name = kwargs.get("name", None)
1354
- self.matches = kwargs.get("matches", None)
1355
- self.language = kwargs.get("language", None)
1356
- self.data_source_entity_id = kwargs.get("data_source_entity_id", None)
1357
- self.url = kwargs.get("url", None)
1358
- self.data_source = kwargs.get("data_source", None)
1359
- self.bing_entity_search_api_id = kwargs.get("bing_entity_search_api_id", None)
1360
-
1361
- @classmethod
1362
- def _from_generated(cls, entity):
1363
- bing_entity_search_api_id = (
1364
- entity.bing_id if hasattr(entity, "bing_id") else None
1365
- )
1366
- return cls(
1367
- name=entity.name,
1368
- matches=[
1369
- LinkedEntityMatch._from_generated(e) # pylint: disable=protected-access
1370
- for e in entity.matches
1371
- ],
1372
- language=entity.language,
1373
- data_source_entity_id=entity.id,
1374
- url=entity.url,
1375
- data_source=entity.data_source,
1376
- bing_entity_search_api_id=bing_entity_search_api_id,
1377
- )
1378
-
1379
- def __repr__(self) -> str:
1380
- return (
1381
- f"LinkedEntity(name={self.name}, matches={repr(self.matches)}, language={self.language}, "
1382
- f"data_source_entity_id={self.data_source_entity_id}, url={self.url}, "
1383
- f"data_source={self.data_source}, bing_entity_search_api_id={self.bing_entity_search_api_id})"[:1024]
1384
- )
1385
-
1386
-
1387
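A minimal sketch of walking the removed linked-entity model above (illustrative only; entity stands for a hypothetical LinkedEntity taken from a RecognizeLinkedEntitiesResult.entities list):

    print(entity.name, entity.url, entity.data_source)
    for match in entity.matches:  # LinkedEntityMatch, defined below
        print(match.text, match.confidence_score, match.offset, match.length)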
- class LinkedEntityMatch(DictMixin):
1388
- """A match for the linked entity found in text. Provides
1389
- the confidence score of the prediction and where the entity
1390
- was found in the text.
1391
-
1392
- .. versionadded:: v3.1
1393
- The *offset* and *length* properties.
1394
- """
1395
-
1396
- confidence_score: float
1397
- """If a well-known item is recognized, a
1398
- decimal number denoting the confidence level between 0 and 1 will be
1399
- returned."""
1400
- text: str
1401
- """Entity text as appears in the request."""
1402
- length: int
1403
- """The linked entity match text length. This value depends on the value of the
1404
- `string_index_type` parameter set in the original request, which is UnicodeCodePoints by default."""
1405
- offset: int
1406
- """The linked entity match text offset from the start of the document.
1407
- The value depends on the value of the `string_index_type` parameter
1408
- set in the original request, which is UnicodeCodePoints by default."""
1409
-
1410
- def __init__(self, **kwargs: Any) -> None:
1411
- self.confidence_score = kwargs.get("confidence_score", None)
1412
- self.text = kwargs.get("text", None)
1413
- self.length = kwargs.get("length", None)
1414
- self.offset = kwargs.get("offset", None)
1415
-
1416
- @classmethod
1417
- def _from_generated(cls, match):
1418
- offset = match.offset
1419
- length = match.length
1420
- if isinstance(match, _v3_0_models.Match):
1421
- # we do not return offset for v3.0 since
1422
- # the correct encoding was not introduced for v3.0
1423
- offset = None
1424
- length = None
1425
- return cls(
1426
- confidence_score=match.confidence_score,
1427
- text=match.text,
1428
- length=length,
1429
- offset=offset,
1430
- )
1431
-
1432
- def __repr__(self) -> str:
1433
- return f"LinkedEntityMatch(confidence_score={self.confidence_score}, text={self.text}, " \
1434
- f"length={self.length}, offset={self.offset})"[:1024]
1435
-
1436
-
1437
- class TextDocumentInput(DictMixin, MultiLanguageInput):
1438
- """The input document to be analyzed by the service.
1439
-
1440
- :keyword str id: Required. Unique, non-empty document identifier.
1441
- :keyword str text: Required. The input text to process.
1442
- :keyword str language: This is the 2 letter ISO 639-1 representation
1443
- of a language. For example, use "en" for English; "es" for Spanish etc.
1444
- For automatic language detection, use "auto" (Only supported by long-running
1445
- operation APIs with API version 2022-10-01-preview or newer). If
1446
- not set, uses "en" for English as default.
1447
-
1448
- .. versionadded:: 2022-10-01-preview
1449
- The 'auto' option for language.
1450
- """
1451
-
1452
- id: str # pylint: disable=redefined-builtin
1453
- """Required. Unique, non-empty document identifier."""
1454
- text: str
1455
- """Required. The input text to process."""
1456
- language: Optional[str] = None
1457
- """This is the 2 letter ISO 639-1 representation
1458
- of a language. For example, use "en" for English; "es" for Spanish etc.
1459
- For automatic language detection, use "auto" (Only supported by long-running
1460
- operation APIs with API version 2022-10-01-preview or newer). If
1461
- not set, uses "en" for English as default."""
1462
-
1463
- def __init__(
1464
- self,
1465
- *,
1466
- id: str, # pylint: disable=redefined-builtin
1467
- text: str,
1468
- language: Optional[str] = None,
1469
- **kwargs: Any # pylint: disable=unused-argument
1470
- ) -> None:
1471
- super().__init__(id=id, text=text, language=language)
1472
- self.id = id
1473
- self.text = text
1474
- self.language = language
1475
-
1476
- def __repr__(self) -> str:
1477
- return f"TextDocumentInput(id={self.id}, text={self.text}, language={self.language})"[:1024]
1478
-
1479
-
1480
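A minimal sketch of building the removed multi-language input above (illustrative only; it assumes TextDocumentInput is importable from azure.ai.textanalytics in the 5.x line):

    from azure.ai.textanalytics import TextDocumentInput

    documents = [
        TextDocumentInput(id="1", text="The food was delicious.", language="en"),
        TextDocumentInput(id="2", text="El servicio fue excelente.", language="es"),
    ]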
- class TextDocumentBatchStatistics(DictMixin):
1481
- """TextDocumentBatchStatistics contains information about the
1482
- request payload. Note: This object is not returned
1483
- in the response and needs to be retrieved by a response hook.
1484
- """
1485
-
1486
- document_count: int
1487
- """Number of documents submitted in the request"""
1488
- valid_document_count: int
1489
- """Number of valid documents. This
1490
- excludes empty documents, documents over the size limit, and documents in unsupported languages."""
1491
- erroneous_document_count: int
1492
- """Number of invalid documents.
1493
- This includes empty documents, documents over the size limit, and documents in unsupported languages."""
1494
- transaction_count: int
1495
- """Number of transactions for the request."""
1496
-
1497
- def __init__(self, **kwargs: Any) -> None:
1498
- self.document_count = kwargs.get("document_count", None)
1499
- self.valid_document_count = kwargs.get("valid_document_count", None)
1500
- self.erroneous_document_count = kwargs.get("erroneous_document_count", None)
1501
- self.transaction_count = kwargs.get("transaction_count", None)
1502
-
1503
- @classmethod
1504
- def _from_generated(cls, statistics):
1505
- if statistics is None:
1506
- return None
1507
- return cls(
1508
- document_count=statistics["documentsCount"],
1509
- valid_document_count=statistics["validDocumentsCount"],
1510
- erroneous_document_count=statistics["erroneousDocumentsCount"],
1511
- transaction_count=statistics["transactionsCount"],
1512
- )
1513
-
1514
- def __repr__(self) -> str:
1515
- return (
1516
- f"TextDocumentBatchStatistics(document_count={self.document_count}, "
1517
- f"valid_document_count={self.valid_document_count}, "
1518
- f"erroneous_document_count={self.erroneous_document_count}, "
1519
- f"transaction_count={self.transaction_count})"[:1024]
1520
- )
1521
-
1522
-
1523
- class SentenceSentiment(DictMixin):
1524
- """SentenceSentiment contains the predicted sentiment and
1525
- confidence scores for each individual sentence in the document.
1526
-
1527
- .. versionadded:: v3.1
1528
- The *offset*, *length*, and *mined_opinions* properties.
1529
- """
1530
-
1531
- text: str
1532
- """The sentence text."""
1533
- sentiment: str
1534
- """The predicted Sentiment for the sentence.
1535
- Possible values include 'positive', 'neutral', 'negative'"""
1536
- confidence_scores: "SentimentConfidenceScores"
1537
- """The sentiment confidence score between 0
1538
- and 1 for the sentence for all labels."""
1539
- length: int
1540
- """The sentence text length. This value depends on the value of the
1541
- `string_index_type` parameter set in the original request, which is UnicodeCodePoints
1542
- by default."""
1543
- offset: int
1544
- """The sentence text offset from the start of the document.
1545
- The value depends on the value of the `string_index_type` parameter
1546
- set in the original request, which is UnicodeCodePoints by default."""
1547
- mined_opinions: Optional[List["MinedOpinion"]] = None
1548
- """The list of opinions mined from this sentence.
1549
- For example in the sentence "The food is good, but the service is bad", we would
1550
- mine the two opinions "food is good" and "service is bad". Only returned
1551
- if `show_opinion_mining` is set to True in the call to `analyze_sentiment` and
1552
- api version is v3.1 and up."""
1553
-
1554
- def __init__(self, **kwargs: Any) -> None:
1555
- self.text = kwargs.get("text", None)
1556
- self.sentiment = kwargs.get("sentiment", None)
1557
- self.confidence_scores = kwargs.get("confidence_scores", None)
1558
- self.length = kwargs.get("length", None)
1559
- self.offset = kwargs.get("offset", None)
1560
- self.mined_opinions = kwargs.get("mined_opinions", None)
1561
-
1562
- @classmethod
1563
- def _from_generated(cls, sentence, results, sentiment):
1564
- offset = sentence.offset
1565
- length = sentence.length
1566
- if isinstance(sentence, _v3_0_models.SentenceSentiment):
1567
- # we do not return offset for v3.0 since
1568
- # the correct encoding was not introduced for v3.0
1569
- offset = None
1570
- length = None
1571
- if hasattr(sentence, "targets"):
1572
- mined_opinions = (
1573
- [
1574
- MinedOpinion._from_generated( # pylint: disable=protected-access
1575
- target, results, sentiment
1576
- )
1577
- for target in sentence.targets
1578
- ]
1579
- if sentence.targets
1580
- else []
1581
- )
1582
- else:
1583
- mined_opinions = None
1584
- return cls(
1585
- text=sentence.text,
1586
- sentiment=sentence.sentiment,
1587
- confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
1588
- sentence.confidence_scores
1589
- ),
1590
- length=length,
1591
- offset=offset,
1592
- mined_opinions=mined_opinions,
1593
- )
1594
-
1595
- def __repr__(self) -> str:
1596
- return (
1597
- f"SentenceSentiment(text={self.text}, sentiment={self.sentiment}, "
1598
- f"confidence_scores={repr(self.confidence_scores)}, length={self.length}, "
1599
- f"offset={self.offset}, mined_opinions={repr(self.mined_opinions)})"[:1024]
1600
- )
1601
-
1602
-
1603
- class MinedOpinion(DictMixin):
1604
- """A mined opinion object represents an opinion we've extracted from a sentence.
1605
- It consists of a target that the opinion is about, and the assessments
1606
- representing the opinion.
1607
- """
1608
-
1609
- target: "TargetSentiment"
1610
- """The target of an opinion about a product/service."""
1611
- assessments: List["AssessmentSentiment"]
1612
- """The assessments representing the opinion of the target."""
1613
-
1614
- def __init__(self, **kwargs: Any) -> None:
1615
- self.target = kwargs.get("target", None)
1616
- self.assessments = kwargs.get("assessments", None)
1617
-
1618
- @staticmethod
1619
- def _get_assessments(
1620
- relations, results, sentiment
1621
- ): # pylint: disable=unused-argument
1622
- if not relations:
1623
- return []
1624
- assessment_relations = [
1625
- r.ref for r in relations if r.relation_type == "assessment"
1626
- ]
1627
- assessments = []
1628
- for assessment_relation in assessment_relations:
1629
- numbers = _get_indices(assessment_relation)
1630
- sentence_index = numbers[1]
1631
- assessment_index = numbers[2]
1632
- assessments.append(
1633
- sentiment.sentences[sentence_index].assessments[assessment_index]
1634
- )
1635
- return assessments
1636
-
1637
- @classmethod
1638
- def _from_generated(cls, target, results, sentiment):
1639
- return cls(
1640
- target=TargetSentiment._from_generated( # pylint: disable=protected-access
1641
- target
1642
- ),
1643
- assessments=[
1644
- AssessmentSentiment._from_generated( # pylint: disable=protected-access
1645
- assessment
1646
- )
1647
- for assessment in cls._get_assessments(
1648
- target.relations, results, sentiment
1649
- )
1650
- ],
1651
- )
1652
-
1653
- def __repr__(self) -> str:
1654
- return f"MinedOpinion(target={repr(self.target)}, assessments={repr(self.assessments)})"[:1024]
1655
-
1656
-
1657
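A minimal sketch of walking the removed opinion-mining models above (illustrative only; sentence stands for a hypothetical SentenceSentiment produced with show_opinion_mining=True):

    for opinion in sentence.mined_opinions or []:
        print(opinion.target.text, opinion.target.sentiment)  # TargetSentiment, defined below
        for assessment in opinion.assessments:  # AssessmentSentiment, defined below
            print(" ", assessment.text, assessment.sentiment, assessment.is_negated)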
- class TargetSentiment(DictMixin):
1658
- """TargetSentiment contains the predicted sentiment,
1659
- confidence scores and other information about a key component of a product/service.
1660
- For example, in "The food at Hotel Foo is good", "food" is a key component of
1661
- "Hotel Foo".
1662
- """
1663
-
1664
- text: str
1665
- """The text value of the target."""
1666
- sentiment: str
1667
- """The predicted Sentiment for the target. Possible values
1668
- include 'positive', 'mixed', and 'negative'."""
1669
- confidence_scores: "SentimentConfidenceScores"
1670
- """The sentiment confidence score between 0
1671
- and 1 for the target for 'positive' and 'negative' labels. Its score
1672
- for 'neutral' will always be 0."""
1673
- length: int
1674
- """The target text length. This value depends on the value of the
1675
- `string_index_type` parameter set in the original request, which is UnicodeCodePoints
1676
- by default."""
1677
- offset: int
1678
- """The target text offset from the start of the document.
1679
- The value depends on the value of the `string_index_type` parameter
1680
- set in the original request, which is UnicodeCodePoints by default."""
1681
-
1682
- def __init__(self, **kwargs: Any) -> None:
1683
- self.text = kwargs.get("text", None)
1684
- self.sentiment = kwargs.get("sentiment", None)
1685
- self.confidence_scores = kwargs.get("confidence_scores", None)
1686
- self.length = kwargs.get("length", None)
1687
- self.offset = kwargs.get("offset", None)
1688
-
1689
- @classmethod
1690
- def _from_generated(cls, target):
1691
- return cls(
1692
- text=target.text,
1693
- sentiment=target.sentiment,
1694
- confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
1695
- target.confidence_scores
1696
- ),
1697
- length=target.length,
1698
- offset=target.offset,
1699
- )
1700
-
1701
- def __repr__(self) -> str:
1702
- return (
1703
- f"TargetSentiment(text={self.text}, sentiment={self.sentiment}, "
1704
- f"confidence_scores={repr(self.confidence_scores)}, "
1705
- f"length={self.length}, offset={self.offset})"[:1024]
1706
- )
1707
-
1708
-
1709
- class AssessmentSentiment(DictMixin):
1710
- """AssessmentSentiment contains the predicted sentiment,
1711
- confidence scores and other information about an assessment given about
1712
- a particular target. For example, in the sentence "The food is good", the assessment
1713
- of the target 'food' is 'good'.
1714
- """
1715
-
1716
- text: str
1717
- """The assessment text."""
1718
- sentiment: str
1719
- """The predicted Sentiment for the assessment. Possible values
1720
- include 'positive', 'mixed', and 'negative'."""
1721
- confidence_scores: "SentimentConfidenceScores"
1722
- """The sentiment confidence score between 0
1723
- and 1 for the assessment for 'positive' and 'negative' labels. Its score
1724
- for 'neutral' will always be 0."""
1725
- length: int
1726
- """The assessment text length. This value depends on the value of the
1727
- `string_index_type` parameter set in the original request, which is UnicodeCodePoints
1728
- by default."""
1729
- offset: int
1730
- """The assessment text offset from the start of the document.
1731
- The value depends on the value of the `string_index_type` parameter
1732
- set in the original request, which is UnicodeCodePoints by default."""
1733
- is_negated: bool
1734
- """Whether the value of the assessment is negated. For example, in
1735
- "The food is not good", the assessment "good" is negated."""
1736
-
1737
- def __init__(self, **kwargs: Any) -> None:
1738
- self.text = kwargs.get("text", None)
1739
- self.sentiment = kwargs.get("sentiment", None)
1740
- self.confidence_scores = kwargs.get("confidence_scores", None)
1741
- self.length = kwargs.get("length", None)
1742
- self.offset = kwargs.get("offset", None)
1743
- self.is_negated = kwargs.get("is_negated", None)
1744
-
1745
- @classmethod
1746
- def _from_generated(cls, assessment):
1747
- return cls(
1748
- text=assessment.text,
1749
- sentiment=assessment.sentiment,
1750
- confidence_scores=SentimentConfidenceScores._from_generated( # pylint: disable=protected-access
1751
- assessment.confidence_scores
1752
- ),
1753
- length=assessment.length,
1754
- offset=assessment.offset,
1755
- is_negated=assessment.is_negated,
1756
- )
1757
-
1758
- def __repr__(self) -> str:
1759
- return (
1760
- f"AssessmentSentiment(text={self.text}, sentiment={self.sentiment}, "
1761
- f"confidence_scores={repr(self.confidence_scores)}, length={self.length}, "
1762
- f"offset={self.offset}, is_negated={self.is_negated})"[:1024]
1763
- )
1764
-
1765
-
1766
- class SentimentConfidenceScores(DictMixin):
1767
- """The confidence scores (Softmax scores) between 0 and 1.
1768
- Higher values indicate higher confidence.
1769
- """
1770
-
1771
- positive: float
1772
- """Positive score."""
1773
- neutral: float
1774
- """Neutral score."""
1775
- negative: float
1776
- """Negative score."""
1777
-
1778
- def __init__(self, **kwargs: Any) -> None:
1779
- self.positive = kwargs.get("positive", 0.0)
1780
- self.neutral = kwargs.get("neutral", 0.0)
1781
- self.negative = kwargs.get("negative", 0.0)
1782
-
1783
- @classmethod
1784
- def _from_generated(cls, score):
1785
- return cls(
1786
- positive=score.positive,
1787
- neutral=score.neutral if hasattr(score, "neutral") else 0.0,
1788
- negative=score.negative,
1789
- )
1790
-
1791
- def __repr__(self) -> str:
1792
- return f"SentimentConfidenceScores(positive={self.positive}, " \
1793
- f"neutral={self.neutral}, negative={self.negative})"[:1024]
1794
-
1795
-
1796
- class _AnalyzeActionsType(str, Enum, metaclass=CaseInsensitiveEnumMeta):
1797
- """The type of action that was applied to the documents"""
1798
-
1799
- RECOGNIZE_ENTITIES = "recognize_entities" #: Entities Recognition action.
1800
- RECOGNIZE_PII_ENTITIES = (
1801
- "recognize_pii_entities" #: PII Entities Recognition action.
1802
- )
1803
- EXTRACT_KEY_PHRASES = "extract_key_phrases" #: Key Phrase Extraction action.
1804
- RECOGNIZE_LINKED_ENTITIES = (
1805
- "recognize_linked_entities" #: Linked Entities Recognition action.
1806
- )
1807
- ANALYZE_SENTIMENT = "analyze_sentiment" #: Sentiment Analysis action.
1808
- RECOGNIZE_CUSTOM_ENTITIES = "recognize_custom_entities"
1809
- SINGLE_LABEL_CLASSIFY = "single_label_classify"
1810
- MULTI_LABEL_CLASSIFY = "multi_label_classify"
1811
- ANALYZE_HEALTHCARE_ENTITIES = "analyze_healthcare_entities"
1812
- EXTRACT_SUMMARY = "extract_summary"
1813
- ABSTRACT_SUMMARY = "abstract_summary"
1814
-
1815
-
1816
- class ActionPointerKind(str, Enum, metaclass=CaseInsensitiveEnumMeta):
1817
- """v3.1 only"""
1818
- RECOGNIZE_ENTITIES = "entityRecognitionTasks"
1819
- RECOGNIZE_PII_ENTITIES = "piiEntityRecognitionTasks"
1820
- EXTRACT_KEY_PHRASES = "keyPhraseExtractionTasks"
1821
- RECOGNIZE_LINKED_ENTITIES = "entityLinkingTasks"
1822
- ANALYZE_SENTIMENT = "sentimentAnalysisTasks"
1823
-
1824
-
1825
- class RecognizeEntitiesAction(DictMixin):
1826
- """RecognizeEntitiesAction encapsulates the parameters for starting a long-running Entities Recognition operation.
1827
-
1828
- If you just want to recognize entities in a list of documents, and not perform multiple
1829
- long running actions on the input of documents, call method `recognize_entities` instead
1830
- of interfacing with this model.
1831
-
1832
- :keyword Optional[str] model_version: The model version to use for the analysis.
1833
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
1834
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
1835
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
1836
- see https://aka.ms/text-analytics-offsets
1837
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
1838
- logged on the service side for troubleshooting. By default, the Language service logs your
1839
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1840
- the service's natural language processing functions. Setting this parameter to true
1841
- disables input logging and may limit our ability to remediate issues that occur. Please see
1842
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1843
- additional details, and Microsoft Responsible AI principles at
1844
- https://www.microsoft.com/ai/responsible-ai.
1845
- """
1846
-
1847
- model_version: Optional[str] = None
1848
- """The model version to use for the analysis."""
1849
- string_index_type: Optional[str] = None
1850
- """Specifies the method used to interpret string offsets.
1851
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
1852
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
1853
- see https://aka.ms/text-analytics-offsets"""
1854
- disable_service_logs: Optional[bool] = None
1855
- """If set to true, you opt-out of having your text input
1856
- logged on the service side for troubleshooting. By default, the Language service logs your
1857
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1858
- the service's natural language processing functions. Setting this parameter to true,
1859
- disables input logging and may limit our ability to remediate issues that occur. Please see
1860
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1861
- additional details, and Microsoft Responsible AI principles at
1862
- https://www.microsoft.com/ai/responsible-ai."""
1863
-
1864
- def __init__(
1865
- self,
1866
- *,
1867
- model_version: Optional[str] = None,
1868
- string_index_type: Optional[str] = None,
1869
- disable_service_logs: Optional[bool] = None,
1870
- **kwargs: Any
1871
- ) -> None:
1872
- self.model_version = model_version
1873
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
1874
- self.disable_service_logs = disable_service_logs
1875
-
1876
- def __repr__(self) -> str:
1877
- return f"RecognizeEntitiesAction(model_version={self.model_version}, " \
1878
- f"string_index_type={self.string_index_type}, " \
1879
- f"disable_service_logs={self.disable_service_logs})"[:1024]
1880
-
1881
- def _to_generated(self, api_version, task_id):
1882
- if is_language_api(api_version):
1883
- return _v2022_10_01_preview_models.EntitiesLROTask(
1884
- task_name=task_id,
1885
- parameters=_v2022_10_01_preview_models.EntitiesTaskParameters(
1886
- model_version=self.model_version,
1887
- string_index_type=string_index_type_compatibility(self.string_index_type),
1888
- logging_opt_out=self.disable_service_logs,
1889
- )
1890
- )
1891
-
1892
- return _v3_1_models.EntitiesTask(
1893
- parameters=_v3_1_models.EntitiesTaskParameters(
1894
- model_version=self.model_version,
1895
- string_index_type=self.string_index_type,
1896
- logging_opt_out=self.disable_service_logs,
1897
- ),
1898
- task_name=task_id
1899
- )
1900
-
1901
-
1902
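A minimal sketch of configuring the removed action model above (illustrative only; it assumes RecognizeEntitiesAction is importable from azure.ai.textanalytics in the 5.x line):

    from azure.ai.textanalytics import RecognizeEntitiesAction

    # all parameters are keyword-only and optional
    action = RecognizeEntitiesAction(string_index_type="Utf16CodeUnit", disable_service_logs=True)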
- class AnalyzeSentimentAction(DictMixin):
1903
- """AnalyzeSentimentAction encapsulates the parameters for starting a long-running
1904
- Sentiment Analysis operation.
1905
-
1906
- If you just want to analyze sentiment in a list of documents, and not perform multiple
1907
- long running actions on the input of documents, call method `analyze_sentiment` instead
1908
- of interfacing with this model.
1909
-
1910
- :keyword Optional[str] model_version: The model version to use for the analysis.
1911
- :keyword Optional[bool] show_opinion_mining: Whether to mine the opinions of a sentence and conduct more
1912
- granular analysis around the aspects of a product or service (also known as
1913
- aspect-based sentiment analysis). If set to true, the returned
1914
- :class:`~azure.ai.textanalytics.SentenceSentiment` objects
1915
- will have property `mined_opinions` containing the result of this analysis.
1916
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
1917
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
1918
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
1919
- see https://aka.ms/text-analytics-offsets
1920
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
1921
- logged on the service side for troubleshooting. By default, the Language service logs your
1922
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1923
- the service's natural language processing functions. Setting this parameter to true
1924
- disables input logging and may limit our ability to remediate issues that occur. Please see
1925
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1926
- additional details, and Microsoft Responsible AI principles at
1927
- https://www.microsoft.com/ai/responsible-ai.
1928
- """
1929
-
1930
- show_opinion_mining: Optional[bool] = None
1931
- """Whether to mine the opinions of a sentence and conduct more
1932
- granular analysis around the aspects of a product or service (also known as
1933
- aspect-based sentiment analysis). If set to true, the returned
1934
- :class:`~azure.ai.textanalytics.SentenceSentiment` objects
1935
- will have property `mined_opinions` containing the result of this analysis."""
1936
- model_version: Optional[str] = None
1937
- """The model version to use for the analysis."""
1938
- string_index_type: Optional[str] = None
1939
- """Specifies the method used to interpret string offsets.
1940
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
1941
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
1942
- see https://aka.ms/text-analytics-offsets"""
1943
- disable_service_logs: Optional[bool] = None
1944
- """If set to true, you opt-out of having your text input
1945
- logged on the service side for troubleshooting. By default, the Language service logs your
1946
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
1947
- the service's natural language processing functions. Setting this parameter to true
1948
- disables input logging and may limit our ability to remediate issues that occur. Please see
1949
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
1950
- additional details, and Microsoft Responsible AI principles at
1951
- https://www.microsoft.com/ai/responsible-ai."""
1952
-
1953
- def __init__(
1954
- self,
1955
- *,
1956
- show_opinion_mining: Optional[bool] = None,
1957
- model_version: Optional[str] = None,
1958
- string_index_type: Optional[str] = None,
1959
- disable_service_logs: Optional[bool] = None,
1960
- **kwargs: Any
1961
- ) -> None:
1962
- self.model_version = model_version
1963
- self.show_opinion_mining = show_opinion_mining
1964
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
1965
- self.disable_service_logs = disable_service_logs
1966
-
1967
- def __repr__(self) -> str:
1968
- return (
1969
- f"AnalyzeSentimentAction(model_version={self.model_version}, "
1970
- f"show_opinion_mining={self.show_opinion_mining}, "
1971
- f"string_index_type={self.string_index_type}, "
1972
- f"disable_service_logs={self.disable_service_logs}"[:1024]
1973
- )
1974
-
1975
- def _to_generated(self, api_version, task_id):
1976
- if is_language_api(api_version):
1977
- return _v2022_10_01_preview_models.SentimentAnalysisLROTask(
1978
- task_name=task_id,
1979
- parameters=_v2022_10_01_preview_models.SentimentAnalysisTaskParameters(
1980
- model_version=self.model_version,
1981
- opinion_mining=self.show_opinion_mining,
1982
- string_index_type=string_index_type_compatibility(self.string_index_type),
1983
- logging_opt_out=self.disable_service_logs,
1984
- )
1985
- )
1986
- return _v3_1_models.SentimentAnalysisTask(
1987
- parameters=_v3_1_models.SentimentAnalysisTaskParameters(
1988
- model_version=self.model_version,
1989
- opinion_mining=self.show_opinion_mining,
1990
- string_index_type=self.string_index_type,
1991
- logging_opt_out=self.disable_service_logs,
1992
- ),
1993
- task_name=task_id
1994
- )
1995
-
1996
-
1997
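A minimal sketch of opting into opinion mining through the removed action model above (illustrative only; same import assumption as the previous sketch):

    from azure.ai.textanalytics import AnalyzeSentimentAction

    # show_opinion_mining=True makes SentenceSentiment.mined_opinions get populated
    action = AnalyzeSentimentAction(show_opinion_mining=True)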
- class RecognizePiiEntitiesAction(DictMixin):
1998
- """RecognizePiiEntitiesAction encapsulates the parameters for starting a long-running PII
1999
- Entities Recognition operation. See more information in the service docs: https://aka.ms/azsdk/language/pii
2000
-
2001
- If you just want to recognize PII entities in a list of documents, and not perform multiple
2002
- long running actions on the input of documents, call method `recognize_pii_entities` instead
2003
- of interfacing with this model.
2004
-
2005
- :keyword Optional[str] model_version: The model version to use for the analysis.
2006
- :keyword Optional[str] domain_filter: An optional string to set the PII domain to include only a
2007
- subset of the PII entity categories. Possible values include 'phi' or None.
2008
- :keyword categories_filter: Instead of filtering over all PII entity categories, you can pass in a list of
2009
- the specific PII entity categories you want to filter out. For example, if you only want to filter out
2010
- U.S. social security numbers in a document, you can pass in
2011
- `[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg.
2012
- :paramtype categories_filter: Optional[list[str or ~azure.ai.textanalytics.PiiEntityCategory]]
2013
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
2014
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2015
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2016
- see https://aka.ms/text-analytics-offsets
2017
- :keyword Optional[bool] disable_service_logs: Defaults to true, meaning that the Language service will not log your
2018
- input text on the service side for troubleshooting. If set to False, the Language service logs your
2019
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2020
- the service's natural language processing functions. Please see
2021
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2022
- additional details, and Microsoft Responsible AI principles at
2023
- https://www.microsoft.com/ai/responsible-ai.
2024
- """
2025
-
2026
- categories_filter: Optional[List[Union[str, PiiEntityCategory]]] = None
2027
- """Instead of filtering over all PII entity categories, you can pass in a list of
2028
- the specific PII entity categories you want to filter out. For example, if you only want to filter out
2029
- U.S. social security numbers in a document, you can pass in
2030
- `[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER]` for this kwarg."""
2031
- domain_filter: Optional[str] = None
2032
- """An optional string to set the PII domain to include only a
2033
- subset of the PII entity categories. Possible values include 'phi' or None."""
2034
- model_version: Optional[str] = None
2035
- """The model version to use for the analysis."""
2036
- string_index_type: Optional[str] = None
2037
- """Specifies the method used to interpret string offsets.
2038
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2039
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2040
- see https://aka.ms/text-analytics-offsets"""
2041
- disable_service_logs: Optional[bool] = None
2042
- """Defaults to true, meaning that the Language service will not log your
2043
- input text on the service side for troubleshooting. If set to False, the Language service logs your
2044
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2045
- the service's natural language processing functions. Please see
2046
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2047
- additional details, and Microsoft Responsible AI principles at
2048
- https://www.microsoft.com/ai/responsible-ai."""
2049
-
2050
- def __init__(
2051
- self,
2052
- *,
2053
- categories_filter: Optional[List[Union[str, PiiEntityCategory]]] = None,
2054
- domain_filter: Optional[str] = None,
2055
- model_version: Optional[str] = None,
2056
- string_index_type: Optional[str] = None,
2057
- disable_service_logs: Optional[bool] = None,
2058
- **kwargs: Any
2059
- ) -> None:
2060
- self.model_version = model_version
2061
- self.domain_filter = domain_filter
2062
- self.categories_filter = categories_filter
2063
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
2064
- self.disable_service_logs = disable_service_logs
2065
-
2066
- def __repr__(self) -> str:
2067
- return (
2068
- f"RecognizePiiEntitiesAction(model_version={self.model_version}, "
2069
- f"domain_filter={self.domain_filter}, categories_filter={self.categories_filter}, "
2070
- f"string_index_type={self.string_index_type}, "
2071
- f"disable_service_logs={self.disable_service_logs}"[:1024]
2072
- )
2073
-
2074
- def _to_generated(self, api_version, task_id):
2075
- if is_language_api(api_version):
2076
- return _v2022_10_01_preview_models.PiiLROTask(
2077
- task_name=task_id,
2078
- parameters=_v2022_10_01_preview_models.PiiTaskParameters(
2079
- model_version=self.model_version,
2080
- domain=self.domain_filter,
2081
- pii_categories=self.categories_filter,
2082
- string_index_type=string_index_type_compatibility(self.string_index_type),
2083
- logging_opt_out=self.disable_service_logs,
2084
- )
2085
- )
2086
-
2087
- return _v3_1_models.PiiTask(
2088
- parameters=_v3_1_models.PiiTaskParameters(
2089
- model_version=self.model_version,
2090
- domain=self.domain_filter,
2091
- pii_categories=self.categories_filter,
2092
- string_index_type=self.string_index_type,
2093
- logging_opt_out=self.disable_service_logs,
2094
- ),
2095
- task_name=task_id
2096
- )
2097
-
2098
-
2099
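A minimal sketch of narrowing PII detection through the removed action model above (illustrative only; it assumes RecognizePiiEntitiesAction and PiiEntityCategory are importable from azure.ai.textanalytics in the 5.x line):

    from azure.ai.textanalytics import PiiEntityCategory, RecognizePiiEntitiesAction

    action = RecognizePiiEntitiesAction(
        domain_filter="phi",  # restrict to the protected-health-information domain
        categories_filter=[PiiEntityCategory.US_SOCIAL_SECURITY_NUMBER],
    )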
- class ExtractKeyPhrasesAction(DictMixin):
2100
- """ExtractKeyPhrasesAction encapsulates the parameters for starting a long-running key phrase
2101
- extraction operation.
2102
-
2103
- If you just want to extract key phrases from a list of documents, and not perform multiple
2104
- long running actions on the input of documents, call method `extract_key_phrases` instead
2105
- of interfacing with this model.
2106
-
2107
- :keyword Optional[str] model_version: The model version to use for the analysis.
2108
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2109
- logged on the service side for troubleshooting. By default, the Language service logs your
2110
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2111
- the service's natural language processing functions. Setting this parameter to true
2112
- disables input logging and may limit our ability to remediate issues that occur. Please see
2113
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2114
- additional details, and Microsoft Responsible AI principles at
2115
- https://www.microsoft.com/ai/responsible-ai.
2116
- """
2117
-
2118
- model_version: Optional[str] = None
2119
- """The model version to use for the analysis."""
2120
- disable_service_logs: Optional[bool] = None
2121
- """If set to true, you opt-out of having your text input
2122
- logged on the service side for troubleshooting. By default, the Language service logs your
2123
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2124
- the service's natural language processing functions. Setting this parameter to true
2125
- disables input logging and may limit our ability to remediate issues that occur. Please see
2126
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2127
- additional details, and Microsoft Responsible AI principles at
2128
- https://www.microsoft.com/ai/responsible-ai."""
2129
-
2130
- def __init__(
2131
- self,
2132
- *,
2133
- model_version: Optional[str] = None,
2134
- disable_service_logs: Optional[bool] = None,
2135
- **kwargs: Any
2136
- ) -> None:
2137
- self.model_version = model_version
2138
- self.disable_service_logs = disable_service_logs
2139
-
2140
- def __repr__(self) -> str:
2141
- return f"ExtractKeyPhrasesAction(model_version={self.model_version}, " \
2142
- f"disable_service_logs={self.disable_service_logs})"[:1024]
2143
-
2144
- def _to_generated(self, api_version, task_id):
2145
- if is_language_api(api_version):
2146
- return _v2022_10_01_preview_models.KeyPhraseLROTask(
2147
- task_name=task_id,
2148
- parameters=_v2022_10_01_preview_models.KeyPhraseTaskParameters(
2149
- model_version=self.model_version,
2150
- logging_opt_out=self.disable_service_logs,
2151
- )
2152
- )
2153
-
2154
- return _v3_1_models.KeyPhrasesTask(
2155
- parameters=_v3_1_models.KeyPhrasesTaskParameters(
2156
- model_version=self.model_version,
2157
- logging_opt_out=self.disable_service_logs,
2158
- ),
2159
- task_name=task_id
2160
- )
2161
-
2162
-
2163
- class RecognizeLinkedEntitiesAction(DictMixin):
2164
- """RecognizeLinkedEntitiesAction encapsulates the parameters for starting a long-running Linked Entities
2165
- Recognition operation.
2166
-
2167
- If you just want to recognize linked entities in a list of documents, and not perform multiple
2168
- long running actions on the input of documents, call method `recognize_linked_entities` instead
2169
- of interfacing with this model.
2170
-
2171
- :keyword Optional[str] model_version: The model version to use for the analysis.
2172
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
2173
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2174
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2175
- see https://aka.ms/text-analytics-offsets
2176
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2177
- logged on the service side for troubleshooting. By default, the Language service logs your
2178
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2179
- the service's natural language processing functions. Setting this parameter to true
2180
- disables input logging and may limit our ability to remediate issues that occur. Please see
2181
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2182
- additional details, and Microsoft Responsible AI principles at
2183
- https://www.microsoft.com/ai/responsible-ai.
2184
- """
2185
-
2186
- model_version: Optional[str] = None
2187
- """The model version to use for the analysis."""
2188
- string_index_type: Optional[str] = None
2189
- """Specifies the method used to interpret string offsets.
2190
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2191
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2192
- see https://aka.ms/text-analytics-offsets"""
2193
- disable_service_logs: Optional[bool] = None
2194
- """If set to true, you opt-out of having your text input
2195
- logged on the service side for troubleshooting. By default, the Language service logs your
2196
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2197
- the service's natural language processing functions. Setting this parameter to true
2198
- disables input logging and may limit our ability to remediate issues that occur. Please see
2199
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2200
- additional details, and Microsoft Responsible AI principles at
2201
- https://www.microsoft.com/ai/responsible-ai."""
2202
-
2203
- def __init__(
2204
- self,
2205
- *,
2206
- model_version: Optional[str] = None,
2207
- string_index_type: Optional[str] = None,
2208
- disable_service_logs: Optional[bool] = None,
2209
- **kwargs: Any
2210
- ) -> None:
2211
- self.model_version = model_version
2212
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
2213
- self.disable_service_logs = disable_service_logs
2214
-
2215
- def __repr__(self) -> str:
2216
- return (
2217
- f"RecognizeLinkedEntitiesAction(model_version={self.model_version}, "
2218
- f"string_index_type={self.string_index_type}), "
2219
- f"disable_service_logs={self.disable_service_logs}"[:1024]
2220
- )
2221
-
2222
- def _to_generated(self, api_version, task_id):
2223
- if is_language_api(api_version):
2224
- return _v2022_10_01_preview_models.EntityLinkingLROTask(
2225
- task_name=task_id,
2226
- parameters=_v2022_10_01_preview_models.EntityLinkingTaskParameters(
2227
- model_version=self.model_version,
2228
- string_index_type=string_index_type_compatibility(self.string_index_type),
2229
- logging_opt_out=self.disable_service_logs,
2230
- )
2231
- )
2232
-
2233
- return _v3_1_models.EntityLinkingTask(
2234
- parameters=_v3_1_models.EntityLinkingTaskParameters(
2235
- model_version=self.model_version,
2236
- string_index_type=self.string_index_type,
2237
- logging_opt_out=self.disable_service_logs,
2238
- ),
2239
- task_name=task_id
2240
- )
2241
-
2242
-
2243
- class RecognizeCustomEntitiesAction(DictMixin):
2244
- """RecognizeCustomEntitiesAction encapsulates the parameters for starting a long-running custom entity
2245
- recognition operation. For information on regional support of custom features and how to train a model to
2246
- recognize custom entities, see https://aka.ms/azsdk/textanalytics/customentityrecognition
2247
-
2248
- :param str project_name: Required. This field indicates the project name for the model.
2249
- :param str deployment_name: This field indicates the deployment name for the model.
2250
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
2251
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2252
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2253
- see https://aka.ms/text-analytics-offsets
2254
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2255
- logged on the service side for troubleshooting. By default, the Language service logs your
2256
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2257
- the service's natural language processing functions. Setting this parameter to true
2258
- disables input logging and may limit our ability to remediate issues that occur. Please see
2259
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2260
- additional details, and Microsoft Responsible AI principles at
2261
- https://www.microsoft.com/ai/responsible-ai.
2262
-
2263
- .. versionadded:: 2022-05-01
2264
- The *RecognizeCustomEntitiesAction* model.
2265
- """
2266
-
2267
- project_name: str
2268
- """This field indicates the project name for the model."""
2269
- deployment_name: str
2270
- """This field indicates the deployment name for the model."""
2271
- string_index_type: Optional[str] = None
2272
- """Specifies the method used to interpret string offsets.
2273
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2274
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2275
- see https://aka.ms/text-analytics-offsets"""
2276
- disable_service_logs: Optional[bool] = None
2277
- """If set to true, you opt-out of having your text input
2278
- logged on the service side for troubleshooting. By default, the Language service logs your
2279
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2280
- the service's natural language processing functions. Setting this parameter to true
2281
- disables input logging and may limit our ability to remediate issues that occur. Please see
2282
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2283
- additional details, and Microsoft Responsible AI principles at
2284
- https://www.microsoft.com/ai/responsible-ai."""
2285
-
2286
- def __init__(
2287
- self,
2288
- project_name: str,
2289
- deployment_name: str,
2290
- *,
2291
- string_index_type: Optional[str] = None,
2292
- disable_service_logs: Optional[bool] = None,
2293
- **kwargs: Any
2294
- ) -> None:
2295
- self.project_name = project_name
2296
- self.deployment_name = deployment_name
2297
- self.disable_service_logs = disable_service_logs
2298
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
2299
-
2300
- def __repr__(self) -> str:
2301
- return (
2302
- f"RecognizeCustomEntitiesAction(project_name={self.project_name}, "
2303
- f"deployment_name={self.deployment_name}, disable_service_logs={self.disable_service_logs}, "
2304
- f"string_index_type={self.string_index_type})"[:1024]
2305
- )
2306
-
2307
- def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
2308
- return _v2022_10_01_preview_models.CustomEntitiesLROTask(
2309
- task_name=task_id,
2310
- parameters=_v2022_10_01_preview_models.CustomEntitiesTaskParameters(
2311
- project_name=self.project_name,
2312
- deployment_name=self.deployment_name,
2313
- string_index_type=string_index_type_compatibility(self.string_index_type),
2314
- logging_opt_out=self.disable_service_logs,
2315
- )
2316
- )
2317
-
2318
-
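A hedged usage sketch for `RecognizeCustomEntitiesAction`: the project and deployment names below are hypothetical placeholders for a trained custom entity recognition model.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, RecognizeCustomEntitiesAction

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

# "my-entities-project" / "my-deployment" are placeholder names for a trained model.
action = RecognizeCustomEntitiesAction(
    "my-entities-project", "my-deployment", disable_service_logs=True
)

poller = client.begin_analyze_actions(
    ["Contoso ordered 100 widgets on March 3rd."], actions=[action]
)
for action_results in poller.result():
    for result in action_results:
        if not result.is_error:             # RecognizeCustomEntitiesResult
            for entity in result.entities:  # CategorizedEntity objects
                print(entity.text, entity.category, entity.confidence_score)
```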
2319
- class RecognizeCustomEntitiesResult(DictMixin):
2320
- """RecognizeCustomEntitiesResult is a result object which contains
2321
- the custom recognized entities from a particular document.
2322
-
2323
- .. versionadded:: 2022-10-01-preview
2324
- The *detected_language* property.
2325
- """
2326
-
2327
- id: str # pylint: disable=redefined-builtin
2328
- """Unique, non-empty document identifier that matches the
2329
- document id that was passed in with the request. If not specified
2330
- in the request, an id is assigned for the document."""
2331
- entities: List[CategorizedEntity]
2332
- """Recognized custom entities in the document."""
2333
- warnings: List[TextAnalyticsWarning]
2334
- """Warnings encountered while processing document."""
2335
- statistics: Optional[TextDocumentStatistics] = None
2336
- """If `show_stats=True` was specified in the request this
2337
- field will contain information about the document payload."""
2338
- detected_language: Optional[DetectedLanguage] = None
2339
- """If 'language' is set to 'auto' for the document in the request this
2340
- field will contain the DetectedLanguage for the document."""
2341
- is_error: Literal[False] = False
2342
- """Boolean check for error item when iterating over list of
2343
- results. Always False for an instance of a RecognizeCustomEntitiesResult."""
2344
- kind: Literal["CustomEntityRecognition"] = "CustomEntityRecognition"
2345
- """The text analysis kind - "CustomEntityRecognition"."""
2346
-
2347
- def __init__(self, **kwargs: Any) -> None:
2348
- self.id = kwargs.get("id", None)
2349
- self.entities = kwargs.get("entities", None)
2350
- self.warnings = kwargs.get("warnings", [])
2351
- self.statistics = kwargs.get("statistics", None)
2352
- self.detected_language = kwargs.get("detected_language", None)
2353
- self.is_error: Literal[False] = False
2354
- self.kind: Literal["CustomEntityRecognition"] = "CustomEntityRecognition"
2355
-
2356
- def __repr__(self) -> str:
2357
- return (
2358
- f"RecognizeCustomEntitiesResult(id={self.id}, entities={repr(self.entities)}, "
2359
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
2360
- f"detected_language={repr(self.detected_language)}, is_error={self.is_error},"
2361
- f" kind={self.kind})"[:1024]
2362
- )
2363
-
2364
- @classmethod
2365
- def _from_generated(cls, result):
2366
- return cls(
2367
- id=result.id,
2368
- entities=[
2369
- CategorizedEntity._from_generated(e) # pylint: disable=protected-access
2370
- for e in result.entities
2371
- ],
2372
- warnings=[
2373
- TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
2374
- w
2375
- )
2376
- for w in result.warnings
2377
- ],
2378
- statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
2379
- result.statistics
2380
- ),
2381
- detected_language=DetectedLanguage._from_generated( # pylint: disable=protected-access
2382
- result.detected_language
2383
- ) if hasattr(result, "detected_language") and result.detected_language else None
2384
- )
2385
-
2386
-
2387
- class MultiLabelClassifyAction(DictMixin):
2388
- """MultiLabelClassifyAction encapsulates the parameters for starting a long-running custom multi label
2389
- classification operation. For information on regional support of custom features and how to train a model to
2390
- classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities
2391
-
2392
- :param str project_name: Required. This field indicates the project name for the model.
2393
- :param str deployment_name: Required. This field indicates the deployment name for the model.
2394
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2395
- logged on the service side for troubleshooting. By default, the Language service logs your
2396
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2397
- the service's natural language processing functions. Setting this parameter to true
2398
- disables input logging and may limit our ability to remediate issues that occur. Please see
2399
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2400
- additional details, and Microsoft Responsible AI principles at
2401
- https://www.microsoft.com/ai/responsible-ai.
2402
-
2403
- .. versionadded:: 2022-05-01
2404
- The *MultiLabelClassifyAction* model.
2405
- """
2406
-
2407
- project_name: str
2408
- """This field indicates the project name for the model."""
2409
- deployment_name: str
2410
- """This field indicates the deployment name for the model."""
2411
- disable_service_logs: Optional[bool] = None
2412
- """If set to true, you opt-out of having your text input
2413
- logged on the service side for troubleshooting. By default, the Language service logs your
2414
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2415
- the service's natural language processing functions. Setting this parameter to true
2416
- disables input logging and may limit our ability to remediate issues that occur. Please see
2417
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2418
- additional details, and Microsoft Responsible AI principles at
2419
- https://www.microsoft.com/ai/responsible-ai."""
2420
-
2421
- def __init__(
2422
- self,
2423
- project_name: str,
2424
- deployment_name: str,
2425
- *,
2426
- disable_service_logs: Optional[bool] = None,
2427
- **kwargs: Any
2428
- ) -> None:
2429
- self.project_name = project_name
2430
- self.deployment_name = deployment_name
2431
- self.disable_service_logs = disable_service_logs
2432
-
2433
- def __repr__(self) -> str:
2434
- return (
2435
- f"MultiLabelClassifyAction(project_name={self.project_name}, deployment_name={self.deployment_name}, "
2436
- f"disable_service_logs={self.disable_service_logs})"[:1024]
2437
- )
2438
-
2439
- def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
2440
- return _v2022_10_01_preview_models.CustomMultiLabelClassificationLROTask(
2441
- task_name=task_id,
2442
- parameters=_v2022_10_01_preview_models.CustomMultiLabelClassificationTaskParameters(
2443
- project_name=self.project_name,
2444
- deployment_name=self.deployment_name,
2445
- logging_opt_out=self.disable_service_logs,
2446
- )
2447
- )
2448
-
2449
-
2450
- class ClassifyDocumentResult(DictMixin):
2451
- """ClassifyDocumentResult is a result object which contains
2452
- the classifications for a particular document.
2453
-
2454
- .. versionadded:: 2022-10-01-preview
2455
- The *detected_language* property.
2456
- """
2457
-
2458
- id: str # pylint: disable=redefined-builtin
2459
- """Unique, non-empty document identifier."""
2460
- classifications: List["ClassificationCategory"]
2461
- """Recognized classification results in the document."""
2462
- warnings: List[TextAnalyticsWarning]
2463
- """Warnings encountered while processing document."""
2464
- statistics: Optional[TextDocumentStatistics] = None
2465
- """If `show_stats=True` was specified in the request this
2466
- field will contain information about the document payload."""
2467
- detected_language: Optional[DetectedLanguage] = None
2468
- """If 'language' is set to 'auto' for the document in the request this
2469
- field will contain the DetectedLanguage for the document."""
2470
- is_error: Literal[False] = False
2471
- """Boolean check for error item when iterating over list of
2472
- results. Always False for an instance of a ClassifyDocumentResult."""
2473
- kind: Literal["CustomDocumentClassification"] = "CustomDocumentClassification"
2474
- """The text analysis kind - "CustomDocumentClassification"."""
2475
-
2476
- def __init__(self, **kwargs: Any) -> None:
2477
- self.id = kwargs.get('id', None)
2478
- self.classifications = kwargs.get('classifications', None)
2479
- self.warnings = kwargs.get('warnings', [])
2480
- self.statistics = kwargs.get('statistics', None)
2481
- self.detected_language = kwargs.get('detected_language', None)
2482
- self.is_error: Literal[False] = False
2483
- self.kind: Literal["CustomDocumentClassification"] = "CustomDocumentClassification"
2484
-
2485
- def __repr__(self) -> str:
2486
- return (
2487
- f"ClassifyDocumentResult(id={self.id}, classifications={repr(self.classifications)}, "
2488
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
2489
- f"detected_language={repr(self.detected_language)} "
2490
- f"is_error={self.is_error}, kind={self.kind})"[:1024]
2491
- )
2492
-
2493
- @classmethod
2494
- def _from_generated(cls, result):
2495
- return cls(
2496
- id=result.id,
2497
- classifications=[
2498
- ClassificationCategory._from_generated(e) # pylint: disable=protected-access
2499
- for e in result.class_property
2500
- ],
2501
- warnings=[
2502
- TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
2503
- w
2504
- )
2505
- for w in result.warnings
2506
- ],
2507
- statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
2508
- result.statistics
2509
- ),
2510
- detected_language=DetectedLanguage._from_generated( # pylint: disable=protected-access
2511
- result.detected_language
2512
- ) if hasattr(result, "detected_language") and result.detected_language else None
2513
- )
2514
-
2515
-
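A sketch of how `MultiLabelClassifyAction` results surfaced as `ClassifyDocumentResult` objects in the 5.x client; the project and deployment names are placeholders for a trained custom classification model.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, MultiLabelClassifyAction

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

action = MultiLabelClassifyAction("my-classify-project", "my-deployment")

poller = client.begin_analyze_actions(
    ["A movie about a heist crew and a getaway driver."], actions=[action]
)
for action_results in poller.result():
    for result in action_results:
        if not result.is_error:  # ClassifyDocumentResult
            for classification in result.classifications:  # ClassificationCategory
                print(classification.category, classification.confidence_score)
```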
2516
- class SingleLabelClassifyAction(DictMixin):
2517
- """SingleLabelClassifyAction encapsulates the parameters for starting a long-running custom single label
2518
- classification operation. For information on regional support of custom features and how to train a model to
2519
- classify your documents, see https://aka.ms/azsdk/textanalytics/customfunctionalities
2520
-
2521
- :param str project_name: Required. This field indicates the project name for the model.
2522
- :param str deployment_name: Required. This field indicates the deployment name for the model.
2523
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2524
- logged on the service side for troubleshooting. By default, the Language service logs your
2525
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2526
- the service's natural language processing functions. Setting this parameter to true
2527
- disables input logging and may limit our ability to remediate issues that occur. Please see
2528
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2529
- additional details, and Microsoft Responsible AI principles at
2530
- https://www.microsoft.com/ai/responsible-ai.
2531
-
2532
- .. versionadded:: 2022-05-01
2533
- The *SingleLabelClassifyAction* model.
2534
- """
2535
-
2536
- project_name: str
2537
- """This field indicates the project name for the model."""
2538
- deployment_name: str
2539
- """This field indicates the deployment name for the model."""
2540
- disable_service_logs: Optional[bool] = None
2541
- """If set to true, you opt-out of having your text input
2542
- logged on the service side for troubleshooting. By default, the Language service logs your
2543
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2544
- the service's natural language processing functions. Setting this parameter to true
2545
- disables input logging and may limit our ability to remediate issues that occur. Please see
2546
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2547
- additional details, and Microsoft Responsible AI principles at
2548
- https://www.microsoft.com/ai/responsible-ai."""
2549
-
2550
- def __init__(
2551
- self,
2552
- project_name: str,
2553
- deployment_name: str,
2554
- *,
2555
- disable_service_logs: Optional[bool] = None,
2556
- **kwargs: Any
2557
- ) -> None:
2558
- self.project_name = project_name
2559
- self.deployment_name = deployment_name
2560
- self.disable_service_logs = disable_service_logs
2561
-
2562
- def __repr__(self) -> str:
2563
- return (
2564
- f"SingleLabelClassifyAction(project_name={self.project_name}, deployment_name={self.deployment_name}, "
2565
- f"disable_service_logs={self.disable_service_logs})"[:1024]
2566
- )
2567
-
2568
- def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
2569
- return _v2022_10_01_preview_models.CustomSingleLabelClassificationLROTask(
2570
- task_name=task_id,
2571
- parameters=_v2022_10_01_preview_models.CustomSingleLabelClassificationTaskParameters(
2572
- project_name=self.project_name,
2573
- deployment_name=self.deployment_name,
2574
- logging_opt_out=self.disable_service_logs,
2575
- )
2576
- )
2577
-
2578
-
2579
- class ClassificationCategory(DictMixin):
2580
- """ClassificationCategory represents a classification of the input document.
2581
- """
2582
-
2583
- category: str
2584
- """Classification category for the document."""
2585
- confidence_score: float
2586
- """Confidence score between 0 and 1 of the recognized classification."""
2587
-
2588
- def __init__(self, **kwargs: Any) -> None:
2589
- self.category = kwargs.get('category', None)
2590
- self.confidence_score = kwargs.get('confidence_score', None)
2591
-
2592
- def __repr__(self) -> str:
2593
- return f"ClassificationCategory(category={self.category}, " \
2594
- f"confidence_score={self.confidence_score})"[:1024]
2595
-
2596
- @classmethod
2597
- def _from_generated(cls, result):
2598
- return cls(
2599
- category=result.category,
2600
- confidence_score=result.confidence_score
2601
- )
2602
-
2603
-
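`SingleLabelClassifyAction` followed the same pattern; a single-label project typically yields one `ClassificationCategory` per document. The names below are placeholders.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, SingleLabelClassifyAction

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

poller = client.begin_analyze_actions(
    ["An action movie with a long car chase."],
    actions=[SingleLabelClassifyAction("my-single-label-project", "my-deployment")],
)
for action_results in poller.result():
    for result in action_results:
        if not result.is_error and result.classifications:
            top = result.classifications[0]  # usually a single category
            print(top.category, top.confidence_score)
```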
2604
- class AnalyzeHealthcareEntitiesAction(DictMixin):
2605
- """AnalyzeHealthcareEntitiesAction encapsulates the parameters for starting a long-running
2606
- healthcare entities analysis operation.
2607
-
2608
- If you just want to analyze healthcare entities in a list of documents, and not perform multiple
2609
- long running actions on the input of documents, call method `begin_analyze_healthcare_entities` instead
2610
- of interfacing with this model.
2611
-
2612
- :keyword Optional[str] model_version: The model version to use for the analysis.
2613
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
2614
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2615
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2616
- see https://aka.ms/text-analytics-offsets
2617
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2618
- logged on the service side for troubleshooting. By default, the Language service logs your
2619
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2620
- the service's natural language processing functions. Setting this parameter to true
2621
- disables input logging and may limit our ability to remediate issues that occur. Please see
2622
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2623
- additional details, and Microsoft Responsible AI principles at
2624
- https://www.microsoft.com/ai/responsible-ai.
2625
- :keyword Optional[str] fhir_version: The FHIR Spec version that the result will use to format the fhir_bundle
2626
- on the result object. For additional information see https://www.hl7.org/fhir/overview.html.
2627
- The only acceptable values to pass in are None and "4.0.1". The default value is None.
2628
- :keyword document_type: Document type that can be provided as input for FHIR documents. fhir_version
2629
- is expected to be provided when this is used. Passing the None enum value behaves the same as not
2630
- providing the document_type parameter. Known values are: "None", "ClinicalTrial", "DischargeSummary",
2631
- "ProgressNote", "HistoryAndPhysical", "Consult", "Imaging", "Pathology", and "ProcedureNote".
2632
- :paramtype document_type: Optional[str or ~azure.ai.textanalytics.HealthcareDocumentType]
2633
-
2634
- .. versionadded:: 2022-05-01
2635
- The *AnalyzeHealthcareEntitiesAction* model.
2636
- .. versionadded:: 2022-10-01-preview
2637
- The *fhir_version* and *document_type* keyword arguments.
2638
- """
2639
-
2640
- model_version: Optional[str] = None
2641
- """The model version to use for the analysis."""
2642
- string_index_type: Optional[str] = None
2643
- """Specifies the method used to interpret string offsets.
2644
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2645
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2646
- see https://aka.ms/text-analytics-offsets"""
2647
- disable_service_logs: Optional[bool] = None
2648
- """If set to true, you opt-out of having your text input
2649
- logged on the service side for troubleshooting. By default, the Language service logs your
2650
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2651
- the service's natural language processing functions. Setting this parameter to true
2652
- disables input logging and may limit our ability to remediate issues that occur. Please see
2653
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2654
- additional details, and Microsoft Responsible AI principles at
2655
- https://www.microsoft.com/ai/responsible-ai."""
2656
- fhir_version: Optional[str] = None
2657
- """The FHIR Spec version that the result will use to format the fhir_bundle
2658
- on the result object. For additional information see https://www.hl7.org/fhir/overview.html.
2659
- The only acceptable values to pass in are None and "4.0.1". The default value is None."""
2660
- document_type: Optional[Union[str, HealthcareDocumentType]] = None
2661
- """Document type that can be provided as input for Fhir Documents. Expect to
2662
- have fhir_version provided when used. Behavior of using None enum is the same as not using the
2663
- document_type parameter. Known values are "None", "ClinicalTrial", "DischargeSummary",
2664
- "ProgressNote", "HistoryAndPhysical", "Consult", "Imaging", "Pathology", and "ProcedureNote"."""
2665
-
2666
- def __init__(
2667
- self,
2668
- *,
2669
- model_version: Optional[str] = None,
2670
- string_index_type: Optional[str] = None,
2671
- disable_service_logs: Optional[bool] = None,
2672
- fhir_version: Optional[str] = None,
2673
- document_type: Optional[Union[str, HealthcareDocumentType]] = None,
2674
- **kwargs: Any
2675
- ) -> None:
2676
- self.model_version = model_version
2677
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
2678
- self.disable_service_logs = disable_service_logs
2679
- self.fhir_version = fhir_version
2680
- self.document_type = document_type
2681
-
2682
- def __repr__(self) -> str:
2683
- return (
2684
- f"AnalyzeHealthcareEntitiesAction(model_version={self.model_version}, "
2685
- f"string_index_type={self.string_index_type}, disable_service_logs={self.disable_service_logs}, "
2686
- f"fhir_version={self.fhir_version}, document_type={self.document_type})"[:1024]
2687
- )
2688
-
2689
- def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
2690
- return _v2022_10_01_preview_models.HealthcareLROTask(
2691
- task_name=task_id,
2692
- parameters=_v2022_10_01_preview_models.HealthcareTaskParameters(
2693
- model_version=self.model_version,
2694
- string_index_type=string_index_type_compatibility(self.string_index_type),
2695
- logging_opt_out=self.disable_service_logs,
2696
- fhir_version=self.fhir_version,
2697
- document_type=self.document_type,
2698
- )
2699
- )
2700
-
2701
-
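A sketch of submitting `AnalyzeHealthcareEntitiesAction` with the preview-only FHIR options; the endpoint, key, and document are placeholders, and the FHIR bundle is only produced when `fhir_version` "4.0.1" is requested.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, AnalyzeHealthcareEntitiesAction

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

# Request a FHIR bundle and hint at the clinical document type (preview behavior).
action = AnalyzeHealthcareEntitiesAction(
    fhir_version="4.0.1", document_type="DischargeSummary"
)

poller = client.begin_analyze_actions(
    ["The patient was prescribed 100mg of ibuprofen."], actions=[action]
)
for action_results in poller.result():
    for result in action_results:
        if not result.is_error:
            for entity in result.entities:  # HealthcareEntity objects
                print(entity.text, entity.category)
```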
2702
- class ExtractSummaryAction(DictMixin):
2703
- """ExtractSummaryAction encapsulates the parameters for starting a long-running Extractive Text
2704
- Summarization operation. For a conceptual discussion of extractive summarization, see the service documentation:
2705
- https://learn.microsoft.com/azure/cognitive-services/language-service/summarization/overview
2706
-
2707
- :keyword Optional[str] model_version: The model version to use for the analysis.
2708
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
2709
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2710
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2711
- see https://aka.ms/text-analytics-offsets
2712
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
2713
- logged on the service side for troubleshooting. By default, the Language service logs your
2714
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2715
- the service's natural language processing functions. Setting this parameter to true
2716
- disables input logging and may limit our ability to remediate issues that occur. Please see
2717
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2718
- additional details, and Microsoft Responsible AI principles at
2719
- https://www.microsoft.com/ai/responsible-ai.
2720
- :keyword Optional[int] max_sentence_count: Maximum number of sentences to return. Defaults to 3.
2721
- :keyword Optional[str] order_by: Possible values include: "Offset", "Rank". Default value: "Offset".
2722
-
2723
- .. versionadded:: 2022-10-01-preview
2724
- The *ExtractSummaryAction* model.
2725
- """
2726
-
2727
- model_version: Optional[str] = None
2728
- """The model version to use for the analysis."""
2729
- string_index_type: Optional[str] = None
2730
- """Specifies the method used to interpret string offsets.
2731
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
2732
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
2733
- see https://aka.ms/text-analytics-offsets"""
2734
- disable_service_logs: Optional[bool] = None
2735
- """If set to true, you opt-out of having your text input
2736
- logged on the service side for troubleshooting. By default, the Language service logs your
2737
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
2738
- the service's natural language processing functions. Setting this parameter to true
2739
- disables input logging and may limit our ability to remediate issues that occur. Please see
2740
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
2741
- additional details, and Microsoft Responsible AI principles at
2742
- https://www.microsoft.com/ai/responsible-ai."""
2743
- max_sentence_count: Optional[int] = None
2744
- """Number of sentences to return. Defaults to 3."""
2745
- order_by: Optional[str] = None
2746
- """Possible values include "Offset", "Rank". Default value is "Offset"."""
2747
-
2748
- def __init__(
2749
- self,
2750
- *,
2751
- model_version: Optional[str] = None,
2752
- string_index_type: Optional[str] = None,
2753
- disable_service_logs: Optional[bool] = None,
2754
- max_sentence_count: Optional[int] = None,
2755
- order_by: Optional[str] = None,
2756
- **kwargs: Any
2757
- ) -> None:
2758
- self.model_version = model_version
2759
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
2760
- self.disable_service_logs = disable_service_logs
2761
- self.max_sentence_count = max_sentence_count
2762
- self.order_by = order_by
2763
-
2764
- def __repr__(self) -> str:
2765
- return (
2766
- f"ExtractSummaryAction(model_version={self.model_version}, "
2767
- f"string_index_type={self.string_index_type}, disable_service_logs={self.disable_service_logs}, "
2768
- f"max_sentence_count={self.max_sentence_count}, order_by={self.order_by})"[:1024]
2769
- )
2770
-
2771
- def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
2772
- return _v2022_10_01_preview_models.ExtractiveSummarizationLROTask( # pylint: disable=no-member
2773
- task_name=task_id,
2774
- parameters=_v2022_10_01_preview_models.ExtractiveSummarizationTaskParameters( # pylint: disable=no-member
2775
- model_version=self.model_version,
2776
- string_index_type=string_index_type_compatibility(self.string_index_type),
2777
- logging_opt_out=self.disable_service_logs,
2778
- sentence_count=self.max_sentence_count,
2779
- sort_by=self.order_by,
2780
- )
2781
- )
2782
-
2783
-
2784
- class ExtractSummaryResult(DictMixin):
2785
- """ExtractSummaryResult is a result object which contains
2786
- the extractive text summarization from a particular document.
2787
-
2788
- .. versionadded:: 2022-10-01-preview
2789
- The *ExtractSummaryResult* model.
2790
- """
2791
-
2792
- id: str # pylint: disable=redefined-builtin
2793
- """Unique, non-empty document identifier."""
2794
- sentences: List["SummarySentence"]
2795
- """A ranked list of sentences representing the extracted summary."""
2796
- warnings: List[TextAnalyticsWarning]
2797
- """Warnings encountered while processing document."""
2798
- statistics: Optional[TextDocumentStatistics] = None
2799
- """If `show_stats=True` was specified in the request this
2800
- field will contain information about the document payload."""
2801
- detected_language: Optional[DetectedLanguage] = None
2802
- """If 'language' is set to 'auto' for the document in the request this
2803
- field will contain the DetectedLanguage for the document."""
2804
- is_error: Literal[False] = False
2805
- """Boolean check for error item when iterating over list of
2806
- results. Always False for an instance of an ExtractSummaryResult."""
2807
- kind: Literal["ExtractiveSummarization"] = "ExtractiveSummarization"
2808
- """The text analysis kind - "ExtractiveSummarization"."""
2809
-
2810
- def __init__(self, **kwargs: Any) -> None:
2811
- self.id = kwargs.get("id", None)
2812
- self.sentences = kwargs.get("sentences", None)
2813
- self.warnings = kwargs.get("warnings", None)
2814
- self.statistics = kwargs.get("statistics", None)
2815
- self.detected_language = kwargs.get("detected_language", None)
2816
- self.is_error: Literal[False] = False
2817
- self.kind: Literal["ExtractiveSummarization"] = "ExtractiveSummarization"
2818
-
2819
- def __repr__(self) -> str:
2820
- return (
2821
- f"ExtractSummaryResult(id={self.id}, sentences={repr(self.sentences)}, "
2822
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
2823
- f"detected_language={repr(self.detected_language)},"
2824
- f" is_error={self.is_error}, kind={self.kind})"[:1024]
2825
- )
2826
-
2827
- @classmethod
2828
- def _from_generated(cls, summary):
2829
- return cls(
2830
- id=summary.id,
2831
- sentences=[
2832
- SummarySentence._from_generated( # pylint: disable=protected-access
2833
- sentence
2834
- )
2835
- for sentence in summary.sentences
2836
- ],
2837
- warnings=[
2838
- TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
2839
- w
2840
- )
2841
- for w in summary.warnings
2842
- ],
2843
- statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
2844
- summary.statistics
2845
- ),
2846
- detected_language=DetectedLanguage._from_generated( # pylint: disable=protected-access
2847
- summary.detected_language
2848
- ) if hasattr(summary, "detected_language") and summary.detected_language else None
2849
- )
2850
-
2851
-
2852
- class SummarySentence(DictMixin):
2853
- """Represents a single sentence from the extractive text summarization.
2854
-
2855
- .. versionadded:: 2022-10-01-preview
2856
- The *SummarySentence* model.
2857
- """
2858
-
2859
- text: str
2860
- """The extracted sentence text."""
2861
- rank_score: float
2862
- """A float value representing the relevance of the sentence within
2863
- the summary. Higher values indicate higher importance."""
2864
- offset: int
2865
- """The sentence offset from the start of the document.
2866
- The value depends on the value of the `string_index_type` parameter
2867
- set in the original request, which is UnicodeCodePoint by default."""
2868
- length: int
2869
- """The length of the sentence. This value depends on the value of the
2870
- `string_index_type` parameter set in the original request, which is UnicodeCodePoint
2871
- by default."""
2872
-
2873
- def __init__(self, **kwargs: Any) -> None:
2874
- self.text = kwargs.get("text", None)
2875
- self.rank_score = kwargs.get("rank_score", None)
2876
- self.offset = kwargs.get("offset", None)
2877
- self.length = kwargs.get("length", None)
2878
-
2879
- def __repr__(self) -> str:
2880
- return f"SummarySentence(text={self.text}, rank_score={self.rank_score}, " \
2881
- f"offset={self.offset}, length={self.length})"[:1024]
2882
-
2883
- @classmethod
2884
- def _from_generated(cls, sentence):
2885
- return cls(
2886
- text=sentence.text,
2887
- rank_score=sentence.rank_score,
2888
- offset=sentence.offset,
2889
- length=sentence.length,
2890
- )
2891
-
2892
-
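A sketch of extractive summarization with `ExtractSummaryAction`, reading the `SummarySentence` objects off the `ExtractSummaryResult`; the document text and limits are illustrative only.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, ExtractSummaryAction

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

documents = [
    "Extractive summarization selects the most representative sentences from a "
    "document. Each returned sentence carries a rank score, an offset, and a length."
]

# Return at most two sentences, ordered by rank rather than by position.
poller = client.begin_analyze_actions(
    documents, actions=[ExtractSummaryAction(max_sentence_count=2, order_by="Rank")]
)
for action_results in poller.result():
    for result in action_results:
        if not result.is_error:                # ExtractSummaryResult
            for sentence in result.sentences:  # SummarySentence objects
                print(sentence.rank_score, sentence.text)
```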
2893
- class AbstractiveSummaryResult(DictMixin):
2894
- """AbstractiveSummaryResult is a result object which contains
2895
- the summary generated for a particular document.
2896
-
2897
- .. versionadded:: 2022-10-01-preview
2898
- The *AbstractiveSummaryResult* model.
2899
- """
2900
-
2901
- id: str # pylint: disable=redefined-builtin
2902
- """Unique, non-empty document identifier. Required."""
2903
- summaries: List["AbstractiveSummary"]
2904
- """A list of abstractive summaries. Required."""
2905
- warnings: List[TextAnalyticsWarning]
2906
- """Warnings encountered while processing document. Results will still be returned
2907
- if there are warnings, but they may not be fully accurate."""
2908
- detected_language: Optional[DetectedLanguage] = None
2909
- """If 'language' is set to 'auto' for the document in the request this
2910
- field will contain the DetectedLanguage for the document."""
2911
- statistics: Optional[TextDocumentStatistics] = None
2912
- """If `show_stats=True` was specified in the request this
2913
- field will contain information about the document payload."""
2914
- is_error: Literal[False] = False
2915
- """Boolean check for error item when iterating over list of
2916
- results. Always False for an instance of an AbstractiveSummaryResult."""
2917
- kind: Literal["AbstractiveSummarization"] = "AbstractiveSummarization"
2918
- """The text analysis kind - "AbstractiveSummarization"."""
2919
-
2920
- def __init__(self, **kwargs: Any) -> None:
2921
- self.id = kwargs.get("id", None)
2922
- self.detected_language = kwargs.get("detected_language", None)
2923
- self.warnings = kwargs.get("warnings", None)
2924
- self.statistics = kwargs.get("statistics", None)
2925
- self.summaries = kwargs.get("summaries", None)
2926
- self.is_error: Literal[False] = False
2927
- self.kind: Literal["AbstractiveSummarization"] = "AbstractiveSummarization"
2928
-
2929
- def __repr__(self) -> str:
2930
- return (
2931
- f"AbstractiveSummaryResult(id={self.id}, detected_language={repr(self.detected_language)}, "
2932
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
2933
- f"summaries={repr(self.summaries)}, is_error={self.is_error}, kind={self.kind})"[:1024]
2934
- )
2935
-
2936
- @classmethod
2937
- def _from_generated(cls, result):
2938
- return cls(
2939
- id=result.id,
2940
- detected_language=DetectedLanguage._from_generated( # pylint: disable=protected-access
2941
- result.detected_language
2942
- ) if hasattr(result, "detected_language") and result.detected_language else None,
2943
- warnings=[
2944
- TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
2945
- w
2946
- )
2947
- for w in result.warnings
2948
- ],
2949
- statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
2950
- result.statistics
2951
- ),
2952
- summaries=[
2953
- AbstractiveSummary._from_generated(summary) # pylint: disable=protected-access
2954
- for summary in result.summaries
2955
- ],
2956
- )
2957
-
2958
-
2959
- class AbstractiveSummary(DictMixin):
2960
- """An object representing a single summary with context for given document.
2961
-
2962
- .. versionadded:: 2022-10-01-preview
2963
- The *AbstractiveSummary* model.
2964
- """
2965
-
2966
- text: str
2967
- """The text of the summary. Required."""
2968
- contexts: List["SummaryContext"]
2969
- """The context list of the summary."""
2970
-
2971
- def __init__(self, **kwargs: Any) -> None:
2972
- self.text = kwargs.get("text", None)
2973
- self.contexts = kwargs.get("contexts", None)
2974
-
2975
- def __repr__(self) -> str:
2976
- return f"AbstractiveSummary(text={self.text}, contexts={repr(self.contexts)})"[:1024]
2977
-
2978
- @classmethod
2979
- def _from_generated(cls, result):
2980
- return cls(
2981
- text=result.text,
2982
- contexts=[
2983
- SummaryContext._from_generated(context) # pylint: disable=protected-access
2984
- for context in result.contexts
2985
- ] if result.contexts else []
2986
- )
2987
-
2988
-
2989
- class SummaryContext(DictMixin):
2990
- """The context of the summary.
2991
-
2992
- .. versionadded:: 2022-10-01-preview
2993
- The *SummaryContext* model.
2994
- """
2995
-
2996
- offset: int
2997
- """Start position for the context. Use of different 'string_index_type' values can
2998
- affect the offset returned. Required."""
2999
- length: int
3000
- """The length of the context. Use of different 'string_index_type' values can affect
3001
- the length returned. Required."""
3002
-
3003
- def __init__(self, **kwargs: Any) -> None:
3004
- self.offset = kwargs.get("offset", None)
3005
- self.length = kwargs.get("length", None)
3006
-
3007
- def __repr__(self) -> str:
3008
- return f"SummaryContext(offset={self.offset}, length={self.length})"[:1024]
3009
-
3010
- @classmethod
3011
- def _from_generated(cls, summary):
3012
- return cls(
3013
- offset=summary.offset,
3014
- length=summary.length
3015
- )
3016
-
3017
-
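Because `SummaryContext.offset` and `length` follow the request's `string_index_type` (Unicode code points by default, which matches Python string indexing), a small illustrative helper (hypothetical name) can map a context back to the source text:

```python
def context_span(source_text: str, context) -> str:
    """Slice the original document text covered by a SummaryContext.

    Assumes the default string_index_type ("UnicodeCodePoint"), where offsets
    and lengths line up with Python's per-code-point string indexing.
    """
    return source_text[context.offset: context.offset + context.length]
```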
3018
- class AbstractiveSummaryAction(DictMixin):
3019
- """AbstractiveSummaryAction encapsulates the parameters for starting a long-running
3020
- abstractive summarization operation. For a conceptual discussion of abstractive summarization,
3021
- see the service documentation:
3022
- https://learn.microsoft.com/azure/cognitive-services/language-service/summarization/overview
3023
-
3024
- Abstractive summarization generates a summary for the input documents. Abstractive summarization
3025
- is different from extractive summarization in that extractive summarization is the strategy of
3026
- concatenating extracted sentences from the input document into a summary, while abstractive
3027
- summarization involves paraphrasing the document using novel sentences.
3028
-
3029
- .. note:: The abstractive summarization feature is part of a gated preview. Request access here:
3030
- https://aka.ms/applyforgatedsummarizationfeatures
3031
-
3032
- :keyword Optional[int] sentence_count: Controls the approximate number of sentences in the output summaries.
3033
- :keyword Optional[str] model_version: The model version to use for the analysis.
3034
- :keyword Optional[str] string_index_type: Specifies the method used to interpret string offsets.
3035
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
3036
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
3037
- see https://aka.ms/text-analytics-offsets
3038
- :keyword Optional[bool] disable_service_logs: If set to true, you opt-out of having your text input
3039
- logged on the service side for troubleshooting. By default, the Language service logs your
3040
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
3041
- the service's natural language processing functions. Setting this parameter to true
3042
- disables input logging and may limit our ability to remediate issues that occur. Please see
3043
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
3044
- additional details, and Microsoft Responsible AI principles at
3045
- https://www.microsoft.com/ai/responsible-ai.
3046
-
3047
- .. versionadded:: 2022-10-01-preview
3048
- The *AbstractiveSummaryAction* model.
3049
- """
3050
-
3051
- sentence_count: Optional[int] = None
3052
- """It controls the approximate number of sentences in the output summaries."""
3053
- model_version: Optional[str] = None
3054
- """The model version to use for the analysis."""
3055
- string_index_type: Optional[str] = None
3056
- """Specifies the method used to interpret string offsets.
3057
- `UnicodeCodePoint`, the Python encoding, is the default. To override the Python default,
3058
- you can also pass in `Utf16CodeUnit` or `TextElement_v8`. For additional information
3059
- see https://aka.ms/text-analytics-offsets"""
3060
- disable_service_logs: Optional[bool] = None
3061
- """If set to true, you opt-out of having your text input
3062
- logged on the service side for troubleshooting. By default, the Language service logs your
3063
- input text for 48 hours, solely to allow for troubleshooting issues in providing you with
3064
- the service's natural language processing functions. Setting this parameter to true
3065
- disables input logging and may limit our ability to remediate issues that occur. Please see
3066
- Cognitive Services Compliance and Privacy notes at https://aka.ms/cs-compliance for
3067
- additional details, and Microsoft Responsible AI principles at
3068
- https://www.microsoft.com/ai/responsible-ai."""
3069
-
3070
- def __init__(
3071
- self,
3072
- *,
3073
- sentence_count: Optional[int] = None,
3074
- model_version: Optional[str] = None,
3075
- string_index_type: Optional[str] = None,
3076
- disable_service_logs: Optional[bool] = None,
3077
- **kwargs: Any
3078
- ) -> None:
3079
- self.sentence_count = sentence_count
3080
- self.model_version = model_version
3081
- self.string_index_type: str = string_index_type if string_index_type is not None else STRING_INDEX_TYPE_DEFAULT
3082
- self.disable_service_logs = disable_service_logs
3083
-
3084
- def __repr__(self) -> str:
3085
- return (
3086
- f"AbstractiveSummaryAction(model_version={self.model_version}, "
3087
- f"string_index_type={self.string_index_type}, disable_service_logs={self.disable_service_logs}, "
3088
- f"sentence_count={self.sentence_count})"[:1024]
3089
- )
3090
-
3091
- def _to_generated(self, api_version, task_id): # pylint: disable=unused-argument
3092
- return _v2022_10_01_preview_models.AbstractiveSummarizationLROTask(
3093
- task_name=task_id,
3094
- parameters=_v2022_10_01_preview_models.AbstractiveSummarizationTaskParameters(
3095
- model_version=self.model_version,
3096
- string_index_type=string_index_type_compatibility(self.string_index_type),
3097
- logging_opt_out=self.disable_service_logs,
3098
- sentence_count=self.sentence_count,
3099
- )
3100
- )
3101
-
3102
-
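A sketch of the gated-preview abstractive summarization flow using `AbstractiveSummaryAction`; `sentence_count` only approximately controls summary length, and the document text is a placeholder.

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient, AbstractiveSummaryAction

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

documents = [
    "The Language service offers both extractive and abstractive document "
    "summarization. Abstractive summarization paraphrases the input with novel "
    "sentences rather than quoting it verbatim."
]

poller = client.begin_analyze_actions(
    documents, actions=[AbstractiveSummaryAction(sentence_count=2)]
)
for action_results in poller.result():
    for result in action_results:
        if not result.is_error:               # AbstractiveSummaryResult
            for summary in result.summaries:  # AbstractiveSummary objects
                print(summary.text)
```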
3103
- class DynamicClassificationResult(DictMixin):
3104
- """DynamicClassificationResult is a result object which contains
3105
- the classifications for a particular document.
3106
-
3107
- .. versionadded:: 2022-10-01-preview
3108
- The *DynamicClassificationResult* model.
3109
- """
3110
-
3111
- id: str # pylint: disable=redefined-builtin
3112
- """Unique, non-empty document identifier."""
3113
- classifications: List[ClassificationCategory]
3114
- """Recognized classification results in the document."""
3115
- warnings: List[TextAnalyticsWarning]
3116
- """Warnings encountered while processing document."""
3117
- statistics: Optional[TextDocumentStatistics] = None
3118
- """If `show_stats=True` was specified in the request this
3119
- field will contain information about the document payload."""
3120
- is_error: Literal[False] = False
3121
- """Boolean check for error item when iterating over list of
3122
- results. Always False for an instance of a DynamicClassificationResult."""
3123
- kind: Literal["DynamicClassification"] = "DynamicClassification"
3124
- """The text analysis kind - "DynamicClassification"."""
3125
-
3126
- def __init__(self, **kwargs: Any) -> None:
3127
- self.id = kwargs.get('id', None)
3128
- self.classifications = kwargs.get('classifications', None)
3129
- self.warnings = kwargs.get('warnings', [])
3130
- self.statistics = kwargs.get('statistics', None)
3131
- self.is_error: Literal[False] = False
3132
- self.kind: Literal["DynamicClassification"] = "DynamicClassification"
3133
-
3134
- def __repr__(self) -> str:
3135
- return (
3136
- f"DynamicClassificationResult(id={self.id}, classifications={repr(self.classifications)}, "
3137
- f"warnings={repr(self.warnings)}, statistics={repr(self.statistics)}, "
3138
- f"is_error={self.is_error}, kind={self.kind})"[:1024]
3139
- )
3140
-
3141
- @classmethod
3142
- def _from_generated(cls, result):
3143
- return cls(
3144
- id=result.id,
3145
- classifications=[
3146
- ClassificationCategory._from_generated(c) # pylint: disable=protected-access
3147
- for c in result.classifications
3148
- ],
3149
- warnings=[
3150
- TextAnalyticsWarning._from_generated( # pylint: disable=protected-access
3151
- w
3152
- )
3153
- for w in result.warnings
3154
- ],
3155
- statistics=TextDocumentStatistics._from_generated( # pylint: disable=protected-access
3156
- result.statistics
3157
- ),
3158
- )
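`DynamicClassificationResult` was returned by the preview-era `dynamic_classification` client method. A hedged sketch, assuming that method and its `categories` keyword as they existed in the 5.3.0b previews:

```python
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics import TextAnalyticsClient

client = TextAnalyticsClient(
    "https://<resource>.cognitiveservices.azure.com/", AzureKeyCredential("<key>")
)

results = client.dynamic_classification(
    ["The WHO is issuing a global health emergency."],
    categories=["Health", "Politics", "Music", "Sports"],
)
for result in results:
    if not result.is_error:  # DynamicClassificationResult
        for classification in result.classifications:
            print(classification.category, classification.confidence_score)
```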