azure_cognitiveservices_computervision 0.20.0 → 0.20.1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (57) hide show
  1. checksums.yaml +5 -5
  2. data/lib/1.0/generated/azure_cognitiveservices_computervision/computer_vision_client.rb +1 -1
  3. data/lib/2.0/generated/azure_cognitiveservices_computervision/computer_vision_client.rb +1 -1
  4. data/lib/2.1/generated/azure_cognitiveservices_computervision.rb +74 -0
  5. data/lib/2.1/generated/azure_cognitiveservices_computervision/computer_vision_client.rb +3253 -0
  6. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/adult_info.rb +105 -0
  7. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/area_of_interest_result.rb +72 -0
  8. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/bounding_rect.rb +83 -0
  9. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/category.rb +69 -0
  10. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/category_detail.rb +77 -0
  11. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/celebrities_model.rb +70 -0
  12. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/celebrity_results.rb +79 -0
  13. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/color_info.rb +98 -0
  14. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/computer_vision_error.rb +69 -0
  15. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/description_exclude.rb +16 -0
  16. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/details.rb +16 -0
  17. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/detect_result.rb +79 -0
  18. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/detected_brand.rb +73 -0
  19. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/detected_object.rb +86 -0
  20. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/domain_model_results.rb +70 -0
  21. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/face_description.rb +72 -0
  22. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/face_rectangle.rb +83 -0
  23. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/gender.rb +16 -0
  24. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_analysis.rb +212 -0
  25. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_caption.rb +57 -0
  26. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_description.rb +99 -0
  27. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_description_details.rb +76 -0
  28. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_metadata.rb +68 -0
  29. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_tag.rb +68 -0
  30. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_type.rb +57 -0
  31. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/image_url.rb +47 -0
  32. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/landmark_results.rb +79 -0
  33. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/landmarks_model.rb +58 -0
  34. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/line.rb +85 -0
  35. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/list_models_result.rb +56 -0
  36. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/model_description.rb +65 -0
  37. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/object_hierarchy.rb +73 -0
  38. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/ocr_languages.rb +41 -0
  39. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/ocr_line.rb +72 -0
  40. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/ocr_region.rb +72 -0
  41. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/ocr_result.rb +105 -0
  42. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/ocr_word.rb +62 -0
  43. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/read_operation_result.rb +69 -0
  44. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/tag_result.rb +79 -0
  45. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/text_operation_result.rb +61 -0
  46. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/text_operation_status_codes.rb +18 -0
  47. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_mode.rb +16 -0
  48. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_result.rb +114 -0
  49. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_result_confidence_class.rb +16 -0
  50. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_result_dimension_unit.rb +16 -0
  51. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/visual_feature_types.rb +23 -0
  52. data/lib/2.1/generated/azure_cognitiveservices_computervision/models/word.rb +78 -0
  53. data/lib/2.1/generated/azure_cognitiveservices_computervision/module_definition.rb +9 -0
  54. data/lib/azure_cognitiveservices_computervision.rb +1 -0
  55. data/lib/profiles/latest/modules/computervision_profile_module.rb +97 -93
  56. data/lib/version.rb +1 -1
  57. metadata +53 -3
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
- SHA1:
3
- metadata.gz: 7ad18653bcb5b979aab2adc9c10d8ed0b28ccbbc
4
- data.tar.gz: 9a1ce765f9e2059af6fe451657564580d72d44ec
2
+ SHA256:
3
+ metadata.gz: 11d43e350180a2474564f4dea8cbe8454c1852ac1575384d69f641c1decc41fe
4
+ data.tar.gz: 45e9fd0b15667ffe337af34e95650f6ebc479e63906076c7b94e61e45491e06c
5
5
  SHA512:
6
- metadata.gz: bcf66bd477a2ac858bb6fb280f8ad7cb824b0b10dafe2b2be541053b3fe3dbccd18273400c322b5e289033f36ebe8b0b84add1514f37bfc5382edc539eed7c1d
7
- data.tar.gz: a7bb5ecd4702f519836d9e9d3ad631fec763295e9bc08f3be7c8b995a62469a7c1549a9f198b90b5b536a989f45621e961c9f2a29b9eaab24c1d9b7fd6f3ea8f
6
+ metadata.gz: fd83734b21118194db51ef0cae12528864032730b92abc96f6c2b31221f9e4b61bf1cff8908139cbcdf42343f228197875dbbf23c9afafd60933bdeb6207aa8c
7
+ data.tar.gz: 50b63a3638e050ff10a041aa31366c523355b3cc7d6460326bb21a7154b5953048689d9f9dd868161c683fa357531442e80fc250f17ef73c8f44c2d36f3879c2
@@ -2286,7 +2286,7 @@ module Azure::CognitiveServices::ComputerVision::V1_0
2286
2286
  #
2287
2287
  def add_telemetry
2288
2288
  sdk_information = 'azure_cognitiveservices_computervision'
2289
- sdk_information = "#{sdk_information}/0.20.0"
2289
+ sdk_information = "#{sdk_information}/0.20.1"
2290
2290
  add_user_agent_information(sdk_information)
2291
2291
  end
2292
2292
  end
@@ -3210,7 +3210,7 @@ module Azure::CognitiveServices::ComputerVision::V2_0
3210
3210
  #
3211
3211
  def add_telemetry
3212
3212
  sdk_information = 'azure_cognitiveservices_computervision'
3213
- sdk_information = "#{sdk_information}/0.20.0"
3213
+ sdk_information = "#{sdk_information}/0.20.1"
3214
3214
  add_user_agent_information(sdk_information)
3215
3215
  end
3216
3216
  end
@@ -0,0 +1,74 @@
1
+ # encoding: utf-8
2
+ # Code generated by Microsoft (R) AutoRest Code Generator.
3
+ # Changes may cause incorrect behavior and will be lost if the code is
4
+ # regenerated.
5
+
6
+ require 'uri'
7
+ require 'cgi'
8
+ require 'date'
9
+ require 'json'
10
+ require 'base64'
11
+ require 'erb'
12
+ require 'securerandom'
13
+ require 'time'
14
+ require 'timeliness'
15
+ require 'faraday'
16
+ require 'faraday-cookie_jar'
17
+ require 'concurrent'
18
+ require 'ms_rest'
19
+ require '2.1/generated/azure_cognitiveservices_computervision/module_definition'
20
+ require 'ms_rest_azure'
21
+
22
+ module Azure::CognitiveServices::ComputerVision::V2_1
23
+ autoload :ComputerVisionClient, '2.1/generated/azure_cognitiveservices_computervision/computer_vision_client.rb'
24
+
25
+ module Models
26
+ autoload :DetectResult, '2.1/generated/azure_cognitiveservices_computervision/models/detect_result.rb'
27
+ autoload :FaceRectangle, '2.1/generated/azure_cognitiveservices_computervision/models/face_rectangle.rb'
28
+ autoload :ModelDescription, '2.1/generated/azure_cognitiveservices_computervision/models/model_description.rb'
29
+ autoload :LandmarksModel, '2.1/generated/azure_cognitiveservices_computervision/models/landmarks_model.rb'
30
+ autoload :ListModelsResult, '2.1/generated/azure_cognitiveservices_computervision/models/list_models_result.rb'
31
+ autoload :Category, '2.1/generated/azure_cognitiveservices_computervision/models/category.rb'
32
+ autoload :DomainModelResults, '2.1/generated/azure_cognitiveservices_computervision/models/domain_model_results.rb'
33
+ autoload :ColorInfo, '2.1/generated/azure_cognitiveservices_computervision/models/color_info.rb'
34
+ autoload :OcrWord, '2.1/generated/azure_cognitiveservices_computervision/models/ocr_word.rb'
35
+ autoload :ImageTag, '2.1/generated/azure_cognitiveservices_computervision/models/image_tag.rb'
36
+ autoload :OcrLine, '2.1/generated/azure_cognitiveservices_computervision/models/ocr_line.rb'
37
+ autoload :ImageDescriptionDetails, '2.1/generated/azure_cognitiveservices_computervision/models/image_description_details.rb'
38
+ autoload :OcrRegion, '2.1/generated/azure_cognitiveservices_computervision/models/ocr_region.rb'
39
+ autoload :BoundingRect, '2.1/generated/azure_cognitiveservices_computervision/models/bounding_rect.rb'
40
+ autoload :OcrResult, '2.1/generated/azure_cognitiveservices_computervision/models/ocr_result.rb'
41
+ autoload :DetectedObject, '2.1/generated/azure_cognitiveservices_computervision/models/detected_object.rb'
42
+ autoload :TagResult, '2.1/generated/azure_cognitiveservices_computervision/models/tag_result.rb'
43
+ autoload :ImageMetadata, '2.1/generated/azure_cognitiveservices_computervision/models/image_metadata.rb'
44
+ autoload :AreaOfInterestResult, '2.1/generated/azure_cognitiveservices_computervision/models/area_of_interest_result.rb'
45
+ autoload :ImageDescription, '2.1/generated/azure_cognitiveservices_computervision/models/image_description.rb'
46
+ autoload :ImageUrl, '2.1/generated/azure_cognitiveservices_computervision/models/image_url.rb'
47
+ autoload :CategoryDetail, '2.1/generated/azure_cognitiveservices_computervision/models/category_detail.rb'
48
+ autoload :ComputerVisionError, '2.1/generated/azure_cognitiveservices_computervision/models/computer_vision_error.rb'
49
+ autoload :ImageType, '2.1/generated/azure_cognitiveservices_computervision/models/image_type.rb'
50
+ autoload :LandmarkResults, '2.1/generated/azure_cognitiveservices_computervision/models/landmark_results.rb'
51
+ autoload :FaceDescription, '2.1/generated/azure_cognitiveservices_computervision/models/face_description.rb'
52
+ autoload :CelebrityResults, '2.1/generated/azure_cognitiveservices_computervision/models/celebrity_results.rb'
53
+ autoload :DetectedBrand, '2.1/generated/azure_cognitiveservices_computervision/models/detected_brand.rb'
54
+ autoload :Word, '2.1/generated/azure_cognitiveservices_computervision/models/word.rb'
55
+ autoload :CelebritiesModel, '2.1/generated/azure_cognitiveservices_computervision/models/celebrities_model.rb'
56
+ autoload :Line, '2.1/generated/azure_cognitiveservices_computervision/models/line.rb'
57
+ autoload :ImageCaption, '2.1/generated/azure_cognitiveservices_computervision/models/image_caption.rb'
58
+ autoload :TextRecognitionResult, '2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_result.rb'
59
+ autoload :ImageAnalysis, '2.1/generated/azure_cognitiveservices_computervision/models/image_analysis.rb'
60
+ autoload :TextOperationResult, '2.1/generated/azure_cognitiveservices_computervision/models/text_operation_result.rb'
61
+ autoload :ObjectHierarchy, '2.1/generated/azure_cognitiveservices_computervision/models/object_hierarchy.rb'
62
+ autoload :ReadOperationResult, '2.1/generated/azure_cognitiveservices_computervision/models/read_operation_result.rb'
63
+ autoload :AdultInfo, '2.1/generated/azure_cognitiveservices_computervision/models/adult_info.rb'
64
+ autoload :Gender, '2.1/generated/azure_cognitiveservices_computervision/models/gender.rb'
65
+ autoload :TextOperationStatusCodes, '2.1/generated/azure_cognitiveservices_computervision/models/text_operation_status_codes.rb'
66
+ autoload :TextRecognitionResultDimensionUnit, '2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_result_dimension_unit.rb'
67
+ autoload :TextRecognitionResultConfidenceClass, '2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_result_confidence_class.rb'
68
+ autoload :DescriptionExclude, '2.1/generated/azure_cognitiveservices_computervision/models/description_exclude.rb'
69
+ autoload :OcrLanguages, '2.1/generated/azure_cognitiveservices_computervision/models/ocr_languages.rb'
70
+ autoload :VisualFeatureTypes, '2.1/generated/azure_cognitiveservices_computervision/models/visual_feature_types.rb'
71
+ autoload :TextRecognitionMode, '2.1/generated/azure_cognitiveservices_computervision/models/text_recognition_mode.rb'
72
+ autoload :Details, '2.1/generated/azure_cognitiveservices_computervision/models/details.rb'
73
+ end
74
+ end
@@ -0,0 +1,3253 @@
1
+ # encoding: utf-8
2
+ # Code generated by Microsoft (R) AutoRest Code Generator.
3
+ # Changes may cause incorrect behavior and will be lost if the code is
4
+ # regenerated.
5
+
6
+ module Azure::CognitiveServices::ComputerVision::V2_1
7
+ #
8
+ # A service client - single point of access to the REST API.
9
+ #
10
+ class ComputerVisionClient < MsRestAzure::AzureServiceClient
11
+ include MsRestAzure
12
+ include MsRestAzure::Serialization
13
+
14
+ # @return [String] the base URI of the service.
15
+ attr_reader :base_url
16
+
17
+ # @return Credentials needed for the client to connect to Azure.
18
+ attr_reader :credentials1
19
+
20
+ # @return [String] Supported Cognitive Services endpoints.
21
+ attr_accessor :endpoint
22
+
23
+ # @return Subscription credentials which uniquely identify client
24
+ # subscription.
25
+ attr_accessor :credentials
26
+
27
+ # @return [String] The preferred language for the response.
28
+ attr_accessor :accept_language
29
+
30
+ # @return [Integer] The retry timeout in seconds for Long Running
31
+ # Operations. Default value is 30.
32
+ attr_accessor :long_running_operation_retry_timeout
33
+
34
+ # @return [Boolean] Whether a unique x-ms-client-request-id should be
35
+ # generated. When set to true a unique x-ms-client-request-id value is
36
+ # generated and included in each request. Default is true.
37
+ attr_accessor :generate_client_request_id
38
+
39
+ #
40
+ # Creates initializes a new instance of the ComputerVisionClient class.
41
+ # @param credentials [MsRest::ServiceClientCredentials] credentials to authorize HTTP requests made by the service client.
42
+ # @param options [Array] filters to be applied to the HTTP requests.
43
+ #
44
+ def initialize(credentials = nil, options = nil)
45
+ super(credentials, options)
46
+ @base_url = '{Endpoint}/vision/v2.1'
47
+
48
+ fail ArgumentError, 'invalid type of credentials input parameter' unless credentials.is_a?(MsRest::ServiceClientCredentials) unless credentials.nil?
49
+ @credentials = credentials
50
+
51
+ @accept_language = 'en-US'
52
+ @long_running_operation_retry_timeout = 30
53
+ @generate_client_request_id = true
54
+ add_telemetry
55
+ end
56
+
57
+ #
58
+ # Makes a request and returns the body of the response.
59
+ # @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
60
+ # @param path [String] the path, relative to {base_url}.
61
+ # @param options [Hash{String=>String}] specifying any request options like :body.
62
+ # @return [Hash{String=>String}] containing the body of the response.
63
+ # Example:
64
+ #
65
+ # request_content = "{'location':'westus','tags':{'tag1':'val1','tag2':'val2'}}"
66
+ # path = "/path"
67
+ # options = {
68
+ # body: request_content,
69
+ # query_params: {'api-version' => '2016-02-01'}
70
+ # }
71
+ # result = @client.make_request(:put, path, options)
72
+ #
73
+ def make_request(method, path, options = {})
74
+ result = make_request_with_http_info(method, path, options)
75
+ result.body unless result.nil?
76
+ end
77
+
78
+ #
79
+ # Makes a request and returns the operation response.
80
+ # @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
81
+ # @param path [String] the path, relative to {base_url}.
82
+ # @param options [Hash{String=>String}] specifying any request options like :body.
83
+ # @return [MsRestAzure::AzureOperationResponse] Operation response containing the request, response and status.
84
+ #
85
+ def make_request_with_http_info(method, path, options = {})
86
+ result = make_request_async(method, path, options).value!
87
+ result.body = result.response.body.to_s.empty? ? nil : JSON.load(result.response.body)
88
+ result
89
+ end
90
+
91
+ #
92
+ # Makes a request asynchronously.
93
+ # @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
94
+ # @param path [String] the path, relative to {base_url}.
95
+ # @param options [Hash{String=>String}] specifying any request options like :body.
96
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
97
+ #
98
+ def make_request_async(method, path, options = {})
99
+ fail ArgumentError, 'method is nil' if method.nil?
100
+ fail ArgumentError, 'path is nil' if path.nil?
101
+
102
+ request_url = options[:base_url] || @base_url
103
+ if(!options[:headers].nil? && !options[:headers]['Content-Type'].nil?)
104
+ @request_headers['Content-Type'] = options[:headers]['Content-Type']
105
+ end
106
+
107
+ request_headers = @request_headers
108
+ request_headers.merge!({'accept-language' => @accept_language}) unless @accept_language.nil?
109
+ options.merge!({headers: request_headers.merge(options[:headers] || {})})
110
+ options.merge!({credentials: @credentials}) unless @credentials.nil?
111
+
112
+ super(request_url, method, path, options)
113
+ end
114
+
115
+ #
116
+ # This operation extracts a rich set of visual features based on the image
117
+ # content.
118
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
119
+ # an image URL. Within your request, there is an optional parameter to allow
120
+ # you to choose which features to return. By default, image categories are
121
+ # returned in the response.
122
+ # A successful response will be returned in JSON. If the request failed, the
123
+ # response will contain an error code and a message to help understand what
124
+ # went wrong.
125
+ #
126
+ # @param url [String] Publicly reachable URL of an image.
127
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
128
+ # visual feature types to return. Multiple values should be comma-separated.
129
+ # Valid visual feature types include: Categories - categorizes image content
130
+ # according to a taxonomy defined in documentation. Tags - tags the image with
131
+ # a detailed list of words related to the image content. Description -
132
+ # describes the image content with a complete English sentence. Faces - detects
133
+ # if faces are present. If present, generate coordinates, gender and age.
134
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
135
+ # the accent color, dominant color, and whether an image is black&white. Adult
136
+ # - detects if the image is pornographic in nature (depicts nudity or a sex
137
+ # act), or is gory (depicts extreme violence or blood). Sexually suggestive
138
+ # content (aka racy content) is also detected. Objects - detects various
139
+ # objects within an image, including the approximate location. The Objects
140
+ # argument is only available in English. Brands - detects various brands within
141
+ # an image, including the approximate location. The Brands argument is only
142
+ # available in English.
143
+ # @param details [Array<Details>] A string indicating which domain-specific
144
+ # details to return. Multiple values should be comma-separated. Valid visual
145
+ # feature types include: Celebrities - identifies celebrities if detected in
146
+ # the image, Landmarks - identifies notable landmarks in the image.
147
+ # @param language [Enum] The desired language for output generation. If this
148
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
149
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
150
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
151
+ # 'ja', 'pt', 'zh'
152
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
153
+ # domain models when generating the description.
154
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
155
+ # will be added to the HTTP request.
156
+ #
157
+ # @return [ImageAnalysis] operation results.
158
+ #
159
+ def analyze_image(url, visual_features:nil, details:nil, language:nil, description_exclude:nil, custom_headers:nil)
160
+ response = analyze_image_async(url, visual_features:visual_features, details:details, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
161
+ response.body unless response.nil?
162
+ end
163
+
164
+ #
165
+ # This operation extracts a rich set of visual features based on the image
166
+ # content.
167
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
168
+ # an image URL. Within your request, there is an optional parameter to allow
169
+ # you to choose which features to return. By default, image categories are
170
+ # returned in the response.
171
+ # A successful response will be returned in JSON. If the request failed, the
172
+ # response will contain an error code and a message to help understand what
173
+ # went wrong.
174
+ #
175
+ # @param url [String] Publicly reachable URL of an image.
176
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
177
+ # visual feature types to return. Multiple values should be comma-separated.
178
+ # Valid visual feature types include: Categories - categorizes image content
179
+ # according to a taxonomy defined in documentation. Tags - tags the image with
180
+ # a detailed list of words related to the image content. Description -
181
+ # describes the image content with a complete English sentence. Faces - detects
182
+ # if faces are present. If present, generate coordinates, gender and age.
183
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
184
+ # the accent color, dominant color, and whether an image is black&white. Adult
185
+ # - detects if the image is pornographic in nature (depicts nudity or a sex
186
+ # act), or is gory (depicts extreme violence or blood). Sexually suggestive
187
+ # content (aka racy content) is also detected. Objects - detects various
188
+ # objects within an image, including the approximate location. The Objects
189
+ # argument is only available in English. Brands - detects various brands within
190
+ # an image, including the approximate location. The Brands argument is only
191
+ # available in English.
192
+ # @param details [Array<Details>] A string indicating which domain-specific
193
+ # details to return. Multiple values should be comma-separated. Valid visual
194
+ # feature types include: Celebrities - identifies celebrities if detected in
195
+ # the image, Landmarks - identifies notable landmarks in the image.
196
+ # @param language [Enum] The desired language for output generation. If this
197
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
198
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
199
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
200
+ # 'ja', 'pt', 'zh'
201
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
202
+ # domain models when generating the description.
203
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
204
+ # will be added to the HTTP request.
205
+ #
206
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
207
+ #
208
+ def analyze_image_with_http_info(url, visual_features:nil, details:nil, language:nil, description_exclude:nil, custom_headers:nil)
209
+ analyze_image_async(url, visual_features:visual_features, details:details, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
210
+ end
211
+
212
+ #
213
+ # This operation extracts a rich set of visual features based on the image
214
+ # content.
215
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
216
+ # an image URL. Within your request, there is an optional parameter to allow
217
+ # you to choose which features to return. By default, image categories are
218
+ # returned in the response.
219
+ # A successful response will be returned in JSON. If the request failed, the
220
+ # response will contain an error code and a message to help understand what
221
+ # went wrong.
222
+ #
223
+ # @param url [String] Publicly reachable URL of an image.
224
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
225
+ # visual feature types to return. Multiple values should be comma-separated.
226
+ # Valid visual feature types include: Categories - categorizes image content
227
+ # according to a taxonomy defined in documentation. Tags - tags the image with
228
+ # a detailed list of words related to the image content. Description -
229
+ # describes the image content with a complete English sentence. Faces - detects
230
+ # if faces are present. If present, generate coordinates, gender and age.
231
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
232
+ # the accent color, dominant color, and whether an image is black&white. Adult
233
+ # - detects if the image is pornographic in nature (depicts nudity or a sex
234
+ # act), or is gory (depicts extreme violence or blood). Sexually suggestive
235
+ # content (aka racy content) is also detected. Objects - detects various
236
+ # objects within an image, including the approximate location. The Objects
237
+ # argument is only available in English. Brands - detects various brands within
238
+ # an image, including the approximate location. The Brands argument is only
239
+ # available in English.
240
+ # @param details [Array<Details>] A string indicating which domain-specific
241
+ # details to return. Multiple values should be comma-separated. Valid visual
242
+ # feature types include: Celebrities - identifies celebrities if detected in
243
+ # the image, Landmarks - identifies notable landmarks in the image.
244
+ # @param language [Enum] The desired language for output generation. If this
245
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
246
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
247
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
248
+ # 'ja', 'pt', 'zh'
249
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
250
+ # domain models when generating the description.
251
+ # @param [Hash{String => String}] A hash of custom headers that will be added
252
+ # to the HTTP request.
253
+ #
254
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
255
+ #
256
+ def analyze_image_async(url, visual_features:nil, details:nil, language:nil, description_exclude:nil, custom_headers:nil)
257
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
258
+ fail ArgumentError, 'url is nil' if url.nil?
259
+
260
+ image_url = ImageUrl.new
261
+ unless url.nil?
262
+ image_url.url = url
263
+ end
264
+
265
+ request_headers = {}
266
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
267
+
268
+ # Set Headers
269
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
270
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
271
+
272
+ # Serialize Request
273
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
274
+ request_content = self.serialize(request_mapper, image_url)
275
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
276
+
277
+ path_template = 'analyze'
278
+
279
+ request_url = @base_url || self.base_url
280
+ request_url = request_url.gsub('{Endpoint}', endpoint)
281
+
282
+ options = {
283
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
284
+ query_params: {'visualFeatures' => visual_features.nil? ? nil : visual_features.join(','),'details' => details.nil? ? nil : details.join(','),'language' => language,'descriptionExclude' => description_exclude.nil? ? nil : description_exclude.join(',')},
285
+ body: request_content,
286
+ headers: request_headers.merge(custom_headers || {}),
287
+ base_url: request_url
288
+ }
289
+ promise = self.make_request_async(:post, path_template, options)
290
+
291
+ promise = promise.then do |result|
292
+ http_response = result.response
293
+ status_code = http_response.status
294
+ response_content = http_response.body
295
+ unless status_code == 200
296
+ error_model = JSON.load(response_content)
297
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
298
+ end
299
+
300
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
301
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
302
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
303
+ # Deserialize Response
304
+ if status_code == 200
305
+ begin
306
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
307
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageAnalysis.mapper()
308
+ result.body = self.deserialize(result_mapper, parsed_response)
309
+ rescue Exception => e
310
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
311
+ end
312
+ end
313
+
314
+ result
315
+ end
316
+
317
+ promise.execute
318
+ end
319
+
320
+ #
321
+ # This operation generates a description of an image in human readable language
322
+ # with complete sentences. The description is based on a collection of content
323
+ # tags, which are also returned by the operation. More than one description can
324
+ # be generated for each image. Descriptions are ordered by their confidence
325
+ # score. Descriptions may include results from celebrity and landmark domain
326
+ # models, if applicable.
327
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
328
+ # an image URL.
329
+ # A successful response will be returned in JSON. If the request failed, the
330
+ # response will contain an error code and a message to help understand what
331
+ # went wrong.
332
+ #
333
+ # @param url [String] Publicly reachable URL of an image.
334
+ # @param max_candidates [Integer] Maximum number of candidate descriptions to
335
+ # be returned. The default is 1.
336
+ # @param language [Enum] The desired language for output generation. If this
337
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
338
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
339
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
340
+ # 'ja', 'pt', 'zh'
341
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
342
+ # domain models when generating the description.
343
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
344
+ # will be added to the HTTP request.
345
+ #
346
+ # @return [ImageDescription] operation results.
347
+ #
348
+ def describe_image(url, max_candidates:1, language:nil, description_exclude:nil, custom_headers:nil)
349
+ response = describe_image_async(url, max_candidates:max_candidates, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
350
+ response.body unless response.nil?
351
+ end
352
+
353
+ #
354
+ # This operation generates a description of an image in human readable language
355
+ # with complete sentences. The description is based on a collection of content
356
+ # tags, which are also returned by the operation. More than one description can
357
+ # be generated for each image. Descriptions are ordered by their confidence
358
+ # score. Descriptions may include results from celebrity and landmark domain
359
+ # models, if applicable.
360
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
361
+ # an image URL.
362
+ # A successful response will be returned in JSON. If the request failed, the
363
+ # response will contain an error code and a message to help understand what
364
+ # went wrong.
365
+ #
366
+ # @param url [String] Publicly reachable URL of an image.
367
+ # @param max_candidates [Integer] Maximum number of candidate descriptions to
368
+ # be returned. The default is 1.
369
+ # @param language [Enum] The desired language for output generation. If this
370
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
371
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
372
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
373
+ # 'ja', 'pt', 'zh'
374
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
375
+ # domain models when generating the description.
376
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
377
+ # will be added to the HTTP request.
378
+ #
379
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
380
+ #
381
+ def describe_image_with_http_info(url, max_candidates:1, language:nil, description_exclude:nil, custom_headers:nil)
382
+ describe_image_async(url, max_candidates:max_candidates, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
383
+ end
384
+
385
+ #
386
+ # This operation generates a description of an image in human readable language
387
+ # with complete sentences. The description is based on a collection of content
388
+ # tags, which are also returned by the operation. More than one description can
389
+ # be generated for each image. Descriptions are ordered by their confidence
390
+ # score. Descriptions may include results from celebrity and landmark domain
391
+ # models, if applicable.
392
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
393
+ # an image URL.
394
+ # A successful response will be returned in JSON. If the request failed, the
395
+ # response will contain an error code and a message to help understand what
396
+ # went wrong.
397
+ #
398
+ # @param url [String] Publicly reachable URL of an image.
399
+ # @param max_candidates [Integer] Maximum number of candidate descriptions to
400
+ # be returned. The default is 1.
401
+ # @param language [Enum] The desired language for output generation. If this
402
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
403
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
404
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
405
+ # 'ja', 'pt', 'zh'
406
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
407
+ # domain models when generating the description.
408
+ # @param [Hash{String => String}] A hash of custom headers that will be added
409
+ # to the HTTP request.
410
+ #
411
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
412
+ #
413
+ def describe_image_async(url, max_candidates:1, language:nil, description_exclude:nil, custom_headers:nil)
414
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
415
+ fail ArgumentError, 'url is nil' if url.nil?
416
+
417
+ image_url = ImageUrl.new
418
+ unless url.nil?
419
+ image_url.url = url
420
+ end
421
+
422
+ request_headers = {}
423
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
424
+
425
+ # Set Headers
426
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
427
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
428
+
429
+ # Serialize Request
430
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
431
+ request_content = self.serialize(request_mapper, image_url)
432
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
433
+
434
+ path_template = 'describe'
435
+
436
+ request_url = @base_url || self.base_url
437
+ request_url = request_url.gsub('{Endpoint}', endpoint)
438
+
439
+ options = {
440
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
441
+ query_params: {'maxCandidates' => max_candidates,'language' => language,'descriptionExclude' => description_exclude.nil? ? nil : description_exclude.join(',')},
442
+ body: request_content,
443
+ headers: request_headers.merge(custom_headers || {}),
444
+ base_url: request_url
445
+ }
446
+ promise = self.make_request_async(:post, path_template, options)
447
+
448
+ promise = promise.then do |result|
449
+ http_response = result.response
450
+ status_code = http_response.status
451
+ response_content = http_response.body
452
+ unless status_code == 200
453
+ error_model = JSON.load(response_content)
454
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
455
+ end
456
+
457
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
458
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
459
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
460
+ # Deserialize Response
461
+ if status_code == 200
462
+ begin
463
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
464
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageDescription.mapper()
465
+ result.body = self.deserialize(result_mapper, parsed_response)
466
+ rescue Exception => e
467
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
468
+ end
469
+ end
470
+
471
+ result
472
+ end
473
+
474
+ promise.execute
475
+ end
476
+
477
+ #
478
+ # Performs object detection on the specified image.
479
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
480
+ # an image URL.
481
+ # A successful response will be returned in JSON. If the request failed, the
482
+ # response will contain an error code and a message to help understand what
483
+ # went wrong.
484
+ #
485
+ # @param url [String] Publicly reachable URL of an image.
486
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
487
+ # will be added to the HTTP request.
488
+ #
489
+ # @return [DetectResult] operation results.
490
+ #
491
+ def detect_objects(url, custom_headers:nil)
492
+ response = detect_objects_async(url, custom_headers:custom_headers).value!
493
+ response.body unless response.nil?
494
+ end
495
+
496
+ #
497
+ # Performs object detection on the specified image.
498
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
499
+ # an image URL.
500
+ # A successful response will be returned in JSON. If the request failed, the
501
+ # response will contain an error code and a message to help understand what
502
+ # went wrong.
503
+ #
504
+ # @param url [String] Publicly reachable URL of an image.
505
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
506
+ # will be added to the HTTP request.
507
+ #
508
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
509
+ #
510
+ def detect_objects_with_http_info(url, custom_headers:nil)
511
+ detect_objects_async(url, custom_headers:custom_headers).value!
512
+ end
513
+
514
+ #
515
+ # Performs object detection on the specified image.
516
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
517
+ # an image URL.
518
+ # A successful response will be returned in JSON. If the request failed, the
519
+ # response will contain an error code and a message to help understand what
520
+ # went wrong.
521
+ #
522
+ # @param url [String] Publicly reachable URL of an image.
523
+ # @param [Hash{String => String}] A hash of custom headers that will be added
524
+ # to the HTTP request.
525
+ #
526
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
527
+ #
528
+ def detect_objects_async(url, custom_headers:nil)
529
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
530
+ fail ArgumentError, 'url is nil' if url.nil?
531
+
532
+ image_url = ImageUrl.new
533
+ unless url.nil?
534
+ image_url.url = url
535
+ end
536
+
537
+ request_headers = {}
538
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
539
+
540
+ # Set Headers
541
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
542
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
543
+
544
+ # Serialize Request
545
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
546
+ request_content = self.serialize(request_mapper, image_url)
547
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
548
+
549
+ path_template = 'detect'
550
+
551
+ request_url = @base_url || self.base_url
552
+ request_url = request_url.gsub('{Endpoint}', endpoint)
553
+
554
+ options = {
555
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
556
+ body: request_content,
557
+ headers: request_headers.merge(custom_headers || {}),
558
+ base_url: request_url
559
+ }
560
+ promise = self.make_request_async(:post, path_template, options)
561
+
562
+ promise = promise.then do |result|
563
+ http_response = result.response
564
+ status_code = http_response.status
565
+ response_content = http_response.body
566
+ unless status_code == 200
567
+ error_model = JSON.load(response_content)
568
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
569
+ end
570
+
571
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
572
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
573
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
574
+ # Deserialize Response
575
+ if status_code == 200
576
+ begin
577
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
578
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::DetectResult.mapper()
579
+ result.body = self.deserialize(result_mapper, parsed_response)
580
+ rescue Exception => e
581
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
582
+ end
583
+ end
584
+
585
+ result
586
+ end
587
+
588
+ promise.execute
589
+ end
590
+
591
+ #
592
+ # This operation returns the list of domain-specific models that are supported
593
+ # by the Computer Vision API. Currently, the API supports following
594
+ # domain-specific models: celebrity recognizer, landmark recognizer.
595
+ # A successful response will be returned in JSON. If the request failed, the
596
+ # response will contain an error code and a message to help understand what
597
+ # went wrong.
598
+ #
599
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
600
+ # will be added to the HTTP request.
601
+ #
602
+ # @return [ListModelsResult] operation results.
603
+ #
604
+ def list_models(custom_headers:nil)
605
+ response = list_models_async(custom_headers:custom_headers).value!
606
+ response.body unless response.nil?
607
+ end
608
+
609
+ #
610
+ # This operation returns the list of domain-specific models that are supported
611
+ # by the Computer Vision API. Currently, the API supports following
612
+ # domain-specific models: celebrity recognizer, landmark recognizer.
613
+ # A successful response will be returned in JSON. If the request failed, the
614
+ # response will contain an error code and a message to help understand what
615
+ # went wrong.
616
+ #
617
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
618
+ # will be added to the HTTP request.
619
+ #
620
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
621
+ #
622
+ def list_models_with_http_info(custom_headers:nil)
623
+ list_models_async(custom_headers:custom_headers).value!
624
+ end
625
+
626
+ #
627
+ # This operation returns the list of domain-specific models that are supported
628
+ # by the Computer Vision API. Currently, the API supports following
629
+ # domain-specific models: celebrity recognizer, landmark recognizer.
630
+ # A successful response will be returned in JSON. If the request failed, the
631
+ # response will contain an error code and a message to help understand what
632
+ # went wrong.
633
+ #
634
+ # @param [Hash{String => String}] A hash of custom headers that will be added
635
+ # to the HTTP request.
636
+ #
637
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
638
+ #
639
+ def list_models_async(custom_headers:nil)
640
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
641
+
642
+
643
+ request_headers = {}
644
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
645
+
646
+ # Set Headers
647
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
648
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
649
+ path_template = 'models'
650
+
651
+ request_url = @base_url || self.base_url
652
+ request_url = request_url.gsub('{Endpoint}', endpoint)
653
+
654
+ options = {
655
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
656
+ headers: request_headers.merge(custom_headers || {}),
657
+ base_url: request_url
658
+ }
659
+ promise = self.make_request_async(:get, path_template, options)
660
+
661
+ promise = promise.then do |result|
662
+ http_response = result.response
663
+ status_code = http_response.status
664
+ response_content = http_response.body
665
+ unless status_code == 200
666
+ error_model = JSON.load(response_content)
667
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
668
+ end
669
+
670
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
671
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
672
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
673
+ # Deserialize Response
674
+ if status_code == 200
675
+ begin
676
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
677
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ListModelsResult.mapper()
678
+ result.body = self.deserialize(result_mapper, parsed_response)
679
+ rescue Exception => e
680
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
681
+ end
682
+ end
683
+
684
+ result
685
+ end
686
+
687
+ promise.execute
688
+ end
689
+
690
+ #
691
+ # This operation recognizes content within an image by applying a
692
+ # domain-specific model. The list of domain-specific models that are supported
693
+ # by the Computer Vision API can be retrieved using the /models GET request.
694
+ # Currently, the API provides following domain-specific models: celebrities,
695
+ # landmarks.
696
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
697
+ # an image URL.
698
+ # A successful response will be returned in JSON.
699
+ # If the request failed, the response will contain an error code and a message
700
+ # to help understand what went wrong.
701
+ #
702
+ # @param model [String] The domain-specific content to recognize.
703
+ # @param url [String] Publicly reachable URL of an image.
704
+ # @param language [Enum] The desired language for output generation. If this
705
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
706
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
707
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
708
+ # 'ja', 'pt', 'zh'
709
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
710
+ # will be added to the HTTP request.
711
+ #
712
+ # @return [DomainModelResults] operation results.
713
+ #
714
+ def analyze_image_by_domain(model, url, language:nil, custom_headers:nil)
715
+ response = analyze_image_by_domain_async(model, url, language:language, custom_headers:custom_headers).value!
716
+ response.body unless response.nil?
717
+ end
718
+
719
+ #
720
+ # This operation recognizes content within an image by applying a
721
+ # domain-specific model. The list of domain-specific models that are supported
722
+ # by the Computer Vision API can be retrieved using the /models GET request.
723
+ # Currently, the API provides following domain-specific models: celebrities,
724
+ # landmarks.
725
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
726
+ # an image URL.
727
+ # A successful response will be returned in JSON.
728
+ # If the request failed, the response will contain an error code and a message
729
+ # to help understand what went wrong.
730
+ #
731
+ # @param model [String] The domain-specific content to recognize.
732
+ # @param url [String] Publicly reachable URL of an image.
733
+ # @param language [Enum] The desired language for output generation. If this
734
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
735
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
736
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
737
+ # 'ja', 'pt', 'zh'
738
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
739
+ # will be added to the HTTP request.
740
+ #
741
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
742
+ #
743
+ def analyze_image_by_domain_with_http_info(model, url, language:nil, custom_headers:nil)
744
+ analyze_image_by_domain_async(model, url, language:language, custom_headers:custom_headers).value!
745
+ end
746
+
747
+ #
748
+ # This operation recognizes content within an image by applying a
749
+ # domain-specific model. The list of domain-specific models that are supported
750
+ # by the Computer Vision API can be retrieved using the /models GET request.
751
+ # Currently, the API provides following domain-specific models: celebrities,
752
+ # landmarks.
753
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
754
+ # an image URL.
755
+ # A successful response will be returned in JSON.
756
+ # If the request failed, the response will contain an error code and a message
757
+ # to help understand what went wrong.
758
+ #
759
+ # @param model [String] The domain-specific content to recognize.
760
+ # @param url [String] Publicly reachable URL of an image.
761
+ # @param language [Enum] The desired language for output generation. If this
762
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
763
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
764
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
765
+ # 'ja', 'pt', 'zh'
766
+ # @param [Hash{String => String}] A hash of custom headers that will be added
767
+ # to the HTTP request.
768
+ #
769
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
770
+ #
771
+ def analyze_image_by_domain_async(model, url, language:nil, custom_headers:nil)
772
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
773
+ fail ArgumentError, 'model is nil' if model.nil?
774
+ fail ArgumentError, 'url is nil' if url.nil?
775
+
776
+ image_url = ImageUrl.new
777
+ unless url.nil?
778
+ image_url.url = url
779
+ end
780
+
781
+ request_headers = {}
782
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
783
+
784
+ # Set Headers
785
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
786
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
787
+
788
+ # Serialize Request
789
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
790
+ request_content = self.serialize(request_mapper, image_url)
791
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
792
+
793
+ path_template = 'models/{model}/analyze'
794
+
795
+ request_url = @base_url || self.base_url
796
+ request_url = request_url.gsub('{Endpoint}', endpoint)
797
+
798
+ options = {
799
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
800
+ path_params: {'model' => model},
801
+ query_params: {'language' => language},
802
+ body: request_content,
803
+ headers: request_headers.merge(custom_headers || {}),
804
+ base_url: request_url
805
+ }
806
+ promise = self.make_request_async(:post, path_template, options)
807
+
808
+ promise = promise.then do |result|
809
+ http_response = result.response
810
+ status_code = http_response.status
811
+ response_content = http_response.body
812
+ unless status_code == 200
813
+ error_model = JSON.load(response_content)
814
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
815
+ end
816
+
817
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
818
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
819
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
820
+ # Deserialize Response
821
+ if status_code == 200
822
+ begin
823
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
824
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::DomainModelResults.mapper()
825
+ result.body = self.deserialize(result_mapper, parsed_response)
826
+ rescue Exception => e
827
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
828
+ end
829
+ end
830
+
831
+ result
832
+ end
833
+
834
+ promise.execute
835
+ end
836
+
837
+ #
838
+ # Optical Character Recognition (OCR) detects text in an image and extracts the
839
+ # recognized characters into a machine-usable character stream.
840
+ # Upon success, the OCR results will be returned.
841
+ # Upon failure, the error code together with an error message will be returned.
842
+ # The error code can be one of InvalidImageUrl, InvalidImageFormat,
843
+ # InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
844
+ # InternalServerError.
845
+ #
846
+ # @param detect_orientation [Boolean] Whether detect the text orientation in
847
+ # the image. With detectOrientation=true the OCR service tries to detect the
848
+ # image orientation and correct it before further processing (e.g. if it's
849
+ # upside-down).
850
+ # @param url [String] Publicly reachable URL of an image.
851
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
852
+ # detected in the image. The default value is 'unk'. Possible values include:
853
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
854
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
855
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
856
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
857
+ # will be added to the HTTP request.
858
+ #
859
+ # @return [OcrResult] operation results.
860
+ #
861
+ def recognize_printed_text(detect_orientation, url, language:nil, custom_headers:nil)
862
+ response = recognize_printed_text_async(detect_orientation, url, language:language, custom_headers:custom_headers).value!
863
+ response.body unless response.nil?
864
+ end
865
+
866
+ #
867
+ # Optical Character Recognition (OCR) detects text in an image and extracts the
868
+ # recognized characters into a machine-usable character stream.
869
+ # Upon success, the OCR results will be returned.
870
+ # Upon failure, the error code together with an error message will be returned.
871
+ # The error code can be one of InvalidImageUrl, InvalidImageFormat,
872
+ # InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
873
+ # InternalServerError.
874
+ #
875
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
876
+ # the image. With detectOrientation=true the OCR service tries to detect the
877
+ # image orientation and correct it before further processing (e.g. if it's
878
+ # upside-down).
879
+ # @param url [String] Publicly reachable URL of an image.
880
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
881
+ # detected in the image. The default value is 'unk'. Possible values include:
882
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
883
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
884
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
885
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
886
+ # will be added to the HTTP request.
887
+ #
888
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
889
+ #
890
+ def recognize_printed_text_with_http_info(detect_orientation, url, language:nil, custom_headers:nil)
891
+ recognize_printed_text_async(detect_orientation, url, language:language, custom_headers:custom_headers).value!
892
+ end
893
+
894
+ #
895
+ # Optical Character Recognition (OCR) detects text in an image and extracts the
896
+ # recognized characters into a machine-usable character stream.
897
+ # Upon success, the OCR results will be returned.
898
+ # Upon failure, the error code together with an error message will be returned.
899
+ # The error code can be one of InvalidImageUrl, InvalidImageFormat,
900
+ # InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
901
+ # InternalServerError.
902
+ #
903
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
904
+ # the image. With detectOrientation=true the OCR service tries to detect the
905
+ # image orientation and correct it before further processing (e.g. if it's
906
+ # upside-down).
907
+ # @param url [String] Publicly reachable URL of an image.
908
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
909
+ # detected in the image. The default value is 'unk'. Possible values include:
910
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
911
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
912
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
913
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
914
+ # to the HTTP request.
915
+ #
916
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
917
+ #
918
+ def recognize_printed_text_async(detect_orientation, url, language:nil, custom_headers:nil)
919
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
920
+ fail ArgumentError, 'detect_orientation is nil' if detect_orientation.nil?
921
+ fail ArgumentError, 'url is nil' if url.nil?
922
+
923
+ image_url = ImageUrl.new
924
+ unless url.nil?
925
+ image_url.url = url
926
+ end
927
+
928
+ request_headers = {}
929
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
930
+
931
+ # Set Headers
932
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
933
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
934
+
935
+ # Serialize Request
936
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
937
+ request_content = self.serialize(request_mapper, image_url)
938
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
939
+
940
+ path_template = 'ocr'
941
+
942
+ request_url = @base_url || self.base_url
943
+ request_url = request_url.gsub('{Endpoint}', endpoint)
944
+
945
+ options = {
946
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
947
+ query_params: {'detectOrientation' => detect_orientation,'language' => language},
948
+ body: request_content,
949
+ headers: request_headers.merge(custom_headers || {}),
950
+ base_url: request_url
951
+ }
952
+ promise = self.make_request_async(:post, path_template, options)
953
+
954
+ promise = promise.then do |result|
955
+ http_response = result.response
956
+ status_code = http_response.status
957
+ response_content = http_response.body
958
+ unless status_code == 200
959
+ error_model = JSON.load(response_content)
960
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
961
+ end
962
+
963
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
964
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
965
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
966
+ # Deserialize Response
967
+ if status_code == 200
968
+ begin
969
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
970
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::OcrResult.mapper()
971
+ result.body = self.deserialize(result_mapper, parsed_response)
972
+ rescue Exception => e
973
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
974
+ end
975
+ end
976
+
977
+ result
978
+ end
979
+
980
+ promise.execute
981
+ end
982
+
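A minimal OCR usage sketch, assuming `client` is an already-configured ComputerVisionClient and that OcrResult mirrors the REST payload (regions containing lines containing words); the image URL is illustrative:

    ocr = client.recognize_printed_text(true, 'https://example.com/receipt.jpg', language: 'en')
    # Walk the region/line/word hierarchy and print each recognized line of text.
    ocr.regions.each do |region|
      region.lines.each { |line| puts line.words.map(&:text).join(' ') }
    end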
983
+ #
984
+ # This operation generates a list of words, or tags, that are relevant to the
985
+ # content of the supplied image. The Computer Vision API can return tags based
986
+ # on objects, living beings, scenery or actions found in images. Unlike
987
+ # categories, tags are not organized according to a hierarchical classification
988
+ # system, but correspond to image content. Tags may contain hints to avoid
989
+ # ambiguity or provide context, for example the tag "ascomycete" may be
990
+ # accompanied by the hint "fungus".
991
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
992
+ # an image URL.
993
+ # A successful response will be returned in JSON. If the request failed, the
994
+ # response will contain an error code and a message to help understand what
995
+ # went wrong.
996
+ #
997
+ # @param url [String] Publicly reachable URL of an image.
998
+ # @param language [Enum] The desired language for output generation. If this
999
+ # parameter is not specified, the default value is "en". Supported
1000
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1001
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1002
+ # 'ja', 'pt', 'zh'
1003
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1004
+ # will be added to the HTTP request.
1005
+ #
1006
+ # @return [TagResult] operation results.
1007
+ #
1008
+ def tag_image(url, language:nil, custom_headers:nil)
1009
+ response = tag_image_async(url, language:language, custom_headers:custom_headers).value!
1010
+ response.body unless response.nil?
1011
+ end
1012
+
1013
+ #
1014
+ # This operation generates a list of words, or tags, that are relevant to the
1015
+ # content of the supplied image. The Computer Vision API can return tags based
1016
+ # on objects, living beings, scenery or actions found in images. Unlike
1017
+ # categories, tags are not organized according to a hierarchical classification
1018
+ # system, but correspond to image content. Tags may contain hints to avoid
1019
+ # ambiguity or provide context, for example the tag "ascomycete" may be
1020
+ # accompanied by the hint "fungus".
1021
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
1022
+ # an image URL.
1023
+ # A successful response will be returned in JSON. If the request failed, the
1024
+ # response will contain an error code and a message to help understand what
1025
+ # went wrong.
1026
+ #
1027
+ # @param url [String] Publicly reachable URL of an image.
1028
+ # @param language [Enum] The desired language for output generation. If this
1029
+ # parameter is not specified, the default value is "en". Supported
1030
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1031
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1032
+ # 'ja', 'pt', 'zh'
1033
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1034
+ # will be added to the HTTP request.
1035
+ #
1036
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1037
+ #
1038
+ def tag_image_with_http_info(url, language:nil, custom_headers:nil)
1039
+ tag_image_async(url, language:language, custom_headers:custom_headers).value!
1040
+ end
1041
+
1042
+ #
1043
+ # This operation generates a list of words, or tags, that are relevant to the
1044
+ # content of the supplied image. The Computer Vision API can return tags based
1045
+ # on objects, living beings, scenery or actions found in images. Unlike
1046
+ # categories, tags are not organized according to a hierarchical classification
1047
+ # system, but correspond to image content. Tags may contain hints to avoid
1048
+ # ambiguity or provide context, for example the tag "ascomycete" may be
1049
+ # accompanied by the hint "fungus".
1050
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
1051
+ # an image URL.
1052
+ # A successful response will be returned in JSON. If the request failed, the
1053
+ # response will contain an error code and a message to help understand what
1054
+ # went wrong.
1055
+ #
1056
+ # @param url [String] Publicly reachable URL of an image.
1057
+ # @param language [Enum] The desired language for output generation. If this
1058
+ # parameter is not specified, the default value is "en". Supported
1059
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1060
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1061
+ # 'ja', 'pt', 'zh'
1062
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1063
+ # to the HTTP request.
1064
+ #
1065
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1066
+ #
1067
+ def tag_image_async(url, language:nil, custom_headers:nil)
1068
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1069
+ fail ArgumentError, 'url is nil' if url.nil?
1070
+
1071
+ image_url = ImageUrl.new
1072
+ unless url.nil?
1073
+ image_url.url = url
1074
+ end
1075
+
1076
+ request_headers = {}
1077
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1078
+
1079
+ # Set Headers
1080
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1081
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1082
+
1083
+ # Serialize Request
1084
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
1085
+ request_content = self.serialize(request_mapper, image_url)
1086
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1087
+
1088
+ path_template = 'tag'
1089
+
1090
+ request_url = @base_url || self.base_url
1091
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1092
+
1093
+ options = {
1094
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1095
+ query_params: {'language' => language},
1096
+ body: request_content,
1097
+ headers: request_headers.merge(custom_headers || {}),
1098
+ base_url: request_url
1099
+ }
1100
+ promise = self.make_request_async(:post, path_template, options)
1101
+
1102
+ promise = promise.then do |result|
1103
+ http_response = result.response
1104
+ status_code = http_response.status
1105
+ response_content = http_response.body
1106
+ unless status_code == 200
1107
+ error_model = JSON.load(response_content)
1108
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1109
+ end
1110
+
1111
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1112
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1113
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1114
+ # Deserialize Response
1115
+ if status_code == 200
1116
+ begin
1117
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1118
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::TagResult.mapper()
1119
+ result.body = self.deserialize(result_mapper, parsed_response)
1120
+ rescue Exception => e
1121
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1122
+ end
1123
+ end
1124
+
1125
+ result
1126
+ end
1127
+
1128
+ promise.execute
1129
+ end
1130
+
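A short tagging sketch, assuming `client` is configured and that TagResult exposes a `tags` array of ImageTag objects with name and confidence; the URL is illustrative:

    tags = client.tag_image('https://example.com/photo.jpg', language: 'en')
    # Print every tag together with its confidence score.
    tags.tags.each { |tag| puts format('%s (%.2f)', tag.name, tag.confidence) }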
1131
+ #
1132
+ # This operation generates a thumbnail image with the user-specified width and
1133
+ # height. By default, the service analyzes the image, identifies the region of
1134
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
1135
+ # Smart cropping helps when you specify an aspect ratio that differs from that
1136
+ # of the input image.
1137
+ # A successful response contains the thumbnail image binary. If the request
1138
+ # failed, the response contains an error code and a message to help determine
1139
+ # what went wrong.
1140
+ # Upon failure, the error code and an error message are returned. The error
1141
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
1142
+ # InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
1143
+ # InternalServerError.
1144
+ #
1145
+ # @param width [Integer] Width of the thumbnail, in pixels. It must be between
1146
+ # 1 and 1024. Recommended minimum of 50.
1147
+ # @param height [Integer] Height of the thumbnail, in pixels. It must be
1148
+ # between 1 and 1024. Recommended minimum of 50.
1149
+ # @param url [String] Publicly reachable URL of an image.
1150
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
1151
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1152
+ # will be added to the HTTP request.
1153
+ #
1154
+ # @return [NOT_IMPLEMENTED] operation results.
1155
+ #
1156
+ def generate_thumbnail(width, height, url, smart_cropping:false, custom_headers:nil)
1157
+ response = generate_thumbnail_async(width, height, url, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
1158
+ response.body unless response.nil?
1159
+ end
1160
+
1161
+ #
1162
+ # This operation generates a thumbnail image with the user-specified width and
1163
+ # height. By default, the service analyzes the image, identifies the region of
1164
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
1165
+ # Smart cropping helps when you specify an aspect ratio that differs from that
1166
+ # of the input image.
1167
+ # A successful response contains the thumbnail image binary. If the request
1168
+ # failed, the response contains an error code and a message to help determine
1169
+ # what went wrong.
1170
+ # Upon failure, the error code and an error message are returned. The error
1171
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
1172
+ # InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
1173
+ # InternalServerError.
1174
+ #
1175
+ # @param width [Integer] Width of the thumbnail, in pixels. It must be between
1176
+ # 1 and 1024. Recommended minimum of 50.
1177
+ # @param height [Integer] Height of the thumbnail, in pixels. It must be
1178
+ # between 1 and 1024. Recommended minimum of 50.
1179
+ # @param url [String] Publicly reachable URL of an image.
1180
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
1181
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1182
+ # will be added to the HTTP request.
1183
+ #
1184
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1185
+ #
1186
+ def generate_thumbnail_with_http_info(width, height, url, smart_cropping:false, custom_headers:nil)
1187
+ generate_thumbnail_async(width, height, url, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
1188
+ end
1189
+
1190
+ #
1191
+ # This operation generates a thumbnail image with the user-specified width and
1192
+ # height. By default, the service analyzes the image, identifies the region of
1193
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
1194
+ # Smart cropping helps when you specify an aspect ratio that differs from that
1195
+ # of the input image.
1196
+ # A successful response contains the thumbnail image binary. If the request
1197
+ # failed, the response contains an error code and a message to help determine
1198
+ # what went wrong.
1199
+ # Upon failure, the error code and an error message are returned. The error
1200
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
1201
+ # InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
1202
+ # InternalServerError.
1203
+ #
1204
+ # @param width [Integer] Width of the thumbnail, in pixels. It must be between
1205
+ # 1 and 1024. Recommended minimum of 50.
1206
+ # @param height [Integer] Height of the thumbnail, in pixels. It must be
1207
+ # between 1 and 1024. Recommended minimum of 50.
1208
+ # @param url [String] Publicly reachable URL of an image.
1209
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
1210
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1211
+ # to the HTTP request.
1212
+ #
1213
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1214
+ #
1215
+ def generate_thumbnail_async(width, height, url, smart_cropping:false, custom_headers:nil)
1216
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1217
+ fail ArgumentError, 'width is nil' if width.nil?
1218
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMaximum': '1024'" if !width.nil? && width > 1024
1219
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMinimum': '1'" if !width.nil? && width < 1
1220
+ fail ArgumentError, 'height is nil' if height.nil?
1221
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMaximum': '1024'" if !height.nil? && height > 1024
1222
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMinimum': '1'" if !height.nil? && height < 1
1223
+ fail ArgumentError, 'url is nil' if url.nil?
1224
+
1225
+ image_url = ImageUrl.new
1226
+ unless url.nil?
1227
+ image_url.url = url
1228
+ end
1229
+
1230
+ request_headers = {}
1231
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1232
+
1233
+ # Set Headers
1234
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1235
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1236
+
1237
+ # Serialize Request
1238
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
1239
+ request_content = self.serialize(request_mapper, image_url)
1240
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1241
+
1242
+ path_template = 'generateThumbnail'
1243
+
1244
+ request_url = @base_url || self.base_url
1245
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1246
+
1247
+ options = {
1248
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1249
+ query_params: {'width' => width,'height' => height,'smartCropping' => smart_cropping},
1250
+ body: request_content,
1251
+ headers: request_headers.merge(custom_headers || {}),
1252
+ base_url: request_url
1253
+ }
1254
+ promise = self.make_request_async(:post, path_template, options)
1255
+
1256
+ promise = promise.then do |result|
1257
+ http_response = result.response
1258
+ status_code = http_response.status
1259
+ response_content = http_response.body
1260
+ unless status_code == 200
1261
+ error_model = JSON.load(response_content)
1262
+ fail MsRestAzure::AzureOperationError.new(result.request, http_response, error_model)
1263
+ end
1264
+
1265
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1266
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1267
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1268
+ # Deserialize Response
1269
+ if status_code == 200
1270
+ begin
1271
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1272
+ result_mapper = {
1273
+ client_side_validation: true,
1274
+ required: false,
1275
+ serialized_name: 'parsed_response',
1276
+ type: {
1277
+ name: 'Stream'
1278
+ }
1279
+ }
1280
+ result.body = self.deserialize(result_mapper, parsed_response)
1281
+ rescue Exception => e
1282
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1283
+ end
1284
+ end
1285
+
1286
+ result
1287
+ end
1288
+
1289
+ promise.execute
1290
+ end
1291
+
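A thumbnail sketch, assuming `client` is configured and that the deserialized body (mapped as a Stream above) can be written straight to disk as the binary thumbnail; the URL and file name are illustrative:

    thumbnail = client.generate_thumbnail(100, 100, 'https://example.com/photo.jpg', smart_cropping: true)
    # Persist the returned thumbnail bytes.
    File.binwrite('thumbnail.jpg', thumbnail)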
1292
+ #
1293
+ # This operation returns a bounding box around the most important area of the
1294
+ # image.
1295
+ # A successful response will be returned in JSON. If the request failed, the
1296
+ # response contains an error code and a message to help determine what went
1297
+ # wrong.
1298
+ # Upon failure, the error code and an error message are returned. The error
1299
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
1300
+ # NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
1301
+ #
1302
+ # @param url [String] Publicly reachable URL of an image.
1303
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1304
+ # will be added to the HTTP request.
1305
+ #
1306
+ # @return [AreaOfInterestResult] operation results.
1307
+ #
1308
+ def get_area_of_interest(url, custom_headers:nil)
1309
+ response = get_area_of_interest_async(url, custom_headers:custom_headers).value!
1310
+ response.body unless response.nil?
1311
+ end
1312
+
1313
+ #
1314
+ # This operation returns a bounding box around the most important area of the
1315
+ # image.
1316
+ # A successful response will be returned in JSON. If the request failed, the
1317
+ # response contains an error code and a message to help determine what went
1318
+ # wrong.
1319
+ # Upon failure, the error code and an error message are returned. The error
1320
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
1321
+ # NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
1322
+ #
1323
+ # @param url [String] Publicly reachable URL of an image.
1324
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1325
+ # will be added to the HTTP request.
1326
+ #
1327
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1328
+ #
1329
+ def get_area_of_interest_with_http_info(url, custom_headers:nil)
1330
+ get_area_of_interest_async(url, custom_headers:custom_headers).value!
1331
+ end
1332
+
1333
+ #
1334
+ # This operation returns a bounding box around the most important area of the
1335
+ # image.
1336
+ # A successful response will be returned in JSON. If the request failed, the
1337
+ # response contains an error code and a message to help determine what went
1338
+ # wrong.
1339
+ # Upon failure, the error code and an error message are returned. The error
1340
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
1341
+ # NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
1342
+ #
1343
+ # @param url [String] Publicly reachable URL of an image.
1344
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1345
+ # to the HTTP request.
1346
+ #
1347
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1348
+ #
1349
+ def get_area_of_interest_async(url, custom_headers:nil)
1350
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1351
+ fail ArgumentError, 'url is nil' if url.nil?
1352
+
1353
+ image_url = ImageUrl.new
1354
+ unless url.nil?
1355
+ image_url.url = url
1356
+ end
1357
+
1358
+ request_headers = {}
1359
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1360
+
1361
+ # Set Headers
1362
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1363
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1364
+
1365
+ # Serialize Request
1366
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
1367
+ request_content = self.serialize(request_mapper, image_url)
1368
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1369
+
1370
+ path_template = 'areaOfInterest'
1371
+
1372
+ request_url = @base_url || self.base_url
1373
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1374
+
1375
+ options = {
1376
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1377
+ body: request_content,
1378
+ headers: request_headers.merge(custom_headers || {}),
1379
+ base_url: request_url
1380
+ }
1381
+ promise = self.make_request_async(:post, path_template, options)
1382
+
1383
+ promise = promise.then do |result|
1384
+ http_response = result.response
1385
+ status_code = http_response.status
1386
+ response_content = http_response.body
1387
+ unless status_code == 200
1388
+ error_model = JSON.load(response_content)
1389
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1390
+ end
1391
+
1392
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1393
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1394
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1395
+ # Deserialize Response
1396
+ if status_code == 200
1397
+ begin
1398
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1399
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::AreaOfInterestResult.mapper()
1400
+ result.body = self.deserialize(result_mapper, parsed_response)
1401
+ rescue Exception => e
1402
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1403
+ end
1404
+ end
1405
+
1406
+ result
1407
+ end
1408
+
1409
+ promise.execute
1410
+ end
1411
+
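An area-of-interest sketch, assuming `client` is configured and that AreaOfInterestResult#area_of_interest is a BoundingRect with x, y, w and h; the URL is illustrative:

    aoi = client.get_area_of_interest('https://example.com/photo.jpg')
    rect = aoi.area_of_interest
    puts "ROI at (#{rect.x}, #{rect.y}), #{rect.w}x#{rect.h}"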
1412
+ #
1413
+ # Recognize Text operation. When you use the Recognize Text interface, the
1414
+ # response contains a field called 'Operation-Location'. The
1415
+ # 'Operation-Location' field contains the URL that you must use for your Get
1416
+ # Recognize Text Operation Result operation.
1417
+ #
1418
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
1419
+ # include: 'Handwritten', 'Printed'
1420
+ # @param url [String] Publicly reachable URL of an image.
1421
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1422
+ # will be added to the HTTP request.
1423
+ #
1424
+ #
1425
+ def recognize_text(url, mode, custom_headers:nil)
1426
+ response = recognize_text_async(url, mode, custom_headers:custom_headers).value!
1427
+ nil
1428
+ end
1429
+
1430
+ #
1431
+ # Recognize Text operation. When you use the Recognize Text interface, the
1432
+ # response contains a field called 'Operation-Location'. The
1433
+ # 'Operation-Location' field contains the URL that you must use for your Get
1434
+ # Recognize Text Operation Result operation.
1435
+ #
1436
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
1437
+ # include: 'Handwritten', 'Printed'
1438
+ # @param url [String] Publicly reachable URL of an image.
1439
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1440
+ # will be added to the HTTP request.
1441
+ #
1442
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1443
+ #
1444
+ def recognize_text_with_http_info(url, mode, custom_headers:nil)
1445
+ recognize_text_async(url, mode, custom_headers:custom_headers).value!
1446
+ end
1447
+
1448
+ #
1449
+ # Recognize Text operation. When you use the Recognize Text interface, the
1450
+ # response contains a field called 'Operation-Location'. The
1451
+ # 'Operation-Location' field contains the URL that you must use for your Get
1452
+ # Recognize Text Operation Result operation.
1453
+ #
1454
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
1455
+ # include: 'Handwritten', 'Printed'
1456
+ # @param url [String] Publicly reachable URL of an image.
1457
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1458
+ # to the HTTP request.
1459
+ #
1460
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1461
+ #
1462
+ def recognize_text_async(url, mode, custom_headers:nil)
1463
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1464
+ fail ArgumentError, 'mode is nil' if mode.nil?
1465
+ fail ArgumentError, 'url is nil' if url.nil?
1466
+
1467
+ image_url = ImageUrl.new
1468
+ unless url.nil?
1469
+ image_url.url = url
1470
+ end
1471
+
1472
+ request_headers = {}
1473
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1474
+
1475
+ # Set Headers
1476
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1477
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1478
+
1479
+ # Serialize Request
1480
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
1481
+ request_content = self.serialize(request_mapper, image_url)
1482
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1483
+
1484
+ path_template = 'recognizeText'
1485
+
1486
+ request_url = @base_url || self.base_url
1487
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1488
+
1489
+ options = {
1490
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1491
+ query_params: {'mode' => mode},
1492
+ body: request_content,
1493
+ headers: request_headers.merge(custom_headers || {}),
1494
+ base_url: request_url
1495
+ }
1496
+ promise = self.make_request_async(:post, path_template, options)
1497
+
1498
+ promise = promise.then do |result|
1499
+ http_response = result.response
1500
+ status_code = http_response.status
1501
+ response_content = http_response.body
1502
+ unless status_code == 202
1503
+ error_model = JSON.load(response_content)
1504
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1505
+ end
1506
+
1507
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1508
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1509
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1510
+
1511
+ result
1512
+ end
1513
+
1514
+ promise.execute
1515
+ end
1516
+
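Because recognize_text returns no body (the service answers 202 and places the polling URL in the 'Operation-Location' header), the *_with_http_info variant is the practical entry point. A sketch, assuming `client` is configured and that the header can be read off the raw response just as the generated code reads 'x-ms-request-id'; the URL is illustrative:

    op = client.recognize_text_with_http_info('https://example.com/sign.jpg', 'Printed')
    operation_location = op.response['Operation-Location']
    # The trailing path segment is the operationId expected by get_text_operation_result.
    operation_id = operation_location.split('/').last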
1517
+ #
1518
+ # This interface is used for getting the text operation result. The URL to this
1519
+ # interface should be retrieved from the 'Operation-Location' field returned by the
1520
+ # Recognize Text interface.
1521
+ #
1522
+ # @param operation_id [String] Id of the text operation returned in the
1523
+ # response of the 'Recognize Text' interface.
1524
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1525
+ # will be added to the HTTP request.
1526
+ #
1527
+ # @return [TextOperationResult] operation results.
1528
+ #
1529
+ def get_text_operation_result(operation_id, custom_headers:nil)
1530
+ response = get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
1531
+ response.body unless response.nil?
1532
+ end
1533
+
1534
+ #
1535
+ # This interface is used for getting the text operation result. The URL to this
1536
+ # interface should be retrieved from the 'Operation-Location' field returned by the
1537
+ # Recognize Text interface.
1538
+ #
1539
+ # @param operation_id [String] Id of the text operation returned in the
1540
+ # response of the 'Recognize Text' interface.
1541
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1542
+ # will be added to the HTTP request.
1543
+ #
1544
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1545
+ #
1546
+ def get_text_operation_result_with_http_info(operation_id, custom_headers:nil)
1547
+ get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
1548
+ end
1549
+
1550
+ #
1551
+ # This interface is used for getting the text operation result. The URL to this
1552
+ # interface should be retrieved from the 'Operation-Location' field returned by the
1553
+ # Recognize Text interface.
1554
+ #
1555
+ # @param operation_id [String] Id of the text operation returned in the
1556
+ # response of the 'Recognize Text' interface.
1557
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1558
+ # to the HTTP request.
1559
+ #
1560
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1561
+ #
1562
+ def get_text_operation_result_async(operation_id, custom_headers:nil)
1563
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1564
+ fail ArgumentError, 'operation_id is nil' if operation_id.nil?
1565
+
1566
+
1567
+ request_headers = {}
1568
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1569
+
1570
+ # Set Headers
1571
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1572
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1573
+ path_template = 'textOperations/{operationId}'
1574
+
1575
+ request_url = @base_url || self.base_url
1576
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1577
+
1578
+ options = {
1579
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1580
+ path_params: {'operationId' => operation_id},
1581
+ headers: request_headers.merge(custom_headers || {}),
1582
+ base_url: request_url
1583
+ }
1584
+ promise = self.make_request_async(:get, path_template, options)
1585
+
1586
+ promise = promise.then do |result|
1587
+ http_response = result.response
1588
+ status_code = http_response.status
1589
+ response_content = http_response.body
1590
+ unless status_code == 200
1591
+ error_model = JSON.load(response_content)
1592
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1593
+ end
1594
+
1595
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1596
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1597
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1598
+ # Deserialize Response
1599
+ if status_code == 200
1600
+ begin
1601
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1602
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::TextOperationResult.mapper()
1603
+ result.body = self.deserialize(result_mapper, parsed_response)
1604
+ rescue Exception => e
1605
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1606
+ end
1607
+ end
1608
+
1609
+ result
1610
+ end
1611
+
1612
+ promise.execute
1613
+ end
1614
+
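A polling sketch for the text operation, assuming TextOperationResult mirrors the REST payload (a `status` string such as 'Running', 'Failed' or 'Succeeded', plus a `recognition_result` with `lines`); `operation_id` comes from the sketch after recognize_text above:

    result = client.get_text_operation_result(operation_id)
    until %w[Succeeded Failed].include?(result.status)
      sleep(1)  # crude back-off between polls
      result = client.get_text_operation_result(operation_id)
    end
    result.recognition_result.lines.each { |line| puts line.text } if result.status == 'Succeeded'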
1615
+ #
1616
+ # Use this interface to get the result of a Read operation, employing the
1617
+ # state-of-the-art Optical Character Recognition (OCR) algorithms optimized for
1618
+ # text-heavy documents. When you use the Read File interface, the response
1619
+ # contains a field called 'Operation-Location'. The 'Operation-Location' field
1620
+ # contains the URL that you must use for your 'GetReadOperationResult'
1621
+ # operation to access OCR results.
1622
+ #
1623
+ # @param url [String] Publicly reachable URL of an image.
1624
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1625
+ # will be added to the HTTP request.
1626
+ #
1627
+ #
1628
+ def batch_read_file(url, custom_headers:nil)
1629
+ response = batch_read_file_async(url, custom_headers:custom_headers).value!
1630
+ nil
1631
+ end
1632
+
1633
+ #
1634
+ # Use this interface to get the result of a Read operation, employing the
1635
+ # state-of-the-art Optical Character Recognition (OCR) algorithms optimized for
1636
+ # text-heavy documents. When you use the Read File interface, the response
1637
+ # contains a field called 'Operation-Location'. The 'Operation-Location' field
1638
+ # contains the URL that you must use for your 'GetReadOperationResult'
1639
+ # operation to access OCR results.
1640
+ #
1641
+ # @param url [String] Publicly reachable URL of an image.
1642
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1643
+ # will be added to the HTTP request.
1644
+ #
1645
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1646
+ #
1647
+ def batch_read_file_with_http_info(url, custom_headers:nil)
1648
+ batch_read_file_async(url, custom_headers:custom_headers).value!
1649
+ end
1650
+
1651
+ #
1652
+ # Use this interface to get the result of a Read operation, employing the
1653
+ # state-of-the-art Optical Character Recognition (OCR) algorithms optimized for
1654
+ # text-heavy documents. When you use the Read File interface, the response
1655
+ # contains a field called 'Operation-Location'. The 'Operation-Location' field
1656
+ # contains the URL that you must use for your 'GetReadOperationResult'
1657
+ # operation to access OCR results.
1658
+ #
1659
+ # @param url [String] Publicly reachable URL of an image.
1660
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1661
+ # to the HTTP request.
1662
+ #
1663
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1664
+ #
1665
+ def batch_read_file_async(url, custom_headers:nil)
1666
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1667
+ fail ArgumentError, 'url is nil' if url.nil?
1668
+
1669
+ image_url = ImageUrl.new
1670
+ unless url.nil?
1671
+ image_url.url = url
1672
+ end
1673
+
1674
+ request_headers = {}
1675
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1676
+
1677
+ # Set Headers
1678
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1679
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1680
+
1681
+ # Serialize Request
1682
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageUrl.mapper()
1683
+ request_content = self.serialize(request_mapper, image_url)
1684
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1685
+
1686
+ path_template = 'read/core/asyncBatchAnalyze'
1687
+
1688
+ request_url = @base_url || self.base_url
1689
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1690
+
1691
+ options = {
1692
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1693
+ body: request_content,
1694
+ headers: request_headers.merge(custom_headers || {}),
1695
+ base_url: request_url
1696
+ }
1697
+ promise = self.make_request_async(:post, path_template, options)
1698
+
1699
+ promise = promise.then do |result|
1700
+ http_response = result.response
1701
+ status_code = http_response.status
1702
+ response_content = http_response.body
1703
+ unless status_code == 202
1704
+ error_model = JSON.load(response_content)
1705
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1706
+ end
1707
+
1708
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1709
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1710
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1711
+
1712
+ result
1713
+ end
1714
+
1715
+ promise.execute
1716
+ end
1717
+
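Batch Read File follows the same asynchronous pattern: a 202 response whose 'Operation-Location' header carries the polling URL. A sketch, assuming `client` is configured; the URL is illustrative:

    op = client.batch_read_file_with_http_info('https://example.com/scanned-page.jpg')
    # The operationId for get_read_operation_result is the last path segment of the header value.
    read_operation_id = op.response['Operation-Location'].split('/').last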
1718
+ #
1719
+ # This interface is used for getting the OCR results of a Read operation. The URL to
1720
+ # this interface should be retrieved from the 'Operation-Location' field returned
1721
+ # by the Batch Read File interface.
1722
+ #
1723
+ # @param operation_id [String] Id of the read operation returned in the response of
1724
+ # the 'Batch Read File' interface.
1725
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1726
+ # will be added to the HTTP request.
1727
+ #
1728
+ # @return [ReadOperationResult] operation results.
1729
+ #
1730
+ def get_read_operation_result(operation_id, custom_headers:nil)
1731
+ response = get_read_operation_result_async(operation_id, custom_headers:custom_headers).value!
1732
+ response.body unless response.nil?
1733
+ end
1734
+
1735
+ #
1736
+ # This interface is used for getting the OCR results of a Read operation. The URL to
1737
+ # this interface should be retrieved from the 'Operation-Location' field returned
1738
+ # by the Batch Read File interface.
1739
+ #
1740
+ # @param operation_id [String] Id of the read operation returned in the response of
1741
+ # the 'Batch Read File' interface.
1742
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1743
+ # will be added to the HTTP request.
1744
+ #
1745
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1746
+ #
1747
+ def get_read_operation_result_with_http_info(operation_id, custom_headers:nil)
1748
+ get_read_operation_result_async(operation_id, custom_headers:custom_headers).value!
1749
+ end
1750
+
1751
+ #
1752
+ # This interface is used for getting the OCR results of a Read operation. The URL to
1753
+ # this interface should be retrieved from the 'Operation-Location' field returned
1754
+ # by the Batch Read File interface.
1755
+ #
1756
+ # @param operation_id [String] Id of the read operation returned in the response of
1757
+ # the 'Batch Read File' interface.
1758
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1759
+ # to the HTTP request.
1760
+ #
1761
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1762
+ #
1763
+ def get_read_operation_result_async(operation_id, custom_headers:nil)
1764
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1765
+ fail ArgumentError, 'operation_id is nil' if operation_id.nil?
1766
+
1767
+
1768
+ request_headers = {}
1769
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1770
+
1771
+ # Set Headers
1772
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1773
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1774
+ path_template = 'read/operations/{operationId}'
1775
+
1776
+ request_url = @base_url || self.base_url
1777
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1778
+
1779
+ options = {
1780
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1781
+ path_params: {'operationId' => operation_id},
1782
+ headers: request_headers.merge(custom_headers || {}),
1783
+ base_url: request_url
1784
+ }
1785
+ promise = self.make_request_async(:get, path_template, options)
1786
+
1787
+ promise = promise.then do |result|
1788
+ http_response = result.response
1789
+ status_code = http_response.status
1790
+ response_content = http_response.body
1791
+ unless status_code == 200
1792
+ error_model = JSON.load(response_content)
1793
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1794
+ end
1795
+
1796
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1797
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
1798
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
1799
+ # Deserialize Response
1800
+ if status_code == 200
1801
+ begin
1802
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1803
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ReadOperationResult.mapper()
1804
+ result.body = self.deserialize(result_mapper, parsed_response)
1805
+ rescue Exception => e
1806
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1807
+ end
1808
+ end
1809
+
1810
+ result
1811
+ end
1812
+
1813
+ promise.execute
1814
+ end
1815
+
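A retrieval sketch for the Read results, assuming ReadOperationResult mirrors the REST payload (`status` plus `recognition_results`, one entry per page, each with `lines`); `read_operation_id` comes from the Batch Read File sketch above:

    read = client.get_read_operation_result(read_operation_id)
    if read.status == 'Succeeded'
      # One recognition result per page; print every recognized line.
      read.recognition_results.each { |page| page.lines.each { |line| puts line.text } }
    end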
1816
+ #
1817
+ # This operation extracts a rich set of visual features based on the image
1818
+ # content.
1819
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
1820
+ # an image URL. Within your request, there is an optional parameter to allow
1821
+ # you to choose which features to return. By default, image categories are
1822
+ # returned in the response.
1823
+ # A successful response will be returned in JSON. If the request failed, the
1824
+ # response will contain an error code and a message to help understand what
1825
+ # went wrong.
1826
+ #
1827
+ # @param image An image stream.
1828
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
1829
+ # visual feature types to return. Multiple values should be comma-separated.
1830
+ # Valid visual feature types include: Categories - categorizes image content
1831
+ # according to a taxonomy defined in documentation. Tags - tags the image with
1832
+ # a detailed list of words related to the image content. Description -
1833
+ # describes the image content with a complete English sentence. Faces - detects
1834
+ # if faces are present. If present, generate coordinates, gender and age.
1835
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
1836
+ # the accent color, dominant color, and whether an image is black & white. Adult
1837
+ # - detects if the image is pornographic in nature (depicts nudity or a sex
1838
+ # act), or is gory (depicts extreme violence or blood). Sexually suggestive
1839
+ # content (aka racy content) is also detected. Objects - detects various
1840
+ # objects within an image, including the approximate location. The Objects
1841
+ # argument is only available in English. Brands - detects various brands within
1842
+ # an image, including the approximate location. The Brands argument is only
1843
+ # available in English.
1844
+ # @param details [Array<Details>] A string indicating which domain-specific
1845
+ # details to return. Multiple values should be comma-separated. Valid visual
1846
+ # feature types include: Celebrities - identifies celebrities if detected in
1847
+ # the image, Landmarks - identifies notable landmarks in the image.
1848
+ # @param language [Enum] The desired language for output generation. If this
1849
+ # parameter is not specified, the default value is "en". Supported
1850
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1851
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1852
+ # 'ja', 'pt', 'zh'
1853
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
1854
+ # domain models when generating the description.
1855
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1856
+ # will be added to the HTTP request.
1857
+ #
1858
+ # @return [ImageAnalysis] operation results.
1859
+ #
1860
+ def analyze_image_in_stream(image, visual_features:nil, details:nil, language:nil, description_exclude:nil, custom_headers:nil)
1861
+ response = analyze_image_in_stream_async(image, visual_features:visual_features, details:details, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
1862
+ response.body unless response.nil?
1863
+ end
1864
+
1865
+ #
1866
+ # This operation extracts a rich set of visual features based on the image
1867
+ # content.
1868
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
1869
+ # an image URL. Within your request, there is an optional parameter to allow
1870
+ # you to choose which features to return. By default, image categories are
1871
+ # returned in the response.
1872
+ # A successful response will be returned in JSON. If the request failed, the
1873
+ # response will contain an error code and a message to help understand what
1874
+ # went wrong.
1875
+ #
1876
+ # @param image An image stream.
1877
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
1878
+ # visual feature types to return. Multiple values should be comma-separated.
1879
+ # Valid visual feature types include: Categories - categorizes image content
1880
+ # according to a taxonomy defined in documentation. Tags - tags the image with
1881
+ # a detailed list of words related to the image content. Description -
1882
+ # describes the image content with a complete English sentence. Faces - detects
1883
+ # if faces are present. If present, generate coordinates, gender and age.
1884
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
1885
+ # the accent color, dominant color, and whether an image is black & white. Adult
1886
+ # - detects if the image is pornographic in nature (depicts nudity or a sex
1887
+ # act), or is gory (depicts extreme violence or blood). Sexually suggestive
1888
+ # content (aka racy content) is also detected. Objects - detects various
1889
+ # objects within an image, including the approximate location. The Objects
1890
+ # argument is only available in English. Brands - detects various brands within
1891
+ # an image, including the approximate location. The Brands argument is only
1892
+ # available in English.
1893
+ # @param details [Array<Details>] A string indicating which domain-specific
1894
+ # details to return. Multiple values should be comma-separated. Valid visual
1895
+ # feature types include: Celebrities - identifies celebrities if detected in
1896
+ # the image, Landmarks - identifies notable landmarks in the image.
1897
+ # @param language [Enum] The desired language for output generation. If this
1898
+ # parameter is not specified, the default value is "en". Supported
1899
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1900
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1901
+ # 'ja', 'pt', 'zh'
1902
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
1903
+ # domain models when generating the description.
1904
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1905
+ # will be added to the HTTP request.
1906
+ #
1907
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1908
+ #
1909
+ def analyze_image_in_stream_with_http_info(image, visual_features:nil, details:nil, language:nil, description_exclude:nil, custom_headers:nil)
1910
+ analyze_image_in_stream_async(image, visual_features:visual_features, details:details, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
1911
+ end
1912
+
1913
+ #
1914
+ # This operation extracts a rich set of visual features based on the image
1915
+ # content.
1916
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
1917
+ # an image URL. Within your request, there is an optional parameter to allow
1918
+ # you to choose which features to return. By default, image categories are
1919
+ # returned in the response.
1920
+ # A successful response will be returned in JSON. If the request failed, the
1921
+ # response will contain an error code and a message to help understand what
1922
+ # went wrong.
1923
+ #
1924
+ # @param image An image stream.
1925
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
1926
+ # visual feature types to return. Multiple values should be comma-separated.
1927
+ # Valid visual feature types include: Categories - categorizes image content
1928
+ # according to a taxonomy defined in documentation. Tags - tags the image with
1929
+ # a detailed list of words related to the image content. Description -
1930
+ # describes the image content with a complete English sentence. Faces - detects
1931
+ # if faces are present. If present, generate coordinates, gender and age.
1932
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
1933
+ # the accent color, dominant color, and whether an image is black & white. Adult
1934
+ # - detects if the image is pornographic in nature (depicts nudity or a sex
1935
+ # act), or is gory (depicts extreme violence or blood). Sexually suggestive
1936
+ # content (aka racy content) is also detected. Objects - detects various
1937
+ # objects within an image, including the approximate location. The Objects
1938
+ # argument is only available in English. Brands - detects various brands within
1939
+ # an image, including the approximate location. The Brands argument is only
1940
+ # available in English.
1941
+ # @param details [Array<Details>] A string indicating which domain-specific
1942
+ # details to return. Multiple values should be comma-separated. Valid visual
1943
+ # feature types include: Celebrities - identifies celebrities if detected in
1944
+ # the image, Landmarks - identifies notable landmarks in the image.
1945
+ # @param language [Enum] The desired language for output generation. If this
1946
+ # parameter is not specified, the default value is "en". Supported
1947
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1948
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1949
+ # 'ja', 'pt', 'zh'
1950
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
1951
+ # domain models when generating the description.
1952
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1953
+ # to the HTTP request.
1954
+ #
1955
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1956
+ #
1957
+ def analyze_image_in_stream_async(image, visual_features:nil, details:nil, language:nil, description_exclude:nil, custom_headers:nil)
1958
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1959
+ fail ArgumentError, 'image is nil' if image.nil?
1960
+
1961
+
1962
+ request_headers = {}
1963
+ request_headers['Content-Type'] = 'application/octet-stream'
1964
+
1965
+ # Set Headers
1966
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1967
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1968
+
1969
+ # Serialize Request
1970
+ request_mapper = {
1971
+ client_side_validation: true,
1972
+ required: true,
1973
+ serialized_name: 'Image',
1974
+ type: {
1975
+ name: 'Stream'
1976
+ }
1977
+ }
1978
+ request_content = self.serialize(request_mapper, image)
1979
+
1980
+ path_template = 'analyze'
1981
+
1982
+ request_url = @base_url || self.base_url
1983
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1984
+
1985
+ options = {
1986
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1987
+ query_params: {'visualFeatures' => visual_features.nil? ? nil : visual_features.join(','),'details' => details.nil? ? nil : details.join(','),'language' => language,'descriptionExclude' => description_exclude.nil? ? nil : description_exclude.join(',')},
1988
+ body: request_content,
1989
+ headers: request_headers.merge(custom_headers || {}),
1990
+ base_url: request_url
1991
+ }
1992
+ promise = self.make_request_async(:post, path_template, options)
1993
+
1994
+ promise = promise.then do |result|
1995
+ http_response = result.response
1996
+ status_code = http_response.status
1997
+ response_content = http_response.body
1998
+ unless status_code == 200
1999
+ error_model = JSON.load(response_content)
2000
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2001
+ end
2002
+
2003
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2004
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2005
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2006
+ # Deserialize Response
2007
+ if status_code == 200
2008
+ begin
2009
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2010
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageAnalysis.mapper()
2011
+ result.body = self.deserialize(result_mapper, parsed_response)
2012
+ rescue Exception => e
2013
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2014
+ end
2015
+ end
2016
+
2017
+ result
2018
+ end
2019
+
2020
+ promise.execute
2021
+ end
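A minimal usage sketch for the stream-based analyze call above (illustrative only, not part of the generated source): it assumes `client` is an already constructed and authenticated client from this gem and that 'photo.jpg' exists locally; feature names are passed as plain strings because the method joins them with commas before sending.

File.open('photo.jpg', 'rb') do |image|
  # visual_features / description_exclude values come from the doc comment above
  promise = client.analyze_image_in_stream_async(
    image,
    visual_features: ['Categories', 'Description', 'Color'],
    description_exclude: ['Celebrities']
  )
  analysis = promise.value!.body   # ImageAnalysis model on success
  puts analysis.inspect
end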
2022
+
2023
+ #
2024
+ # This operation returns a bounding box around the most important area of the
2025
+ # image.
2026
+ # A successful response will be returned in JSON. If the request failed, the
2027
+ # response contains an error code and a message to help determine what went
2028
+ # wrong.
2029
+ # Upon failure, the error code and an error message are returned. The error
2030
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
2031
+ # NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
2032
+ #
2033
+ # @param image An image stream.
2034
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2035
+ # will be added to the HTTP request.
2036
+ #
2037
+ # @return [AreaOfInterestResult] operation results.
2038
+ #
2039
+ def get_area_of_interest_in_stream(image, custom_headers:nil)
2040
+ response = get_area_of_interest_in_stream_async(image, custom_headers:custom_headers).value!
2041
+ response.body unless response.nil?
2042
+ end
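A hedged sketch of the convenience wrapper above; `client` and 'photo.jpg' are placeholders, and the `area_of_interest` accessor plus the x/y/w/h fields are assumptions based on the AreaOfInterestResult and BoundingRect models shipped in this gem.

File.open('photo.jpg', 'rb') do |image|
  roi = client.get_area_of_interest_in_stream(image)   # AreaOfInterestResult
  box = roi.area_of_interest                            # assumed BoundingRect accessor
  puts "ROI: #{box.x},#{box.y} #{box.w}x#{box.h}" unless box.nil?
end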
2043
+
2044
+ #
2045
+ # This operation returns a bounding box around the most important area of the
2046
+ # image.
2047
+ # A successful response will be returned in JSON. If the request failed, the
2048
+ # response contains an error code and a message to help determine what went
2049
+ # wrong.
2050
+ # Upon failure, the error code and an error message are returned. The error
2051
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
2052
+ # NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
2053
+ #
2054
+ # @param image An image stream.
2055
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2056
+ # will be added to the HTTP request.
2057
+ #
2058
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2059
+ #
2060
+ def get_area_of_interest_in_stream_with_http_info(image, custom_headers:nil)
2061
+ get_area_of_interest_in_stream_async(image, custom_headers:custom_headers).value!
2062
+ end
2063
+
2064
+ #
2065
+ # This operation returns a bounding box around the most important area of the
2066
+ # image.
2067
+ # A successful response will be returned in JSON. If the request failed, the
2068
+ # response contains an error code and a message to help determine what went
2069
+ # wrong.
2070
+ # Upon failure, the error code and an error message are returned. The error
2071
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
2072
+ # NotSupportedImage, FailedToProcess, Timeout, or InternalServerError.
2073
+ #
2074
+ # @param image An image stream.
2075
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2076
+ # to the HTTP request.
2077
+ #
2078
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2079
+ #
2080
+ def get_area_of_interest_in_stream_async(image, custom_headers:nil)
2081
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2082
+ fail ArgumentError, 'image is nil' if image.nil?
2083
+
2084
+
2085
+ request_headers = {}
2086
+ request_headers['Content-Type'] = 'application/octet-stream'
2087
+
2088
+ # Set Headers
2089
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2090
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2091
+
2092
+ # Serialize Request
2093
+ request_mapper = {
2094
+ client_side_validation: true,
2095
+ required: true,
2096
+ serialized_name: 'Image',
2097
+ type: {
2098
+ name: 'Stream'
2099
+ }
2100
+ }
2101
+ request_content = self.serialize(request_mapper, image)
2102
+
2103
+ path_template = 'areaOfInterest'
2104
+
2105
+ request_url = @base_url || self.base_url
2106
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2107
+
2108
+ options = {
2109
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2110
+ body: request_content,
2111
+ headers: request_headers.merge(custom_headers || {}),
2112
+ base_url: request_url
2113
+ }
2114
+ promise = self.make_request_async(:post, path_template, options)
2115
+
2116
+ promise = promise.then do |result|
2117
+ http_response = result.response
2118
+ status_code = http_response.status
2119
+ response_content = http_response.body
2120
+ unless status_code == 200
2121
+ error_model = JSON.load(response_content)
2122
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2123
+ end
2124
+
2125
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2126
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2127
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2128
+ # Deserialize Response
2129
+ if status_code == 200
2130
+ begin
2131
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2132
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::AreaOfInterestResult.mapper()
2133
+ result.body = self.deserialize(result_mapper, parsed_response)
2134
+ rescue Exception => e
2135
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2136
+ end
2137
+ end
2138
+
2139
+ result
2140
+ end
2141
+
2142
+ promise.execute
2143
+ end
2144
+
2145
+ #
2146
+ # This operation generates a description of an image in human readable language
2147
+ # with complete sentences. The description is based on a collection of content
2148
+ # tags, which are also returned by the operation. More than one description can
2149
+ # be generated for each image. Descriptions are ordered by their confidence
2150
+ # score. Descriptions may include results from celebrity and landmark domain
2151
+ # models, if applicable.
2152
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2153
+ # an image URL.
2154
+ # A successful response will be returned in JSON. If the request failed, the
2155
+ # response will contain an error code and a message to help understand what
2156
+ # went wrong.
2157
+ #
2158
+ # @param image An image stream.
2159
+ # @param max_candidates [Integer] Maximum number of candidate descriptions to
2160
+ # be returned. The default is 1.
2161
+ # @param language [Enum] The desired language for output generation. If this
2162
+ # parameter is not specified, the default value is "en". Supported
2163
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2164
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2165
+ # 'ja', 'pt', 'zh'
2166
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
2167
+ # domain models when generating the description.
2168
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2169
+ # will be added to the HTTP request.
2170
+ #
2171
+ # @return [ImageDescription] operation results.
2172
+ #
2173
+ def describe_image_in_stream(image, max_candidates:1, language:nil, description_exclude:nil, custom_headers:nil)
2174
+ response = describe_image_in_stream_async(image, max_candidates:max_candidates, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
2175
+ response.body unless response.nil?
2176
+ end
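A small sketch of calling the describe wrapper above; the `captions`, `confidence`, and `text` accessors are assumptions based on the ImageDescription and ImageCaption models in this gem, and `client` is a placeholder for a configured instance.

File.open('photo.jpg', 'rb') do |image|
  description = client.describe_image_in_stream(image, max_candidates: 3, language: 'en')
  # print each candidate caption with its confidence score
  description.captions.each do |caption|
    puts format('%.2f  %s', caption.confidence, caption.text)
  end
end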
2177
+
2178
+ #
2179
+ # This operation generates a description of an image in human readable language
2180
+ # with complete sentences. The description is based on a collection of content
2181
+ # tags, which are also returned by the operation. More than one description can
2182
+ # be generated for each image. Descriptions are ordered by their confidence
2183
+ # score. Descriptions may include results from celebrity and landmark domain
2184
+ # models, if applicable.
2185
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2186
+ # an image URL.
2187
+ # A successful response will be returned in JSON. If the request failed, the
2188
+ # response will contain an error code and a message to help understand what
2189
+ # went wrong.
2190
+ #
2191
+ # @param image An image stream.
2192
+ # @param max_candidates [Integer] Maximum number of candidate descriptions to
2193
+ # be returned. The default is 1.
2194
+ # @param language [Enum] The desired language for output generation. If this
2195
+ # parameter is not specified, the default value is "en". Supported
2196
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2197
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2198
+ # 'ja', 'pt', 'zh'
2199
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
2200
+ # domain models when generating the description.
2201
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2202
+ # will be added to the HTTP request.
2203
+ #
2204
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2205
+ #
2206
+ def describe_image_in_stream_with_http_info(image, max_candidates:1, language:nil, description_exclude:nil, custom_headers:nil)
2207
+ describe_image_in_stream_async(image, max_candidates:max_candidates, language:language, description_exclude:description_exclude, custom_headers:custom_headers).value!
2208
+ end
2209
+
2210
+ #
2211
+ # This operation generates a description of an image in human readable language
2212
+ # with complete sentences. The description is based on a collection of content
2213
+ # tags, which are also returned by the operation. More than one description can
2214
+ # be generated for each image. Descriptions are ordered by their confidence
2215
+ # score. Descriptions may include results from celebrity and landmark domain
2216
+ # models, if applicable.
2217
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2218
+ # an image URL.
2219
+ # A successful response will be returned in JSON. If the request failed, the
2220
+ # response will contain an error code and a message to help understand what
2221
+ # went wrong.
2222
+ #
2223
+ # @param image An image stream.
2224
+ # @param max_candidates [Integer] Maximum number of candidate descriptions to
2225
+ # be returned. The default is 1.
2226
+ # @param language [Enum] The desired language for output generation. If this
2227
+ # parameter is not specified, the default value is "en". Supported
2228
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2229
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2230
+ # 'ja', 'pt', 'zh'
2231
+ # @param description_exclude [Array<DescriptionExclude>] Turn off specified
2232
+ # domain models when generating the description.
2233
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2234
+ # to the HTTP request.
2235
+ #
2236
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2237
+ #
2238
+ def describe_image_in_stream_async(image, max_candidates:1, language:nil, description_exclude:nil, custom_headers:nil)
2239
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2240
+ fail ArgumentError, 'image is nil' if image.nil?
2241
+
2242
+
2243
+ request_headers = {}
2244
+ request_headers['Content-Type'] = 'application/octet-stream'
2245
+
2246
+ # Set Headers
2247
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2248
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2249
+
2250
+ # Serialize Request
2251
+ request_mapper = {
2252
+ client_side_validation: true,
2253
+ required: true,
2254
+ serialized_name: 'Image',
2255
+ type: {
2256
+ name: 'Stream'
2257
+ }
2258
+ }
2259
+ request_content = self.serialize(request_mapper, image)
2260
+
2261
+ path_template = 'describe'
2262
+
2263
+ request_url = @base_url || self.base_url
2264
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2265
+
2266
+ options = {
2267
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2268
+ query_params: {'maxCandidates' => max_candidates,'language' => language,'descriptionExclude' => description_exclude.nil? ? nil : description_exclude.join(',')},
2269
+ body: request_content,
2270
+ headers: request_headers.merge(custom_headers || {}),
2271
+ base_url: request_url
2272
+ }
2273
+ promise = self.make_request_async(:post, path_template, options)
2274
+
2275
+ promise = promise.then do |result|
2276
+ http_response = result.response
2277
+ status_code = http_response.status
2278
+ response_content = http_response.body
2279
+ unless status_code == 200
2280
+ error_model = JSON.load(response_content)
2281
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2282
+ end
2283
+
2284
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2285
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2286
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2287
+ # Deserialize Response
2288
+ if status_code == 200
2289
+ begin
2290
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2291
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::ImageDescription.mapper()
2292
+ result.body = self.deserialize(result_mapper, parsed_response)
2293
+ rescue Exception => e
2294
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2295
+ end
2296
+ end
2297
+
2298
+ result
2299
+ end
2300
+
2301
+ promise.execute
2302
+ end
2303
+
2304
+ #
2305
+ # Performs object detection on the specified image.
2306
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2307
+ # an image URL.
2308
+ # A successful response will be returned in JSON. If the request failed, the
2309
+ # response will contain an error code and a message to help understand what
2310
+ # went wrong.
2311
+ #
2312
+ # @param image An image stream.
2313
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2314
+ # will be added to the HTTP request.
2315
+ #
2316
+ # @return [DetectResult] operation results.
2317
+ #
2318
+ def detect_objects_in_stream(image, custom_headers:nil)
2319
+ response = detect_objects_in_stream_async(image, custom_headers:custom_headers).value!
2320
+ response.body unless response.nil?
2321
+ end
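A minimal sketch of the object-detection wrapper above; the `objects` collection is assumed from the DetectResult model listed in this gem, and each detected object is simply inspected rather than relying on specific field names.

File.open('photo.jpg', 'rb') do |image|
  detection = client.detect_objects_in_stream(image)   # DetectResult
  (detection.objects || []).each { |obj| puts obj.inspect }
end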
2322
+
2323
+ #
2324
+ # Performs object detection on the specified image.
2325
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2326
+ # an image URL.
2327
+ # A successful response will be returned in JSON. If the request failed, the
2328
+ # response will contain an error code and a message to help understand what
2329
+ # went wrong.
2330
+ #
2331
+ # @param image An image stream.
2332
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2333
+ # will be added to the HTTP request.
2334
+ #
2335
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2336
+ #
2337
+ def detect_objects_in_stream_with_http_info(image, custom_headers:nil)
2338
+ detect_objects_in_stream_async(image, custom_headers:custom_headers).value!
2339
+ end
2340
+
2341
+ #
2342
+ # Performs object detection on the specified image.
2343
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2344
+ # an image URL.
2345
+ # A successful response will be returned in JSON. If the request failed, the
2346
+ # response will contain an error code and a message to help understand what
2347
+ # went wrong.
2348
+ #
2349
+ # @param image An image stream.
2350
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2351
+ # to the HTTP request.
2352
+ #
2353
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2354
+ #
2355
+ def detect_objects_in_stream_async(image, custom_headers:nil)
2356
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2357
+ fail ArgumentError, 'image is nil' if image.nil?
2358
+
2359
+
2360
+ request_headers = {}
2361
+ request_headers['Content-Type'] = 'application/octet-stream'
2362
+
2363
+ # Set Headers
2364
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2365
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2366
+
2367
+ # Serialize Request
2368
+ request_mapper = {
2369
+ client_side_validation: true,
2370
+ required: true,
2371
+ serialized_name: 'Image',
2372
+ type: {
2373
+ name: 'Stream'
2374
+ }
2375
+ }
2376
+ request_content = self.serialize(request_mapper, image)
2377
+
2378
+ path_template = 'detect'
2379
+
2380
+ request_url = @base_url || self.base_url
2381
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2382
+
2383
+ options = {
2384
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2385
+ body: request_content,
2386
+ headers: request_headers.merge(custom_headers || {}),
2387
+ base_url: request_url
2388
+ }
2389
+ promise = self.make_request_async(:post, path_template, options)
2390
+
2391
+ promise = promise.then do |result|
2392
+ http_response = result.response
2393
+ status_code = http_response.status
2394
+ response_content = http_response.body
2395
+ unless status_code == 200
2396
+ error_model = JSON.load(response_content)
2397
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2398
+ end
2399
+
2400
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2401
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2402
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2403
+ # Deserialize Response
2404
+ if status_code == 200
2405
+ begin
2406
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2407
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::DetectResult.mapper()
2408
+ result.body = self.deserialize(result_mapper, parsed_response)
2409
+ rescue Exception => e
2410
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2411
+ end
2412
+ end
2413
+
2414
+ result
2415
+ end
2416
+
2417
+ promise.execute
2418
+ end
2419
+
2420
+ #
2421
+ # This operation generates a thumbnail image with the user-specified width and
2422
+ # height. By default, the service analyzes the image, identifies the region of
2423
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
2424
+ # Smart cropping helps when you specify an aspect ratio that differs from that
2425
+ # of the input image.
2426
+ # A successful response contains the thumbnail image binary. If the request
2427
+ # failed, the response contains an error code and a message to help determine
2428
+ # what went wrong.
2429
+ # Upon failure, the error code and an error message are returned. The error
2430
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
2431
+ # InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
2432
+ # InternalServerError.
2433
+ #
2434
+ # @param width [Integer] Width of the thumbnail, in pixels. It must be between
2435
+ # 1 and 1024. Recommended minimum of 50.
2436
+ # @param height [Integer] Height of the thumbnail, in pixels. It must be
2437
+ # between 1 and 1024. Recommended minimum of 50.
2438
+ # @param image An image stream.
2439
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
2440
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2441
+ # will be added to the HTTP request.
2442
+ #
2443
+ # @return [NOT_IMPLEMENTED] operation results.
2444
+ #
2445
+ def generate_thumbnail_in_stream(width, height, image, smart_cropping:false, custom_headers:nil)
2446
+ response = generate_thumbnail_in_stream_async(width, height, image, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
2447
+ response.body unless response.nil?
2448
+ end
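A hedged sketch of the thumbnail wrapper above; it assumes the returned body can be treated as raw image bytes, which may need adjustment depending on how the runtime surfaces stream responses, and the file names are placeholders.

File.open('photo.jpg', 'rb') do |image|
  thumb = client.generate_thumbnail_in_stream(200, 200, image, smart_cropping: true)
  File.binwrite('thumb.jpg', thumb) unless thumb.nil?   # body assumed to be binary data
end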
2449
+
2450
+ #
2451
+ # This operation generates a thumbnail image with the user-specified width and
2452
+ # height. By default, the service analyzes the image, identifies the region of
2453
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
2454
+ # Smart cropping helps when you specify an aspect ratio that differs from that
2455
+ # of the input image.
2456
+ # A successful response contains the thumbnail image binary. If the request
2457
+ # failed, the response contains an error code and a message to help determine
2458
+ # what went wrong.
2459
+ # Upon failure, the error code and an error message are returned. The error
2460
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
2461
+ # InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
2462
+ # InternalServerError.
2463
+ #
2464
+ # @param width [Integer] Width of the thumbnail, in pixels. It must be between
2465
+ # 1 and 1024. Recommended minimum of 50.
2466
+ # @param height [Integer] Height of the thumbnail, in pixels. It must be
2467
+ # between 1 and 1024. Recommended minimum of 50.
2468
+ # @param image An image stream.
2469
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
2470
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2471
+ # will be added to the HTTP request.
2472
+ #
2473
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2474
+ #
2475
+ def generate_thumbnail_in_stream_with_http_info(width, height, image, smart_cropping:false, custom_headers:nil)
2476
+ generate_thumbnail_in_stream_async(width, height, image, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
2477
+ end
2478
+
2479
+ #
2480
+ # This operation generates a thumbnail image with the user-specified width and
2481
+ # height. By default, the service analyzes the image, identifies the region of
2482
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
2483
+ # Smart cropping helps when you specify an aspect ratio that differs from that
2484
+ # of the input image.
2485
+ # A successful response contains the thumbnail image binary. If the request
2486
+ # failed, the response contains an error code and a message to help determine
2487
+ # what went wrong.
2488
+ # Upon failure, the error code and an error message are returned. The error
2489
+ # code could be one of InvalidImageUrl, InvalidImageFormat, InvalidImageSize,
2490
+ # InvalidThumbnailSize, NotSupportedImage, FailedToProcess, Timeout, or
2491
+ # InternalServerError.
2492
+ #
2493
+ # @param width [Integer] Width of the thumbnail, in pixels. It must be between
2494
+ # 1 and 1024. Recommended minimum of 50.
2495
+ # @param height [Integer] Height of the thumbnail, in pixels. It must be
2496
+ # between 1 and 1024. Recommended minimum of 50.
2497
+ # @param image An image stream.
2498
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
2499
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2500
+ # to the HTTP request.
2501
+ #
2502
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2503
+ #
2504
+ def generate_thumbnail_in_stream_async(width, height, image, smart_cropping:false, custom_headers:nil)
2505
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2506
+ fail ArgumentError, 'width is nil' if width.nil?
2507
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMaximum': '1024'" if !width.nil? && width > 1024
2508
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMinimum': '1'" if !width.nil? && width < 1
2509
+ fail ArgumentError, 'height is nil' if height.nil?
2510
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMaximum': '1024'" if !height.nil? && height > 1024
2511
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMinimum': '1'" if !height.nil? && height < 1
2512
+ fail ArgumentError, 'image is nil' if image.nil?
2513
+
2514
+
2515
+ request_headers = {}
2516
+ request_headers['Content-Type'] = 'application/octet-stream'
2517
+
2518
+ # Set Headers
2519
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2520
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2521
+
2522
+ # Serialize Request
2523
+ request_mapper = {
2524
+ client_side_validation: true,
2525
+ required: true,
2526
+ serialized_name: 'Image',
2527
+ type: {
2528
+ name: 'Stream'
2529
+ }
2530
+ }
2531
+ request_content = self.serialize(request_mapper, image)
2532
+
2533
+ path_template = 'generateThumbnail'
2534
+
2535
+ request_url = @base_url || self.base_url
2536
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2537
+
2538
+ options = {
2539
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2540
+ query_params: {'width' => width,'height' => height,'smartCropping' => smart_cropping},
2541
+ body: request_content,
2542
+ headers: request_headers.merge(custom_headers || {}),
2543
+ base_url: request_url
2544
+ }
2545
+ promise = self.make_request_async(:post, path_template, options)
2546
+
2547
+ promise = promise.then do |result|
2548
+ http_response = result.response
2549
+ status_code = http_response.status
2550
+ response_content = http_response.body
2551
+ unless status_code == 200
2552
+ error_model = JSON.load(response_content)
2553
+ fail MsRestAzure::AzureOperationError.new(result.request, http_response, error_model)
2554
+ end
2555
+
2556
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2557
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2558
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2559
+ # Deserialize Response
2560
+ if status_code == 200
2561
+ begin
2562
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2563
+ result_mapper = {
2564
+ client_side_validation: true,
2565
+ required: false,
2566
+ serialized_name: 'parsed_response',
2567
+ type: {
2568
+ name: 'Stream'
2569
+ }
2570
+ }
2571
+ result.body = self.deserialize(result_mapper, parsed_response)
2572
+ rescue Exception => e
2573
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2574
+ end
2575
+ end
2576
+
2577
+ result
2578
+ end
2579
+
2580
+ promise.execute
2581
+ end
2582
+
2583
+ #
2584
+ # This operation recognizes content within an image by applying a
2585
+ # domain-specific model. The list of domain-specific models that are supported
2586
+ # by the Computer Vision API can be retrieved using the /models GET request.
2587
+ # Currently, the API provides the following domain-specific models: celebrities,
2588
+ # landmarks.
2589
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2590
+ # an image URL.
2591
+ # A successful response will be returned in JSON.
2592
+ # If the request failed, the response will contain an error code and a message
2593
+ # to help understand what went wrong.
2594
+ #
2595
+ # @param model [String] The domain-specific content to recognize.
2596
+ # @param image An image stream.
2597
+ # @param language [Enum] The desired language for output generation. If this
2598
+ # parameter is not specified, the default value is "en". Supported
2599
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2600
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2601
+ # 'ja', 'pt', 'zh'
2602
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2603
+ # will be added to the HTTP request.
2604
+ #
2605
+ # @return [DomainModelResults] operation results.
2606
+ #
2607
+ def analyze_image_by_domain_in_stream(model, image, language:nil, custom_headers:nil)
2608
+ response = analyze_image_by_domain_in_stream_async(model, image, language:language, custom_headers:custom_headers).value!
2609
+ response.body unless response.nil?
2610
+ end
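A short sketch of the domain-model wrapper above, using the 'celebrities' model named in the doc comment; the `result` accessor on DomainModelResults is an assumption about the generated model, and `client` is a placeholder.

File.open('photo.jpg', 'rb') do |image|
  domain = client.analyze_image_by_domain_in_stream('celebrities', image)
  puts domain.result.inspect   # raw hash of domain-specific results (assumed accessor)
end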
2611
+
2612
+ #
2613
+ # This operation recognizes content within an image by applying a
2614
+ # domain-specific model. The list of domain-specific models that are supported
2615
+ # by the Computer Vision API can be retrieved using the /models GET request.
2616
+ # Currently, the API provides the following domain-specific models: celebrities,
2617
+ # landmarks.
2618
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2619
+ # an image URL.
2620
+ # A successful response will be returned in JSON.
2621
+ # If the request failed, the response will contain an error code and a message
2622
+ # to help understand what went wrong.
2623
+ #
2624
+ # @param model [String] The domain-specific content to recognize.
2625
+ # @param image An image stream.
2626
+ # @param language [Enum] The desired language for output generation. If this
2627
+ # parameter is not specified, the default value is "en". Supported
2628
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2629
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2630
+ # 'ja', 'pt', 'zh'
2631
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2632
+ # will be added to the HTTP request.
2633
+ #
2634
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2635
+ #
2636
+ def analyze_image_by_domain_in_stream_with_http_info(model, image, language:nil, custom_headers:nil)
2637
+ analyze_image_by_domain_in_stream_async(model, image, language:language, custom_headers:custom_headers).value!
2638
+ end
2639
+
2640
+ #
2641
+ # This operation recognizes content within an image by applying a
2642
+ # domain-specific model. The list of domain-specific models that are supported
2643
+ # by the Computer Vision API can be retrieved using the /models GET request.
2644
+ # Currently, the API provides the following domain-specific models: celebrities,
2645
+ # landmarks.
2646
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2647
+ # an image URL.
2648
+ # A successful response will be returned in JSON.
2649
+ # If the request failed, the response will contain an error code and a message
2650
+ # to help understand what went wrong.
2651
+ #
2652
+ # @param model [String] The domain-specific content to recognize.
2653
+ # @param image An image stream.
2654
+ # @param language [Enum] The desired language for output generation. If this
2655
+ # parameter is not specified, the default value is "en". Supported
2656
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2657
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2658
+ # 'ja', 'pt', 'zh'
2659
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2660
+ # to the HTTP request.
2661
+ #
2662
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2663
+ #
2664
+ def analyze_image_by_domain_in_stream_async(model, image, language:nil, custom_headers:nil)
2665
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2666
+ fail ArgumentError, 'model is nil' if model.nil?
2667
+ fail ArgumentError, 'image is nil' if image.nil?
2668
+
2669
+
2670
+ request_headers = {}
2671
+ request_headers['Content-Type'] = 'application/octet-stream'
2672
+
2673
+ # Set Headers
2674
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2675
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2676
+
2677
+ # Serialize Request
2678
+ request_mapper = {
2679
+ client_side_validation: true,
2680
+ required: true,
2681
+ serialized_name: 'Image',
2682
+ type: {
2683
+ name: 'Stream'
2684
+ }
2685
+ }
2686
+ request_content = self.serialize(request_mapper, image)
2687
+
2688
+ path_template = 'models/{model}/analyze'
2689
+
2690
+ request_url = @base_url || self.base_url
2691
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2692
+
2693
+ options = {
2694
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2695
+ path_params: {'model' => model},
2696
+ query_params: {'language' => language},
2697
+ body: request_content,
2698
+ headers: request_headers.merge(custom_headers || {}),
2699
+ base_url: request_url
2700
+ }
2701
+ promise = self.make_request_async(:post, path_template, options)
2702
+
2703
+ promise = promise.then do |result|
2704
+ http_response = result.response
2705
+ status_code = http_response.status
2706
+ response_content = http_response.body
2707
+ unless status_code == 200
2708
+ error_model = JSON.load(response_content)
2709
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2710
+ end
2711
+
2712
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2713
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2714
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2715
+ # Deserialize Response
2716
+ if status_code == 200
2717
+ begin
2718
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2719
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::DomainModelResults.mapper()
2720
+ result.body = self.deserialize(result_mapper, parsed_response)
2721
+ rescue Exception => e
2722
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2723
+ end
2724
+ end
2725
+
2726
+ result
2727
+ end
2728
+
2729
+ promise.execute
2730
+ end
2731
+
2732
+ #
2733
+ # Optical Character Recognition (OCR) detects text in an image and extracts the
2734
+ # recognized characters into a machine-usable character stream.
2735
+ # Upon success, the OCR results will be returned.
2736
+ # Upon failure, the error code together with an error message will be returned.
2737
+ # The error code can be one of InvalidImageUrl, InvalidImageFormat,
2738
+ # InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
2739
+ # InternalServerError.
2740
+ #
2741
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
2742
+ # the image. With detectOrientation=true the OCR service tries to detect the
2743
+ # image orientation and correct it before further processing (e.g. if it's
2744
+ # upside-down).
2745
+ # @param image An image stream.
2746
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
2747
+ # detected in the image. The default value is 'unk'. Possible values include:
2748
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
2749
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
2750
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
2751
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2752
+ # will be added to the HTTP request.
2753
+ #
2754
+ # @return [OcrResult] operation results.
2755
+ #
2756
+ def recognize_printed_text_in_stream(detect_orientation, image, language:nil, custom_headers:nil)
2757
+ response = recognize_printed_text_in_stream_async(detect_orientation, image, language:language, custom_headers:custom_headers).value!
2758
+ response.body unless response.nil?
2759
+ end
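A hedged sketch of the OCR wrapper above; the regions/lines/words/text hierarchy is assumed from the OcrResult model referenced in this file, and 'receipt.png' and `client` are placeholders.

File.open('receipt.png', 'rb') do |image|
  ocr = client.recognize_printed_text_in_stream(true, image, language: 'en')
  # walk the assumed regions -> lines -> words hierarchy and print each line of text
  (ocr.regions || []).each do |region|
    region.lines.each { |line| puts line.words.map(&:text).join(' ') }
  end
end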
2760
+
2761
+ #
2762
+ # Optical Character Recognition (OCR) detects text in an image and extracts the
2763
+ # recognized characters into a machine-usable character stream.
2764
+ # Upon success, the OCR results will be returned.
2765
+ # Upon failure, the error code together with an error message will be returned.
2766
+ # The error code can be one of InvalidImageUrl, InvalidImageFormat,
2767
+ # InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
2768
+ # InternalServerError.
2769
+ #
2770
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
2771
+ # the image. With detectOrientation=true the OCR service tries to detect the
2772
+ # image orientation and correct it before further processing (e.g. if it's
2773
+ # upside-down).
2774
+ # @param image An image stream.
2775
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
2776
+ # detected in the image. The default value is 'unk'. Possible values include:
2777
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
2778
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
2779
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
2780
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2781
+ # will be added to the HTTP request.
2782
+ #
2783
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2784
+ #
2785
+ def recognize_printed_text_in_stream_with_http_info(detect_orientation, image, language:nil, custom_headers:nil)
2786
+ recognize_printed_text_in_stream_async(detect_orientation, image, language:language, custom_headers:custom_headers).value!
2787
+ end
2788
+
2789
+ #
2790
+ # Optical Character Recognition (OCR) detects text in an image and extracts the
2791
+ # recognized characters into a machine-usable character stream.
2792
+ # Upon success, the OCR results will be returned.
2793
+ # Upon failure, the error code together with an error message will be returned.
2794
+ # The error code can be one of InvalidImageUrl, InvalidImageFormat,
2795
+ # InvalidImageSize, NotSupportedImage, NotSupportedLanguage, or
2796
+ # InternalServerError.
2797
+ #
2798
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
2799
+ # the image. With detectOrientation=true the OCR service tries to detect the
2800
+ # image orientation and correct it before further processing (e.g. if it's
2801
+ # upside-down).
2802
+ # @param image An image stream.
2803
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
2804
+ # detected in the image. The default value is 'unk'. Possible values include:
2805
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
2806
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
2807
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
2808
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2809
+ # to the HTTP request.
2810
+ #
2811
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2812
+ #
2813
+ def recognize_printed_text_in_stream_async(detect_orientation, image, language:nil, custom_headers:nil)
2814
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2815
+ fail ArgumentError, 'detect_orientation is nil' if detect_orientation.nil?
2816
+ fail ArgumentError, 'image is nil' if image.nil?
2817
+
2818
+
2819
+ request_headers = {}
2820
+ request_headers['Content-Type'] = 'application/octet-stream'
2821
+
2822
+ # Set Headers
2823
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2824
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2825
+
2826
+ # Serialize Request
2827
+ request_mapper = {
2828
+ client_side_validation: true,
2829
+ required: true,
2830
+ serialized_name: 'Image',
2831
+ type: {
2832
+ name: 'Stream'
2833
+ }
2834
+ }
2835
+ request_content = self.serialize(request_mapper, image)
2836
+
2837
+ path_template = 'ocr'
2838
+
2839
+ request_url = @base_url || self.base_url
2840
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2841
+
2842
+ options = {
2843
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2844
+ query_params: {'detectOrientation' => detect_orientation,'language' => language},
2845
+ body: request_content,
2846
+ headers: request_headers.merge(custom_headers || {}),
2847
+ base_url: request_url
2848
+ }
2849
+ promise = self.make_request_async(:post, path_template, options)
2850
+
2851
+ promise = promise.then do |result|
2852
+ http_response = result.response
2853
+ status_code = http_response.status
2854
+ response_content = http_response.body
2855
+ unless status_code == 200
2856
+ error_model = JSON.load(response_content)
2857
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2858
+ end
2859
+
2860
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2861
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
2862
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
2863
+ # Deserialize Response
2864
+ if status_code == 200
2865
+ begin
2866
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2867
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::OcrResult.mapper()
2868
+ result.body = self.deserialize(result_mapper, parsed_response)
2869
+ rescue Exception => e
2870
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2871
+ end
2872
+ end
2873
+
2874
+ result
2875
+ end
2876
+
2877
+ promise.execute
2878
+ end
2879
+
2880
+ #
2881
+ # This operation generates a list of words, or tags, that are relevant to the
2882
+ # content of the supplied image. The Computer Vision API can return tags based
2883
+ # on objects, living beings, scenery or actions found in images. Unlike
2884
+ # categories, tags are not organized according to a hierarchical classification
2885
+ # system, but correspond to image content. Tags may contain hints to avoid
2886
+ # ambiguity or provide context, for example the tag "ascomycete" may be
2887
+ # accompanied by the hint "fungus".
2888
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2889
+ # an image URL.
2890
+ # A successful response will be returned in JSON. If the request failed, the
2891
+ # response will contain an error code and a message to help understand what
2892
+ # went wrong.
2893
+ #
2894
+ # @param image An image stream.
2895
+ # @param language [Enum] The desired language for output generation. If this
2896
+ # parameter is not specified, the default value is "en". Supported
2897
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2898
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2899
+ # 'ja', 'pt', 'zh'
2900
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2901
+ # will be added to the HTTP request.
2902
+ #
2903
+ # @return [TagResult] operation results.
2904
+ #
2905
+ def tag_image_in_stream(image, language:nil, custom_headers:nil)
2906
+ response = tag_image_in_stream_async(image, language:language, custom_headers:custom_headers).value!
2907
+ response.body unless response.nil?
2908
+ end
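A minimal sketch of the tag wrapper above; the `tags`, `name`, and `confidence` accessors are assumptions based on the TagResult and ImageTag models in this gem, with `client` and the file name as placeholders.

File.open('photo.jpg', 'rb') do |image|
  tagged = client.tag_image_in_stream(image, language: 'en')
  tagged.tags.each { |tag| puts format('%.2f  %s', tag.confidence, tag.name) }
end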
2909
+
2910
+ #
2911
+ # This operation generates a list of words, or tags, that are relevant to the
2912
+ # content of the supplied image. The Computer Vision API can return tags based
2913
+ # on objects, living beings, scenery or actions found in images. Unlike
2914
+ # categories, tags are not organized according to a hierarchical classification
2915
+ # system, but correspond to image content. Tags may contain hints to avoid
2916
+ # ambiguity or provide context, for example the tag "ascomycete" may be
2917
+ # accompanied by the hint "fungus".
2918
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2919
+ # an image URL.
2920
+ # A successful response will be returned in JSON. If the request failed, the
2921
+ # response will contain an error code and a message to help understand what
2922
+ # went wrong.
2923
+ #
2924
+ # @param image An image stream.
2925
+ # @param language [Enum] The desired language for output generation. If this
2926
+ # parameter is not specified, the default value is "en". Supported
2927
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2928
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2929
+ # 'ja', 'pt', 'zh'
2930
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2931
+ # will be added to the HTTP request.
2932
+ #
2933
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2934
+ #
2935
+ def tag_image_in_stream_with_http_info(image, language:nil, custom_headers:nil)
2936
+ tag_image_in_stream_async(image, language:language, custom_headers:custom_headers).value!
2937
+ end
2938
+
2939
+ #
2940
+ # This operation generates a list of words, or tags, that are relevant to the
2941
+ # content of the supplied image. The Computer Vision API can return tags based
2942
+ # on objects, living beings, scenery or actions found in images. Unlike
2943
+ # categories, tags are not organized according to a hierarchical classification
2944
+ # system, but correspond to image content. Tags may contain hints to avoid
2945
+ # ambiguity or provide context, for example the tag "ascomycete" may be
2946
+ # accompanied by the hint "fungus".
2947
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2948
+ # an image URL.
2949
+ # A successful response will be returned in JSON. If the request failed, the
2950
+ # response will contain an error code and a message to help understand what
2951
+ # went wrong.
2952
+ #
2953
+ # @param image An image stream.
2954
+ # @param language [Enum] The desired language for output generation. If this
2955
+ # parameter is not specified, the default value is "en". Supported
2956
+ # languages: en - English, Default. es - Spanish, ja - Japanese, pt -
2957
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2958
+ # 'ja', 'pt', 'zh'
2959
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2960
+ # to the HTTP request.
2961
+ #
2962
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2963
+ #
2964
+ def tag_image_in_stream_async(image, language:nil, custom_headers:nil)
2965
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2966
+ fail ArgumentError, 'image is nil' if image.nil?
2967
+
2968
+
2969
+ request_headers = {}
2970
+ request_headers['Content-Type'] = 'application/octet-stream'
2971
+
2972
+ # Set Headers
2973
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2974
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2975
+
2976
+ # Serialize Request
2977
+ request_mapper = {
2978
+ client_side_validation: true,
2979
+ required: true,
2980
+ serialized_name: 'Image',
2981
+ type: {
2982
+ name: 'Stream'
2983
+ }
2984
+ }
2985
+ request_content = self.serialize(request_mapper, image)
2986
+
2987
+ path_template = 'tag'
2988
+
2989
+ request_url = @base_url || self.base_url
2990
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2991
+
2992
+ options = {
2993
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2994
+ query_params: {'language' => language},
2995
+ body: request_content,
2996
+ headers: request_headers.merge(custom_headers || {}),
2997
+ base_url: request_url
2998
+ }
2999
+ promise = self.make_request_async(:post, path_template, options)
3000
+
3001
+ promise = promise.then do |result|
3002
+ http_response = result.response
3003
+ status_code = http_response.status
3004
+ response_content = http_response.body
3005
+ unless status_code == 200
3006
+ error_model = JSON.load(response_content)
3007
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
3008
+ end
3009
+
3010
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
3011
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
3012
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
3013
+ # Deserialize Response
3014
+ if status_code == 200
3015
+ begin
3016
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
3017
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_1::Models::TagResult.mapper()
3018
+ result.body = self.deserialize(result_mapper, parsed_response)
3019
+ rescue Exception => e
3020
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
3021
+ end
3022
+ end
3023
+
3024
+ result
3025
+ end
3026
+
3027
+ promise.execute
3028
+ end
3029
+
3030
+ #
3031
+ # Recognize Text operation. When you use the Recognize Text interface, the
3032
+ # response contains a field called 'Operation-Location'. The
3033
+ # 'Operation-Location' field contains the URL that you must use for your Get
3034
+ # Recognize Text Operation Result operation.
3035
+ #
3036
+ # @param image An image stream.
3037
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
3038
+ # include: 'Handwritten', 'Printed'
3039
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
3040
+ # will be added to the HTTP request.
3041
+ #
3042
+ #
3043
+ def recognize_text_in_stream(image, mode, custom_headers:nil)
3044
+ response = recognize_text_in_stream_async(image, mode, custom_headers:custom_headers).value!
3045
+ nil
3046
+ end
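Because the wrapper above returns nil, a hedged sketch of the *_with_http_info variant is more useful here: the Operation-Location header is read off the HTTP response using the same indexing style this file uses for other headers; `client` and 'handwriting.jpg' are placeholders.

File.open('handwriting.jpg', 'rb') do |image|
  # mode values per the doc comment above: 'Handwritten' or 'Printed'
  op = client.recognize_text_in_stream_with_http_info(image, 'Printed')
  puts op.response['Operation-Location']   # URL to poll for the recognition result
end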
3047
+
3048
+ #
3049
+ # Recognize Text operation. When you use the Recognize Text interface, the
3050
+ # response contains a field called 'Operation-Location'. The
3051
+ # 'Operation-Location' field contains the URL that you must use for your Get
3052
+ # Recognize Text Operation Result operation.
3053
+ #
3054
+ # @param image An image stream.
3055
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
3056
+ # include: 'Handwritten', 'Printed'
3057
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
3058
+ # will be added to the HTTP request.
3059
+ #
3060
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
3061
+ #
3062
+ def recognize_text_in_stream_with_http_info(image, mode, custom_headers:nil)
3063
+ recognize_text_in_stream_async(image, mode, custom_headers:custom_headers).value!
3064
+ end
3065
+
3066
+ #
3067
+ # Recognize Text operation. When you use the Recognize Text interface, the
3068
+ # response contains a field called 'Operation-Location'. The
3069
+ # 'Operation-Location' field contains the URL that you must use for your Get
3070
+ # Recognize Text Operation Result operation.
3071
+ #
3072
+ # @param image An image stream.
3073
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
3074
+ # include: 'Handwritten', 'Printed'
3075
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
3076
+ # to the HTTP request.
3077
+ #
3078
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
3079
+ #
3080
+ def recognize_text_in_stream_async(image, mode, custom_headers:nil)
3081
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
3082
+ fail ArgumentError, 'image is nil' if image.nil?
3083
+ fail ArgumentError, 'mode is nil' if mode.nil?
3084
+
3085
+
3086
+ request_headers = {}
3087
+ request_headers['Content-Type'] = 'application/octet-stream'
3088
+
3089
+ # Set Headers
3090
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
3091
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
3092
+
3093
+ # Serialize Request
3094
+ request_mapper = {
3095
+ client_side_validation: true,
3096
+ required: true,
3097
+ serialized_name: 'Image',
3098
+ type: {
3099
+ name: 'Stream'
3100
+ }
3101
+ }
3102
+ request_content = self.serialize(request_mapper, image)
3103
+
3104
+ path_template = 'recognizeText'
3105
+
3106
+ request_url = @base_url || self.base_url
3107
+ request_url = request_url.gsub('{Endpoint}', endpoint)
3108
+
3109
+ options = {
3110
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
3111
+ query_params: {'mode' => mode},
3112
+ body: request_content,
3113
+ headers: request_headers.merge(custom_headers || {}),
3114
+ base_url: request_url
3115
+ }
3116
+ promise = self.make_request_async(:post, path_template, options)
3117
+
3118
+ promise = promise.then do |result|
3119
+ http_response = result.response
3120
+ status_code = http_response.status
3121
+ response_content = http_response.body
3122
+ unless status_code == 202
3123
+ error_model = JSON.load(response_content)
3124
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
3125
+ end
3126
+
3127
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
3128
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
3129
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
3130
+
3131
+ result
3132
+ end
3133
+
3134
+ promise.execute
3135
+ end
3136
+
3137
+ #
3138
+ # Use this interface to get the result of a Read Document operation, employing
3139
+ # the state-of-the-art Optical Character Recognition (OCR) algorithms optimized
3140
+ # for text-heavy documents. When you use the Read Document interface, the
3141
+ # response contains a field called 'Operation-Location'. The
3142
+ # 'Operation-Location' field contains the URL that you must use for your 'Get
3143
+ # Read Result operation' to access OCR results.
3144
+ #
3145
+ # @param image An image stream.
3146
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
3147
+ # will be added to the HTTP request.
3148
+ #
3149
+ #
3150
+ def batch_read_file_in_stream(image, custom_headers:nil)
3151
+ response = batch_read_file_in_stream_async(image, custom_headers:custom_headers).value!
3152
+ nil
3153
+ end
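As with Recognize Text, a hedged sketch using the *_with_http_info variant above to capture the Operation-Location polling URL; header indexing mirrors the pattern used elsewhere in this file, and `client` and 'contract.pdf' are placeholders.

File.open('contract.pdf', 'rb') do |image|
  op = client.batch_read_file_in_stream_with_http_info(image)
  puts op.response['Operation-Location']   # URL to poll for the Read operation result
end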
3154
+
3155
+ #
3156
+ # Use this interface to get the result of a Read Document operation, employing
3157
+ # the state-of-the-art Optical Character Recognition (OCR) algorithms optimized
3158
+ # for text-heavy documents. When you use the Read Document interface, the
3159
+ # response contains a field called 'Operation-Location'. The
3160
+ # 'Operation-Location' field contains the URL that you must use for your 'Get
3161
+ # Read Result operation' to access OCR results.​
3162
+ #
3163
+ # @param image An image stream.
3164
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
3165
+ # will be added to the HTTP request.
3166
+ #
3167
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
3168
+ #
3169
+ def batch_read_file_in_stream_with_http_info(image, custom_headers:nil)
3170
+ batch_read_file_in_stream_async(image, custom_headers:custom_headers).value!
3171
+ end
3172
+
3173
+ #
3174
+ # Use this interface to get the result of a Read Document operation, employing
3175
+ # the state-of-the-art Optical Character Recognition (OCR) algorithms optimized
3176
+ # for text-heavy documents. When you use the Read Document interface, the
3177
+ # response contains a field called 'Operation-Location'. The
3178
+ # 'Operation-Location' field contains the URL that you must use for your 'Get
3179
+ # Read Result operation' to access OCR results.​
3180
+ #
3181
+ # @param image An image stream.
3182
+ # @param [Hash{String => String}] A hash of custom headers that will be added
3183
+ # to the HTTP request.
3184
+ #
3185
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
3186
+ #
3187
+ def batch_read_file_in_stream_async(image, custom_headers:nil)
3188
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
3189
+ fail ArgumentError, 'image is nil' if image.nil?
3190
+
3191
+
3192
+ request_headers = {}
3193
+ request_headers['Content-Type'] = 'application/octet-stream'
3194
+
3195
+ # Set Headers
3196
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
3197
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
3198
+
3199
+ # Serialize Request
3200
+ request_mapper = {
3201
+ client_side_validation: true,
3202
+ required: true,
3203
+ serialized_name: 'Image',
3204
+ type: {
3205
+ name: 'Stream'
3206
+ }
3207
+ }
3208
+ request_content = self.serialize(request_mapper, image)
3209
+
3210
+ path_template = 'read/core/asyncBatchAnalyze'
3211
+
3212
+ request_url = @base_url || self.base_url
3213
+ request_url = request_url.gsub('{Endpoint}', endpoint)
3214
+
3215
+ options = {
3216
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
3217
+ body: request_content,
3218
+ headers: request_headers.merge(custom_headers || {}),
3219
+ base_url: request_url
3220
+ }
3221
+ promise = self.make_request_async(:post, path_template, options)
3222
+
3223
+ promise = promise.then do |result|
3224
+ http_response = result.response
3225
+ status_code = http_response.status
3226
+ response_content = http_response.body
3227
+ unless status_code == 202
3228
+ error_model = JSON.load(response_content)
3229
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
3230
+ end
3231
+
3232
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
3233
+ result.correlation_request_id = http_response['x-ms-correlation-request-id'] unless http_response['x-ms-correlation-request-id'].nil?
3234
+ result.client_request_id = http_response['x-ms-client-request-id'] unless http_response['x-ms-client-request-id'].nil?
3235
+
3236
+ result
3237
+ end
3238
+
3239
+ promise.execute
3240
+ end
3241
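
The payoff of the batch read call is the 'Operation-Location' response header described in the comments above; batch_read_file_in_stream itself returns nil, so the _with_http_info (or _async) variant is the one to use when you need that header. A hedged sketch, reusing the client from the previous example and the same header-access style the generated code itself uses:

    # Hypothetical usage sketch; the file name and the polling step are assumptions.
    File.open('document.jpg', 'rb') do |image|
      op = client.batch_read_file_in_stream_with_http_info(image)
      operation_location = op.response['Operation-Location']
      # Poll operation_location with the matching 'get read result' operation
      # (not shown in this hunk) until the OCR results become available.
    end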
+
+
+ private
+ #
+ # Adds telemetry information.
+ #
+ def add_telemetry
+ sdk_information = 'azure_cognitiveservices_computervision'
+ sdk_information = "#{sdk_information}/0.20.1"
+ add_user_agent_information(sdk_information)
+ end
+ end
+ end