azure_cognitiveservices_computervision 0.16.0 → 0.17.0

Files changed (62)
  1. checksums.yaml +4 -4
  2. data/lib/1.0/generated/azure_cognitiveservices_computervision.rb +19 -17
  3. data/lib/1.0/generated/azure_cognitiveservices_computervision/computer_vision_client.rb +220 -132
  4. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/azure_regions.rb +4 -0
  5. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/category.rb +1 -1
  6. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/celebrity_results.rb +78 -0
  7. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/domain_model_results.rb +6 -16
  8. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/face_description.rb +3 -2
  9. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/{language1.rb → gender.rb} +4 -4
  10. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_analysis.rb +5 -7
  11. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_description.rb +1 -1
  12. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_description_details.rb +1 -1
  13. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_url.rb +1 -1
  14. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/landmark_results.rb +78 -0
  15. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/landmark_results_landmarks_item.rb +57 -0
  16. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/ocr_result.rb +2 -3
  17. data/lib/1.0/generated/azure_cognitiveservices_computervision/models/tag_result.rb +1 -1
  18. data/lib/2.0/generated/azure_cognitiveservices_computervision.rb +65 -0
  19. data/lib/2.0/generated/azure_cognitiveservices_computervision/computer_vision_client.rb +2257 -0
  20. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/adult_info.rb +83 -0
  21. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/category.rb +69 -0
  22. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/category_detail.rb +77 -0
  23. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/celebrities_model.rb +69 -0
  24. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/celebrity_results.rb +78 -0
  25. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/color_info.rb +98 -0
  26. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/computer_vision_error.rb +75 -0
  27. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/computer_vision_error_codes.rb +27 -0
  28. data/lib/{1.0/generated/azure_cognitiveservices_computervision/models/domain_models.rb → 2.0/generated/azure_cognitiveservices_computervision/models/details.rb} +3 -3
  29. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/domain_model_results.rb +70 -0
  30. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/face_description.rb +71 -0
  31. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/face_rectangle.rb +79 -0
  32. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/gender.rb +16 -0
  33. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_analysis.rb +167 -0
  34. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_caption.rb +57 -0
  35. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_description.rb +99 -0
  36. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_description_details.rb +76 -0
  37. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_metadata.rb +68 -0
  38. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_tag.rb +68 -0
  39. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_type.rb +57 -0
  40. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/image_url.rb +47 -0
  41. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/landmark_results.rb +78 -0
  42. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/landmarks_model.rb +57 -0
  43. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/line.rb +86 -0
  44. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/list_models_result.rb +56 -0
  45. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/model_description.rb +65 -0
  46. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/ocr_languages.rb +41 -0
  47. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/ocr_line.rb +72 -0
  48. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/ocr_region.rb +72 -0
  49. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/ocr_result.rb +103 -0
  50. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/ocr_word.rb +62 -0
  51. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/recognition_result.rb +56 -0
  52. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/tag_result.rb +79 -0
  53. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/text_operation_result.rb +62 -0
  54. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/text_operation_status_codes.rb +18 -0
  55. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/text_recognition_mode.rb +16 -0
  56. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/visual_feature_types.rb +21 -0
  57. data/lib/2.0/generated/azure_cognitiveservices_computervision/models/word.rb +66 -0
  58. data/lib/2.0/generated/azure_cognitiveservices_computervision/module_definition.rb +9 -0
  59. data/lib/azure_cognitiveservices_computervision.rb +1 -0
  60. data/lib/profiles/latest/modules/computervision_profile_module.rb +103 -95
  61. data/lib/version.rb +1 -1
  62. metadata +49 -6
@@ -0,0 +1,2257 @@
+ # encoding: utf-8
+ # Code generated by Microsoft (R) AutoRest Code Generator.
+ # Changes may cause incorrect behavior and will be lost if the code is
+ # regenerated.
+
+ module Azure::CognitiveServices::ComputerVision::V2_0
+ #
+ # A service client - single point of access to the REST API.
+ #
+ class ComputerVisionClient < MsRestAzure::AzureServiceClient
+ include MsRestAzure
+ include MsRestAzure::Serialization
+
+ # @return [String] the base URI of the service.
+ attr_reader :base_url
+
+ # @return Credentials needed for the client to connect to Azure.
+ attr_reader :credentials1
+
+ # @return [String] Supported Cognitive Services endpoints
+ attr_accessor :endpoint
+
+ # @return Subscription credentials which uniquely identify client
+ # subscription.
+ attr_accessor :credentials
+
+ # @return [String] The preferred language for the response.
+ attr_accessor :accept_language
+
+ # @return [Integer] The retry timeout in seconds for Long Running
+ # Operations. Default value is 30.
+ attr_accessor :long_running_operation_retry_timeout
+
+ # @return [Boolean] Whether a unique x-ms-client-request-id should be
+ # generated. When set to true a unique x-ms-client-request-id value is
+ # generated and included in each request. Default is true.
+ attr_accessor :generate_client_request_id
+
+ #
+ # Creates and initializes a new instance of the ComputerVisionClient class.
+ # @param credentials [MsRest::ServiceClientCredentials] credentials to authorize HTTP requests made by the service client.
+ # @param options [Array] filters to be applied to the HTTP requests.
+ #
+ def initialize(credentials = nil, options = nil)
+ super(credentials, options)
+ @base_url = '{Endpoint}/vision/v2.0'
+
+ fail ArgumentError, 'invalid type of credentials input parameter' unless credentials.is_a?(MsRest::ServiceClientCredentials) unless credentials.nil?
+ @credentials = credentials
+
+ @accept_language = 'en-US'
+ @long_running_operation_retry_timeout = 30
+ @generate_client_request_id = true
+ add_telemetry
+ end
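For orientation, a minimal construction sketch for the new V2_0 client; the key and regional endpoint are placeholders, and CognitiveServicesCredentials is the key-based credential helper from the ms_rest_azure dependency (any MsRest::ServiceClientCredentials implementation works):

    require 'azure_cognitiveservices_computervision'

    # Placeholder key and endpoint -- substitute your own values.
    credentials = MsRestAzure::CognitiveServicesCredentials.new('<subscription-key>')
    client = Azure::CognitiveServices::ComputerVision::V2_0::ComputerVisionClient.new(credentials)
    # The '{Endpoint}' token in base_url is replaced with this value on each operation call.
    client.endpoint = 'https://westus.api.cognitive.microsoft.com'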
+
+ #
+ # Makes a request and returns the body of the response.
+ # @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
+ # @param path [String] the path, relative to {base_url}.
+ # @param options [Hash{String=>String}] specifying any request options like :body.
+ # @return [Hash{String=>String}] containing the body of the response.
+ # Example:
+ #
+ #  request_content = "{'location':'westus','tags':{'tag1':'val1','tag2':'val2'}}"
+ #  path = "/path"
+ #  options = {
+ #    body: request_content,
+ #    query_params: {'api-version' => '2016-02-01'}
+ #  }
+ #  result = @client.make_request(:put, path, options)
+ #
+ def make_request(method, path, options = {})
+ result = make_request_with_http_info(method, path, options)
+ result.body unless result.nil?
+ end
+
+ #
+ # Makes a request and returns the operation response.
+ # @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
+ # @param path [String] the path, relative to {base_url}.
+ # @param options [Hash{String=>String}] specifying any request options like :body.
+ # @return [MsRestAzure::AzureOperationResponse] Operation response containing the request, response and status.
+ #
+ def make_request_with_http_info(method, path, options = {})
+ result = make_request_async(method, path, options).value!
+ result.body = result.response.body.to_s.empty? ? nil : JSON.load(result.response.body)
+ result
+ end
+
+ #
+ # Makes a request asynchronously.
+ # @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
+ # @param path [String] the path, relative to {base_url}.
+ # @param options [Hash{String=>String}] specifying any request options like :body.
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def make_request_async(method, path, options = {})
+ fail ArgumentError, 'method is nil' if method.nil?
+ fail ArgumentError, 'path is nil' if path.nil?
+
+ request_url = options[:base_url] || @base_url
+ if(!options[:headers].nil? && !options[:headers]['Content-Type'].nil?)
+ @request_headers['Content-Type'] = options[:headers]['Content-Type']
+ end
+
+ request_headers = @request_headers
+ request_headers.merge!({'accept-language' => @accept_language}) unless @accept_language.nil?
+ options.merge!({headers: request_headers.merge(options[:headers] || {})})
+ options.merge!({credentials: @credentials}) unless @credentials.nil?
+
+ super(request_url, method, path, options)
+ end
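The three helpers above layer on each other: make_request_async returns a Concurrent::Promise, the _with_http_info variant blocks on it and attaches the parsed body, and make_request returns just the body. An illustrative direct call against the 'models' path:

    # Illustrative only: the operation methods below substitute '{Endpoint}' into
    # base_url for you; when calling the generic helpers directly you do it yourself.
    options = { base_url: client.base_url.gsub('{Endpoint}', client.endpoint) }
    promise = client.make_request_async(:get, 'models', options)
    op = promise.value!   # HTTP operation response with .request, .response and .body
    puts op.response.status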
+
+ #
+ # This operation returns the list of domain-specific models that are supported
+ # by the Computer Vision API. Currently, the API only supports one
+ # domain-specific model: a celebrity recognizer. A successful response will be
+ # returned in JSON. If the request failed, the response will contain an error
+ # code and a message to help understand what went wrong.
+ #
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [ListModelsResult] operation results.
+ #
+ def list_models(custom_headers:nil)
+ response = list_models_async(custom_headers:custom_headers).value!
+ response.body unless response.nil?
+ end
+
+ #
+ # This operation returns the list of domain-specific models that are supported
+ # by the Computer Vision API. Currently, the API only supports one
+ # domain-specific model: a celebrity recognizer. A successful response will be
+ # returned in JSON. If the request failed, the response will contain an error
+ # code and a message to help understand what went wrong.
+ #
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
+ #
+ def list_models_with_http_info(custom_headers:nil)
+ list_models_async(custom_headers:custom_headers).value!
+ end
+
+ #
+ # This operation returns the list of domain-specific models that are supported
+ # by the Computer Vision API. Currently, the API only supports one
+ # domain-specific model: a celebrity recognizer. A successful response will be
+ # returned in JSON. If the request failed, the response will contain an error
+ # code and a message to help understand what went wrong.
+ #
+ # @param [Hash{String => String}] A hash of custom headers that will be added
+ # to the HTTP request.
+ #
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def list_models_async(custom_headers:nil)
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
+
+
+ request_headers = {}
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
+
+ # Set Headers
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
+ path_template = 'models'
+
+ request_url = @base_url || self.base_url
+ request_url = request_url.gsub('{Endpoint}', endpoint)
+
+ options = {
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
+ headers: request_headers.merge(custom_headers || {}),
+ base_url: request_url
+ }
+ promise = self.make_request_async(:get, path_template, options)
+
+ promise = promise.then do |result|
+ http_response = result.response
+ status_code = http_response.status
+ response_content = http_response.body
+ unless status_code == 200
+ error_model = JSON.load(response_content)
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
+ end
+
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
+ # Deserialize Response
+ if status_code == 200
+ begin
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ListModelsResult.mapper()
+ result.body = self.deserialize(result_mapper, parsed_response)
+ rescue Exception => e
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
+ end
+ end
+
+ result
+ end
+
+ promise.execute
+ end
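A minimal sketch of calling this operation with the client built earlier; the body is deserialized through the ListModelsResult mapper, so inspecting the returned object is the safest way to see its exact accessors:

    result = client.list_models_with_http_info
    puts result.response.status   # 200 on success
    puts result.body.inspect      # deserialized ListModelsResult

    # Or take the body directly:
    models = client.list_models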
+
+ #
+ # This operation extracts a rich set of visual features based on the image
+ # content. Two input methods are supported -- (1) Uploading an image or (2)
+ # specifying an image URL. Within your request, there is an optional parameter
+ # to allow you to choose which features to return. By default, image
+ # categories are returned in the response.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
+ # visual feature types to return. Multiple values should be comma-separated.
+ # Valid visual feature types include:Categories - categorizes image content
+ # according to a taxonomy defined in documentation. Tags - tags the image with
+ # a detailed list of words related to the image content. Description -
+ # describes the image content with a complete English sentence. Faces - detects
+ # if faces are present. If present, generate coordinates, gender and age.
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
+ # the accent color, dominant color, and whether an image is black&white.Adult -
+ # detects if the image is pornographic in nature (depicts nudity or a sex act).
+ # Sexually suggestive content is also detected.
+ # @param details [Array<Details>] A string indicating which domain-specific
+ # details to return. Multiple values should be comma-separated. Valid visual
+ # feature types include:Celebrities - identifies celebrities if detected in the
+ # image.
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [ImageAnalysis] operation results.
+ #
+ def analyze_image(url, visual_features:nil, details:nil, language:nil, custom_headers:nil)
+ response = analyze_image_async(url, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
+ response.body unless response.nil?
+ end
+
+ #
+ # This operation extracts a rich set of visual features based on the image
+ # content. Two input methods are supported -- (1) Uploading an image or (2)
+ # specifying an image URL. Within your request, there is an optional parameter
+ # to allow you to choose which features to return. By default, image
+ # categories are returned in the response.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
+ # visual feature types to return. Multiple values should be comma-separated.
+ # Valid visual feature types include:Categories - categorizes image content
+ # according to a taxonomy defined in documentation. Tags - tags the image with
+ # a detailed list of words related to the image content. Description -
+ # describes the image content with a complete English sentence. Faces - detects
+ # if faces are present. If present, generate coordinates, gender and age.
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
+ # the accent color, dominant color, and whether an image is black&white.Adult -
+ # detects if the image is pornographic in nature (depicts nudity or a sex act).
+ # Sexually suggestive content is also detected.
+ # @param details [Array<Details>] A string indicating which domain-specific
+ # details to return. Multiple values should be comma-separated. Valid visual
+ # feature types include:Celebrities - identifies celebrities if detected in the
+ # image.
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
+ #
+ def analyze_image_with_http_info(url, visual_features:nil, details:nil, language:nil, custom_headers:nil)
+ analyze_image_async(url, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
+ end
+
+ #
+ # This operation extracts a rich set of visual features based on the image
+ # content. Two input methods are supported -- (1) Uploading an image or (2)
+ # specifying an image URL. Within your request, there is an optional parameter
+ # to allow you to choose which features to return. By default, image
+ # categories are returned in the response.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
+ # visual feature types to return. Multiple values should be comma-separated.
+ # Valid visual feature types include:Categories - categorizes image content
+ # according to a taxonomy defined in documentation. Tags - tags the image with
+ # a detailed list of words related to the image content. Description -
+ # describes the image content with a complete English sentence. Faces - detects
+ # if faces are present. If present, generate coordinates, gender and age.
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
+ # the accent color, dominant color, and whether an image is black&white.Adult -
+ # detects if the image is pornographic in nature (depicts nudity or a sex act).
+ # Sexually suggestive content is also detected.
+ # @param details [Array<Details>] A string indicating which domain-specific
+ # details to return. Multiple values should be comma-separated. Valid visual
+ # feature types include:Celebrities - identifies celebrities if detected in the
+ # image.
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param [Hash{String => String}] A hash of custom headers that will be added
+ # to the HTTP request.
+ #
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def analyze_image_async(url, visual_features:nil, details:nil, language:nil, custom_headers:nil)
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
+ fail ArgumentError, 'url is nil' if url.nil?
+
+ image_url = ImageUrl.new
+ unless url.nil?
+ image_url.url = url
+ end
+
+ request_headers = {}
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
+
+ # Set Headers
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
+
+ # Serialize Request
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
+ request_content = self.serialize(request_mapper, image_url)
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
+
+ path_template = 'analyze'
+
+ request_url = @base_url || self.base_url
+ request_url = request_url.gsub('{Endpoint}', endpoint)
+
+ options = {
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
+ query_params: {'visualFeatures' => visual_features.nil? ? nil : visual_features.join(','),'details' => details.nil? ? nil : details.join(','),'language' => language},
+ body: request_content,
+ headers: request_headers.merge(custom_headers || {}),
+ base_url: request_url
+ }
+ promise = self.make_request_async(:post, path_template, options)
+
+ promise = promise.then do |result|
+ http_response = result.response
+ status_code = http_response.status
+ response_content = http_response.body
+ unless status_code == 200
+ error_model = JSON.load(response_content)
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
+ end
+
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
+ # Deserialize Response
+ if status_code == 200
+ begin
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageAnalysis.mapper()
+ result.body = self.deserialize(result_mapper, parsed_response)
+ rescue Exception => e
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
+ end
+ end
+
+ result
+ end
+
+ promise.execute
+ end
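A short usage sketch for the analyze endpoint, assuming the client from the construction example; the feature and details strings mirror the VisualFeatureTypes and Details values described above, and the image URL is a placeholder:

    analysis = client.analyze_image(
      'https://example.com/photo.jpg',                   # placeholder URL
      visual_features: %w(Categories Tags Description),  # joined with ',' into the visualFeatures query param
      details: %w(Celebrities),
      language: 'en'
    )
    # analysis is a deserialized ImageAnalysis model.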
+
+ #
+ # This operation generates a thumbnail image with the user-specified width and
+ # height. By default, the service analyzes the image, identifies the region of
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
+ # Smart cropping helps when you specify an aspect ratio that differs from that
+ # of the input image. A successful response contains the thumbnail image
+ # binary. If the request failed, the response contains an error code and a
+ # message to help determine what went wrong.
+ #
+ # @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
+ # Recommended minimum of 50.
+ # @param height [Integer] Height of the thumbnail. It must be between 1 and
+ # 1024. Recommended minimum of 50.
+ # @param url [String] Publicly reachable URL of an image
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [NOT_IMPLEMENTED] operation results.
+ #
+ def generate_thumbnail(width, height, url, smart_cropping:false, custom_headers:nil)
+ response = generate_thumbnail_async(width, height, url, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
+ response.body unless response.nil?
+ end
+
+ #
+ # This operation generates a thumbnail image with the user-specified width and
+ # height. By default, the service analyzes the image, identifies the region of
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
+ # Smart cropping helps when you specify an aspect ratio that differs from that
+ # of the input image. A successful response contains the thumbnail image
+ # binary. If the request failed, the response contains an error code and a
+ # message to help determine what went wrong.
+ #
+ # @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
+ # Recommended minimum of 50.
+ # @param height [Integer] Height of the thumbnail. It must be between 1 and
+ # 1024. Recommended minimum of 50.
+ # @param url [String] Publicly reachable URL of an image
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
+ #
+ def generate_thumbnail_with_http_info(width, height, url, smart_cropping:false, custom_headers:nil)
+ generate_thumbnail_async(width, height, url, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
+ end
+
+ #
+ # This operation generates a thumbnail image with the user-specified width and
+ # height. By default, the service analyzes the image, identifies the region of
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
+ # Smart cropping helps when you specify an aspect ratio that differs from that
+ # of the input image. A successful response contains the thumbnail image
+ # binary. If the request failed, the response contains an error code and a
+ # message to help determine what went wrong.
+ #
+ # @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
+ # Recommended minimum of 50.
+ # @param height [Integer] Height of the thumbnail. It must be between 1 and
+ # 1024. Recommended minimum of 50.
+ # @param url [String] Publicly reachable URL of an image
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
+ # @param [Hash{String => String}] A hash of custom headers that will be added
+ # to the HTTP request.
+ #
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def generate_thumbnail_async(width, height, url, smart_cropping:false, custom_headers:nil)
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
+ fail ArgumentError, 'width is nil' if width.nil?
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !width.nil? && width > 1023
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMinimum': '1'" if !width.nil? && width < 1
+ fail ArgumentError, 'height is nil' if height.nil?
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !height.nil? && height > 1023
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMinimum': '1'" if !height.nil? && height < 1
+ fail ArgumentError, 'url is nil' if url.nil?
+
+ image_url = ImageUrl.new
+ unless url.nil?
+ image_url.url = url
+ end
+
+ request_headers = {}
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
+
+ # Set Headers
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
+
+ # Serialize Request
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
+ request_content = self.serialize(request_mapper, image_url)
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
+
+ path_template = 'generateThumbnail'
+
+ request_url = @base_url || self.base_url
+ request_url = request_url.gsub('{Endpoint}', endpoint)
+
+ options = {
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
+ query_params: {'width' => width,'height' => height,'smartCropping' => smart_cropping},
+ body: request_content,
+ headers: request_headers.merge(custom_headers || {}),
+ base_url: request_url
+ }
+ promise = self.make_request_async(:post, path_template, options)
+
+ promise = promise.then do |result|
+ http_response = result.response
+ status_code = http_response.status
+ response_content = http_response.body
+ unless status_code == 200
+ error_model = JSON.load(response_content)
+ fail MsRestAzure::AzureOperationError.new(result.request, http_response, error_model)
+ end
+
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
+ # Deserialize Response
+ if status_code == 200
+ begin
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
+ result_mapper = {
+ client_side_validation: true,
+ required: false,
+ serialized_name: 'parsed_response',
+ type: {
+ name: 'Stream'
+ }
+ }
+ result.body = self.deserialize(result_mapper, parsed_response)
+ rescue Exception => e
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
+ end
+ end
+
+ result
+ end
+
+ promise.execute
+ end
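Because the thumbnail operation deserializes through a 'Stream' mapper rather than a model, a sketch like the following (placeholder URL and output path) writes the binary out; note that the documented 1-1024 range is validated as 1-1023 by the generated guards above:

    thumb = client.generate_thumbnail(100, 100, 'https://example.com/photo.jpg', smart_cropping: true)
    # Treat the deserialized body as raw image bytes; it may come back as a stream or a string.
    data = thumb.respond_to?(:read) ? thumb.read : thumb
    File.open('thumbnail.jpg', 'wb') { |f| f.write(data) }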
+
+ #
+ # Optical Character Recognition (OCR) detects printed text in an image and
+ # extracts the recognized characters into a machine-usable character stream.
+ # Upon success, the OCR results will be returned. Upon failure, the error code
+ # together with an error message will be returned. The error code can be one of
+ # InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
+ # NotSupportedLanguage, or InternalServerError.
+ #
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
+ # the image. With detectOrientation=true the OCR service tries to detect the
+ # image orientation and correct it before further processing (e.g. if it's
+ # upside-down).
+ # @param url [String] Publicly reachable URL of an image
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
+ # detected in the image. The default value is 'unk'. Possible values include:
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [OcrResult] operation results.
+ #
+ def recognize_printed_text(detect_orientation, url, language:nil, custom_headers:nil)
+ response = recognize_printed_text_async(detect_orientation, url, language:language, custom_headers:custom_headers).value!
+ response.body unless response.nil?
+ end
+
+ #
+ # Optical Character Recognition (OCR) detects printed text in an image and
+ # extracts the recognized characters into a machine-usable character stream.
+ # Upon success, the OCR results will be returned. Upon failure, the error code
+ # together with an error message will be returned. The error code can be one of
+ # InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
+ # NotSupportedLanguage, or InternalServerError.
+ #
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
+ # the image. With detectOrientation=true the OCR service tries to detect the
+ # image orientation and correct it before further processing (e.g. if it's
+ # upside-down).
+ # @param url [String] Publicly reachable URL of an image
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
+ # detected in the image. The default value is 'unk'. Possible values include:
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
+ #
+ def recognize_printed_text_with_http_info(detect_orientation, url, language:nil, custom_headers:nil)
+ recognize_printed_text_async(detect_orientation, url, language:language, custom_headers:custom_headers).value!
+ end
+
+ #
+ # Optical Character Recognition (OCR) detects printed text in an image and
+ # extracts the recognized characters into a machine-usable character stream.
+ # Upon success, the OCR results will be returned. Upon failure, the error code
+ # together with an error message will be returned. The error code can be one of
+ # InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
+ # NotSupportedLanguage, or InternalServerError.
+ #
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
+ # the image. With detectOrientation=true the OCR service tries to detect the
+ # image orientation and correct it before further processing (e.g. if it's
+ # upside-down).
+ # @param url [String] Publicly reachable URL of an image
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
+ # detected in the image. The default value is 'unk'. Possible values include:
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
+ # @param [Hash{String => String}] A hash of custom headers that will be added
+ # to the HTTP request.
+ #
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def recognize_printed_text_async(detect_orientation, url, language:nil, custom_headers:nil)
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
+ fail ArgumentError, 'detect_orientation is nil' if detect_orientation.nil?
+ fail ArgumentError, 'url is nil' if url.nil?
+
+ image_url = ImageUrl.new
+ unless url.nil?
+ image_url.url = url
+ end
+
+ request_headers = {}
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
+
+ # Set Headers
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
+
+ # Serialize Request
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
+ request_content = self.serialize(request_mapper, image_url)
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
+
+ path_template = 'ocr'
+
+ request_url = @base_url || self.base_url
+ request_url = request_url.gsub('{Endpoint}', endpoint)
+
+ options = {
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
+ query_params: {'detectOrientation' => detect_orientation,'language' => language},
+ body: request_content,
+ headers: request_headers.merge(custom_headers || {}),
+ base_url: request_url
+ }
+ promise = self.make_request_async(:post, path_template, options)
+
+ promise = promise.then do |result|
+ http_response = result.response
+ status_code = http_response.status
+ response_content = http_response.body
+ unless status_code == 200
+ error_model = JSON.load(response_content)
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
+ end
+
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
+ # Deserialize Response
+ if status_code == 200
+ begin
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::OcrResult.mapper()
+ result.body = self.deserialize(result_mapper, parsed_response)
+ rescue Exception => e
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
+ end
+ end
+
+ result
+ end
+
+ promise.execute
+ end
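A sketch of the OCR call above; the nested regions/lines/words accessors are assumed from the OcrResult, OcrRegion, OcrLine and OcrWord models added in this release, and the URL is a placeholder:

    ocr = client.recognize_printed_text(true, 'https://example.com/receipt.png', language: 'en')
    # Walk the assumed OcrResult structure: regions contain lines, lines contain words.
    ocr.regions.each do |region|
      region.lines.each do |line|
        puts line.words.map(&:text).join(' ')
      end
    end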
+
+ #
+ # This operation generates a description of an image in human readable language
+ # with complete sentences. The description is based on a collection of content
+ # tags, which are also returned by the operation. More than one description can
+ # be generated for each image. Descriptions are ordered by their confidence
+ # score. All descriptions are in English. Two input methods are supported --
+ # (1) Uploading an image or (2) specifying an image URL.A successful response
+ # will be returned in JSON. If the request failed, the response will contain
+ # an error code and a message to help understand what went wrong.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param max_candidates [String] Maximum number of candidate descriptions to be
+ # returned. The default is 1.
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [ImageDescription] operation results.
+ #
+ def describe_image(url, max_candidates:'1', language:nil, custom_headers:nil)
+ response = describe_image_async(url, max_candidates:max_candidates, language:language, custom_headers:custom_headers).value!
+ response.body unless response.nil?
+ end
+
+ #
+ # This operation generates a description of an image in human readable language
+ # with complete sentences. The description is based on a collection of content
+ # tags, which are also returned by the operation. More than one description can
+ # be generated for each image. Descriptions are ordered by their confidence
+ # score. All descriptions are in English. Two input methods are supported --
+ # (1) Uploading an image or (2) specifying an image URL.A successful response
+ # will be returned in JSON. If the request failed, the response will contain
+ # an error code and a message to help understand what went wrong.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param max_candidates [String] Maximum number of candidate descriptions to be
+ # returned. The default is 1.
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
+ #
+ def describe_image_with_http_info(url, max_candidates:'1', language:nil, custom_headers:nil)
+ describe_image_async(url, max_candidates:max_candidates, language:language, custom_headers:custom_headers).value!
+ end
+
+ #
+ # This operation generates a description of an image in human readable language
+ # with complete sentences. The description is based on a collection of content
+ # tags, which are also returned by the operation. More than one description can
+ # be generated for each image. Descriptions are ordered by their confidence
+ # score. All descriptions are in English. Two input methods are supported --
+ # (1) Uploading an image or (2) specifying an image URL.A successful response
+ # will be returned in JSON. If the request failed, the response will contain
+ # an error code and a message to help understand what went wrong.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param max_candidates [String] Maximum number of candidate descriptions to be
+ # returned. The default is 1.
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param [Hash{String => String}] A hash of custom headers that will be added
+ # to the HTTP request.
+ #
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def describe_image_async(url, max_candidates:'1', language:nil, custom_headers:nil)
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
+ fail ArgumentError, 'url is nil' if url.nil?
+
+ image_url = ImageUrl.new
+ unless url.nil?
+ image_url.url = url
+ end
+
+ request_headers = {}
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
+
+ # Set Headers
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
+
+ # Serialize Request
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
+ request_content = self.serialize(request_mapper, image_url)
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
+
+ path_template = 'describe'
+
+ request_url = @base_url || self.base_url
+ request_url = request_url.gsub('{Endpoint}', endpoint)
+
+ options = {
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
+ query_params: {'maxCandidates' => max_candidates,'language' => language},
+ body: request_content,
+ headers: request_headers.merge(custom_headers || {}),
+ base_url: request_url
+ }
+ promise = self.make_request_async(:post, path_template, options)
+
+ promise = promise.then do |result|
+ http_response = result.response
+ status_code = http_response.status
+ response_content = http_response.body
+ unless status_code == 200
+ error_model = JSON.load(response_content)
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
+ end
+
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
+ # Deserialize Response
+ if status_code == 200
+ begin
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageDescription.mapper()
+ result.body = self.deserialize(result_mapper, parsed_response)
+ rescue Exception => e
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
+ end
+ end
+
+ result
+ end
+
+ promise.execute
+ end
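A sketch for the describe endpoint; the captions/confidence accessors are assumed from the ImageDescription and ImageCaption models in this release, and note that max_candidates is a string in the generated signature:

    description = client.describe_image('https://example.com/photo.jpg', max_candidates: '3')
    # Assumed accessors on the deserialized ImageDescription model.
    description.captions.each do |caption|
      puts format('%.2f  %s', caption.confidence, caption.text)
    end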
+
+ #
+ # This operation generates a list of words, or tags, that are relevant to the
+ # content of the supplied image. The Computer Vision API can return tags based
+ # on objects, living beings, scenery or actions found in images. Unlike
+ # categories, tags are not organized according to a hierarchical classification
+ # system, but correspond to image content. Tags may contain hints to avoid
+ # ambiguity or provide context, for example the tag 'cello' may be accompanied
+ # by the hint 'musical instrument'. All tags are in English.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [TagResult] operation results.
+ #
+ def tag_image(url, language:nil, custom_headers:nil)
+ response = tag_image_async(url, language:language, custom_headers:custom_headers).value!
+ response.body unless response.nil?
+ end
+
+ #
+ # This operation generates a list of words, or tags, that are relevant to the
+ # content of the supplied image. The Computer Vision API can return tags based
+ # on objects, living beings, scenery or actions found in images. Unlike
+ # categories, tags are not organized according to a hierarchical classification
+ # system, but correspond to image content. Tags may contain hints to avoid
+ # ambiguity or provide context, for example the tag 'cello' may be accompanied
+ # by the hint 'musical instrument'. All tags are in English.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
+ # will be added to the HTTP request.
+ #
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
+ #
+ def tag_image_with_http_info(url, language:nil, custom_headers:nil)
+ tag_image_async(url, language:language, custom_headers:custom_headers).value!
+ end
+
+ #
+ # This operation generates a list of words, or tags, that are relevant to the
+ # content of the supplied image. The Computer Vision API can return tags based
+ # on objects, living beings, scenery or actions found in images. Unlike
+ # categories, tags are not organized according to a hierarchical classification
+ # system, but correspond to image content. Tags may contain hints to avoid
+ # ambiguity or provide context, for example the tag 'cello' may be accompanied
+ # by the hint 'musical instrument'. All tags are in English.
+ #
+ # @param url [String] Publicly reachable URL of an image
+ # @param language [Enum] The desired language for output generation. If this
+ # parameter is not specified, the default value is "en".Supported
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
+ # 'ja', 'pt', 'zh'
+ # @param [Hash{String => String}] A hash of custom headers that will be added
+ # to the HTTP request.
+ #
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
+ #
+ def tag_image_async(url, language:nil, custom_headers:nil)
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
+ fail ArgumentError, 'url is nil' if url.nil?
+
+ image_url = ImageUrl.new
+ unless url.nil?
+ image_url.url = url
+ end
+
+ request_headers = {}
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
+
+ # Set Headers
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
+
+ # Serialize Request
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
+ request_content = self.serialize(request_mapper, image_url)
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
+
+ path_template = 'tag'
+
+ request_url = @base_url || self.base_url
+ request_url = request_url.gsub('{Endpoint}', endpoint)
+
+ options = {
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
+ query_params: {'language' => language},
+ body: request_content,
+ headers: request_headers.merge(custom_headers || {}),
+ base_url: request_url
+ }
+ promise = self.make_request_async(:post, path_template, options)
+
+ promise = promise.then do |result|
+ http_response = result.response
+ status_code = http_response.status
+ response_content = http_response.body
+ unless status_code == 200
+ error_model = JSON.load(response_content)
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
+ end
+
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
+ # Deserialize Response
+ if status_code == 200
+ begin
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::TagResult.mapper()
+ result.body = self.deserialize(result_mapper, parsed_response)
+ rescue Exception => e
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
+ end
+ end
+
+ result
+ end
+
+ promise.execute
+ end
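And a sketch for the tag endpoint, assuming TagResult exposes its tags collection (ImageTag entries with name and confidence, per the models added in this release); the URL is again a placeholder:

    result = client.tag_image('https://example.com/photo.jpg', language: 'en')
    # Assumed accessors on the deserialized TagResult / ImageTag models.
    result.tags.each { |t| puts "#{t.name} (#{t.confidence})" }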
934
+
935
+ #
936
+ # This operation recognizes content within an image by applying a
937
+ # domain-specific model. The list of domain-specific models that are supported
938
+ # by the Computer Vision API can be retrieved using the /models GET request.
939
+ # Currently, the API only provides a single domain-specific model: celebrities.
940
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
941
+ # an image URL. A successful response will be returned in JSON. If the request
942
+ # failed, the response will contain an error code and a message to help
943
+ # understand what went wrong.
944
+ #
945
+ # @param model [String] The domain-specific content to recognize.
946
+ # @param url [String] Publicly reachable URL of an image
947
+ # @param language [Enum] The desired language for output generation. If this
948
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
949
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
950
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
951
+ # 'ja', 'pt', 'zh'
952
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
953
+ # will be added to the HTTP request.
954
+ #
955
+ # @return [DomainModelResults] operation results.
956
+ #
957
+ def analyze_image_by_domain(model, url, language:nil, custom_headers:nil)
958
+ response = analyze_image_by_domain_async(model, url, language:language, custom_headers:custom_headers).value!
959
+ response.body unless response.nil?
960
+ end
961
+
962
+ #
963
+ # This operation recognizes content within an image by applying a
964
+ # domain-specific model. The list of domain-specific models that are supported
965
+ # by the Computer Vision API can be retrieved using the /models GET request.
966
+ # Currently, the API only provides a single domain-specific model: celebrities.
967
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
968
+ # an image URL. A successful response will be returned in JSON. If the request
969
+ # failed, the response will contain an error code and a message to help
970
+ # understand what went wrong.
971
+ #
972
+ # @param model [String] The domain-specific content to recognize.
973
+ # @param url [String] Publicly reachable URL of an image
974
+ # @param language [Enum] The desired language for output generation. If this
975
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
976
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
977
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
978
+ # 'ja', 'pt', 'zh'
979
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
980
+ # will be added to the HTTP request.
981
+ #
982
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
983
+ #
984
+ def analyze_image_by_domain_with_http_info(model, url, language:nil, custom_headers:nil)
985
+ analyze_image_by_domain_async(model, url, language:language, custom_headers:custom_headers).value!
986
+ end
987
+
988
+ #
989
+ # This operation recognizes content within an image by applying a
990
+ # domain-specific model. The list of domain-specific models that are supported
991
+ # by the Computer Vision API can be retrieved using the /models GET request.
992
+ # Currently, the API only provides a single domain-specific model: celebrities.
993
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
994
+ # an image URL. A successful response will be returned in JSON. If the request
995
+ # failed, the response will contain an error code and a message to help
996
+ # understand what went wrong.
997
+ #
998
+ # @param model [String] The domain-specific content to recognize.
999
+ # @param url [String] Publicly reachable URL of an image
1000
+ # @param language [Enum] The desired language for output generation. If this
1001
+ # parameter is not specified, the default value is &quot;en&quot;.Supported
1002
+ # languages:en - English, Default. es - Spanish, ja - Japanese, pt -
1003
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1004
+ # 'ja', 'pt', 'zh'
1005
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1006
+ # to the HTTP request.
1007
+ #
1008
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1009
+ #
1010
+ def analyze_image_by_domain_async(model, url, language:nil, custom_headers:nil)
1011
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1012
+ fail ArgumentError, 'model is nil' if model.nil?
1013
+ fail ArgumentError, 'url is nil' if url.nil?
1014
+
1015
+ image_url = ImageUrl.new
1016
+ unless url.nil?
1017
+ image_url.url = url
1018
+ end
1019
+
1020
+ request_headers = {}
1021
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1022
+
1023
+ # Set Headers
1024
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1025
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1026
+
1027
+ # Serialize Request
1028
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
1029
+ request_content = self.serialize(request_mapper, image_url)
1030
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1031
+
1032
+ path_template = 'models/{model}/analyze'
1033
+
1034
+ request_url = @base_url || self.base_url
1035
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1036
+
1037
+ options = {
1038
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1039
+ path_params: {'model' => model},
1040
+ query_params: {'language' => language},
1041
+ body: request_content,
1042
+ headers: request_headers.merge(custom_headers || {}),
1043
+ base_url: request_url
1044
+ }
1045
+ promise = self.make_request_async(:post, path_template, options)
1046
+
1047
+ promise = promise.then do |result|
1048
+ http_response = result.response
1049
+ status_code = http_response.status
1050
+ response_content = http_response.body
1051
+ unless status_code == 200
1052
+ error_model = JSON.load(response_content)
1053
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1054
+ end
1055
+
1056
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1057
+ # Deserialize Response
1058
+ if status_code == 200
1059
+ begin
1060
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1061
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::DomainModelResults.mapper()
1062
+ result.body = self.deserialize(result_mapper, parsed_response)
1063
+ rescue Exception => e
1064
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1065
+ end
1066
+ end
1067
+
1068
+ result
1069
+ end
1070
+
1071
+ promise.execute
1072
+ end
1073
+
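As a quick orientation for the URL-based call above, here is a minimal usage sketch. The credential class, the endpoint accessor and the regional URL are assumptions drawn from other generated Cognitive Services clients, not from this diff; substitute your own key and region. The later sketches in this section reuse the same `client` instance.

    require 'azure_cognitiveservices_computervision'

    # Assumed key-based credential helper from ms_rest_azure (hypothetical setup).
    credentials = MsRestAzure::CognitiveServicesCredentials.new('<subscription_key>')
    client = Azure::CognitiveServices::ComputerVision::V2_0::ComputerVisionClient.new(credentials)
    client.endpoint = 'https://westus.api.cognitive.microsoft.com'

    # 'celebrities' is one of the domain-specific models listed by the /models GET request.
    result = client.analyze_image_by_domain('celebrities',
                                            'https://example.com/photo.jpg',
                                            language: 'en')
    puts result.inspect unless result.nil?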
1074
+ #
1075
+ # Recognize Text operation. When you use the Recognize Text interface, the
1076
+ # response contains a field called 'Operation-Location'. The
1077
+ # 'Operation-Location' field contains the URL that you must use for your Get
1078
+ # Recognize Text Operation Result operation.
1079
+ #
1080
+ # @param url [String] Publicly reachable URL of an image
1081
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
1082
+ # include: 'Handwritten', 'Printed'
1083
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1084
+ # will be added to the HTTP request.
1085
+ #
1086
+ #
1087
+ def recognize_text(url, mode, custom_headers:nil)
1088
+ response = recognize_text_async(url, mode, custom_headers:custom_headers).value!
1089
+ nil
1090
+ end
1091
+
1092
+ #
1093
+ # Recognize Text operation. When you use the Recognize Text interface, the
1094
+ # response contains a field called 'Operation-Location'. The
1095
+ # 'Operation-Location' field contains the URL that you must use for your Get
1096
+ # Recognize Text Operation Result operation.
1097
+ #
1098
+ # @param url [String] Publicly reachable URL of an image
1099
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
1100
+ # include: 'Handwritten', 'Printed'
1101
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1102
+ # will be added to the HTTP request.
1103
+ #
1104
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1105
+ #
1106
+ def recognize_text_with_http_info(url, mode, custom_headers:nil)
1107
+ recognize_text_async(url, mode, custom_headers:custom_headers).value!
1108
+ end
1109
+
1110
+ #
1111
+ # Recognize Text operation. When you use the Recognize Text interface, the
1112
+ # response contains a field called 'Operation-Location'. The
1113
+ # 'Operation-Location' field contains the URL that you must use for your Get
1114
+ # Recognize Text Operation Result operation.
1115
+ #
1116
+ # @param url [String] Publicly reachable URL of an image
1117
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
1118
+ # include: 'Handwritten', 'Printed'
1119
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1120
+ # to the HTTP request.
1121
+ #
1122
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1123
+ #
1124
+ def recognize_text_async(url, mode, custom_headers:nil)
1125
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1126
+ fail ArgumentError, 'mode is nil' if mode.nil?
1127
+ fail ArgumentError, 'url is nil' if url.nil?
1128
+
1129
+ image_url = ImageUrl.new
1130
+ unless url.nil?
1131
+ image_url.url = url
1132
+ end
1133
+
1134
+ request_headers = {}
1135
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1136
+
1137
+ # Set Headers
1138
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1139
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1140
+
1141
+ # Serialize Request
1142
+ request_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageUrl.mapper()
1143
+ request_content = self.serialize(request_mapper, image_url)
1144
+ request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
1145
+
1146
+ path_template = 'recognizeText'
1147
+
1148
+ request_url = @base_url || self.base_url
1149
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1150
+
1151
+ options = {
1152
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1153
+ query_params: {'mode' => mode},
1154
+ body: request_content,
1155
+ headers: request_headers.merge(custom_headers || {}),
1156
+ base_url: request_url
1157
+ }
1158
+ promise = self.make_request_async(:post, path_template, options)
1159
+
1160
+ promise = promise.then do |result|
1161
+ http_response = result.response
1162
+ status_code = http_response.status
1163
+ response_content = http_response.body
1164
+ unless status_code == 202
1165
+ error_model = JSON.load(response_content)
1166
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1167
+ end
1168
+
1169
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1170
+
1171
+ result
1172
+ end
1173
+
1174
+ promise.execute
1175
+ end
1176
+
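Because recognize_text deliberately returns no body, callers need the raw response headers to find the operation URL. A sketch using the _with_http_info variant, reusing the `client` from the earlier sketch; the header accessor on the underlying response object is an assumption based on ms_rest conventions.

    op = client.recognize_text_with_http_info('https://example.com/handwriting.jpg', 'Handwritten')
    # 'Operation-Location' carries the URL of the pending text operation.
    operation_location = op.response.headers['Operation-Location']
    operation_id = operation_location.split('/').last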
1177
+ #
1178
+ # This interface is used for getting text operation result. The URL to this
1179
+ # interface should be retrieved from 'Operation-Location' field returned from
1180
+ # Recognize Text interface.
1181
+ #
1182
+ # @param operation_id [String] Id of the text operation returned in the
1183
+ # response of the 'Recognize Text'
1184
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1185
+ # will be added to the HTTP request.
1186
+ #
1187
+ # @return [TextOperationResult] operation results.
1188
+ #
1189
+ def get_text_operation_result(operation_id, custom_headers:nil)
1190
+ response = get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
1191
+ response.body unless response.nil?
1192
+ end
1193
+
1194
+ #
1195
+ # This interface is used for getting text operation result. The URL to this
1196
+ # interface should be retrieved from 'Operation-Location' field returned from
1197
+ # Recognize Text interface.
1198
+ #
1199
+ # @param operation_id [String] Id of the text operation returned in the
1200
+ # response of the 'Recognize Text'
1201
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1202
+ # will be added to the HTTP request.
1203
+ #
1204
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1205
+ #
1206
+ def get_text_operation_result_with_http_info(operation_id, custom_headers:nil)
1207
+ get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
1208
+ end
1209
+
1210
+ #
1211
+ # This interface is used for getting text operation result. The URL to this
1212
+ # interface should be retrieved from 'Operation-Location' field returned from
1213
+ # Recognize Text interface.
1214
+ #
1215
+ # @param operation_id [String] Id of the text operation returned in the
1216
+ # response of the 'Recognize Text'
1217
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1218
+ # to the HTTP request.
1219
+ #
1220
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1221
+ #
1222
+ def get_text_operation_result_async(operation_id, custom_headers:nil)
1223
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1224
+ fail ArgumentError, 'operation_id is nil' if operation_id.nil?
1225
+
1226
+
1227
+ request_headers = {}
1228
+ request_headers['Content-Type'] = 'application/json; charset=utf-8'
1229
+
1230
+ # Set Headers
1231
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1232
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1233
+ path_template = 'textOperations/{operationId}'
1234
+
1235
+ request_url = @base_url || self.base_url
1236
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1237
+
1238
+ options = {
1239
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1240
+ path_params: {'operationId' => operation_id},
1241
+ headers: request_headers.merge(custom_headers || {}),
1242
+ base_url: request_url
1243
+ }
1244
+ promise = self.make_request_async(:get, path_template, options)
1245
+
1246
+ promise = promise.then do |result|
1247
+ http_response = result.response
1248
+ status_code = http_response.status
1249
+ response_content = http_response.body
1250
+ unless status_code == 200
1251
+ error_model = JSON.load(response_content)
1252
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1253
+ end
1254
+
1255
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1256
+ # Deserialize Response
1257
+ if status_code == 200
1258
+ begin
1259
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1260
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::TextOperationResult.mapper()
1261
+ result.body = self.deserialize(result_mapper, parsed_response)
1262
+ rescue Exception => e
1263
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1264
+ end
1265
+ end
1266
+
1267
+ result
1268
+ end
1269
+
1270
+ promise.execute
1271
+ end
1272
+
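The text operation completes asynchronously on the service side, so the `operation_id` extracted in the previous sketch is polled until a terminal status is reached. This sketch assumes TextOperationResult exposes status, recognition_result, lines and text attributes and uses 'NotStarted'/'Running'/'Succeeded' status strings; both are assumptions based on the 2.0 models, not guarantees.

    result = nil
    10.times do
      result = client.get_text_operation_result(operation_id)
      break unless ['NotStarted', 'Running'].include?(result.status)
      sleep 1  # simple fixed backoff for the sketch
    end
    if result && result.status == 'Succeeded'
      result.recognition_result.lines.each { |line| puts line.text }
    end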
1273
+ #
1274
+ # This operation extracts a rich set of visual features based on the image
1275
+ # content.
1276
+ #
1277
+ # @param image An image stream.
1278
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
1279
+ # visual feature types to return. Multiple values should be comma-separated.
1280
+ # Valid visual feature types include: Categories - categorizes image content
1281
+ # according to a taxonomy defined in documentation. Tags - tags the image with
1282
+ # a detailed list of words related to the image content. Description -
1283
+ # describes the image content with a complete English sentence. Faces - detects
1284
+ # if faces are present. If present, generate coordinates, gender and age.
1285
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
1286
+ # the accent color, dominant color, and whether an image is black & white. Adult -
1287
+ # detects if the image is pornographic in nature (depicts nudity or a sex act).
1288
+ # Sexually suggestive content is also detected.
1289
+ # @param details [Enum] A string indicating which domain-specific details to
1290
+ # return. Multiple values should be comma-separated. Valid details
1291
+ # include: Celebrities - identifies celebrities if detected in the image.
1292
+ # Possible values include: 'Celebrities', 'Landmarks'
1293
+ # @param language [Enum] The desired language for output generation. If this
1294
+ # parameter is not specified, the default value is "en". Supported
1295
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1296
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1297
+ # 'ja', 'pt', 'zh'
1298
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1299
+ # will be added to the HTTP request.
1300
+ #
1301
+ # @return [ImageAnalysis] operation results.
1302
+ #
1303
+ def analyze_image_in_stream(image, visual_features:nil, details:nil, language:nil, custom_headers:nil)
1304
+ response = analyze_image_in_stream_async(image, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
1305
+ response.body unless response.nil?
1306
+ end
1307
+
1308
+ #
1309
+ # This operation extracts a rich set of visual features based on the image
1310
+ # content.
1311
+ #
1312
+ # @param image An image stream.
1313
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
1314
+ # visual feature types to return. Multiple values should be comma-separated.
1315
+ # Valid visual feature types include: Categories - categorizes image content
1316
+ # according to a taxonomy defined in documentation. Tags - tags the image with
1317
+ # a detailed list of words related to the image content. Description -
1318
+ # describes the image content with a complete English sentence. Faces - detects
1319
+ # if faces are present. If present, generate coordinates, gender and age.
1320
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
1321
+ # the accent color, dominant color, and whether an image is black & white. Adult -
1322
+ # detects if the image is pornographic in nature (depicts nudity or a sex act).
1323
+ # Sexually suggestive content is also detected.
1324
+ # @param details [Enum] A string indicating which domain-specific details to
1325
+ # return. Multiple values should be comma-separated. Valid details
1326
+ # include: Celebrities - identifies celebrities if detected in the image.
1327
+ # Possible values include: 'Celebrities', 'Landmarks'
1328
+ # @param language [Enum] The desired language for output generation. If this
1329
+ # parameter is not specified, the default value is "en". Supported
1330
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1331
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1332
+ # 'ja', 'pt', 'zh'
1333
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1334
+ # will be added to the HTTP request.
1335
+ #
1336
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1337
+ #
1338
+ def analyze_image_in_stream_with_http_info(image, visual_features:nil, details:nil, language:nil, custom_headers:nil)
1339
+ analyze_image_in_stream_async(image, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
1340
+ end
1341
+
1342
+ #
1343
+ # This operation extracts a rich set of visual features based on the image
1344
+ # content.
1345
+ #
1346
+ # @param image An image stream.
1347
+ # @param visual_features [Array<VisualFeatureTypes>] A string indicating what
1348
+ # visual feature types to return. Multiple values should be comma-separated.
1349
+ # Valid visual feature types include: Categories - categorizes image content
1350
+ # according to a taxonomy defined in documentation. Tags - tags the image with
1351
+ # a detailed list of words related to the image content. Description -
1352
+ # describes the image content with a complete English sentence. Faces - detects
1353
+ # if faces are present. If present, generate coordinates, gender and age.
1354
+ # ImageType - detects if image is clipart or a line drawing. Color - determines
1355
+ # the accent color, dominant color, and whether an image is black & white. Adult -
1356
+ # detects if the image is pornographic in nature (depicts nudity or a sex act).
1357
+ # Sexually suggestive content is also detected.
1358
+ # @param details [Enum] A string indicating which domain-specific details to
1359
+ # return. Multiple values should be comma-separated. Valid details
1360
+ # include: Celebrities - identifies celebrities if detected in the image.
1361
+ # Possible values include: 'Celebrities', 'Landmarks'
1362
+ # @param language [Enum] The desired language for output generation. If this
1363
+ # parameter is not specified, the default value is "en". Supported
1364
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1365
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1366
+ # 'ja', 'pt', 'zh'
1367
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1368
+ # to the HTTP request.
1369
+ #
1370
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1371
+ #
1372
+ def analyze_image_in_stream_async(image, visual_features:nil, details:nil, language:nil, custom_headers:nil)
1373
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1374
+ fail ArgumentError, 'image is nil' if image.nil?
1375
+
1376
+
1377
+ request_headers = {}
1378
+ request_headers['Content-Type'] = 'application/octet-stream'
1379
+
1380
+ # Set Headers
1381
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1382
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1383
+
1384
+ # Serialize Request
1385
+ request_mapper = {
1386
+ client_side_validation: true,
1387
+ required: true,
1388
+ serialized_name: 'Image',
1389
+ type: {
1390
+ name: 'Stream'
1391
+ }
1392
+ }
1393
+ request_content = self.serialize(request_mapper, image)
1394
+
1395
+ path_template = 'analyze'
1396
+
1397
+ request_url = @base_url || self.base_url
1398
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1399
+
1400
+ options = {
1401
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1402
+ query_params: {'visualFeatures' => visual_features.nil? ? nil : visual_features.join(','),'details' => details,'language' => language},
1403
+ body: request_content,
1404
+ headers: request_headers.merge(custom_headers || {}),
1405
+ base_url: request_url
1406
+ }
1407
+ promise = self.make_request_async(:post, path_template, options)
1408
+
1409
+ promise = promise.then do |result|
1410
+ http_response = result.response
1411
+ status_code = http_response.status
1412
+ response_content = http_response.body
1413
+ unless status_code == 200
1414
+ error_model = JSON.load(response_content)
1415
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1416
+ end
1417
+
1418
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1419
+ # Deserialize Response
1420
+ if status_code == 200
1421
+ begin
1422
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1423
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageAnalysis.mapper()
1424
+ result.body = self.deserialize(result_mapper, parsed_response)
1425
+ rescue Exception => e
1426
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1427
+ end
1428
+ end
1429
+
1430
+ result
1431
+ end
1432
+
1433
+ promise.execute
1434
+ end
1435
+
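The stream-based variant takes any readable IO in place of a URL; the feature and detail names below are passed exactly as the query-string values described in the doc comment. The description/captions attribute chain on ImageAnalysis is an assumption based on the generated models.

    image = File.open('photo.jpg', 'rb')
    analysis = client.analyze_image_in_stream(image,
                                              visual_features: ['Categories', 'Description', 'Color'],
                                              details: 'Celebrities',
                                              language: 'en')
    # Print the highest-ranked caption, if any (assumed model attributes).
    caption = analysis && analysis.description && analysis.description.captions.first
    puts caption.text if caption
    image.close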
1436
+ #
1437
+ # This operation generates a thumbnail image with the user-specified width and
1438
+ # height. By default, the service analyzes the image, identifies the region of
1439
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
1440
+ # Smart cropping helps when you specify an aspect ratio that differs from that
1441
+ # of the input image. A successful response contains the thumbnail image
1442
+ # binary. If the request failed, the response contains an error code and a
1443
+ # message to help determine what went wrong.
1444
+ #
1445
+ # @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
1446
+ # Recommended minimum of 50.
1447
+ # @param height [Integer] Height of the thumbnail. It must be between 1 and
1448
+ # 1024. Recommended minimum of 50.
1449
+ # @param image An image stream.
1450
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
1451
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1452
+ # will be added to the HTTP request.
1453
+ #
1454
+ # @return [NOT_IMPLEMENTED] operation results.
1455
+ #
1456
+ def generate_thumbnail_in_stream(width, height, image, smart_cropping:false, custom_headers:nil)
1457
+ response = generate_thumbnail_in_stream_async(width, height, image, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
1458
+ response.body unless response.nil?
1459
+ end
1460
+
1461
+ #
1462
+ # This operation generates a thumbnail image with the user-specified width and
1463
+ # height. By default, the service analyzes the image, identifies the region of
1464
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
1465
+ # Smart cropping helps when you specify an aspect ratio that differs from that
1466
+ # of the input image. A successful response contains the thumbnail image
1467
+ # binary. If the request failed, the response contains an error code and a
1468
+ # message to help determine what went wrong.
1469
+ #
1470
+ # @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
1471
+ # Recommended minimum of 50.
1472
+ # @param height [Integer] Height of the thumbnail. It must be between 1 and
1473
+ # 1024. Recommended minimum of 50.
1474
+ # @param image An image stream.
1475
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
1476
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1477
+ # will be added to the HTTP request.
1478
+ #
1479
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1480
+ #
1481
+ def generate_thumbnail_in_stream_with_http_info(width, height, image, smart_cropping:false, custom_headers:nil)
1482
+ generate_thumbnail_in_stream_async(width, height, image, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
1483
+ end
1484
+
1485
+ #
1486
+ # This operation generates a thumbnail image with the user-specified width and
1487
+ # height. By default, the service analyzes the image, identifies the region of
1488
+ # interest (ROI), and generates smart cropping coordinates based on the ROI.
1489
+ # Smart cropping helps when you specify an aspect ratio that differs from that
1490
+ # of the input image. A successful response contains the thumbnail image
1491
+ # binary. If the request failed, the response contains an error code and a
1492
+ # message to help determine what went wrong.
1493
+ #
1494
+ # @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
1495
+ # Recommended minimum of 50.
1496
+ # @param height [Integer] Height of the thumbnail. It must be between 1 and
1497
+ # 1024. Recommended minimum of 50.
1498
+ # @param image An image stream.
1499
+ # @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
1500
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1501
+ # to the HTTP request.
1502
+ #
1503
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1504
+ #
1505
+ def generate_thumbnail_in_stream_async(width, height, image, smart_cropping:false, custom_headers:nil)
1506
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1507
+ fail ArgumentError, 'width is nil' if width.nil?
1508
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !width.nil? && width > 1023
1509
+ fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMinimum': '1'" if !width.nil? && width < 1
1510
+ fail ArgumentError, 'height is nil' if height.nil?
1511
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !height.nil? && height > 1023
1512
+ fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMinimum': '1'" if !height.nil? && height < 1
1513
+ fail ArgumentError, 'image is nil' if image.nil?
1514
+
1515
+
1516
+ request_headers = {}
1517
+ request_headers['Content-Type'] = 'application/octet-stream'
1518
+
1519
+ # Set Headers
1520
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1521
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1522
+
1523
+ # Serialize Request
1524
+ request_mapper = {
1525
+ client_side_validation: true,
1526
+ required: true,
1527
+ serialized_name: 'Image',
1528
+ type: {
1529
+ name: 'Stream'
1530
+ }
1531
+ }
1532
+ request_content = self.serialize(request_mapper, image)
1533
+
1534
+ path_template = 'generateThumbnail'
1535
+
1536
+ request_url = @base_url || self.base_url
1537
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1538
+
1539
+ options = {
1540
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1541
+ query_params: {'width' => width,'height' => height,'smartCropping' => smart_cropping},
1542
+ body: request_content,
1543
+ headers: request_headers.merge(custom_headers || {}),
1544
+ base_url: request_url
1545
+ }
1546
+ promise = self.make_request_async(:post, path_template, options)
1547
+
1548
+ promise = promise.then do |result|
1549
+ http_response = result.response
1550
+ status_code = http_response.status
1551
+ response_content = http_response.body
1552
+ unless status_code == 200
1553
+ error_model = JSON.load(response_content)
1554
+ fail MsRestAzure::AzureOperationError.new(result.request, http_response, error_model)
1555
+ end
1556
+
1557
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1558
+ # Deserialize Response
1559
+ if status_code == 200
1560
+ begin
1561
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1562
+ result_mapper = {
1563
+ client_side_validation: true,
1564
+ required: false,
1565
+ serialized_name: 'parsed_response',
1566
+ type: {
1567
+ name: 'Stream'
1568
+ }
1569
+ }
1570
+ result.body = self.deserialize(result_mapper, parsed_response)
1571
+ rescue Exception => e
1572
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1573
+ end
1574
+ end
1575
+
1576
+ result
1577
+ end
1578
+
1579
+ promise.execute
1580
+ end
1581
+
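A sketch for the thumbnail call; treating the deserialized body as the raw thumbnail bytes is an assumption about how the Stream mapper is handled by ms_rest, not something this diff states.

    source = File.open('photo.jpg', 'rb')
    thumbnail = client.generate_thumbnail_in_stream(200, 200, source, smart_cropping: true)
    # Persist the (assumed) raw bytes as-is.
    File.binwrite('thumbnail.jpg', thumbnail) unless thumbnail.nil?
    source.close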
1582
+ #
1583
+ # Optical Character Recognition (OCR) detects printed text in an image and
1584
+ # extracts the recognized characters into a machine-usable character stream.
1585
+ # Upon success, the OCR results will be returned. Upon failure, the error code
1586
+ # together with an error message will be returned. The error code can be one of
1587
+ # InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
1588
+ # NotSupportedLanguage, or InternalServerError.
1589
+ #
1590
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
1591
+ # the image. With detectOrientation=true the OCR service tries to detect the
1592
+ # image orientation and correct it before further processing (e.g. if it's
1593
+ # upside-down).
1594
+ # @param image An image stream.
1595
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
1596
+ # detected in the image. The default value is 'unk'. Possible values include:
1597
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
1598
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
1599
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
1600
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1601
+ # will be added to the HTTP request.
1602
+ #
1603
+ # @return [OcrResult] operation results.
1604
+ #
1605
+ def recognize_printed_text_in_stream(detect_orientation, image, language:nil, custom_headers:nil)
1606
+ response = recognize_printed_text_in_stream_async(detect_orientation, image, language:language, custom_headers:custom_headers).value!
1607
+ response.body unless response.nil?
1608
+ end
1609
+
1610
+ #
1611
+ # Optical Character Recognition (OCR) detects printed text in an image and
1612
+ # extracts the recognized characters into a machine-usable character stream.
1613
+ # Upon success, the OCR results will be returned. Upon failure, the error code
1614
+ # together with an error message will be returned. The error code can be one of
1615
+ # InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
1616
+ # NotSupportedLanguage, or InternalServerError.
1617
+ #
1618
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
1619
+ # the image. With detectOrientation=true the OCR service tries to detect the
1620
+ # image orientation and correct it before further processing (e.g. if it's
1621
+ # upside-down).
1622
+ # @param image An image stream.
1623
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
1624
+ # detected in the image. The default value is 'unk'. Possible values include:
1625
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
1626
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
1627
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
1628
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1629
+ # will be added to the HTTP request.
1630
+ #
1631
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1632
+ #
1633
+ def recognize_printed_text_in_stream_with_http_info(detect_orientation, image, language:nil, custom_headers:nil)
1634
+ recognize_printed_text_in_stream_async(detect_orientation, image, language:language, custom_headers:custom_headers).value!
1635
+ end
1636
+
1637
+ #
1638
+ # Optical Character Recognition (OCR) detects printed text in an image and
1639
+ # extracts the recognized characters into a machine-usable character stream.
1640
+ # Upon success, the OCR results will be returned. Upon failure, the error code
1641
+ # together with an error message will be returned. The error code can be one of
1642
+ # InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
1643
+ # NotSupportedLanguage, or InternalServerError.
1644
+ #
1645
+ # @param detect_orientation [Boolean] Whether to detect the text orientation in
1646
+ # the image. With detectOrientation=true the OCR service tries to detect the
1647
+ # image orientation and correct it before further processing (e.g. if it's
1648
+ # upside-down).
1649
+ # @param image An image stream.
1650
+ # @param language [OcrLanguages] The BCP-47 language code of the text to be
1651
+ # detected in the image. The default value is 'unk'. Possible values include:
1652
+ # 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
1653
+ # 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
1654
+ # 'sr-Cyrl', 'sr-Latn', 'sk'
1655
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1656
+ # to the HTTP request.
1657
+ #
1658
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1659
+ #
1660
+ def recognize_printed_text_in_stream_async(detect_orientation, image, language:nil, custom_headers:nil)
1661
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1662
+ fail ArgumentError, 'detect_orientation is nil' if detect_orientation.nil?
1663
+ fail ArgumentError, 'image is nil' if image.nil?
1664
+
1665
+
1666
+ request_headers = {}
1667
+ request_headers['Content-Type'] = 'application/octet-stream'
1668
+
1669
+ # Set Headers
1670
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1671
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1672
+
1673
+ # Serialize Request
1674
+ request_mapper = {
1675
+ client_side_validation: true,
1676
+ required: true,
1677
+ serialized_name: 'Image',
1678
+ type: {
1679
+ name: 'Stream'
1680
+ }
1681
+ }
1682
+ request_content = self.serialize(request_mapper, image)
1683
+
1684
+ path_template = 'ocr'
1685
+
1686
+ request_url = @base_url || self.base_url
1687
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1688
+
1689
+ options = {
1690
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1691
+ query_params: {'language' => language,'detectOrientation' => detect_orientation},
1692
+ body: request_content,
1693
+ headers: request_headers.merge(custom_headers || {}),
1694
+ base_url: request_url
1695
+ }
1696
+ promise = self.make_request_async(:post, path_template, options)
1697
+
1698
+ promise = promise.then do |result|
1699
+ http_response = result.response
1700
+ status_code = http_response.status
1701
+ response_content = http_response.body
1702
+ unless status_code == 200
1703
+ error_model = JSON.load(response_content)
1704
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1705
+ end
1706
+
1707
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1708
+ # Deserialize Response
1709
+ if status_code == 200
1710
+ begin
1711
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1712
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::OcrResult.mapper()
1713
+ result.body = self.deserialize(result_mapper, parsed_response)
1714
+ rescue Exception => e
1715
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1716
+ end
1717
+ end
1718
+
1719
+ result
1720
+ end
1721
+
1722
+ promise.execute
1723
+ end
1724
+
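OCR on a local file, reusing the same `client`; the regions/lines/words/text attribute names are assumptions based on the OcrResult model shipped with this gem.

    image = File.open('receipt.png', 'rb')
    ocr = client.recognize_printed_text_in_stream(true, image, language: 'en')
    if ocr
      ocr.regions.each do |region|
        # Reassemble each detected line from its words (assumed attributes).
        region.lines.each { |line| puts line.words.map(&:text).join(' ') }
      end
    end
    image.close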
1725
+ #
1726
+ # This operation generates a description of an image in human readable language
1727
+ # with complete sentences. The description is based on a collection of content
1728
+ # tags, which are also returned by the operation. More than one description can
1729
+ # be generated for each image. Descriptions are ordered by their confidence
1730
+ # score. All descriptions are in English. Two input methods are supported --
1731
+ # (1) Uploading an image or (2) specifying an image URL. A successful response
1732
+ # will be returned in JSON. If the request failed, the response will contain
1733
+ # an error code and a message to help understand what went wrong.
1734
+ #
1735
+ # @param image An image stream.
1736
+ # @param max_candidates [String] Maximum number of candidate descriptions to be
1737
+ # returned. The default is 1.
1738
+ # @param language [Enum] The desired language for output generation. If this
1739
+ # parameter is not specified, the default value is "en". Supported
1740
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1741
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1742
+ # 'ja', 'pt', 'zh'
1743
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1744
+ # will be added to the HTTP request.
1745
+ #
1746
+ # @return [ImageDescription] operation results.
1747
+ #
1748
+ def describe_image_in_stream(image, max_candidates:'1', language:nil, custom_headers:nil)
1749
+ response = describe_image_in_stream_async(image, max_candidates:max_candidates, language:language, custom_headers:custom_headers).value!
1750
+ response.body unless response.nil?
1751
+ end
1752
+
1753
+ #
1754
+ # This operation generates a description of an image in human readable language
1755
+ # with complete sentences. The description is based on a collection of content
1756
+ # tags, which are also returned by the operation. More than one description can
1757
+ # be generated for each image. Descriptions are ordered by their confidence
1758
+ # score. All descriptions are in English. Two input methods are supported --
1759
+ # (1) Uploading an image or (2) specifying an image URL. A successful response
1760
+ # will be returned in JSON. If the request failed, the response will contain
1761
+ # an error code and a message to help understand what went wrong.
1762
+ #
1763
+ # @param image An image stream.
1764
+ # @param max_candidates [String] Maximum number of candidate descriptions to be
1765
+ # returned. The default is 1.
1766
+ # @param language [Enum] The desired language for output generation. If this
1767
+ # parameter is not specified, the default value is "en". Supported
1768
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1769
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1770
+ # 'ja', 'pt', 'zh'
1771
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1772
+ # will be added to the HTTP request.
1773
+ #
1774
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1775
+ #
1776
+ def describe_image_in_stream_with_http_info(image, max_candidates:'1', language:nil, custom_headers:nil)
1777
+ describe_image_in_stream_async(image, max_candidates:max_candidates, language:language, custom_headers:custom_headers).value!
1778
+ end
1779
+
1780
+ #
1781
+ # This operation generates a description of an image in human readable language
1782
+ # with complete sentences. The description is based on a collection of content
1783
+ # tags, which are also returned by the operation. More than one description can
1784
+ # be generated for each image. Descriptions are ordered by their confidence
1785
+ # score. All descriptions are in English. Two input methods are supported --
1786
+ # (1) Uploading an image or (2) specifying an image URL.A successful response
1787
+ # will be returned in JSON. If the request failed, the response will contain
1788
+ # an error code and a message to help understand what went wrong.
1789
+ #
1790
+ # @param image An image stream.
1791
+ # @param max_candidates [String] Maximum number of candidate descriptions to be
1792
+ # returned. The default is 1.
1793
+ # @param language [Enum] The desired language for output generation. If this
1794
+ # parameter is not specified, the default value is "en". Supported
1795
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1796
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1797
+ # 'ja', 'pt', 'zh'
1798
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1799
+ # to the HTTP request.
1800
+ #
1801
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1802
+ #
1803
+ def describe_image_in_stream_async(image, max_candidates:'1', language:nil, custom_headers:nil)
1804
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1805
+ fail ArgumentError, 'image is nil' if image.nil?
1806
+
1807
+
1808
+ request_headers = {}
1809
+ request_headers['Content-Type'] = 'application/octet-stream'
1810
+
1811
+ # Set Headers
1812
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1813
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1814
+
1815
+ # Serialize Request
1816
+ request_mapper = {
1817
+ client_side_validation: true,
1818
+ required: true,
1819
+ serialized_name: 'Image',
1820
+ type: {
1821
+ name: 'Stream'
1822
+ }
1823
+ }
1824
+ request_content = self.serialize(request_mapper, image)
1825
+
1826
+ path_template = 'describe'
1827
+
1828
+ request_url = @base_url || self.base_url
1829
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1830
+
1831
+ options = {
1832
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1833
+ query_params: {'maxCandidates' => max_candidates,'language' => language},
1834
+ body: request_content,
1835
+ headers: request_headers.merge(custom_headers || {}),
1836
+ base_url: request_url
1837
+ }
1838
+ promise = self.make_request_async(:post, path_template, options)
1839
+
1840
+ promise = promise.then do |result|
1841
+ http_response = result.response
1842
+ status_code = http_response.status
1843
+ response_content = http_response.body
1844
+ unless status_code == 200
1845
+ error_model = JSON.load(response_content)
1846
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1847
+ end
1848
+
1849
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1850
+ # Deserialize Response
1851
+ if status_code == 200
1852
+ begin
1853
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1854
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::ImageDescription.mapper()
1855
+ result.body = self.deserialize(result_mapper, parsed_response)
1856
+ rescue Exception => e
1857
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1858
+ end
1859
+ end
1860
+
1861
+ result
1862
+ end
1863
+
1864
+ promise.execute
1865
+ end
1866
+
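A sketch for the stream-based describe call; note that max_candidates is passed as a string, matching the generated default. The captions/text/confidence attributes on ImageDescription are assumptions based on the 2.0 models.

    image = File.open('photo.jpg', 'rb')
    description = client.describe_image_in_stream(image, max_candidates: '3', language: 'en')
    if description
      description.captions.each { |c| puts format('%s (%.2f)', c.text, c.confidence) }
    end
    image.close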
1867
+ #
1868
+ # This operation generates a list of words, or tags, that are relevant to the
1869
+ # content of the supplied image. The Computer Vision API can return tags based
1870
+ # on objects, living beings, scenery or actions found in images. Unlike
1871
+ # categories, tags are not organized according to a hierarchical classification
1872
+ # system, but correspond to image content. Tags may contain hints to avoid
1873
+ # ambiguity or provide context, for example the tag 'cello' may be accompanied
1874
+ # by the hint 'musical instrument'. All tags are in English.
1875
+ #
1876
+ # @param image An image stream.
1877
+ # @param language [Enum] The desired language for output generation. If this
1878
+ # parameter is not specified, the default value is "en". Supported
1879
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1880
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1881
+ # 'ja', 'pt', 'zh'
1882
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1883
+ # will be added to the HTTP request.
1884
+ #
1885
+ # @return [TagResult] operation results.
1886
+ #
1887
+ def tag_image_in_stream(image, language:nil, custom_headers:nil)
1888
+ response = tag_image_in_stream_async(image, language:language, custom_headers:custom_headers).value!
1889
+ response.body unless response.nil?
1890
+ end
1891
+
1892
+ #
1893
+ # This operation generates a list of words, or tags, that are relevant to the
1894
+ # content of the supplied image. The Computer Vision API can return tags based
1895
+ # on objects, living beings, scenery or actions found in images. Unlike
1896
+ # categories, tags are not organized according to a hierarchical classification
1897
+ # system, but correspond to image content. Tags may contain hints to avoid
1898
+ # ambiguity or provide context, for example the tag 'cello' may be accompanied
1899
+ # by the hint 'musical instrument'. All tags are in English.
1900
+ #
1901
+ # @param image An image stream.
1902
+ # @param language [Enum] The desired language for output generation. If this
1903
+ # parameter is not specified, the default value is "en". Supported
1904
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1905
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1906
+ # 'ja', 'pt', 'zh'
1907
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
1908
+ # will be added to the HTTP request.
1909
+ #
1910
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
1911
+ #
1912
+ def tag_image_in_stream_with_http_info(image, language:nil, custom_headers:nil)
1913
+ tag_image_in_stream_async(image, language:language, custom_headers:custom_headers).value!
1914
+ end
1915
+
1916
+ #
1917
+ # This operation generates a list of words, or tags, that are relevant to the
1918
+ # content of the supplied image. The Computer Vision API can return tags based
1919
+ # on objects, living beings, scenery or actions found in images. Unlike
1920
+ # categories, tags are not organized according to a hierarchical classification
1921
+ # system, but correspond to image content. Tags may contain hints to avoid
1922
+ # ambiguity or provide context, for example the tag 'cello' may be accompanied
1923
+ # by the hint 'musical instrument'. All tags are in English.
1924
+ #
1925
+ # @param image An image stream.
1926
+ # @param language [Enum] The desired language for output generation. If this
1927
+ # parameter is not specified, the default value is "en". Supported
1928
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
1929
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
1930
+ # 'ja', 'pt', 'zh'
1931
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
1932
+ # to the HTTP request.
1933
+ #
1934
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
1935
+ #
1936
+ def tag_image_in_stream_async(image, language:nil, custom_headers:nil)
1937
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
1938
+ fail ArgumentError, 'image is nil' if image.nil?
1939
+
1940
+
1941
+ request_headers = {}
1942
+ request_headers['Content-Type'] = 'application/octet-stream'
1943
+
1944
+ # Set Headers
1945
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
1946
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
1947
+
1948
+ # Serialize Request
1949
+ request_mapper = {
1950
+ client_side_validation: true,
1951
+ required: true,
1952
+ serialized_name: 'Image',
1953
+ type: {
1954
+ name: 'Stream'
1955
+ }
1956
+ }
1957
+ request_content = self.serialize(request_mapper, image)
1958
+
1959
+ path_template = 'tag'
1960
+
1961
+ request_url = @base_url || self.base_url
1962
+ request_url = request_url.gsub('{Endpoint}', endpoint)
1963
+
1964
+ options = {
1965
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
1966
+ query_params: {'language' => language},
1967
+ body: request_content,
1968
+ headers: request_headers.merge(custom_headers || {}),
1969
+ base_url: request_url
1970
+ }
1971
+ promise = self.make_request_async(:post, path_template, options)
1972
+
1973
+ promise = promise.then do |result|
1974
+ http_response = result.response
1975
+ status_code = http_response.status
1976
+ response_content = http_response.body
1977
+ unless status_code == 200
1978
+ error_model = JSON.load(response_content)
1979
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
1980
+ end
1981
+
1982
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
1983
+ # Deserialize Response
1984
+ if status_code == 200
1985
+ begin
1986
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
1987
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::TagResult.mapper()
1988
+ result.body = self.deserialize(result_mapper, parsed_response)
1989
+ rescue Exception => e
1990
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
1991
+ end
1992
+ end
1993
+
1994
+ result
1995
+ end
1996
+
1997
+ promise.execute
1998
+ end
1999
+
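Tagging a local image with the same `client`; the tags/name/confidence attributes on TagResult are assumptions based on the generated models.

    image = File.open('photo.jpg', 'rb')
    tags = client.tag_image_in_stream(image, language: 'en')
    tags.tags.each { |t| puts "#{t.name}: #{t.confidence}" } if tags
    image.close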
2000
+ #
2001
+ # This operation recognizes content within an image by applying a
2002
+ # domain-specific model. The list of domain-specific models that are supported
2003
+ # by the Computer Vision API can be retrieved using the /models GET request.
2004
+ # Currently, the API only provides a single domain-specific model: celebrities.
2005
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2006
+ # an image URL. A successful response will be returned in JSON. If the request
2007
+ # failed, the response will contain an error code and a message to help
2008
+ # understand what went wrong.
2009
+ #
2010
+ # @param model [String] The domain-specific content to recognize.
2011
+ # @param image An image stream.
2012
+ # @param language [Enum] The desired language for output generation. If this
2013
+ # parameter is not specified, the default value is "en". Supported
2014
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
2015
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2016
+ # 'ja', 'pt', 'zh'
2017
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2018
+ # will be added to the HTTP request.
2019
+ #
2020
+ # @return [DomainModelResults] operation results.
2021
+ #
2022
+ def analyze_image_by_domain_in_stream(model, image, language:nil, custom_headers:nil)
2023
+ response = analyze_image_by_domain_in_stream_async(model, image, language:language, custom_headers:custom_headers).value!
2024
+ response.body unless response.nil?
2025
+ end
2026
+
2027
+ #
2028
+ # This operation recognizes content within an image by applying a
2029
+ # domain-specific model. The list of domain-specific models that are supported
2030
+ # by the Computer Vision API can be retrieved using the /models GET request.
2031
+ # Currently, the API only provides a single domain-specific model: celebrities.
2032
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2033
+ # an image URL. A successful response will be returned in JSON. If the request
2034
+ # failed, the response will contain an error code and a message to help
2035
+ # understand what went wrong.
2036
+ #
2037
+ # @param model [String] The domain-specific content to recognize.
2038
+ # @param image An image stream.
2039
+ # @param language [Enum] The desired language for output generation. If this
2040
+ # parameter is not specified, the default value is "en". Supported
2041
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
2042
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2043
+ # 'ja', 'pt', 'zh'
2044
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2045
+ # will be added to the HTTP request.
2046
+ #
2047
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2048
+ #
2049
+ def analyze_image_by_domain_in_stream_with_http_info(model, image, language:nil, custom_headers:nil)
2050
+ analyze_image_by_domain_in_stream_async(model, image, language:language, custom_headers:custom_headers).value!
2051
+ end
2052
+
2053
+ #
2054
+ # This operation recognizes content within an image by applying a
2055
+ # domain-specific model. The list of domain-specific models that are supported
2056
+ # by the Computer Vision API can be retrieved using the /models GET request.
2057
+ # Currently, the API only provides a single domain-specific model: celebrities.
2058
+ # Two input methods are supported -- (1) Uploading an image or (2) specifying
2059
+ # an image URL. A successful response will be returned in JSON. If the request
2060
+ # failed, the response will contain an error code and a message to help
2061
+ # understand what went wrong.
2062
+ #
2063
+ # @param model [String] The domain-specific content to recognize.
2064
+ # @param image An image stream.
2065
+ # @param language [Enum] The desired language for output generation. If this
2066
+ # parameter is not specified, the default value is "en". Supported
2067
+ # languages: en - English (default), es - Spanish, ja - Japanese, pt -
2068
+ # Portuguese, zh - Simplified Chinese. Possible values include: 'en', 'es',
2069
+ # 'ja', 'pt', 'zh'
2070
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2071
+ # to the HTTP request.
2072
+ #
2073
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2074
+ #
2075
+ def analyze_image_by_domain_in_stream_async(model, image, language:nil, custom_headers:nil)
2076
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2077
+ fail ArgumentError, 'model is nil' if model.nil?
2078
+ fail ArgumentError, 'image is nil' if image.nil?
2079
+
2080
+
2081
+ request_headers = {}
2082
+ request_headers['Content-Type'] = 'application/octet-stream'
2083
+
2084
+ # Set Headers
2085
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2086
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2087
+
2088
+ # Serialize Request
2089
+ request_mapper = {
2090
+ client_side_validation: true,
2091
+ required: true,
2092
+ serialized_name: 'Image',
2093
+ type: {
2094
+ name: 'Stream'
2095
+ }
2096
+ }
2097
+ request_content = self.serialize(request_mapper, image)
2098
+
2099
+ path_template = 'models/{model}/analyze'
2100
+
2101
+ request_url = @base_url || self.base_url
2102
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2103
+
2104
+ options = {
2105
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2106
+ path_params: {'model' => model},
2107
+ query_params: {'language' => language},
2108
+ body: request_content,
2109
+ headers: request_headers.merge(custom_headers || {}),
2110
+ base_url: request_url
2111
+ }
2112
+ promise = self.make_request_async(:post, path_template, options)
2113
+
2114
+ promise = promise.then do |result|
2115
+ http_response = result.response
2116
+ status_code = http_response.status
2117
+ response_content = http_response.body
2118
+ unless status_code == 200
2119
+ error_model = JSON.load(response_content)
2120
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2121
+ end
2122
+
2123
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2124
+ # Deserialize Response
2125
+ if status_code == 200
2126
+ begin
2127
+ parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
2128
+ result_mapper = Azure::CognitiveServices::ComputerVision::V2_0::Models::DomainModelResults.mapper()
2129
+ result.body = self.deserialize(result_mapper, parsed_response)
2130
+ rescue Exception => e
2131
+ fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
2132
+ end
2133
+ end
2134
+
2135
+ result
2136
+ end
2137
+
2138
+ promise.execute
2139
+ end
2140
+
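The stream-based domain analysis mirrors the URL-based sketch shown earlier; only the second argument changes from a URL to an IO object.

    image = File.open('group_photo.jpg', 'rb')
    domain = client.analyze_image_by_domain_in_stream('celebrities', image, language: 'en')
    puts domain.inspect unless domain.nil?
    image.close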
2141
+ #
2142
+ # Recognize Text operation. When you use the Recognize Text interface, the
2143
+ # response contains a field called 'Operation-Location'. The
2144
+ # 'Operation-Location' field contains the URL that you must use for your Get
2145
+ # Recognize Text Operation Result operation.
2146
+ #
2147
+ # @param image An image stream.
2148
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
2149
+ # include: 'Handwritten', 'Printed'
2150
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2151
+ # will be added to the HTTP request.
2152
+ #
2153
+ #
2154
+ def recognize_text_in_stream(image, mode, custom_headers:nil)
2155
+ response = recognize_text_in_stream_async(image, mode, custom_headers:custom_headers).value!
2156
+ nil
2157
+ end
2158
+
2159
+ #
2160
+ # Recognize Text operation. When you use the Recognize Text interface, the
2161
+ # response contains a field called 'Operation-Location'. The
2162
+ # 'Operation-Location' field contains the URL that you must use for your Get
2163
+ # Recognize Text Operation Result operation.
2164
+ #
2165
+ # @param image An image stream.
2166
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
2167
+ # include: 'Handwritten', 'Printed'
2168
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that
2169
+ # will be added to the HTTP request.
2170
+ #
2171
+ # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
2172
+ #
2173
+ def recognize_text_in_stream_with_http_info(image, mode, custom_headers:nil)
2174
+ recognize_text_in_stream_async(image, mode, custom_headers:custom_headers).value!
2175
+ end
2176
+
2177
+ #
2178
+ # Recognize Text operation. When you use the Recognize Text interface, the
2179
+ # response contains a field called 'Operation-Location'. The
2180
+ # 'Operation-Location' field contains the URL that you must use for your Get
2181
+ # Recognize Text Operation Result operation.
2182
+ #
2183
+ # @param image An image stream.
2184
+ # @param mode [TextRecognitionMode] Type of text to recognize. Possible values
2185
+ # include: 'Handwritten', 'Printed'
2186
+ # @param custom_headers [Hash{String => String}] A hash of custom headers that will be added
2187
+ # to the HTTP request.
2188
+ #
2189
+ # @return [Concurrent::Promise] Promise object which holds the HTTP response.
2190
+ #
2191
+ def recognize_text_in_stream_async(image, mode, custom_headers:nil)
2192
+ fail ArgumentError, 'endpoint is nil' if endpoint.nil?
2193
+ fail ArgumentError, 'image is nil' if image.nil?
2194
+ fail ArgumentError, 'mode is nil' if mode.nil?
2195
+
2196
+
2197
+ request_headers = {}
2198
+ request_headers['Content-Type'] = 'application/octet-stream'
2199
+
2200
+ # Set Headers
2201
+ request_headers['x-ms-client-request-id'] = SecureRandom.uuid
2202
+ request_headers['accept-language'] = accept_language unless accept_language.nil?
2203
+
2204
+ # Serialize Request
2205
+ request_mapper = {
2206
+ client_side_validation: true,
2207
+ required: true,
2208
+ serialized_name: 'Image',
2209
+ type: {
2210
+ name: 'Stream'
2211
+ }
2212
+ }
2213
+ request_content = self.serialize(request_mapper, image)
2214
+
2215
+ path_template = 'recognizeText'
2216
+
2217
+ request_url = @base_url || self.base_url
2218
+ request_url = request_url.gsub('{Endpoint}', endpoint)
2219
+
2220
+ options = {
2221
+ middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
2222
+ query_params: {'mode' => mode},
2223
+ body: request_content,
2224
+ headers: request_headers.merge(custom_headers || {}),
2225
+ base_url: request_url
2226
+ }
2227
+ promise = self.make_request_async(:post, path_template, options)
2228
+
2229
+ promise = promise.then do |result|
2230
+ http_response = result.response
2231
+ status_code = http_response.status
2232
+ response_content = http_response.body
2233
+ unless status_code == 202
2234
+ error_model = JSON.load(response_content)
2235
+ fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
2236
+ end
2237
+
2238
+ result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
2239
+
2240
+ result
2241
+ end
2242
+
2243
+ promise.execute
2244
+ end
2245
+
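Finally, the stream-based Recognize Text call follows the same Operation-Location pattern as the URL-based one; the header accessor is again an assumption based on ms_rest conventions, and the returned id is polled with get_text_operation_result exactly as in the earlier sketch.

    image = File.open('note.jpg', 'rb')
    op = client.recognize_text_in_stream_with_http_info(image, 'Printed')
    operation_id = op.response.headers['Operation-Location'].split('/').last
    image.close
    # Poll client.get_text_operation_result(operation_id) until the status is terminal.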
2246
+
2247
+ private
2248
+ #
2249
+ # Adds telemetry information.
2250
+ #
2251
+ def add_telemetry
2252
+ sdk_information = 'azure_cognitiveservices_computervision'
2253
+ sdk_information = "#{sdk_information}/0.17.0"
2254
+ add_user_agent_information(sdk_information)
2255
+ end
2256
+ end
2257
+ end