azure_cognitiveservices_computervision 0.16.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +21 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision.rb +63 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/computer_vision_client.rb +2173 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/adult_info.rb +83 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/azure_regions.rb +26 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/category.rb +69 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/category_detail.rb +56 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/celebrities_model.rb +69 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/color_info.rb +98 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/computer_vision_error.rb +75 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/computer_vision_error_codes.rb +27 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/details.rb +16 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/domain_model_results.rb +80 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/domain_models.rb +16 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/face_description.rb +70 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/face_rectangle.rb +79 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_analysis.rb +169 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_caption.rb +57 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_description.rb +99 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_description_details.rb +99 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_metadata.rb +68 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_tag.rb +57 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_type.rb +57 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/image_url.rb +47 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/language1.rb +16 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/line.rb +86 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/list_models_result.rb +56 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/model_description.rb +65 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/ocr_languages.rb +41 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/ocr_line.rb +72 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/ocr_region.rb +72 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/ocr_result.rb +104 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/ocr_word.rb +62 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/recognition_result.rb +56 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/tag_result.rb +79 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/text_operation_result.rb +62 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/text_operation_status_codes.rb +18 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/visual_feature_types.rb +21 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/models/word.rb +66 -0
- data/lib/1.0/generated/azure_cognitiveservices_computervision/module_definition.rb +9 -0
- data/lib/azure_cognitiveservices_computervision.rb +6 -0
- data/lib/module_definition.rb +7 -0
- data/lib/profiles/latest/computervision_latest_profile_client.rb +38 -0
- data/lib/profiles/latest/computervision_module_definition.rb +8 -0
- data/lib/profiles/latest/modules/computervision_profile_module.rb +201 -0
- data/lib/version.rb +7 -0
- metadata +166 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
|
|
1
|
+
---
|
2
|
+
SHA1:
|
3
|
+
metadata.gz: 4a6e8cd0a296e99e18501d95cf0e18da4fead897
|
4
|
+
data.tar.gz: ddf258adf6a8adffc45f4ad36166f4b9dc262c60
|
5
|
+
SHA512:
|
6
|
+
metadata.gz: 5f0d72c28afac30da2fec81fb5e8f922d87ec03c34defdcd31e4dd63859fc37e0a291f06d9e93e0b3057a1e0b5dc98776728986c9f5e84ccaf87c651defaa68a
|
7
|
+
data.tar.gz: 83b6464b9079c18107be7c054e5e0c3b1d98e9b597087ba5b1f31e4334a4c4f94553224e3bdd6575a50e29bc173cc2ee1c0852cc8b178ccc78799a654d96338c
|
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
|
|
1
|
+
The MIT License (MIT)
|
2
|
+
|
3
|
+
Copyright (c) 2015 Microsoft Corporation
|
4
|
+
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
7
|
+
in the Software without restriction, including without limitation the rights
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
10
|
+
furnished to do so, subject to the following conditions:
|
11
|
+
|
12
|
+
The above copyright notice and this permission notice shall be included in
|
13
|
+
all copies or substantial portions of the Software.
|
14
|
+
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
21
|
+
THE SOFTWARE.
|
@@ -0,0 +1,63 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
# Code generated by Microsoft (R) AutoRest Code Generator.
|
3
|
+
# Changes may cause incorrect behavior and will be lost if the code is
|
4
|
+
# regenerated.
|
5
|
+
|
6
|
+
require 'uri'
|
7
|
+
require 'cgi'
|
8
|
+
require 'date'
|
9
|
+
require 'json'
|
10
|
+
require 'base64'
|
11
|
+
require 'erb'
|
12
|
+
require 'securerandom'
|
13
|
+
require 'time'
|
14
|
+
require 'timeliness'
|
15
|
+
require 'faraday'
|
16
|
+
require 'faraday-cookie_jar'
|
17
|
+
require 'concurrent'
|
18
|
+
require 'ms_rest'
|
19
|
+
require '1.0/generated/azure_cognitiveservices_computervision/module_definition'
|
20
|
+
require 'ms_rest_azure'
|
21
|
+
|
22
|
+
module Azure::CognitiveServices::ComputerVision::V1_0
|
23
|
+
autoload :ComputerVisionClient, '1.0/generated/azure_cognitiveservices_computervision/computer_vision_client.rb'
|
24
|
+
|
25
|
+
module Models
|
26
|
+
autoload :ImageDescriptionDetails, '1.0/generated/azure_cognitiveservices_computervision/models/image_description_details.rb'
|
27
|
+
autoload :Word, '1.0/generated/azure_cognitiveservices_computervision/models/word.rb'
|
28
|
+
autoload :FaceDescription, '1.0/generated/azure_cognitiveservices_computervision/models/face_description.rb'
|
29
|
+
autoload :RecognitionResult, '1.0/generated/azure_cognitiveservices_computervision/models/recognition_result.rb'
|
30
|
+
autoload :ImageAnalysis, '1.0/generated/azure_cognitiveservices_computervision/models/image_analysis.rb'
|
31
|
+
autoload :FaceRectangle, '1.0/generated/azure_cognitiveservices_computervision/models/face_rectangle.rb'
|
32
|
+
autoload :OcrWord, '1.0/generated/azure_cognitiveservices_computervision/models/ocr_word.rb'
|
33
|
+
autoload :CategoryDetail, '1.0/generated/azure_cognitiveservices_computervision/models/category_detail.rb'
|
34
|
+
autoload :OcrLine, '1.0/generated/azure_cognitiveservices_computervision/models/ocr_line.rb'
|
35
|
+
autoload :AdultInfo, '1.0/generated/azure_cognitiveservices_computervision/models/adult_info.rb'
|
36
|
+
autoload :OcrRegion, '1.0/generated/azure_cognitiveservices_computervision/models/ocr_region.rb'
|
37
|
+
autoload :ImageType, '1.0/generated/azure_cognitiveservices_computervision/models/image_type.rb'
|
38
|
+
autoload :OcrResult, '1.0/generated/azure_cognitiveservices_computervision/models/ocr_result.rb'
|
39
|
+
autoload :ImageCaption, '1.0/generated/azure_cognitiveservices_computervision/models/image_caption.rb'
|
40
|
+
autoload :ModelDescription, '1.0/generated/azure_cognitiveservices_computervision/models/model_description.rb'
|
41
|
+
autoload :Line, '1.0/generated/azure_cognitiveservices_computervision/models/line.rb'
|
42
|
+
autoload :ListModelsResult, '1.0/generated/azure_cognitiveservices_computervision/models/list_models_result.rb'
|
43
|
+
autoload :CelebritiesModel, '1.0/generated/azure_cognitiveservices_computervision/models/celebrities_model.rb'
|
44
|
+
autoload :DomainModelResults, '1.0/generated/azure_cognitiveservices_computervision/models/domain_model_results.rb'
|
45
|
+
autoload :ColorInfo, '1.0/generated/azure_cognitiveservices_computervision/models/color_info.rb'
|
46
|
+
autoload :ImageDescription, '1.0/generated/azure_cognitiveservices_computervision/models/image_description.rb'
|
47
|
+
autoload :ImageMetadata, '1.0/generated/azure_cognitiveservices_computervision/models/image_metadata.rb'
|
48
|
+
autoload :TagResult, '1.0/generated/azure_cognitiveservices_computervision/models/tag_result.rb'
|
49
|
+
autoload :Category, '1.0/generated/azure_cognitiveservices_computervision/models/category.rb'
|
50
|
+
autoload :ComputerVisionError, '1.0/generated/azure_cognitiveservices_computervision/models/computer_vision_error.rb'
|
51
|
+
autoload :TextOperationResult, '1.0/generated/azure_cognitiveservices_computervision/models/text_operation_result.rb'
|
52
|
+
autoload :ImageUrl, '1.0/generated/azure_cognitiveservices_computervision/models/image_url.rb'
|
53
|
+
autoload :ImageTag, '1.0/generated/azure_cognitiveservices_computervision/models/image_tag.rb'
|
54
|
+
autoload :TextOperationStatusCodes, '1.0/generated/azure_cognitiveservices_computervision/models/text_operation_status_codes.rb'
|
55
|
+
autoload :ComputerVisionErrorCodes, '1.0/generated/azure_cognitiveservices_computervision/models/computer_vision_error_codes.rb'
|
56
|
+
autoload :VisualFeatureTypes, '1.0/generated/azure_cognitiveservices_computervision/models/visual_feature_types.rb'
|
57
|
+
autoload :OcrLanguages, '1.0/generated/azure_cognitiveservices_computervision/models/ocr_languages.rb'
|
58
|
+
autoload :AzureRegions, '1.0/generated/azure_cognitiveservices_computervision/models/azure_regions.rb'
|
59
|
+
autoload :Details, '1.0/generated/azure_cognitiveservices_computervision/models/details.rb'
|
60
|
+
autoload :Language1, '1.0/generated/azure_cognitiveservices_computervision/models/language1.rb'
|
61
|
+
autoload :DomainModels, '1.0/generated/azure_cognitiveservices_computervision/models/domain_models.rb'
|
62
|
+
end
|
63
|
+
end
|
@@ -0,0 +1,2173 @@
|
|
1
|
+
# encoding: utf-8
|
2
|
+
# Code generated by Microsoft (R) AutoRest Code Generator.
|
3
|
+
# Changes may cause incorrect behavior and will be lost if the code is
|
4
|
+
# regenerated.
|
5
|
+
|
6
|
+
module Azure::CognitiveServices::ComputerVision::V1_0
|
7
|
+
#
|
8
|
+
# A service client - single point of access to the REST API.
|
9
|
+
#
|
10
|
+
class ComputerVisionClient < MsRestAzure::AzureServiceClient
|
11
|
+
include MsRestAzure
|
12
|
+
include MsRestAzure::Serialization
|
13
|
+
|
14
|
+
# @return [String] the base URI of the service.
|
15
|
+
attr_reader :base_url
|
16
|
+
|
17
|
+
# @return Credentials needed for the client to connect to Azure.
|
18
|
+
attr_reader :credentials1
|
19
|
+
|
20
|
+
# @return [AzureRegions] Supported Azure regions for Cognitive Services
|
21
|
+
# endpoints. Possible values include: 'westus', 'westeurope',
|
22
|
+
# 'southeastasia', 'eastus2', 'westcentralus', 'westus2', 'eastus',
|
23
|
+
# 'southcentralus', 'northeurope', 'eastasia', 'australiaeast',
|
24
|
+
# 'brazilsouth'
|
25
|
+
attr_accessor :azure_region
|
26
|
+
|
27
|
+
# @return Subscription credentials which uniquely identify client
|
28
|
+
# subscription.
|
29
|
+
attr_accessor :credentials
|
30
|
+
|
31
|
+
# @return [String] Gets or sets the preferred language for the response.
|
32
|
+
attr_accessor :accept_language
|
33
|
+
|
34
|
+
# @return [Integer] Gets or sets the retry timeout in seconds for Long
|
35
|
+
# Running Operations. Default value is 30.
|
36
|
+
attr_accessor :long_running_operation_retry_timeout
|
37
|
+
|
38
|
+
# @return [Boolean] When set to true a unique x-ms-client-request-id value
|
39
|
+
# is generated and included in each request. Default is true.
|
40
|
+
attr_accessor :generate_client_request_id
|
41
|
+
|
42
|
+
#
|
43
|
+
# Creates initializes a new instance of the ComputerVisionClient class.
|
44
|
+
# @param credentials [MsRest::ServiceClientCredentials] credentials to authorize HTTP requests made by the service client.
|
45
|
+
# @param options [Array] filters to be applied to the HTTP requests.
|
46
|
+
#
|
47
|
+
def initialize(credentials = nil, options = nil)
|
48
|
+
super(credentials, options)
|
49
|
+
@base_url = 'https://{AzureRegion}.api.cognitive.microsoft.com/vision/v1.0'
|
50
|
+
|
51
|
+
fail ArgumentError, 'invalid type of credentials input parameter' unless credentials.is_a?(MsRest::ServiceClientCredentials) unless credentials.nil?
|
52
|
+
@credentials = credentials
|
53
|
+
|
54
|
+
@accept_language = 'en-US'
|
55
|
+
@long_running_operation_retry_timeout = 30
|
56
|
+
@generate_client_request_id = true
|
57
|
+
add_telemetry
|
58
|
+
end
|
59
|
+
|
60
|
+
#
|
61
|
+
# Makes a request and returns the body of the response.
|
62
|
+
# @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
|
63
|
+
# @param path [String] the path, relative to {base_url}.
|
64
|
+
# @param options [Hash{String=>String}] specifying any request options like :body.
|
65
|
+
# @return [Hash{String=>String}] containing the body of the response.
|
66
|
+
# Example:
|
67
|
+
#
|
68
|
+
# request_content = "{'location':'westus','tags':{'tag1':'val1','tag2':'val2'}}"
|
69
|
+
# path = "/path"
|
70
|
+
# options = {
|
71
|
+
# body: request_content,
|
72
|
+
# query_params: {'api-version' => '2016-02-01'}
|
73
|
+
# }
|
74
|
+
# result = @client.make_request(:put, path, options)
|
75
|
+
#
|
76
|
+
def make_request(method, path, options = {})
|
77
|
+
result = make_request_with_http_info(method, path, options)
|
78
|
+
result.body unless result.nil?
|
79
|
+
end
|
80
|
+
|
81
|
+
#
|
82
|
+
# Makes a request and returns the operation response.
|
83
|
+
# @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
|
84
|
+
# @param path [String] the path, relative to {base_url}.
|
85
|
+
# @param options [Hash{String=>String}] specifying any request options like :body.
|
86
|
+
# @return [MsRestAzure::AzureOperationResponse] Operation response containing the request, response and status.
|
87
|
+
#
|
88
|
+
def make_request_with_http_info(method, path, options = {})
|
89
|
+
result = make_request_async(method, path, options).value!
|
90
|
+
result.body = result.response.body.to_s.empty? ? nil : JSON.load(result.response.body)
|
91
|
+
result
|
92
|
+
end
|
93
|
+
|
94
|
+
#
|
95
|
+
# Makes a request asynchronously.
|
96
|
+
# @param method [Symbol] with any of the following values :get, :put, :post, :patch, :delete.
|
97
|
+
# @param path [String] the path, relative to {base_url}.
|
98
|
+
# @param options [Hash{String=>String}] specifying any request options like :body.
|
99
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
100
|
+
#
|
101
|
+
def make_request_async(method, path, options = {})
|
102
|
+
fail ArgumentError, 'method is nil' if method.nil?
|
103
|
+
fail ArgumentError, 'path is nil' if path.nil?
|
104
|
+
|
105
|
+
request_url = options[:base_url] || @base_url
|
106
|
+
if(!options[:headers].nil? && !options[:headers]['Content-Type'].nil?)
|
107
|
+
@request_headers['Content-Type'] = options[:headers]['Content-Type']
|
108
|
+
end
|
109
|
+
|
110
|
+
request_headers = @request_headers
|
111
|
+
request_headers.merge!({'accept-language' => @accept_language}) unless @accept_language.nil?
|
112
|
+
options.merge!({headers: request_headers.merge(options[:headers] || {})})
|
113
|
+
options.merge!({credentials: @credentials}) unless @credentials.nil?
|
114
|
+
|
115
|
+
super(request_url, method, path, options)
|
116
|
+
end
|
117
|
+
|
118
|
+
#
|
119
|
+
# This operation returns the list of domain-specific models that are supported
|
120
|
+
# by the Computer Vision API. Currently, the API only supports one
|
121
|
+
# domain-specific model: a celebrity recognizer. A successful response will be
|
122
|
+
# returned in JSON. If the request failed, the response will contain an error
|
123
|
+
# code and a message to help understand what went wrong.
|
124
|
+
#
|
125
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
126
|
+
# will be added to the HTTP request.
|
127
|
+
#
|
128
|
+
# @return [ListModelsResult] operation results.
|
129
|
+
#
|
130
|
+
def list_models(custom_headers:nil)
|
131
|
+
response = list_models_async(custom_headers:custom_headers).value!
|
132
|
+
response.body unless response.nil?
|
133
|
+
end
|
134
|
+
|
135
|
+
#
|
136
|
+
# This operation returns the list of domain-specific models that are supported
|
137
|
+
# by the Computer Vision API. Currently, the API only supports one
|
138
|
+
# domain-specific model: a celebrity recognizer. A successful response will be
|
139
|
+
# returned in JSON. If the request failed, the response will contain an error
|
140
|
+
# code and a message to help understand what went wrong.
|
141
|
+
#
|
142
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
143
|
+
# will be added to the HTTP request.
|
144
|
+
#
|
145
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
146
|
+
#
|
147
|
+
def list_models_with_http_info(custom_headers:nil)
|
148
|
+
list_models_async(custom_headers:custom_headers).value!
|
149
|
+
end
|
150
|
+
|
151
|
+
#
|
152
|
+
# This operation returns the list of domain-specific models that are supported
|
153
|
+
# by the Computer Vision API. Currently, the API only supports one
|
154
|
+
# domain-specific model: a celebrity recognizer. A successful response will be
|
155
|
+
# returned in JSON. If the request failed, the response will contain an error
|
156
|
+
# code and a message to help understand what went wrong.
|
157
|
+
#
|
158
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
159
|
+
# to the HTTP request.
|
160
|
+
#
|
161
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
162
|
+
#
|
163
|
+
def list_models_async(custom_headers:nil)
|
164
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
165
|
+
|
166
|
+
|
167
|
+
request_headers = {}
|
168
|
+
request_headers['Content-Type'] = 'application/json; charset=utf-8'
|
169
|
+
|
170
|
+
# Set Headers
|
171
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
172
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
173
|
+
path_template = 'models'
|
174
|
+
|
175
|
+
request_url = @base_url || self.base_url
|
176
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
177
|
+
|
178
|
+
options = {
|
179
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
180
|
+
headers: request_headers.merge(custom_headers || {}),
|
181
|
+
base_url: request_url
|
182
|
+
}
|
183
|
+
promise = self.make_request_async(:get, path_template, options)
|
184
|
+
|
185
|
+
promise = promise.then do |result|
|
186
|
+
http_response = result.response
|
187
|
+
status_code = http_response.status
|
188
|
+
response_content = http_response.body
|
189
|
+
unless status_code == 200
|
190
|
+
error_model = JSON.load(response_content)
|
191
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
192
|
+
end
|
193
|
+
|
194
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
195
|
+
# Deserialize Response
|
196
|
+
if status_code == 200
|
197
|
+
begin
|
198
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
199
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ListModelsResult.mapper()
|
200
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
201
|
+
rescue Exception => e
|
202
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
203
|
+
end
|
204
|
+
end
|
205
|
+
|
206
|
+
result
|
207
|
+
end
|
208
|
+
|
209
|
+
promise.execute
|
210
|
+
end
|
211
|
+
|
212
|
+
#
|
213
|
+
# This operation extracts a rich set of visual features based on the image
|
214
|
+
# content. Two input methods are supported -- (1) Uploading an image or (2)
|
215
|
+
# specifying an image URL. Within your request, there is an optional parameter
|
216
|
+
# to allow you to choose which features to return. By default, image
|
217
|
+
# categories are returned in the response.
|
218
|
+
#
|
219
|
+
# @param url [String]
|
220
|
+
# @param visual_features [Array<VisualFeatureTypes>] A string indicating what
|
221
|
+
# visual feature types to return. Multiple values should be comma-separated.
|
222
|
+
# Valid visual feature types include:Categories - categorizes image content
|
223
|
+
# according to a taxonomy defined in documentation. Tags - tags the image with
|
224
|
+
# a detailed list of words related to the image content. Description -
|
225
|
+
# describes the image content with a complete English sentence. Faces - detects
|
226
|
+
# if faces are present. If present, generate coordinates, gender and age.
|
227
|
+
# ImageType - detects if image is clipart or a line drawing. Color - determines
|
228
|
+
# the accent color, dominant color, and whether an image is black&white.Adult -
|
229
|
+
# detects if the image is pornographic in nature (depicts nudity or a sex act).
|
230
|
+
# Sexually suggestive content is also detected.
|
231
|
+
# @param details [Array<Details>] A string indicating which domain-specific
|
232
|
+
# details to return. Multiple values should be comma-separated. Valid visual
|
233
|
+
# feature types include:Celebrities - identifies celebrities if detected in the
|
234
|
+
# image.
|
235
|
+
# @param language [Language1] A string indicating which language to return. The
|
236
|
+
# service will return recognition results in specified language. If this
|
237
|
+
# parameter is not specified, the default value is "en".Supported
|
238
|
+
# languages:en - English, Default.zh - Simplified Chinese. Possible values
|
239
|
+
# include: 'en', 'zh'
|
240
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
241
|
+
# will be added to the HTTP request.
|
242
|
+
#
|
243
|
+
# @return [ImageAnalysis] operation results.
|
244
|
+
#
|
245
|
+
def analyze_image(url, visual_features:nil, details:nil, language:nil, custom_headers:nil)
|
246
|
+
response = analyze_image_async(url, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
|
247
|
+
response.body unless response.nil?
|
248
|
+
end
|
249
|
+
|
250
|
+
#
|
251
|
+
# This operation extracts a rich set of visual features based on the image
|
252
|
+
# content. Two input methods are supported -- (1) Uploading an image or (2)
|
253
|
+
# specifying an image URL. Within your request, there is an optional parameter
|
254
|
+
# to allow you to choose which features to return. By default, image
|
255
|
+
# categories are returned in the response.
|
256
|
+
#
|
257
|
+
# @param url [String]
|
258
|
+
# @param visual_features [Array<VisualFeatureTypes>] A string indicating what
|
259
|
+
# visual feature types to return. Multiple values should be comma-separated.
|
260
|
+
# Valid visual feature types include:Categories - categorizes image content
|
261
|
+
# according to a taxonomy defined in documentation. Tags - tags the image with
|
262
|
+
# a detailed list of words related to the image content. Description -
|
263
|
+
# describes the image content with a complete English sentence. Faces - detects
|
264
|
+
# if faces are present. If present, generate coordinates, gender and age.
|
265
|
+
# ImageType - detects if image is clipart or a line drawing. Color - determines
|
266
|
+
# the accent color, dominant color, and whether an image is black&white.Adult -
|
267
|
+
# detects if the image is pornographic in nature (depicts nudity or a sex act).
|
268
|
+
# Sexually suggestive content is also detected.
|
269
|
+
# @param details [Array<Details>] A string indicating which domain-specific
|
270
|
+
# details to return. Multiple values should be comma-separated. Valid visual
|
271
|
+
# feature types include:Celebrities - identifies celebrities if detected in the
|
272
|
+
# image.
|
273
|
+
# @param language [Language1] A string indicating which language to return. The
|
274
|
+
# service will return recognition results in specified language. If this
|
275
|
+
# parameter is not specified, the default value is "en".Supported
|
276
|
+
# languages:en - English, Default.zh - Simplified Chinese. Possible values
|
277
|
+
# include: 'en', 'zh'
|
278
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
279
|
+
# will be added to the HTTP request.
|
280
|
+
#
|
281
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
282
|
+
#
|
283
|
+
def analyze_image_with_http_info(url, visual_features:nil, details:nil, language:nil, custom_headers:nil)
|
284
|
+
analyze_image_async(url, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
|
285
|
+
end
|
286
|
+
|
287
|
+
#
|
288
|
+
# This operation extracts a rich set of visual features based on the image
|
289
|
+
# content. Two input methods are supported -- (1) Uploading an image or (2)
|
290
|
+
# specifying an image URL. Within your request, there is an optional parameter
|
291
|
+
# to allow you to choose which features to return. By default, image
|
292
|
+
# categories are returned in the response.
|
293
|
+
#
|
294
|
+
# @param url [String]
|
295
|
+
# @param visual_features [Array<VisualFeatureTypes>] A string indicating what
|
296
|
+
# visual feature types to return. Multiple values should be comma-separated.
|
297
|
+
# Valid visual feature types include:Categories - categorizes image content
|
298
|
+
# according to a taxonomy defined in documentation. Tags - tags the image with
|
299
|
+
# a detailed list of words related to the image content. Description -
|
300
|
+
# describes the image content with a complete English sentence. Faces - detects
|
301
|
+
# if faces are present. If present, generate coordinates, gender and age.
|
302
|
+
# ImageType - detects if image is clipart or a line drawing. Color - determines
|
303
|
+
# the accent color, dominant color, and whether an image is black&white.Adult -
|
304
|
+
# detects if the image is pornographic in nature (depicts nudity or a sex act).
|
305
|
+
# Sexually suggestive content is also detected.
|
306
|
+
# @param details [Array<Details>] A string indicating which domain-specific
|
307
|
+
# details to return. Multiple values should be comma-separated. Valid visual
|
308
|
+
# feature types include:Celebrities - identifies celebrities if detected in the
|
309
|
+
# image.
|
310
|
+
# @param language [Language1] A string indicating which language to return. The
|
311
|
+
# service will return recognition results in specified language. If this
|
312
|
+
# parameter is not specified, the default value is "en".Supported
|
313
|
+
# languages:en - English, Default.zh - Simplified Chinese. Possible values
|
314
|
+
# include: 'en', 'zh'
|
315
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
316
|
+
# to the HTTP request.
|
317
|
+
#
|
318
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
319
|
+
#
|
320
|
+
def analyze_image_async(url, visual_features:nil, details:nil, language:nil, custom_headers:nil)
|
321
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
322
|
+
fail ArgumentError, 'url is nil' if url.nil?
|
323
|
+
|
324
|
+
image_url = ImageUrl.new
|
325
|
+
unless url.nil?
|
326
|
+
image_url.url = url
|
327
|
+
end
|
328
|
+
|
329
|
+
request_headers = {}
|
330
|
+
request_headers['Content-Type'] = 'application/json; charset=utf-8'
|
331
|
+
|
332
|
+
# Set Headers
|
333
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
334
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
335
|
+
|
336
|
+
# Serialize Request
|
337
|
+
request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
|
338
|
+
request_content = self.serialize(request_mapper, image_url)
|
339
|
+
request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
|
340
|
+
|
341
|
+
path_template = 'analyze'
|
342
|
+
|
343
|
+
request_url = @base_url || self.base_url
|
344
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
345
|
+
|
346
|
+
options = {
|
347
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
348
|
+
query_params: {'visualFeatures' => visual_features.nil? ? nil : visual_features.join(','),'details' => details.nil? ? nil : details.join(','),'language' => language},
|
349
|
+
body: request_content,
|
350
|
+
headers: request_headers.merge(custom_headers || {}),
|
351
|
+
base_url: request_url
|
352
|
+
}
|
353
|
+
promise = self.make_request_async(:post, path_template, options)
|
354
|
+
|
355
|
+
promise = promise.then do |result|
|
356
|
+
http_response = result.response
|
357
|
+
status_code = http_response.status
|
358
|
+
response_content = http_response.body
|
359
|
+
unless status_code == 200
|
360
|
+
error_model = JSON.load(response_content)
|
361
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
362
|
+
end
|
363
|
+
|
364
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
365
|
+
# Deserialize Response
|
366
|
+
if status_code == 200
|
367
|
+
begin
|
368
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
369
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageAnalysis.mapper()
|
370
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
371
|
+
rescue Exception => e
|
372
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
373
|
+
end
|
374
|
+
end
|
375
|
+
|
376
|
+
result
|
377
|
+
end
|
378
|
+
|
379
|
+
promise.execute
|
380
|
+
end
|
381
|
+
|
382
|
+
#
|
383
|
+
# This operation generates a thumbnail image with the user-specified width and
|
384
|
+
# height. By default, the service analyzes the image, identifies the region of
|
385
|
+
# interest (ROI), and generates smart cropping coordinates based on the ROI.
|
386
|
+
# Smart cropping helps when you specify an aspect ratio that differs from that
|
387
|
+
# of the input image. A successful response contains the thumbnail image
|
388
|
+
# binary. If the request failed, the response contains an error code and a
|
389
|
+
# message to help determine what went wrong.
|
390
|
+
#
|
391
|
+
# @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
|
392
|
+
# Recommended minimum of 50.
|
393
|
+
# @param height [Integer] Height of the thumbnail. It must be between 1 and
|
394
|
+
# 1024. Recommended minimum of 50.
|
395
|
+
# @param url [String]
|
396
|
+
# @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
|
397
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
398
|
+
# will be added to the HTTP request.
|
399
|
+
#
|
400
|
+
# @return [NOT_IMPLEMENTED] operation results.
|
401
|
+
#
|
402
|
+
def generate_thumbnail(width, height, url, smart_cropping:false, custom_headers:nil)
|
403
|
+
response = generate_thumbnail_async(width, height, url, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
|
404
|
+
response.body unless response.nil?
|
405
|
+
end
|
406
|
+
|
407
|
+
#
|
408
|
+
# This operation generates a thumbnail image with the user-specified width and
|
409
|
+
# height. By default, the service analyzes the image, identifies the region of
|
410
|
+
# interest (ROI), and generates smart cropping coordinates based on the ROI.
|
411
|
+
# Smart cropping helps when you specify an aspect ratio that differs from that
|
412
|
+
# of the input image. A successful response contains the thumbnail image
|
413
|
+
# binary. If the request failed, the response contains an error code and a
|
414
|
+
# message to help determine what went wrong.
|
415
|
+
#
|
416
|
+
# @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
|
417
|
+
# Recommended minimum of 50.
|
418
|
+
# @param height [Integer] Height of the thumbnail. It must be between 1 and
|
419
|
+
# 1024. Recommended minimum of 50.
|
420
|
+
# @param url [String]
|
421
|
+
# @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
|
422
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
423
|
+
# will be added to the HTTP request.
|
424
|
+
#
|
425
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
426
|
+
#
|
427
|
+
def generate_thumbnail_with_http_info(width, height, url, smart_cropping:false, custom_headers:nil)
|
428
|
+
generate_thumbnail_async(width, height, url, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
|
429
|
+
end
|
430
|
+
|
431
|
+
#
|
432
|
+
# This operation generates a thumbnail image with the user-specified width and
|
433
|
+
# height. By default, the service analyzes the image, identifies the region of
|
434
|
+
# interest (ROI), and generates smart cropping coordinates based on the ROI.
|
435
|
+
# Smart cropping helps when you specify an aspect ratio that differs from that
|
436
|
+
# of the input image. A successful response contains the thumbnail image
|
437
|
+
# binary. If the request failed, the response contains an error code and a
|
438
|
+
# message to help determine what went wrong.
|
439
|
+
#
|
440
|
+
# @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
|
441
|
+
# Recommended minimum of 50.
|
442
|
+
# @param height [Integer] Height of the thumbnail. It must be between 1 and
|
443
|
+
# 1024. Recommended minimum of 50.
|
444
|
+
# @param url [String]
|
445
|
+
# @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
|
446
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
447
|
+
# to the HTTP request.
|
448
|
+
#
|
449
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
450
|
+
#
|
451
|
+
def generate_thumbnail_async(width, height, url, smart_cropping:false, custom_headers:nil)
|
452
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
453
|
+
fail ArgumentError, 'width is nil' if width.nil?
|
454
|
+
fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !width.nil? && width > 1023
|
455
|
+
fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMinimum': '1'" if !width.nil? && width < 1
|
456
|
+
fail ArgumentError, 'height is nil' if height.nil?
|
457
|
+
fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !height.nil? && height > 1023
|
458
|
+
fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMinimum': '1'" if !height.nil? && height < 1
|
459
|
+
fail ArgumentError, 'url is nil' if url.nil?
|
460
|
+
|
461
|
+
image_url = ImageUrl.new
|
462
|
+
unless url.nil?
|
463
|
+
image_url.url = url
|
464
|
+
end
|
465
|
+
|
466
|
+
request_headers = {}
|
467
|
+
request_headers['Content-Type'] = 'application/json; charset=utf-8'
|
468
|
+
|
469
|
+
# Set Headers
|
470
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
471
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
472
|
+
|
473
|
+
# Serialize Request
|
474
|
+
request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
|
475
|
+
request_content = self.serialize(request_mapper, image_url)
|
476
|
+
request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
|
477
|
+
|
478
|
+
path_template = 'generateThumbnail'
|
479
|
+
|
480
|
+
request_url = @base_url || self.base_url
|
481
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
482
|
+
|
483
|
+
options = {
|
484
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
485
|
+
query_params: {'width' => width,'height' => height,'smartCropping' => smart_cropping},
|
486
|
+
body: request_content,
|
487
|
+
headers: request_headers.merge(custom_headers || {}),
|
488
|
+
base_url: request_url
|
489
|
+
}
|
490
|
+
promise = self.make_request_async(:post, path_template, options)
|
491
|
+
|
492
|
+
promise = promise.then do |result|
|
493
|
+
http_response = result.response
|
494
|
+
status_code = http_response.status
|
495
|
+
response_content = http_response.body
|
496
|
+
unless status_code == 200
|
497
|
+
error_model = JSON.load(response_content)
|
498
|
+
fail MsRestAzure::AzureOperationError.new(result.request, http_response, error_model)
|
499
|
+
end
|
500
|
+
|
501
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
502
|
+
# Deserialize Response
|
503
|
+
if status_code == 200
|
504
|
+
begin
|
505
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
506
|
+
result_mapper = {
|
507
|
+
client_side_validation: true,
|
508
|
+
required: false,
|
509
|
+
serialized_name: 'parsed_response',
|
510
|
+
type: {
|
511
|
+
name: 'Stream'
|
512
|
+
}
|
513
|
+
}
|
514
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
515
|
+
rescue Exception => e
|
516
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
517
|
+
end
|
518
|
+
end
|
519
|
+
|
520
|
+
result
|
521
|
+
end
|
522
|
+
|
523
|
+
promise.execute
|
524
|
+
end
|
525
|
+
|
526
|
+
#
|
527
|
+
# Optical Character Recognition (OCR) detects printed text in an image and
|
528
|
+
# extracts the recognized characters into a machine-usable character stream.
|
529
|
+
# Upon success, the OCR results will be returned. Upon failure, the error code
|
530
|
+
# together with an error message will be returned. The error code can be one of
|
531
|
+
# InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
|
532
|
+
# NotSupportedLanguage, or InternalServerError.
|
533
|
+
#
|
534
|
+
# @param detect_orientation [Boolean] Whether detect the text orientation in
|
535
|
+
# the image. With detectOrientation=true the OCR service tries to detect the
|
536
|
+
# image orientation and correct it before further processing (e.g. if it's
|
537
|
+
# upside-down).
|
538
|
+
# @param url [String]
|
539
|
+
# @param language [OcrLanguages] The BCP-47 language code of the text to be
|
540
|
+
# detected in the image. The default value is 'unk'. Possible values include:
|
541
|
+
# 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
|
542
|
+
# 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
|
543
|
+
# 'sr-Cyrl', 'sr-Latn', 'sk'
|
544
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
545
|
+
# will be added to the HTTP request.
|
546
|
+
#
|
547
|
+
# @return [OcrResult] operation results.
|
548
|
+
#
|
549
|
+
def recognize_printed_text(detect_orientation, url, language:nil, custom_headers:nil)
|
550
|
+
response = recognize_printed_text_async(detect_orientation, url, language:language, custom_headers:custom_headers).value!
|
551
|
+
response.body unless response.nil?
|
552
|
+
end
|
553
|
+
|
554
|
+
#
|
555
|
+
# Optical Character Recognition (OCR) detects printed text in an image and
|
556
|
+
# extracts the recognized characters into a machine-usable character stream.
|
557
|
+
# Upon success, the OCR results will be returned. Upon failure, the error code
|
558
|
+
# together with an error message will be returned. The error code can be one of
|
559
|
+
# InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
|
560
|
+
# NotSupportedLanguage, or InternalServerError.
|
561
|
+
#
|
562
|
+
# @param detect_orientation [Boolean] Whether detect the text orientation in
|
563
|
+
# the image. With detectOrientation=true the OCR service tries to detect the
|
564
|
+
# image orientation and correct it before further processing (e.g. if it's
|
565
|
+
# upside-down).
|
566
|
+
# @param url [String]
|
567
|
+
# @param language [OcrLanguages] The BCP-47 language code of the text to be
|
568
|
+
# detected in the image. The default value is 'unk'. Possible values include:
|
569
|
+
# 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
|
570
|
+
# 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
|
571
|
+
# 'sr-Cyrl', 'sr-Latn', 'sk'
|
572
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
573
|
+
# will be added to the HTTP request.
|
574
|
+
#
|
575
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
576
|
+
#
|
577
|
+
def recognize_printed_text_with_http_info(detect_orientation, url, language:nil, custom_headers:nil)
|
578
|
+
recognize_printed_text_async(detect_orientation, url, language:language, custom_headers:custom_headers).value!
|
579
|
+
end
|
580
|
+
|
581
|
+
#
|
582
|
+
# Optical Character Recognition (OCR) detects printed text in an image and
|
583
|
+
# extracts the recognized characters into a machine-usable character stream.
|
584
|
+
# Upon success, the OCR results will be returned. Upon failure, the error code
|
585
|
+
# together with an error message will be returned. The error code can be one of
|
586
|
+
# InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
|
587
|
+
# NotSupportedLanguage, or InternalServerError.
|
588
|
+
#
|
589
|
+
# @param detect_orientation [Boolean] Whether detect the text orientation in
|
590
|
+
# the image. With detectOrientation=true the OCR service tries to detect the
|
591
|
+
# image orientation and correct it before further processing (e.g. if it's
|
592
|
+
# upside-down).
|
593
|
+
# @param url [String]
|
594
|
+
# @param language [OcrLanguages] The BCP-47 language code of the text to be
|
595
|
+
# detected in the image. The default value is 'unk'. Possible values include:
|
596
|
+
# 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
|
597
|
+
# 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
|
598
|
+
# 'sr-Cyrl', 'sr-Latn', 'sk'
|
599
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
600
|
+
# to the HTTP request.
|
601
|
+
#
|
602
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
603
|
+
#
|
604
|
+
def recognize_printed_text_async(detect_orientation, url, language:nil, custom_headers:nil)
|
605
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
606
|
+
fail ArgumentError, 'detect_orientation is nil' if detect_orientation.nil?
|
607
|
+
fail ArgumentError, 'url is nil' if url.nil?
|
608
|
+
|
609
|
+
image_url = ImageUrl.new
|
610
|
+
unless url.nil?
|
611
|
+
image_url.url = url
|
612
|
+
end
|
613
|
+
|
614
|
+
request_headers = {}
|
615
|
+
request_headers['Content-Type'] = 'application/json; charset=utf-8'
|
616
|
+
|
617
|
+
# Set Headers
|
618
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
619
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
620
|
+
|
621
|
+
# Serialize Request
|
622
|
+
request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
|
623
|
+
request_content = self.serialize(request_mapper, image_url)
|
624
|
+
request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
|
625
|
+
|
626
|
+
path_template = 'ocr'
|
627
|
+
|
628
|
+
request_url = @base_url || self.base_url
|
629
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
630
|
+
|
631
|
+
options = {
|
632
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
633
|
+
query_params: {'detectOrientation' => detect_orientation,'language' => language},
|
634
|
+
body: request_content,
|
635
|
+
headers: request_headers.merge(custom_headers || {}),
|
636
|
+
base_url: request_url
|
637
|
+
}
|
638
|
+
promise = self.make_request_async(:post, path_template, options)
|
639
|
+
|
640
|
+
promise = promise.then do |result|
|
641
|
+
http_response = result.response
|
642
|
+
status_code = http_response.status
|
643
|
+
response_content = http_response.body
|
644
|
+
unless status_code == 200
|
645
|
+
error_model = JSON.load(response_content)
|
646
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
647
|
+
end
|
648
|
+
|
649
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
650
|
+
# Deserialize Response
|
651
|
+
if status_code == 200
|
652
|
+
begin
|
653
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
654
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::OcrResult.mapper()
|
655
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
656
|
+
rescue Exception => e
|
657
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
658
|
+
end
|
659
|
+
end
|
660
|
+
|
661
|
+
result
|
662
|
+
end
|
663
|
+
|
664
|
+
promise.execute
|
665
|
+
end
|
666
|
+
|
667
|
+
#
|
668
|
+
# This operation generates a description of an image in human readable language
|
669
|
+
# with complete sentences. The description is based on a collection of content
|
670
|
+
# tags, which are also returned by the operation. More than one description can
|
671
|
+
# be generated for each image. Descriptions are ordered by their confidence
|
672
|
+
# score. All descriptions are in English. Two input methods are supported --
|
673
|
+
# (1) Uploading an image or (2) specifying an image URL.A successful response
|
674
|
+
# will be returned in JSON. If the request failed, the response will contain
|
675
|
+
# an error code and a message to help understand what went wrong.
|
676
|
+
#
|
677
|
+
# @param url [String]
|
678
|
+
# @param max_candidates [String] Maximum number of candidate descriptions to be
|
679
|
+
# returned. The default is 1.
|
680
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
681
|
+
# will be added to the HTTP request.
|
682
|
+
#
|
683
|
+
# @return [ImageDescription] operation results.
|
684
|
+
#
|
685
|
+
def describe_image(url, max_candidates:'1', custom_headers:nil)
|
686
|
+
response = describe_image_async(url, max_candidates:max_candidates, custom_headers:custom_headers).value!
|
687
|
+
response.body unless response.nil?
|
688
|
+
end
|
689
|
+
|
690
|
+
#
|
691
|
+
# This operation generates a description of an image in human readable language
|
692
|
+
# with complete sentences. The description is based on a collection of content
|
693
|
+
# tags, which are also returned by the operation. More than one description can
|
694
|
+
# be generated for each image. Descriptions are ordered by their confidence
|
695
|
+
# score. All descriptions are in English. Two input methods are supported --
|
696
|
+
# (1) Uploading an image or (2) specifying an image URL.A successful response
|
697
|
+
# will be returned in JSON. If the request failed, the response will contain
|
698
|
+
# an error code and a message to help understand what went wrong.
|
699
|
+
#
|
700
|
+
# @param url [String]
|
701
|
+
# @param max_candidates [String] Maximum number of candidate descriptions to be
|
702
|
+
# returned. The default is 1.
|
703
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
704
|
+
# will be added to the HTTP request.
|
705
|
+
#
|
706
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
707
|
+
#
|
708
|
+
def describe_image_with_http_info(url, max_candidates:'1', custom_headers:nil)
|
709
|
+
describe_image_async(url, max_candidates:max_candidates, custom_headers:custom_headers).value!
|
710
|
+
end
|
711
|
+
|
712
|
+
#
|
713
|
+
# This operation generates a description of an image in human readable language
|
714
|
+
# with complete sentences. The description is based on a collection of content
|
715
|
+
# tags, which are also returned by the operation. More than one description can
|
716
|
+
# be generated for each image. Descriptions are ordered by their confidence
|
717
|
+
# score. All descriptions are in English. Two input methods are supported --
|
718
|
+
# (1) Uploading an image or (2) specifying an image URL.A successful response
|
719
|
+
# will be returned in JSON. If the request failed, the response will contain
|
720
|
+
# an error code and a message to help understand what went wrong.
|
721
|
+
#
|
722
|
+
# @param url [String]
|
723
|
+
# @param max_candidates [String] Maximum number of candidate descriptions to be
|
724
|
+
# returned. The default is 1.
|
725
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
726
|
+
# to the HTTP request.
|
727
|
+
#
|
728
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
729
|
+
#
|
730
|
+
def describe_image_async(url, max_candidates:'1', custom_headers:nil)
|
731
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
732
|
+
fail ArgumentError, 'url is nil' if url.nil?
|
733
|
+
|
734
|
+
image_url = ImageUrl.new
|
735
|
+
unless url.nil?
|
736
|
+
image_url.url = url
|
737
|
+
end
|
738
|
+
|
739
|
+
request_headers = {}
|
740
|
+
request_headers['Content-Type'] = 'application/json; charset=utf-8'
|
741
|
+
|
742
|
+
# Set Headers
|
743
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
744
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
745
|
+
|
746
|
+
# Serialize Request
|
747
|
+
request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
|
748
|
+
request_content = self.serialize(request_mapper, image_url)
|
749
|
+
request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil
|
750
|
+
|
751
|
+
path_template = 'describe'
|
752
|
+
|
753
|
+
request_url = @base_url || self.base_url
|
754
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
755
|
+
|
756
|
+
options = {
|
757
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
758
|
+
query_params: {'maxCandidates' => max_candidates},
|
759
|
+
body: request_content,
|
760
|
+
headers: request_headers.merge(custom_headers || {}),
|
761
|
+
base_url: request_url
|
762
|
+
}
|
763
|
+
promise = self.make_request_async(:post, path_template, options)
|
764
|
+
|
765
|
+
promise = promise.then do |result|
|
766
|
+
http_response = result.response
|
767
|
+
status_code = http_response.status
|
768
|
+
response_content = http_response.body
|
769
|
+
unless status_code == 200
|
770
|
+
error_model = JSON.load(response_content)
|
771
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
772
|
+
end
|
773
|
+
|
774
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
775
|
+
# Deserialize Response
|
776
|
+
if status_code == 200
|
777
|
+
begin
|
778
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
779
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageDescription.mapper()
|
780
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
781
|
+
rescue Exception => e
|
782
|
+
        fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
      end
    end

    result
  end

  promise.execute
end

#
# This operation generates a list of words, or tags, that are relevant to the
# content of the supplied image. The Computer Vision API can return tags based
# on objects, living beings, scenery or actions found in images. Unlike
# categories, tags are not organized according to a hierarchical classification
# system, but correspond to image content. Tags may contain hints to avoid
# ambiguity or provide context, for example the tag “cello” may be accompanied
# by the hint “musical instrument”. All tags are in English.
#
# @param url [String]
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [TagResult] operation results.
#
def tag_image(url, custom_headers:nil)
  response = tag_image_async(url, custom_headers:custom_headers).value!
  response.body unless response.nil?
end

#
# This operation generates a list of words, or tags, that are relevant to the
# content of the supplied image. The Computer Vision API can return tags based
# on objects, living beings, scenery or actions found in images. Unlike
# categories, tags are not organized according to a hierarchical classification
# system, but correspond to image content. Tags may contain hints to avoid
# ambiguity or provide context, for example the tag “cello” may be accompanied
# by the hint “musical instrument”. All tags are in English.
#
# @param url [String]
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def tag_image_with_http_info(url, custom_headers:nil)
  tag_image_async(url, custom_headers:custom_headers).value!
end

#
# This operation generates a list of words, or tags, that are relevant to the
# content of the supplied image. The Computer Vision API can return tags based
# on objects, living beings, scenery or actions found in images. Unlike
# categories, tags are not organized according to a hierarchical classification
# system, but correspond to image content. Tags may contain hints to avoid
# ambiguity or provide context, for example the tag “cello” may be accompanied
# by the hint “musical instrument”. All tags are in English.
#
# @param url [String]
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def tag_image_async(url, custom_headers:nil)
  fail ArgumentError, 'azure_region is nil' if azure_region.nil?
  fail ArgumentError, 'url is nil' if url.nil?

  image_url = ImageUrl.new
  unless url.nil?
    image_url.url = url
  end

  request_headers = {}
  request_headers['Content-Type'] = 'application/json; charset=utf-8'

  # Set Headers
  request_headers['x-ms-client-request-id'] = SecureRandom.uuid
  request_headers['accept-language'] = accept_language unless accept_language.nil?

  # Serialize Request
  request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
  request_content = self.serialize(request_mapper, image_url)
  request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil

  path_template = 'tag'

  request_url = @base_url || self.base_url
  request_url = request_url.gsub('{AzureRegion}', azure_region)

  options = {
    middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
    body: request_content,
    headers: request_headers.merge(custom_headers || {}),
    base_url: request_url
  }
  promise = self.make_request_async(:post, path_template, options)

  promise = promise.then do |result|
    http_response = result.response
    status_code = http_response.status
    response_content = http_response.body
    unless status_code == 200
      error_model = JSON.load(response_content)
      fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
    end

    result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
    # Deserialize Response
    if status_code == 200
      begin
        parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
        result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::TagResult.mapper()
        result.body = self.deserialize(result_mapper, parsed_response)
      rescue Exception => e
        fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
      end
    end

    result
  end

  promise.execute
end
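
# Example (illustrative sketch, not part of the generated surface): it assumes
# a client constructed elsewhere with valid Cognitive Services credentials
# (e.g. MsRestAzure::CognitiveServicesCredentials.new(key), if that class is
# available in your ms_rest_azure version); the region and image URL below are
# placeholders, and the field names follow the TagResult/ImageTag models.
#
#   require 'azure_cognitiveservices_computervision'
#
#   client = Azure::CognitiveServices::ComputerVision::V1_0::ComputerVisionClient.new(credentials)
#   client.azure_region = 'westus'
#   tag_result = client.tag_image('https://example.com/photo.jpg')
#   tag_result.tags.each { |t| puts "#{t.name} (#{t.confidence})" } if tag_result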

#
# This operation recognizes content within an image by applying a
# domain-specific model. The list of domain-specific models that are supported
# by the Computer Vision API can be retrieved using the /models GET request.
# Currently, the API only provides a single domain-specific model: celebrities.
# Two input methods are supported -- (1) Uploading an image or (2) specifying
# an image URL. A successful response will be returned in JSON. If the request
# failed, the response will contain an error code and a message to help
# understand what went wrong.
#
# @param model [DomainModels] The domain-specific content to recognize.
# Possible values include: 'Celebrities', 'Landmarks'
# @param url [String]
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [DomainModelResults] operation results.
#
def analyze_image_by_domain(model, url, custom_headers:nil)
  response = analyze_image_by_domain_async(model, url, custom_headers:custom_headers).value!
  response.body unless response.nil?
end

#
# This operation recognizes content within an image by applying a
# domain-specific model. The list of domain-specific models that are supported
# by the Computer Vision API can be retrieved using the /models GET request.
# Currently, the API only provides a single domain-specific model: celebrities.
# Two input methods are supported -- (1) Uploading an image or (2) specifying
# an image URL. A successful response will be returned in JSON. If the request
# failed, the response will contain an error code and a message to help
# understand what went wrong.
#
# @param model [DomainModels] The domain-specific content to recognize.
# Possible values include: 'Celebrities', 'Landmarks'
# @param url [String]
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def analyze_image_by_domain_with_http_info(model, url, custom_headers:nil)
  analyze_image_by_domain_async(model, url, custom_headers:custom_headers).value!
end

#
# This operation recognizes content within an image by applying a
# domain-specific model. The list of domain-specific models that are supported
# by the Computer Vision API can be retrieved using the /models GET request.
# Currently, the API only provides a single domain-specific model: celebrities.
# Two input methods are supported -- (1) Uploading an image or (2) specifying
# an image URL. A successful response will be returned in JSON. If the request
# failed, the response will contain an error code and a message to help
# understand what went wrong.
#
# @param model [DomainModels] The domain-specific content to recognize.
# Possible values include: 'Celebrities', 'Landmarks'
# @param url [String]
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def analyze_image_by_domain_async(model, url, custom_headers:nil)
  fail ArgumentError, 'azure_region is nil' if azure_region.nil?
  fail ArgumentError, 'model is nil' if model.nil?
  fail ArgumentError, 'url is nil' if url.nil?

  image_url = ImageUrl.new
  unless url.nil?
    image_url.url = url
  end

  request_headers = {}
  request_headers['Content-Type'] = 'application/json; charset=utf-8'

  # Set Headers
  request_headers['x-ms-client-request-id'] = SecureRandom.uuid
  request_headers['accept-language'] = accept_language unless accept_language.nil?

  # Serialize Request
  request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
  request_content = self.serialize(request_mapper, image_url)
  request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil

  path_template = 'models/{model}/analyze'

  request_url = @base_url || self.base_url
  request_url = request_url.gsub('{AzureRegion}', azure_region)

  options = {
    middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
    path_params: {'model' => model},
    body: request_content,
    headers: request_headers.merge(custom_headers || {}),
    base_url: request_url
  }
  promise = self.make_request_async(:post, path_template, options)

  promise = promise.then do |result|
    http_response = result.response
    status_code = http_response.status
    response_content = http_response.body
    unless status_code == 200
      error_model = JSON.load(response_content)
      fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
    end

    result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
    # Deserialize Response
    if status_code == 200
      begin
        parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
        result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::DomainModelResults.mapper()
        result.body = self.deserialize(result_mapper, parsed_response)
      rescue Exception => e
        fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
      end
    end

    result
  end

  promise.execute
end
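
# Example (illustrative sketch, reusing the client set up above): the model
# name is interpolated into the 'models/{model}/analyze' path, so the
# celebrities model can be invoked against a placeholder image URL like this.
#
#   results = client.analyze_image_by_domain('celebrities', 'https://example.com/actor.jpg')
#   puts results.inspect unless results.nil?   # DomainModelResults payload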

#
# Recognize Text operation. When you use the Recognize Text interface, the
# response contains a field called “Operation-Location”. The
# “Operation-Location” field contains the URL that you must use for your Get
# Handwritten Text Operation Result operation.
#
# @param url [String]
# @param detect_handwriting [Boolean] If “true” is specified, handwriting
# recognition is performed. If this parameter is set to “false” or is not
# specified, printed text recognition is performed.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
#
def recognize_text(url, detect_handwriting:false, custom_headers:nil)
  response = recognize_text_async(url, detect_handwriting:detect_handwriting, custom_headers:custom_headers).value!
  nil
end

#
# Recognize Text operation. When you use the Recognize Text interface, the
# response contains a field called “Operation-Location”. The
# “Operation-Location” field contains the URL that you must use for your Get
# Handwritten Text Operation Result operation.
#
# @param url [String]
# @param detect_handwriting [Boolean] If “true” is specified, handwriting
# recognition is performed. If this parameter is set to “false” or is not
# specified, printed text recognition is performed.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def recognize_text_with_http_info(url, detect_handwriting:false, custom_headers:nil)
  recognize_text_async(url, detect_handwriting:detect_handwriting, custom_headers:custom_headers).value!
end

#
# Recognize Text operation. When you use the Recognize Text interface, the
# response contains a field called “Operation-Location”. The
# “Operation-Location” field contains the URL that you must use for your Get
# Handwritten Text Operation Result operation.
#
# @param url [String]
# @param detect_handwriting [Boolean] If “true” is specified, handwriting
# recognition is performed. If this parameter is set to “false” or is not
# specified, printed text recognition is performed.
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def recognize_text_async(url, detect_handwriting:false, custom_headers:nil)
  fail ArgumentError, 'azure_region is nil' if azure_region.nil?
  fail ArgumentError, 'url is nil' if url.nil?

  image_url = ImageUrl.new
  unless url.nil?
    image_url.url = url
  end

  request_headers = {}
  request_headers['Content-Type'] = 'application/json; charset=utf-8'

  # Set Headers
  request_headers['x-ms-client-request-id'] = SecureRandom.uuid
  request_headers['accept-language'] = accept_language unless accept_language.nil?

  # Serialize Request
  request_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageUrl.mapper()
  request_content = self.serialize(request_mapper, image_url)
  request_content = request_content != nil ? JSON.generate(request_content, quirks_mode: true) : nil

  path_template = 'recognizeText'

  request_url = @base_url || self.base_url
  request_url = request_url.gsub('{AzureRegion}', azure_region)

  options = {
    middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
    query_params: {'detectHandwriting' => detect_handwriting},
    body: request_content,
    headers: request_headers.merge(custom_headers || {}),
    base_url: request_url
  }
  promise = self.make_request_async(:post, path_template, options)

  promise = promise.then do |result|
    http_response = result.response
    status_code = http_response.status
    response_content = http_response.body
    unless status_code == 202
      error_model = JSON.load(response_content)
      fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
    end

    result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?

    result
  end

  promise.execute
end

#
# This interface is used for getting text operation result. The URL to this
# interface should be retrieved from 'Operation-Location' field returned from
# Recognize Text interface.
#
# @param operation_id [String] Id of the text operation returned in the
# response of the 'Recognize Handwritten Text'
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [TextOperationResult] operation results.
#
def get_text_operation_result(operation_id, custom_headers:nil)
  response = get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
  response.body unless response.nil?
end

#
# This interface is used for getting text operation result. The URL to this
# interface should be retrieved from 'Operation-Location' field returned from
# Recognize Text interface.
#
# @param operation_id [String] Id of the text operation returned in the
# response of the 'Recognize Handwritten Text'
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def get_text_operation_result_with_http_info(operation_id, custom_headers:nil)
  get_text_operation_result_async(operation_id, custom_headers:custom_headers).value!
end

#
# This interface is used for getting text operation result. The URL to this
# interface should be retrieved from 'Operation-Location' field returned from
# Recognize Text interface.
#
# @param operation_id [String] Id of the text operation returned in the
# response of the 'Recognize Handwritten Text'
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def get_text_operation_result_async(operation_id, custom_headers:nil)
  fail ArgumentError, 'azure_region is nil' if azure_region.nil?
  fail ArgumentError, 'operation_id is nil' if operation_id.nil?


  request_headers = {}
  request_headers['Content-Type'] = 'application/json; charset=utf-8'

  # Set Headers
  request_headers['x-ms-client-request-id'] = SecureRandom.uuid
  request_headers['accept-language'] = accept_language unless accept_language.nil?
  path_template = 'textOperations/{operationId}'

  request_url = @base_url || self.base_url
  request_url = request_url.gsub('{AzureRegion}', azure_region)

  options = {
    middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
    path_params: {'operationId' => operation_id},
    headers: request_headers.merge(custom_headers || {}),
    base_url: request_url
  }
  promise = self.make_request_async(:get, path_template, options)

  promise = promise.then do |result|
    http_response = result.response
    status_code = http_response.status
    response_content = http_response.body
    unless status_code == 200
      error_model = JSON.load(response_content)
      fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
    end

    result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
    # Deserialize Response
    if status_code == 200
      begin
        parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
        result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::TextOperationResult.mapper()
        result.body = self.deserialize(result_mapper, parsed_response)
      rescue Exception => e
        fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
      end
    end

    result
  end

  promise.execute
end
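
# Example (illustrative sketch): recognize_text only submits the job and
# returns no body, so the 'Operation-Location' header from the *_with_http_info
# variant supplies the operation id, which is then polled with
# get_text_operation_result. Reading the header through the underlying
# response object and the 'Succeeded'/'Failed' status values are assumptions
# about the service contract rather than guarantees of this client.
#
#   op = client.recognize_text_with_http_info('https://example.com/note.jpg', detect_handwriting:true)
#   operation_id = op.response.headers['Operation-Location'].split('/').last
#   result = client.get_text_operation_result(operation_id)
#   until ['Succeeded', 'Failed'].include?(result.status)
#     sleep 1
#     result = client.get_text_operation_result(operation_id)
#   end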

#
# This operation extracts a rich set of visual features based on the image
# content.
#
# @param image An image stream.
# @param visual_features [Array<VisualFeatureTypes>] A string indicating what
# visual feature types to return. Multiple values should be comma-separated.
# Valid visual feature types include:Categories - categorizes image content
# according to a taxonomy defined in documentation. Tags - tags the image with
# a detailed list of words related to the image content. Description -
# describes the image content with a complete English sentence. Faces - detects
# if faces are present. If present, generate coordinates, gender and age.
# ImageType - detects if image is clipart or a line drawing. Color - determines
# the accent color, dominant color, and whether an image is black&white.Adult -
# detects if the image is pornographic in nature (depicts nudity or a sex act).
# Sexually suggestive content is also detected.
# @param details [Enum] A string indicating which domain-specific details to
# return. Multiple values should be comma-separated. Valid visual feature types
# include:Celebrities - identifies celebrities if detected in the image.
# Possible values include: 'Celebrities', 'Landmarks'
# @param language [Enum] A string indicating which language to return. The
# service will return recognition results in specified language. If this
# parameter is not specified, the default value is "en".Supported
# languages:en - English, Default.zh - Simplified Chinese. Possible values
# include: 'en', 'zh'
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [ImageAnalysis] operation results.
#
def analyze_image_in_stream(image, visual_features:nil, details:nil, language:nil, custom_headers:nil)
  response = analyze_image_in_stream_async(image, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
  response.body unless response.nil?
end

#
# This operation extracts a rich set of visual features based on the image
# content.
#
# @param image An image stream.
# @param visual_features [Array<VisualFeatureTypes>] A string indicating what
# visual feature types to return. Multiple values should be comma-separated.
# Valid visual feature types include:Categories - categorizes image content
# according to a taxonomy defined in documentation. Tags - tags the image with
# a detailed list of words related to the image content. Description -
# describes the image content with a complete English sentence. Faces - detects
# if faces are present. If present, generate coordinates, gender and age.
# ImageType - detects if image is clipart or a line drawing. Color - determines
# the accent color, dominant color, and whether an image is black&white.Adult -
# detects if the image is pornographic in nature (depicts nudity or a sex act).
# Sexually suggestive content is also detected.
# @param details [Enum] A string indicating which domain-specific details to
# return. Multiple values should be comma-separated. Valid visual feature types
# include:Celebrities - identifies celebrities if detected in the image.
# Possible values include: 'Celebrities', 'Landmarks'
# @param language [Enum] A string indicating which language to return. The
# service will return recognition results in specified language. If this
# parameter is not specified, the default value is "en".Supported
# languages:en - English, Default.zh - Simplified Chinese. Possible values
# include: 'en', 'zh'
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def analyze_image_in_stream_with_http_info(image, visual_features:nil, details:nil, language:nil, custom_headers:nil)
  analyze_image_in_stream_async(image, visual_features:visual_features, details:details, language:language, custom_headers:custom_headers).value!
end

#
# This operation extracts a rich set of visual features based on the image
# content.
#
# @param image An image stream.
# @param visual_features [Array<VisualFeatureTypes>] A string indicating what
# visual feature types to return. Multiple values should be comma-separated.
# Valid visual feature types include:Categories - categorizes image content
# according to a taxonomy defined in documentation. Tags - tags the image with
# a detailed list of words related to the image content. Description -
# describes the image content with a complete English sentence. Faces - detects
# if faces are present. If present, generate coordinates, gender and age.
# ImageType - detects if image is clipart or a line drawing. Color - determines
# the accent color, dominant color, and whether an image is black&white.Adult -
# detects if the image is pornographic in nature (depicts nudity or a sex act).
# Sexually suggestive content is also detected.
# @param details [Enum] A string indicating which domain-specific details to
# return. Multiple values should be comma-separated. Valid visual feature types
# include:Celebrities - identifies celebrities if detected in the image.
# Possible values include: 'Celebrities', 'Landmarks'
# @param language [Enum] A string indicating which language to return. The
# service will return recognition results in specified language. If this
# parameter is not specified, the default value is "en".Supported
# languages:en - English, Default.zh - Simplified Chinese. Possible values
# include: 'en', 'zh'
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def analyze_image_in_stream_async(image, visual_features:nil, details:nil, language:nil, custom_headers:nil)
  fail ArgumentError, 'azure_region is nil' if azure_region.nil?
  fail ArgumentError, 'image is nil' if image.nil?


  request_headers = {}
  request_headers['Content-Type'] = 'application/octet-stream'

  # Set Headers
  request_headers['x-ms-client-request-id'] = SecureRandom.uuid
  request_headers['accept-language'] = accept_language unless accept_language.nil?

  # Serialize Request
  request_mapper = {
    client_side_validation: true,
    required: true,
    serialized_name: 'Image',
    type: {
      name: 'Stream'
    }
  }
  request_content = self.serialize(request_mapper, image)

  path_template = 'analyze'

  request_url = @base_url || self.base_url
  request_url = request_url.gsub('{AzureRegion}', azure_region)

  options = {
    middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
    query_params: {'visualFeatures' => visual_features.nil? ? nil : visual_features.join(','),'details' => details,'language' => language},
    body: request_content,
    headers: request_headers.merge(custom_headers || {}),
    base_url: request_url
  }
  promise = self.make_request_async(:post, path_template, options)

  promise = promise.then do |result|
    http_response = result.response
    status_code = http_response.status
    response_content = http_response.body
    unless status_code == 200
      error_model = JSON.load(response_content)
      fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
    end

    result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
    # Deserialize Response
    if status_code == 200
      begin
        parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
        result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageAnalysis.mapper()
        result.body = self.deserialize(result_mapper, parsed_response)
      rescue Exception => e
        fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
      end
    end

    result
  end

  promise.execute
end
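
# Example (illustrative sketch): the query string simply joins visual_features
# with commas, so plain strings (or the VisualFeatureTypes constants) can be
# passed; 'photo.jpg' is a placeholder local file opened in binary mode, and
# the description/captions fields follow the ImageAnalysis model.
#
#   analysis = File.open('photo.jpg', 'rb') do |stream|
#     client.analyze_image_in_stream(stream, visual_features: ['Categories', 'Description', 'Color'])
#   end
#   puts analysis.description.captions.first.text if analysis && analysis.description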

#
# This operation generates a thumbnail image with the user-specified width and
# height. By default, the service analyzes the image, identifies the region of
# interest (ROI), and generates smart cropping coordinates based on the ROI.
# Smart cropping helps when you specify an aspect ratio that differs from that
# of the input image. A successful response contains the thumbnail image
# binary. If the request failed, the response contains an error code and a
# message to help determine what went wrong.
#
# @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
# Recommended minimum of 50.
# @param height [Integer] Height of the thumbnail. It must be between 1 and
# 1024. Recommended minimum of 50.
# @param image An image stream.
# @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [NOT_IMPLEMENTED] operation results.
#
def generate_thumbnail_in_stream(width, height, image, smart_cropping:false, custom_headers:nil)
  response = generate_thumbnail_in_stream_async(width, height, image, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
  response.body unless response.nil?
end

#
# This operation generates a thumbnail image with the user-specified width and
# height. By default, the service analyzes the image, identifies the region of
# interest (ROI), and generates smart cropping coordinates based on the ROI.
# Smart cropping helps when you specify an aspect ratio that differs from that
# of the input image. A successful response contains the thumbnail image
# binary. If the request failed, the response contains an error code and a
# message to help determine what went wrong.
#
# @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
# Recommended minimum of 50.
# @param height [Integer] Height of the thumbnail. It must be between 1 and
# 1024. Recommended minimum of 50.
# @param image An image stream.
# @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
# @param custom_headers [Hash{String => String}] A hash of custom headers that
# will be added to the HTTP request.
#
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
#
def generate_thumbnail_in_stream_with_http_info(width, height, image, smart_cropping:false, custom_headers:nil)
  generate_thumbnail_in_stream_async(width, height, image, smart_cropping:smart_cropping, custom_headers:custom_headers).value!
end

#
# This operation generates a thumbnail image with the user-specified width and
# height. By default, the service analyzes the image, identifies the region of
# interest (ROI), and generates smart cropping coordinates based on the ROI.
# Smart cropping helps when you specify an aspect ratio that differs from that
# of the input image. A successful response contains the thumbnail image
# binary. If the request failed, the response contains an error code and a
# message to help determine what went wrong.
#
# @param width [Integer] Width of the thumbnail. It must be between 1 and 1024.
# Recommended minimum of 50.
# @param height [Integer] Height of the thumbnail. It must be between 1 and
# 1024. Recommended minimum of 50.
# @param image An image stream.
# @param smart_cropping [Boolean] Boolean flag for enabling smart cropping.
# @param [Hash{String => String}] A hash of custom headers that will be added
# to the HTTP request.
#
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
#
def generate_thumbnail_in_stream_async(width, height, image, smart_cropping:false, custom_headers:nil)
  fail ArgumentError, 'azure_region is nil' if azure_region.nil?
  fail ArgumentError, 'width is nil' if width.nil?
  fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !width.nil? && width > 1023
  fail ArgumentError, "'width' should satisfy the constraint - 'InclusiveMinimum': '1'" if !width.nil? && width < 1
  fail ArgumentError, 'height is nil' if height.nil?
  fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMaximum': '1023'" if !height.nil? && height > 1023
  fail ArgumentError, "'height' should satisfy the constraint - 'InclusiveMinimum': '1'" if !height.nil? && height < 1
  fail ArgumentError, 'image is nil' if image.nil?


  request_headers = {}
  request_headers['Content-Type'] = 'application/octet-stream'

  # Set Headers
  request_headers['x-ms-client-request-id'] = SecureRandom.uuid
  request_headers['accept-language'] = accept_language unless accept_language.nil?

  # Serialize Request
  request_mapper = {
    client_side_validation: true,
    required: true,
    serialized_name: 'Image',
    type: {
      name: 'Stream'
    }
  }
  request_content = self.serialize(request_mapper, image)

  path_template = 'generateThumbnail'

  request_url = @base_url || self.base_url
  request_url = request_url.gsub('{AzureRegion}', azure_region)

  options = {
    middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
    query_params: {'width' => width,'height' => height,'smartCropping' => smart_cropping},
    body: request_content,
    headers: request_headers.merge(custom_headers || {}),
    base_url: request_url
  }
  promise = self.make_request_async(:post, path_template, options)

  promise = promise.then do |result|
    http_response = result.response
    status_code = http_response.status
    response_content = http_response.body
    unless status_code == 200
      error_model = JSON.load(response_content)
      fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
    end

    result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
    # Deserialize Response
    if status_code == 200
      begin
        parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
        result_mapper = {
          client_side_validation: true,
          required: false,
          serialized_name: 'parsed_response',
          type: {
            name: 'Stream'
          }
        }
        result.body = self.deserialize(result_mapper, parsed_response)
      rescue Exception => e
        fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
      end
    end

    result
  end

  promise.execute
end
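
# Example (illustrative sketch): per the 'Stream' result mapper above, the
# deserialized body is the raw thumbnail binary, so it can be written straight
# to disk; the file names are placeholders.
#
#   thumb = File.open('photo.jpg', 'rb') do |stream|
#     client.generate_thumbnail_in_stream(100, 100, stream, smart_cropping:true)
#   end
#   File.binwrite('thumb_100x100.jpg', thumb) unless thumb.nil?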
|
1542
|
+
|
1543
|
+
#
|
1544
|
+
# Optical Character Recognition (OCR) detects printed text in an image and
|
1545
|
+
# extracts the recognized characters into a machine-usable character stream.
|
1546
|
+
# Upon success, the OCR results will be returned. Upon failure, the error code
|
1547
|
+
# together with an error message will be returned. The error code can be one of
|
1548
|
+
# InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
|
1549
|
+
# NotSupportedLanguage, or InternalServerError.
|
1550
|
+
#
|
1551
|
+
# @param detect_orientation [Boolean] Whether detect the text orientation in
|
1552
|
+
# the image. With detectOrientation=true the OCR service tries to detect the
|
1553
|
+
# image orientation and correct it before further processing (e.g. if it's
|
1554
|
+
# upside-down).
|
1555
|
+
# @param image An image stream.
|
1556
|
+
# @param language [OcrLanguages] The BCP-47 language code of the text to be
|
1557
|
+
# detected in the image. The default value is 'unk'. Possible values include:
|
1558
|
+
# 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
|
1559
|
+
# 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
|
1560
|
+
# 'sr-Cyrl', 'sr-Latn', 'sk'
|
1561
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
1562
|
+
# will be added to the HTTP request.
|
1563
|
+
#
|
1564
|
+
# @return [OcrResult] operation results.
|
1565
|
+
#
|
1566
|
+
def recognize_printed_text_in_stream(detect_orientation, image, language:nil, custom_headers:nil)
|
1567
|
+
response = recognize_printed_text_in_stream_async(detect_orientation, image, language:language, custom_headers:custom_headers).value!
|
1568
|
+
response.body unless response.nil?
|
1569
|
+
end
|
1570
|
+
|
1571
|
+
#
|
1572
|
+
# Optical Character Recognition (OCR) detects printed text in an image and
|
1573
|
+
# extracts the recognized characters into a machine-usable character stream.
|
1574
|
+
# Upon success, the OCR results will be returned. Upon failure, the error code
|
1575
|
+
# together with an error message will be returned. The error code can be one of
|
1576
|
+
# InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
|
1577
|
+
# NotSupportedLanguage, or InternalServerError.
|
1578
|
+
#
|
1579
|
+
# @param detect_orientation [Boolean] Whether detect the text orientation in
|
1580
|
+
# the image. With detectOrientation=true the OCR service tries to detect the
|
1581
|
+
# image orientation and correct it before further processing (e.g. if it's
|
1582
|
+
# upside-down).
|
1583
|
+
# @param image An image stream.
|
1584
|
+
# @param language [OcrLanguages] The BCP-47 language code of the text to be
|
1585
|
+
# detected in the image. The default value is 'unk'. Possible values include:
|
1586
|
+
# 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
|
1587
|
+
# 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
|
1588
|
+
# 'sr-Cyrl', 'sr-Latn', 'sk'
|
1589
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
1590
|
+
# will be added to the HTTP request.
|
1591
|
+
#
|
1592
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
1593
|
+
#
|
1594
|
+
def recognize_printed_text_in_stream_with_http_info(detect_orientation, image, language:nil, custom_headers:nil)
|
1595
|
+
recognize_printed_text_in_stream_async(detect_orientation, image, language:language, custom_headers:custom_headers).value!
|
1596
|
+
end
|
1597
|
+
|
1598
|
+
#
|
1599
|
+
# Optical Character Recognition (OCR) detects printed text in an image and
|
1600
|
+
# extracts the recognized characters into a machine-usable character stream.
|
1601
|
+
# Upon success, the OCR results will be returned. Upon failure, the error code
|
1602
|
+
# together with an error message will be returned. The error code can be one of
|
1603
|
+
# InvalidImageUrl, InvalidImageFormat, InvalidImageSize, NotSupportedImage,
|
1604
|
+
# NotSupportedLanguage, or InternalServerError.
|
1605
|
+
#
|
1606
|
+
# @param detect_orientation [Boolean] Whether detect the text orientation in
|
1607
|
+
# the image. With detectOrientation=true the OCR service tries to detect the
|
1608
|
+
# image orientation and correct it before further processing (e.g. if it's
|
1609
|
+
# upside-down).
|
1610
|
+
# @param image An image stream.
|
1611
|
+
# @param language [OcrLanguages] The BCP-47 language code of the text to be
|
1612
|
+
# detected in the image. The default value is 'unk'. Possible values include:
|
1613
|
+
# 'unk', 'zh-Hans', 'zh-Hant', 'cs', 'da', 'nl', 'en', 'fi', 'fr', 'de', 'el',
|
1614
|
+
# 'hu', 'it', 'ja', 'ko', 'nb', 'pl', 'pt', 'ru', 'es', 'sv', 'tr', 'ar', 'ro',
|
1615
|
+
# 'sr-Cyrl', 'sr-Latn', 'sk'
|
1616
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
1617
|
+
# to the HTTP request.
|
1618
|
+
#
|
1619
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
1620
|
+
#
|
1621
|
+
def recognize_printed_text_in_stream_async(detect_orientation, image, language:nil, custom_headers:nil)
|
1622
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
1623
|
+
fail ArgumentError, 'detect_orientation is nil' if detect_orientation.nil?
|
1624
|
+
fail ArgumentError, 'image is nil' if image.nil?
|
1625
|
+
|
1626
|
+
|
1627
|
+
request_headers = {}
|
1628
|
+
request_headers['Content-Type'] = 'application/octet-stream'
|
1629
|
+
|
1630
|
+
# Set Headers
|
1631
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
1632
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
1633
|
+
|
1634
|
+
# Serialize Request
|
1635
|
+
request_mapper = {
|
1636
|
+
client_side_validation: true,
|
1637
|
+
required: true,
|
1638
|
+
serialized_name: 'Image',
|
1639
|
+
type: {
|
1640
|
+
name: 'Stream'
|
1641
|
+
}
|
1642
|
+
}
|
1643
|
+
request_content = self.serialize(request_mapper, image)
|
1644
|
+
|
1645
|
+
path_template = 'ocr'
|
1646
|
+
|
1647
|
+
request_url = @base_url || self.base_url
|
1648
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
1649
|
+
|
1650
|
+
options = {
|
1651
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
1652
|
+
query_params: {'language' => language,'detectOrientation' => detect_orientation},
|
1653
|
+
body: request_content,
|
1654
|
+
headers: request_headers.merge(custom_headers || {}),
|
1655
|
+
base_url: request_url
|
1656
|
+
}
|
1657
|
+
promise = self.make_request_async(:post, path_template, options)
|
1658
|
+
|
1659
|
+
promise = promise.then do |result|
|
1660
|
+
http_response = result.response
|
1661
|
+
status_code = http_response.status
|
1662
|
+
response_content = http_response.body
|
1663
|
+
unless status_code == 200
|
1664
|
+
error_model = JSON.load(response_content)
|
1665
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
1666
|
+
end
|
1667
|
+
|
1668
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
1669
|
+
# Deserialize Response
|
1670
|
+
if status_code == 200
|
1671
|
+
begin
|
1672
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
1673
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::OcrResult.mapper()
|
1674
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
1675
|
+
rescue Exception => e
|
1676
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
1677
|
+
end
|
1678
|
+
end
|
1679
|
+
|
1680
|
+
result
|
1681
|
+
end
|
1682
|
+
|
1683
|
+
promise.execute
|
1684
|
+
end
|
1685
|
+
|
1686
|
+
#
|
1687
|
+
# This operation generates a description of an image in human readable language
|
1688
|
+
# with complete sentences. The description is based on a collection of content
|
1689
|
+
# tags, which are also returned by the operation. More than one description can
|
1690
|
+
# be generated for each image. Descriptions are ordered by their confidence
|
1691
|
+
# score. All descriptions are in English. Two input methods are supported --
|
1692
|
+
# (1) Uploading an image or (2) specifying an image URL.A successful response
|
1693
|
+
# will be returned in JSON. If the request failed, the response will contain
|
1694
|
+
# an error code and a message to help understand what went wrong.
|
1695
|
+
#
|
1696
|
+
# @param image An image stream.
|
1697
|
+
# @param max_candidates [String] Maximum number of candidate descriptions to be
|
1698
|
+
# returned. The default is 1.
|
1699
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
1700
|
+
# will be added to the HTTP request.
|
1701
|
+
#
|
1702
|
+
# @return [ImageDescription] operation results.
|
1703
|
+
#
|
1704
|
+
def describe_image_in_stream(image, max_candidates:'1', custom_headers:nil)
|
1705
|
+
response = describe_image_in_stream_async(image, max_candidates:max_candidates, custom_headers:custom_headers).value!
|
1706
|
+
response.body unless response.nil?
|
1707
|
+
end
|
1708
|
+
|
1709
|
+
#
|
1710
|
+
# This operation generates a description of an image in human readable language
|
1711
|
+
# with complete sentences. The description is based on a collection of content
|
1712
|
+
# tags, which are also returned by the operation. More than one description can
|
1713
|
+
# be generated for each image. Descriptions are ordered by their confidence
|
1714
|
+
# score. All descriptions are in English. Two input methods are supported --
|
1715
|
+
# (1) Uploading an image or (2) specifying an image URL.A successful response
|
1716
|
+
# will be returned in JSON. If the request failed, the response will contain
|
1717
|
+
# an error code and a message to help understand what went wrong.
|
1718
|
+
#
|
1719
|
+
# @param image An image stream.
|
1720
|
+
# @param max_candidates [String] Maximum number of candidate descriptions to be
|
1721
|
+
# returned. The default is 1.
|
1722
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
1723
|
+
# will be added to the HTTP request.
|
1724
|
+
#
|
1725
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
1726
|
+
#
|
1727
|
+
def describe_image_in_stream_with_http_info(image, max_candidates:'1', custom_headers:nil)
|
1728
|
+
describe_image_in_stream_async(image, max_candidates:max_candidates, custom_headers:custom_headers).value!
|
1729
|
+
end
|
1730
|
+
|
1731
|
+
#
|
1732
|
+
# This operation generates a description of an image in human readable language
|
1733
|
+
# with complete sentences. The description is based on a collection of content
|
1734
|
+
# tags, which are also returned by the operation. More than one description can
|
1735
|
+
# be generated for each image. Descriptions are ordered by their confidence
|
1736
|
+
# score. All descriptions are in English. Two input methods are supported --
|
1737
|
+
# (1) Uploading an image or (2) specifying an image URL.A successful response
|
1738
|
+
# will be returned in JSON. If the request failed, the response will contain
|
1739
|
+
# an error code and a message to help understand what went wrong.
|
1740
|
+
#
|
1741
|
+
# @param image An image stream.
|
1742
|
+
# @param max_candidates [String] Maximum number of candidate descriptions to be
|
1743
|
+
# returned. The default is 1.
|
1744
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
1745
|
+
# to the HTTP request.
|
1746
|
+
#
|
1747
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
1748
|
+
#
|
1749
|
+
def describe_image_in_stream_async(image, max_candidates:'1', custom_headers:nil)
|
1750
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
1751
|
+
fail ArgumentError, 'image is nil' if image.nil?
|
1752
|
+
|
1753
|
+
|
1754
|
+
request_headers = {}
|
1755
|
+
request_headers['Content-Type'] = 'application/octet-stream'
|
1756
|
+
|
1757
|
+
# Set Headers
|
1758
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
1759
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
1760
|
+
|
1761
|
+
# Serialize Request
|
1762
|
+
request_mapper = {
|
1763
|
+
client_side_validation: true,
|
1764
|
+
required: true,
|
1765
|
+
serialized_name: 'Image',
|
1766
|
+
type: {
|
1767
|
+
name: 'Stream'
|
1768
|
+
}
|
1769
|
+
}
|
1770
|
+
request_content = self.serialize(request_mapper, image)
|
1771
|
+
|
1772
|
+
path_template = 'describe'
|
1773
|
+
|
1774
|
+
request_url = @base_url || self.base_url
|
1775
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
1776
|
+
|
1777
|
+
options = {
|
1778
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
1779
|
+
query_params: {'maxCandidates' => max_candidates},
|
1780
|
+
body: request_content,
|
1781
|
+
headers: request_headers.merge(custom_headers || {}),
|
1782
|
+
base_url: request_url
|
1783
|
+
}
|
1784
|
+
promise = self.make_request_async(:post, path_template, options)
|
1785
|
+
|
1786
|
+
promise = promise.then do |result|
|
1787
|
+
http_response = result.response
|
1788
|
+
status_code = http_response.status
|
1789
|
+
response_content = http_response.body
|
1790
|
+
unless status_code == 200
|
1791
|
+
error_model = JSON.load(response_content)
|
1792
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
1793
|
+
end
|
1794
|
+
|
1795
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
1796
|
+
# Deserialize Response
|
1797
|
+
if status_code == 200
|
1798
|
+
begin
|
1799
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
1800
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::ImageDescription.mapper()
|
1801
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
1802
|
+
rescue Exception => e
|
1803
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
1804
|
+
end
|
1805
|
+
end
|
1806
|
+
|
1807
|
+
result
|
1808
|
+
end
|
1809
|
+
|
1810
|
+
promise.execute
|
1811
|
+
end
|
1812
|
+
|
1813
|
+
#
|
1814
|
+
# This operation generates a list of words, or tags, that are relevant to the
|
1815
|
+
# content of the supplied image. The Computer Vision API can return tags based
|
1816
|
+
# on objects, living beings, scenery or actions found in images. Unlike
|
1817
|
+
# categories, tags are not organized according to a hierarchical classification
|
1818
|
+
# system, but correspond to image content. Tags may contain hints to avoid
|
1819
|
+
# ambiguity or provide context, for example the tag “cello” may be accompanied
|
1820
|
+
# by the hint “musical instrument”. All tags are in English.
|
1821
|
+
#
|
1822
|
+
# @param image An image stream.
|
1823
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
1824
|
+
# will be added to the HTTP request.
|
1825
|
+
#
|
1826
|
+
# @return [TagResult] operation results.
|
1827
|
+
#
|
1828
|
+
def tag_image_in_stream(image, custom_headers:nil)
|
1829
|
+
response = tag_image_in_stream_async(image, custom_headers:custom_headers).value!
|
1830
|
+
response.body unless response.nil?
|
1831
|
+
end
|
1832
|
+
|
1833
|
+
#
|
1834
|
+
# This operation generates a list of words, or tags, that are relevant to the
|
1835
|
+
# content of the supplied image. The Computer Vision API can return tags based
|
1836
|
+
# on objects, living beings, scenery or actions found in images. Unlike
|
1837
|
+
# categories, tags are not organized according to a hierarchical classification
|
1838
|
+
# system, but correspond to image content. Tags may contain hints to avoid
|
1839
|
+
# ambiguity or provide context, for example the tag “cello” may be accompanied
|
1840
|
+
# by the hint “musical instrument”. All tags are in English.
|
1841
|
+
#
|
1842
|
+
# @param image An image stream.
|
1843
|
+
# @param custom_headers [Hash{String => String}] A hash of custom headers that
|
1844
|
+
# will be added to the HTTP request.
|
1845
|
+
#
|
1846
|
+
# @return [MsRestAzure::AzureOperationResponse] HTTP response information.
|
1847
|
+
#
|
1848
|
+
def tag_image_in_stream_with_http_info(image, custom_headers:nil)
|
1849
|
+
tag_image_in_stream_async(image, custom_headers:custom_headers).value!
|
1850
|
+
end
|
1851
|
+
|
1852
|
+
#
|
1853
|
+
# This operation generates a list of words, or tags, that are relevant to the
|
1854
|
+
# content of the supplied image. The Computer Vision API can return tags based
|
1855
|
+
# on objects, living beings, scenery or actions found in images. Unlike
|
1856
|
+
# categories, tags are not organized according to a hierarchical classification
|
1857
|
+
# system, but correspond to image content. Tags may contain hints to avoid
|
1858
|
+
# ambiguity or provide context, for example the tag “cello” may be accompanied
|
1859
|
+
# by the hint “musical instrument”. All tags are in English.
|
1860
|
+
#
|
1861
|
+
# @param image An image stream.
|
1862
|
+
# @param [Hash{String => String}] A hash of custom headers that will be added
|
1863
|
+
# to the HTTP request.
|
1864
|
+
#
|
1865
|
+
# @return [Concurrent::Promise] Promise object which holds the HTTP response.
|
1866
|
+
#
|
1867
|
+
def tag_image_in_stream_async(image, custom_headers:nil)
|
1868
|
+
fail ArgumentError, 'azure_region is nil' if azure_region.nil?
|
1869
|
+
fail ArgumentError, 'image is nil' if image.nil?
|
1870
|
+
|
1871
|
+
|
1872
|
+
request_headers = {}
|
1873
|
+
request_headers['Content-Type'] = 'application/octet-stream'
|
1874
|
+
|
1875
|
+
# Set Headers
|
1876
|
+
request_headers['x-ms-client-request-id'] = SecureRandom.uuid
|
1877
|
+
request_headers['accept-language'] = accept_language unless accept_language.nil?
|
1878
|
+
|
1879
|
+
# Serialize Request
|
1880
|
+
request_mapper = {
|
1881
|
+
client_side_validation: true,
|
1882
|
+
required: true,
|
1883
|
+
serialized_name: 'Image',
|
1884
|
+
type: {
|
1885
|
+
name: 'Stream'
|
1886
|
+
}
|
1887
|
+
}
|
1888
|
+
request_content = self.serialize(request_mapper, image)
|
1889
|
+
|
1890
|
+
path_template = 'tag'
|
1891
|
+
|
1892
|
+
request_url = @base_url || self.base_url
|
1893
|
+
request_url = request_url.gsub('{AzureRegion}', azure_region)
|
1894
|
+
|
1895
|
+
options = {
|
1896
|
+
middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
|
1897
|
+
body: request_content,
|
1898
|
+
headers: request_headers.merge(custom_headers || {}),
|
1899
|
+
base_url: request_url
|
1900
|
+
}
|
1901
|
+
promise = self.make_request_async(:post, path_template, options)
|
1902
|
+
|
1903
|
+
promise = promise.then do |result|
|
1904
|
+
http_response = result.response
|
1905
|
+
status_code = http_response.status
|
1906
|
+
response_content = http_response.body
|
1907
|
+
unless status_code == 200
|
1908
|
+
error_model = JSON.load(response_content)
|
1909
|
+
fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
|
1910
|
+
end
|
1911
|
+
|
1912
|
+
result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
|
1913
|
+
# Deserialize Response
|
1914
|
+
if status_code == 200
|
1915
|
+
begin
|
1916
|
+
parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
|
1917
|
+
result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::TagResult.mapper()
|
1918
|
+
result.body = self.deserialize(result_mapper, parsed_response)
|
1919
|
+
rescue Exception => e
|
1920
|
+
fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
|
1921
|
+
end
|
1922
|
+
end
|
1923
|
+
|
1924
|
+
result
|
1925
|
+
end
|
1926
|
+
|
1927
|
+
promise.execute
|
1928
|
+
end
|
1929
|
+
|
1930
|
+
    #
    # This operation recognizes content within an image by applying a
    # domain-specific model. The list of domain-specific models that are supported
    # by the Computer Vision API can be retrieved using the /models GET request.
    # Currently, the API only provides a single domain-specific model: celebrities.
    # Two input methods are supported -- (1) Uploading an image or (2) specifying
    # an image URL. A successful response will be returned in JSON. If the request
    # fails, the response will contain an error code and a message to help
    # understand what went wrong.
    #
    # @param model [String] The domain-specific content to recognize.
    # @param image An image stream.
    # @param custom_headers [Hash{String => String}] A hash of custom headers that
    # will be added to the HTTP request.
    #
    # @return [DomainModelResults] operation results.
    #
    def analyze_image_by_domain_in_stream(model, image, custom_headers:nil)
      response = analyze_image_by_domain_in_stream_async(model, image, custom_headers:custom_headers).value!
      response.body unless response.nil?
    end

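    # A usage sketch for the synchronous wrapper above. `client` and the file path
    # are illustrative placeholders; 'celebrities' is the single domain-specific
    # model named in the operation description:
    #
    #   image_stream = File.open('photo.jpg', 'rb')
    #   results = client.analyze_image_by_domain_in_stream('celebrities', image_stream)
    #   # `results` is the deserialized DomainModelResults for the uploaded image.
    #
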
    #
    # This operation recognizes content within an image by applying a
    # domain-specific model. The list of domain-specific models that are supported
    # by the Computer Vision API can be retrieved using the /models GET request.
    # Currently, the API only provides a single domain-specific model: celebrities.
    # Two input methods are supported -- (1) Uploading an image or (2) specifying
    # an image URL. A successful response will be returned in JSON. If the request
    # fails, the response will contain an error code and a message to help
    # understand what went wrong.
    #
    # @param model [String] The domain-specific content to recognize.
    # @param image An image stream.
    # @param custom_headers [Hash{String => String}] A hash of custom headers that
    # will be added to the HTTP request.
    #
    # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
    #
    def analyze_image_by_domain_in_stream_with_http_info(model, image, custom_headers:nil)
      analyze_image_by_domain_in_stream_async(model, image, custom_headers:custom_headers).value!
    end

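    # The *_with_http_info variants return the full MsRestAzure::AzureOperationResponse,
    # which is useful when the HTTP status or service headers matter. A sketch with
    # the same placeholder names as above:
    #
    #   op = client.analyze_image_by_domain_in_stream_with_http_info('celebrities', image_stream)
    #   status = op.response.status   # raw HTTP status code
    #   request_id = op.request_id    # populated from the x-ms-request-id header below
    #
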
    #
    # This operation recognizes content within an image by applying a
    # domain-specific model. The list of domain-specific models that are supported
    # by the Computer Vision API can be retrieved using the /models GET request.
    # Currently, the API only provides a single domain-specific model: celebrities.
    # Two input methods are supported -- (1) Uploading an image or (2) specifying
    # an image URL. A successful response will be returned in JSON. If the request
    # fails, the response will contain an error code and a message to help
    # understand what went wrong.
    #
    # @param model [String] The domain-specific content to recognize.
    # @param image An image stream.
    # @param custom_headers [Hash{String => String}] A hash of custom headers that
    # will be added to the HTTP request.
    #
    # @return [Concurrent::Promise] Promise object which holds the HTTP response.
    #
    def analyze_image_by_domain_in_stream_async(model, image, custom_headers:nil)
      fail ArgumentError, 'azure_region is nil' if azure_region.nil?
      fail ArgumentError, 'model is nil' if model.nil?
      fail ArgumentError, 'image is nil' if image.nil?


      request_headers = {}
      request_headers['Content-Type'] = 'application/octet-stream'

      # Set Headers
      request_headers['x-ms-client-request-id'] = SecureRandom.uuid
      request_headers['accept-language'] = accept_language unless accept_language.nil?

      # Serialize Request
      request_mapper = {
        client_side_validation: true,
        required: true,
        serialized_name: 'Image',
        type: {
          name: 'Stream'
        }
      }
      request_content = self.serialize(request_mapper, image)

      path_template = 'models/{model}/analyze'

      request_url = @base_url || self.base_url
      request_url = request_url.gsub('{AzureRegion}', azure_region)

      options = {
          middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
          path_params: {'model' => model},
          body: request_content,
          headers: request_headers.merge(custom_headers || {}),
          base_url: request_url
      }
      promise = self.make_request_async(:post, path_template, options)

      promise = promise.then do |result|
        http_response = result.response
        status_code = http_response.status
        response_content = http_response.body
        unless status_code == 200
          error_model = JSON.load(response_content)
          fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
        end

        result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?
        # Deserialize Response
        if status_code == 200
          begin
            parsed_response = response_content.to_s.empty? ? nil : JSON.load(response_content)
            result_mapper = Azure::CognitiveServices::ComputerVision::V1_0::Models::DomainModelResults.mapper()
            result.body = self.deserialize(result_mapper, parsed_response)
          rescue Exception => e
            fail MsRest::DeserializationError.new('Error occurred in deserializing the response', e.message, e.backtrace, result)
          end
        end

        result
      end

      promise.execute
    end

    #
    # Recognize Text operation. When you use the Recognize Text interface, the
    # response contains a field called “Operation-Location”. The
    # “Operation-Location” field contains the URL that you must use for your Get
    # Handwritten Text Operation Result operation.
    #
    # @param image An image stream.
    # @param detect_handwriting [Boolean] If “true” is specified, handwriting
    # recognition is performed. If this parameter is set to “false” or is not
    # specified, printed text recognition is performed.
    # @param custom_headers [Hash{String => String}] A hash of custom headers that
    # will be added to the HTTP request.
    #
    #
    def recognize_text_in_stream(image, detect_handwriting:false, custom_headers:nil)
      response = recognize_text_in_stream_async(image, detect_handwriting:detect_handwriting, custom_headers:custom_headers).value!
      nil
    end

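    # A usage sketch for the synchronous wrapper above. It returns nil because the
    # service answers with 202 Accepted and no deserialized body; the interesting
    # output is the “Operation-Location” header, shown with the *_with_http_info
    # variant further below. Placeholder names as before:
    #
    #   image_stream = File.open('handwritten-note.png', 'rb')
    #   client.recognize_text_in_stream(image_stream, detect_handwriting: true)
    #
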
    #
    # Recognize Text operation. When you use the Recognize Text interface, the
    # response contains a field called “Operation-Location”. The
    # “Operation-Location” field contains the URL that you must use for your Get
    # Handwritten Text Operation Result operation.
    #
    # @param image An image stream.
    # @param detect_handwriting [Boolean] If “true” is specified, handwriting
    # recognition is performed. If this parameter is set to “false” or is not
    # specified, printed text recognition is performed.
    # @param custom_headers [Hash{String => String}] A hash of custom headers that
    # will be added to the HTTP request.
    #
    # @return [MsRestAzure::AzureOperationResponse] HTTP response information.
    #
    def recognize_text_in_stream_with_http_info(image, detect_handwriting:false, custom_headers:nil)
      recognize_text_in_stream_async(image, detect_handwriting:detect_handwriting, custom_headers:custom_headers).value!
    end

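    # A sketch for retrieving the “Operation-Location” URL described above via the
    # *_with_http_info variant. Header access here is an assumption modelled on the
    # way this client reads 'x-ms-request-id' from the raw response; names remain
    # placeholders:
    #
    #   op = client.recognize_text_in_stream_with_http_info(image_stream, detect_handwriting: true)
    #   operation_location = op.response['Operation-Location']
    #   # Poll operation_location with the same subscription key until the text
    #   # recognition result becomes available.
    #
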
    #
    # Recognize Text operation. When you use the Recognize Text interface, the
    # response contains a field called “Operation-Location”. The
    # “Operation-Location” field contains the URL that you must use for your Get
    # Handwritten Text Operation Result operation.
    #
    # @param image An image stream.
    # @param detect_handwriting [Boolean] If “true” is specified, handwriting
    # recognition is performed. If this parameter is set to “false” or is not
    # specified, printed text recognition is performed.
    # @param custom_headers [Hash{String => String}] A hash of custom headers that
    # will be added to the HTTP request.
    #
    # @return [Concurrent::Promise] Promise object which holds the HTTP response.
    #
    def recognize_text_in_stream_async(image, detect_handwriting:false, custom_headers:nil)
      fail ArgumentError, 'azure_region is nil' if azure_region.nil?
      fail ArgumentError, 'image is nil' if image.nil?


      request_headers = {}
      request_headers['Content-Type'] = 'application/octet-stream'

      # Set Headers
      request_headers['x-ms-client-request-id'] = SecureRandom.uuid
      request_headers['accept-language'] = accept_language unless accept_language.nil?

      # Serialize Request
      request_mapper = {
        client_side_validation: true,
        required: true,
        serialized_name: 'Image',
        type: {
          name: 'Stream'
        }
      }
      request_content = self.serialize(request_mapper, image)

      path_template = 'recognizeText'

      request_url = @base_url || self.base_url
      request_url = request_url.gsub('{AzureRegion}', azure_region)

      options = {
          middlewares: [[MsRest::RetryPolicyMiddleware, times: 3, retry: 0.02], [:cookie_jar]],
          query_params: {'detectHandwriting' => detect_handwriting},
          body: request_content,
          headers: request_headers.merge(custom_headers || {}),
          base_url: request_url
      }
      promise = self.make_request_async(:post, path_template, options)

      promise = promise.then do |result|
        http_response = result.response
        status_code = http_response.status
        response_content = http_response.body
        unless status_code == 202
          error_model = JSON.load(response_content)
          fail MsRest::HttpOperationError.new(result.request, http_response, error_model)
        end

        result.request_id = http_response['x-ms-request-id'] unless http_response['x-ms-request-id'].nil?

        result
      end

      promise.execute
    end


    private
    #
    # Adds telemetry information.
    #
    def add_telemetry
      sdk_information = 'azure_cognitiveservices_computervision'
      sdk_information = "#{sdk_information}/0.16.0"
      add_user_agent_information(sdk_information)
    end
  end
end