ibm_watson 0.4.1 → 0.4.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/lib/ibm_watson/speech_to_text_v1.rb +26 -26
- data/lib/ibm_watson/version.rb +1 -1
- data/test/appveyor_status.rb +10 -3
- data/test/integration/test_speech_to_text_v1.rb +5 -5
- data/test/unit/test_personality_insights_v3.rb +2 -2
- data/test/unit/test_vcap_using_personality_insights.rb +2 -2
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA256:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 3e5beca0130db02ef8d574f58ebe96eef6cb879a7b656146405a03854f972b56
|
4
|
+
data.tar.gz: 7a1c2370c981076c0ad4be3d00123facac33bd9975fc675095f27a2730bac005
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: 728c141e67558420faf66cb1b204893309d73dbfa65bd5c16620c40657ec9101706168a13d404c2961e36a29992e4360125e9b59988138fec1c2e28ecd84f01f
|
7
|
+
data.tar.gz: 5d4d8fa466ee58d68dd5275bfb0f0ef9d113f7b05d1cf26c017b1132b81ab92b9a1a4b97c50dfe907ca345a075780d0deef375ded62f6b45bc5a0a1e7173103c
|
@@ -426,17 +426,17 @@ module IBMWatson
|
|
426
426
|
end
|
427
427
|
|
428
428
|
##
|
429
|
-
# @!method recognize_using_websocket(audio: nil,chunk_data: false,
|
429
|
+
# @!method recognize_using_websocket(content_type:,recognize_callback:,audio: nil,chunk_data: false,model: nil,customization_id: nil,acoustic_customization_id: nil,customization_weight: nil,base_model_version: nil,inactivity_timeout: nil,interim_results: nil,keywords: nil,keywords_threshold: nil,max_alternatives: nil,word_alternatives_threshold: nil,word_confidence: nil,timestamps: nil,profanity_filter: nil,smart_formatting: nil,speaker_labels: nil)
|
430
430
|
# Sends audio for speech recognition using web sockets.
|
431
|
+
# @param content_type [String] The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data.
|
432
|
+
# @param recognize_callback [RecognizeCallback] The instance handling events returned from the service.
|
431
433
|
# @param audio [IO] Audio to transcribe in the format specified by the `Content-Type` header.
|
432
434
|
# @param chunk_data [Boolean] If true, then the WebSocketClient will expect to receive data in chunks rather than as a single audio file
|
433
|
-
# @param content_type [String] The type of the input: audio/basic, audio/flac, audio/l16, audio/mp3, audio/mpeg, audio/mulaw, audio/ogg, audio/ogg;codecs=opus, audio/ogg;codecs=vorbis, audio/wav, audio/webm, audio/webm;codecs=opus, audio/webm;codecs=vorbis, or multipart/form-data.
|
434
435
|
# @param model [String] The identifier of the model to be used for the recognition request.
|
435
|
-
# @param recognize_callback [RecognizeCallback] The instance handling events returned from the service.
|
436
436
|
# @param customization_id [String] The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used.
|
437
437
|
# @param acoustic_customization_id [String] The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used.
|
438
438
|
# @param customization_weight [Float] If you specify a `customization_id` with the request, you can use the `customization_weight` parameter to tell the service how much weight to give to words from the custom language model compared to those from the base model for speech recognition. Specify a value between 0.0 and 1.0. Unless a different customization weight was specified for the custom model when it was trained, the default value is 0.3. A customization weight that you specify overrides a weight that was specified when the custom model was trained. The default value yields the best performance in general. Assign a higher value if your audio makes frequent use of OOV words from the custom model. Use caution when setting the weight: a higher value can improve the accuracy of phrases from the custom model's domain, but it can negatively affect performance on non-domain phrases.
|
439
|
-
# @param
|
439
|
+
# @param base_model_version [String] The version of the specified base `model` that is to be used for speech recognition. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
|
440
440
|
# @param inactivity_timeout [Integer] The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity.
|
441
441
|
# @param interim_results [Boolean] Send back non-final previews of each "sentence" as it is being processed. These results are ignored in text mode.
|
442
442
|
# @param keywords [Array<String>] Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. Omit the parameter or specify an empty array if you do not need to spot keywords.
|
@@ -450,25 +450,25 @@ module IBMWatson
|
|
450
450
|
# @param speaker_labels [Boolean] Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
|
451
451
|
# @return [WebSocketClient] Returns a new WebSocketClient object
|
452
452
|
def recognize_using_websocket(
|
453
|
+
content_type:,
|
454
|
+
recognize_callback:,
|
453
455
|
audio: nil,
|
454
456
|
chunk_data: false,
|
455
|
-
|
456
|
-
model: "en-US_BroadbandModel",
|
457
|
-
recognize_callback: nil,
|
457
|
+
model: nil,
|
458
458
|
customization_id: nil,
|
459
459
|
acoustic_customization_id: nil,
|
460
460
|
customization_weight: nil,
|
461
|
-
|
462
|
-
inactivity_timeout:
|
463
|
-
interim_results:
|
461
|
+
base_model_version: nil,
|
462
|
+
inactivity_timeout: nil,
|
463
|
+
interim_results: nil,
|
464
464
|
keywords: nil,
|
465
465
|
keywords_threshold: nil,
|
466
|
-
max_alternatives:
|
466
|
+
max_alternatives: nil,
|
467
467
|
word_alternatives_threshold: nil,
|
468
|
-
word_confidence:
|
469
|
-
timestamps:
|
468
|
+
word_confidence: nil,
|
469
|
+
timestamps: nil,
|
470
470
|
profanity_filter: nil,
|
471
|
-
smart_formatting:
|
471
|
+
smart_formatting: nil,
|
472
472
|
speaker_labels: nil
|
473
473
|
)
|
474
474
|
raise ArgumentError("Audio must be provided") if audio.nil? && !chunk_data
|
@@ -489,7 +489,7 @@ module IBMWatson
|
|
489
489
|
"customization_id" => customization_id,
|
490
490
|
"acoustic_customization_id" => acoustic_customization_id,
|
491
491
|
"customization_weight" => customization_weight,
|
492
|
-
"
|
492
|
+
"base_model_version" => base_model_version
|
493
493
|
}
|
494
494
|
params.delete_if { |_, v| v.nil? }
|
495
495
|
url += "/v1/recognize?" + HTTP::URI.form_encode(params)
|
@@ -514,25 +514,25 @@ module IBMWatson
|
|
514
514
|
# :nocov:
|
515
515
|
# @deprecated This will method be removed in the next major release. Use {#recognize_using_websocket} instead.
|
516
516
|
def recognize_with_websocket(
|
517
|
+
content_type:,
|
518
|
+
recognize_callback:,
|
517
519
|
audio: nil,
|
518
520
|
chunk_data: false,
|
519
|
-
|
520
|
-
model: "en-US_BroadbandModel",
|
521
|
-
recognize_callback: nil,
|
521
|
+
model: nil,
|
522
522
|
customization_id: nil,
|
523
523
|
acoustic_customization_id: nil,
|
524
524
|
customization_weight: nil,
|
525
|
-
|
526
|
-
inactivity_timeout:
|
527
|
-
interim_results:
|
525
|
+
base_model_version: nil,
|
526
|
+
inactivity_timeout: nil,
|
527
|
+
interim_results: nil,
|
528
528
|
keywords: nil,
|
529
529
|
keywords_threshold: nil,
|
530
|
-
max_alternatives:
|
530
|
+
max_alternatives: nil,
|
531
531
|
word_alternatives_threshold: nil,
|
532
|
-
word_confidence:
|
533
|
-
timestamps:
|
532
|
+
word_confidence: nil,
|
533
|
+
timestamps: nil,
|
534
534
|
profanity_filter: nil,
|
535
|
-
smart_formatting:
|
535
|
+
smart_formatting: nil,
|
536
536
|
speaker_labels: nil
|
537
537
|
)
|
538
538
|
Kernel.warn("[DEPRECATION] `recognize_with_websocket` is deprecated and will be removed in the next major release. Please use `recognize_using_websocket` instead.")
|
@@ -545,7 +545,7 @@ module IBMWatson
|
|
545
545
|
customization_id: customization_id,
|
546
546
|
acoustic_customization_id: acoustic_customization_id,
|
547
547
|
customization_weight: customization_weight,
|
548
|
-
|
548
|
+
base_model_version: base_model_version,
|
549
549
|
inactivity_timeout: inactivity_timeout,
|
550
550
|
interim_results: interim_results,
|
551
551
|
keywords: keywords,
|
data/lib/ibm_watson/version.rb
CHANGED
data/test/appveyor_status.rb
CHANGED
@@ -15,18 +15,25 @@ class AppVeyorStatusTest < Minitest::Test
|
|
15
15
|
skip "Branch is NOT master and/or Ruby != 2.5.1, so AppVeyor check before deployment will not be run." if ENV["TRAVIS_BRANCH"] != "master" || ENV["TRAVIS_RUBY_VERSION"] != "2.5.1"
|
16
16
|
client = HTTP::Client.new
|
17
17
|
attempts = 0
|
18
|
-
|
18
|
+
builds = JSON.parse(client.get("https://ci.appveyor.com/api/projects/maxnussbaum/ruby-sdk/history?recordsNumber=25&branch=master").body.to_s)["builds"]
|
19
|
+
index = builds.index { |build| build["commitId"] == ENV["TRAVIS_COMMIT"] }
|
20
|
+
flunk("An AppVeyor build for commit #{ENV["TRAVIS_COMMIT"]} could not be found") unless index.is_a?(Integer)
|
21
|
+
current_build = builds[index]
|
22
|
+
status = current_build["status"]
|
19
23
|
puts("0 AppVeyor Status: #{status}")
|
20
24
|
while status != "success" && status != "failed" && status != "cancelled"
|
21
25
|
attempts += 1
|
22
26
|
sleep(15)
|
23
|
-
|
27
|
+
builds = JSON.parse(client.get("https://ci.appveyor.com/api/projects/maxnussbaum/ruby-sdk/history?recordsNumber=25&branch=master").body.to_s)["builds"]
|
28
|
+
index = builds.index { |build| build["commitId"] == ENV["TRAVIS_COMMIT"] }
|
29
|
+
current_build = builds[index]
|
30
|
+
status = current_build["status"]
|
24
31
|
puts("#{attempts} AppVeyor Status: #{status}")
|
25
32
|
end
|
26
33
|
if status == "success"
|
27
34
|
assert(true)
|
28
35
|
else
|
29
|
-
|
36
|
+
flunk("AppVeyor tests have NOT passed! Please ensure that AppVeyor passes before deploying")
|
30
37
|
end
|
31
38
|
end
|
32
39
|
end
|
@@ -138,7 +138,7 @@ unless ENV["SPEECH_TO_TEXT_USERNAME"].nil? || ENV["SPEECH_TO_TEXT_PASSWORD"].nil
|
|
138
138
|
timestamps: true,
|
139
139
|
max_alternatives: 2,
|
140
140
|
word_alternatives_threshold: 0.5,
|
141
|
-
|
141
|
+
content_type: "audio/wav"
|
142
142
|
)
|
143
143
|
Thread.new do
|
144
144
|
until audio_file.eof?
|
@@ -162,7 +162,7 @@ unless ENV["SPEECH_TO_TEXT_USERNAME"].nil? || ENV["SPEECH_TO_TEXT_PASSWORD"].nil
|
|
162
162
|
timestamps: true,
|
163
163
|
max_alternatives: 2,
|
164
164
|
word_alternatives_threshold: 0.5,
|
165
|
-
|
165
|
+
content_type: "audio/wav"
|
166
166
|
)
|
167
167
|
thr = Thread.new { speech.start }
|
168
168
|
thr.join
|
@@ -180,7 +180,7 @@ unless ENV["SPEECH_TO_TEXT_USERNAME"].nil? || ENV["SPEECH_TO_TEXT_PASSWORD"].nil
|
|
180
180
|
timestamps: true,
|
181
181
|
max_alternatives: 2,
|
182
182
|
word_alternatives_threshold: 0.5,
|
183
|
-
|
183
|
+
content_type: "audio/wav"
|
184
184
|
)
|
185
185
|
thr = Thread.new { speech.start }
|
186
186
|
thr.join
|
@@ -198,7 +198,7 @@ unless ENV["SPEECH_TO_TEXT_USERNAME"].nil? || ENV["SPEECH_TO_TEXT_PASSWORD"].nil
|
|
198
198
|
timestamps: true,
|
199
199
|
max_alternatives: 2,
|
200
200
|
word_alternatives_threshold: 0.5,
|
201
|
-
|
201
|
+
content_type: "audio/wav"
|
202
202
|
)
|
203
203
|
thr = Thread.new { speech.start }
|
204
204
|
thr.join
|
@@ -225,7 +225,7 @@ unless ENV["SPEECH_TO_TEXT_USERNAME"].nil? || ENV["SPEECH_TO_TEXT_PASSWORD"].nil
|
|
225
225
|
timestamps: true,
|
226
226
|
max_alternatives: 2,
|
227
227
|
word_alternatives_threshold: 0.5,
|
228
|
-
|
228
|
+
content_type: "audio/wav"
|
229
229
|
)
|
230
230
|
thr = Thread.new { speech.start }
|
231
231
|
thr.join
|
@@ -98,7 +98,7 @@ class PersonalityInsightsV3Test < Minitest::Test
|
|
98
98
|
profile_response = File.read(Dir.getwd + "/resources/personality-v3-expect3.txt")
|
99
99
|
personality_text = File.read(Dir.getwd + "/resources/personality-v3.json")
|
100
100
|
headers = {
|
101
|
-
"Content-Type" => "
|
101
|
+
"Content-Type" => "text/csv"
|
102
102
|
}
|
103
103
|
expected_response = DetailedResponse.new(status: 200, body: profile_response, headers: headers)
|
104
104
|
stub_request(:post, "https://gateway.watsonplatform.net/personality-insights/api/v3/profile?consumption_preferences=true&csv_headers=true&raw_scores=true&version=2017-10-13")
|
@@ -110,7 +110,7 @@ class PersonalityInsightsV3Test < Minitest::Test
|
|
110
110
|
"Content-Type" => "application/json",
|
111
111
|
"Host" => "gateway.watsonplatform.net"
|
112
112
|
}
|
113
|
-
).to_return(status: 200, body: profile_response)
|
113
|
+
).to_return(status: 200, body: profile_response, headers: headers)
|
114
114
|
service = IBMWatson::PersonalityInsightsV3.new(
|
115
115
|
version: "2017-10-13",
|
116
116
|
username: "username",
|
@@ -80,7 +80,7 @@ class VcapPersonalityInsightsV3Test < Minitest::Test
|
|
80
80
|
profile_response = File.read(Dir.getwd + "/resources/personality-v3-expect3.txt")
|
81
81
|
personality_text = File.read(Dir.getwd + "/resources/personality-v3.json")
|
82
82
|
headers = {
|
83
|
-
"Content-Type" => "
|
83
|
+
"Content-Type" => "text/csv"
|
84
84
|
}
|
85
85
|
expected_response = DetailedResponse.new(status: 200, body: profile_response, headers: headers)
|
86
86
|
stub_request(:post, "https://gateway.watsonplatform.net/personality-insights/api/v3/profile?consumption_preferences=true&csv_headers=true&raw_scores=true&version=2017-10-13")
|
@@ -92,7 +92,7 @@ class VcapPersonalityInsightsV3Test < Minitest::Test
|
|
92
92
|
"Content-Type" => "application/json",
|
93
93
|
"Host" => "gateway.watsonplatform.net"
|
94
94
|
}
|
95
|
-
).to_return(status: 200, body: profile_response)
|
95
|
+
).to_return(status: 200, body: profile_response, headers: headers)
|
96
96
|
service = IBMWatson::PersonalityInsightsV3.new(
|
97
97
|
version: "2017-10-13"
|
98
98
|
)
|
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: ibm_watson
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.4.1
|
4
|
+
version: 0.4.2
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- Max Nussbaum
|
8
8
|
autorequire:
|
9
9
|
bindir: exe
|
10
10
|
cert_chain: []
|
11
|
-
date: 2018-08-
|
11
|
+
date: 2018-08-10 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: concurrent-ruby
|