ibm_watson 1.4.0 → 1.5.0

Sign up to get free protection for your applications and to get access to all the features.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 2d4c227c6c5c5022489eaf6a69b8b674ec91facde966f3e077b9c53c969ccd80
4
- data.tar.gz: 1eed5dfb21f288d25d555828a741c5836b6b25c984d7f2b1eb3de2a9f703f977
3
+ metadata.gz: b197aaa628e62691bfa2a18c5b15bb0bc7688d73c3d8ed7f005d2e805f72f549
4
+ data.tar.gz: d9db08b75ff0bcc1fad6af8f9e6b234e97523dae6ce9d01eea5a65f64d46529a
5
5
  SHA512:
6
- metadata.gz: f8a58cdc4bc68c22395584350e43e47d3d93a82602d41443dfb7160f9887241d76616372ec8a77f596686b47a7216a26b710107776d19c601f54dd05e2a20089
7
- data.tar.gz: 5e80aa7bba94a5951ac4bbfae545c82a72f161f360b7e4b9633da07461d3ef679ceeb54880552216c84a33b7d8407599ac85035926f3b830fa9351664c70a4bf
6
+ metadata.gz: 9650dad4ea8eda2cd026015cc4863fc0a2834ab081f3602ed52be9dec392cac4fe47b4d9d918cba76be42994de3e53d658e147516b2272c3e4dfb68c102a54b6
7
+ data.tar.gz: cf68a5911478d0171f4e4fed85e839ec10187eeb5ec2fda145a812389787ad7a9245c5dfc51d6826f574b6ea80c29d01c807ec411a9286dd5203d8882ea4c176
data/README.md CHANGED
@@ -398,8 +398,10 @@ If you have issues with the APIs or have a question about the Watson services, s
398
398
 
399
399
  Tested on:
400
400
 
401
- * MRI Ruby (RVM): 2.3.7, 2.4.4, 2.5.1
402
- * RubyInstaller (Windows x64): 2.3.3, 2.4.4, 2.5.1
401
+ * MRI Ruby (RVM): 2.5.1, 2.6.1
402
+ * RubyInstaller (Windows x64): 2.5.1, 2.6.1
403
+
404
+ 2.3.7 and 2.4.4 should still work but support will be deprecated in the next major release.
403
405
 
404
406
  ## Contributing
405
407
 
@@ -160,8 +160,10 @@ module IBMWatson
160
160
 
161
161
  ##
162
162
  # @!method message(assistant_id:, session_id:, input: nil, context: nil)
163
- # Send user input to assistant.
164
- # Send user input to an assistant and receive a response.
163
+ # Send user input to assistant (stateful).
164
+ # Send user input to an assistant and receive a response, with conversation state
165
+ # (including context data) stored by Watson Assistant for the duration of the
166
+ # session.
165
167
  #
166
168
  # There is no rate limit for this operation.
167
169
  # @param assistant_id [String] Unique identifier of the assistant. To find the assistant ID in the Watson
@@ -172,9 +174,12 @@ module IBMWatson
172
174
  # **Note:** Currently, the v2 API does not support creating assistants.
173
175
  # @param session_id [String] Unique identifier of the session.
174
176
  # @param input [MessageInput] An input object that includes the input text.
175
- # @param context [MessageContext] State information for the conversation. The context is stored by the assistant on
176
- # a per-session basis. You can use this property to set or modify context variables,
177
- # which can also be accessed by dialog nodes.
177
+ # @param context [MessageContext] Context data for the conversation. You can use this property to set or modify
178
+ # context variables, which can also be accessed by dialog nodes. The context is
179
+ # stored by the assistant on a per-session basis.
180
+ #
181
+ # **Note:** The total size of the context data stored for a stateful session cannot
182
+ # exceed 100KB.
178
183
  # @return [IBMCloudSdkCore::DetailedResponse] A `IBMCloudSdkCore::DetailedResponse` object representing the response.
179
184
  def message(assistant_id:, session_id:, input: nil, context: nil)
180
185
  raise ArgumentError.new("assistant_id must be provided") if assistant_id.nil?
@@ -207,5 +212,57 @@ module IBMWatson
207
212
  )
208
213
  response
209
214
  end
215
+
216
+ ##
217
+ # @!method message_stateless(assistant_id:, input: nil, context: nil)
218
+ # Send user input to assistant (stateless).
219
+ # Send user input to an assistant and receive a response, with conversation state
220
+ # (including context data) managed by your application.
221
+ #
222
+ # There is no rate limit for this operation.
223
+ # @param assistant_id [String] Unique identifier of the assistant. To find the assistant ID in the Watson
224
+ # Assistant user interface, open the assistant settings and click **API Details**.
225
+ # For information about creating assistants, see the
226
+ # [documentation](https://cloud.ibm.com/docs/assistant?topic=assistant-assistant-add#assistant-add-task).
227
+ #
228
+ # **Note:** Currently, the v2 API does not support creating assistants.
229
+ # @param input [MessageInputStateless] An input object that includes the input text.
230
+ # @param context [MessageContextStateless] Context data for the conversation. You can use this property to set or modify
231
+ # context variables, which can also be accessed by dialog nodes. The context is not
232
+ # stored by the assistant. To maintain session state, include the context from the
233
+ # previous response.
234
+ #
235
+ # **Note:** The total size of the context data for a stateless session cannot exceed
236
+ # 250KB.
237
+ # @return [IBMCloudSdkCore::DetailedResponse] A `IBMCloudSdkCore::DetailedResponse` object representing the response.
238
+ def message_stateless(assistant_id:, input: nil, context: nil)
239
+ raise ArgumentError.new("assistant_id must be provided") if assistant_id.nil?
240
+
241
+ headers = {
242
+ }
243
+ sdk_headers = Common.new.get_sdk_headers("conversation", "V2", "message_stateless")
244
+ headers.merge!(sdk_headers)
245
+
246
+ params = {
247
+ "version" => @version
248
+ }
249
+
250
+ data = {
251
+ "input" => input,
252
+ "context" => context
253
+ }
254
+
255
+ method_url = "/v2/assistants/%s/message" % [ERB::Util.url_encode(assistant_id)]
256
+
257
+ response = request(
258
+ method: "POST",
259
+ url: method_url,
260
+ headers: headers,
261
+ params: params,
262
+ json: data,
263
+ accept_json: true
264
+ )
265
+ response
266
+ end
210
267
  end
211
268
  end
@@ -467,6 +467,9 @@ module IBMWatson
467
467
  # **Note:** This operation only works on collections created to accept direct file
468
468
  # uploads. It cannot be used to modify a collection that connects to an external
469
469
  # source such as Microsoft SharePoint.
470
+ #
471
+ # **Note:** If an uploaded document is segmented, all segments will be overwritten,
472
+ # even if the updated version of the document has fewer segments.
470
473
  # @param project_id [String] The ID of the project. This information can be found from the deploy page of the
471
474
  # Discovery administrative tooling.
472
475
  # @param collection_id [String] The ID of the collection.
@@ -537,6 +540,9 @@ module IBMWatson
537
540
  # **Note:** This operation only works on collections created to accept direct file
538
541
  # uploads. It cannot be used to modify a collection that connects to an external
539
542
  # source such as Microsoft SharePoint.
543
+ #
544
+ # **Note:** Segments of an uploaded document cannot be deleted individually. Delete
545
+ # all segments by using the `parent_document_id` of a segment result.
540
546
  # @param project_id [String] The ID of the project. This information can be found from the deploy page of the
541
547
  # Discovery administrative tooling.
542
548
  # @param collection_id [String] The ID of the collection.
@@ -488,7 +488,7 @@ module IBMWatson
488
488
  # @param customization_id [String] The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used.
489
489
  # @param acoustic_customization_id [String] The GUID of a custom acoustic model that is to be used with the request. The base model of the specified custom acoustic model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom acoustic model is used.
490
490
  # @param language_customization_id [String] The GUID of a custom language model that is to be used with the request. The base model of the specified custom language model must match the model specified with the `model` parameter. You must make the request with service credentials created for the instance of the service that owns the custom model. By default, no custom language model is used.
491
- # @param base_model_version [String] The version of the specified base `model` that is to be used for speech recognition. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://console.bluemix.net/docs/services/speech-to-text/input.html#version).
491
+ # @param base_model_version [String] The version of the specified base `model` that is to be used for speech recognition. Multiple versions of a base model can exist when a model is updated for internal improvements. The parameter is intended primarily for use with custom models that have been upgraded for a new base model. The default value depends on whether the parameter is used with or without a custom model. For more information, see [Base model version](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-input#version).
492
492
  # @param inactivity_timeout [Integer] The time in seconds after which, if only silence (no speech) is detected in submitted audio, the connection is closed with a 400 error. Useful for stopping audio submission from a live microphone when a user simply walks away. Use `-1` for infinity.
493
493
  # @param interim_results [Boolean] Send back non-final previews of each "sentence" as it is being processed. These results are ignored in text mode.
494
494
  # @param keywords [Array<String>] Array of keyword strings to spot in the audio. Each keyword string can include one or more tokens. Keywords are spotted only in the final hypothesis, not in interim results. If you specify any keywords, you must also specify a keywords threshold. Omit the parameter or specify an empty array if you do not need to spot keywords.
@@ -499,13 +499,13 @@ module IBMWatson
499
499
  # @param timestamps [Boolean] If `true`, time alignment for each word is returned.
500
500
  # @param profanity_filter [Boolean] If `true` (the default), filters profanity from all output except for keyword results by replacing inappropriate words with a series of asterisks. Set the parameter to `false` to return results with no censoring. Applies to US English transcription only.
501
501
  # @param smart_formatting [Boolean] If `true`, converts dates, times, series of digits and numbers, phone numbers, currency values, and Internet addresses into more readable, conventional representations in the final transcript of a recognition request. If `false` (the default), no formatting is performed. Applies to US English transcription only.
502
- # @param speaker_labels [Boolean] Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://console.bluemix.net/docs/services/speech-to-text/output.html#speaker_labels).
502
+ # @param speaker_labels [Boolean] Indicates whether labels that identify which words were spoken by which participants in a multi-person exchange are to be included in the response. The default is `false`; no speaker labels are returned. Setting `speaker_labels` to `true` forces the `timestamps` parameter to be `true`, regardless of whether you specify `false` for the parameter. To determine whether a language model supports speaker labels, use the `GET /v1/models` method and check that the attribute `speaker_labels` is set to `true`. You can also refer to [Speaker labels](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#speaker_labels).
503
503
  # @param grammar_name [String] The name of a grammar that is to be used with the recognition request. If you
504
504
  # specify a grammar, you must also use the `language_customization_id` parameter to
505
505
  # specify the name of the custom language model for which the grammar is defined.
506
506
  # The service recognizes only strings that are recognized by the specified grammar;
507
507
  # it does not recognize other custom words from the model's words resource. See
508
- # [Grammars](https://cloud.ibm.com/docs/services/speech-to-text/output.html).
508
+ # [Grammars](https://cloud.ibm.com/docs/speech-to-text/output.html).
509
509
  # @param redaction [Boolean] If `true`, the service redacts, or masks, numeric data from final transcripts. The
510
510
  # feature redacts any number that has three or more consecutive digits by replacing
511
511
  # each digit with an `X` character. It is intended to redact sensitive numeric data,
@@ -520,7 +520,7 @@ module IBMWatson
520
520
  # **Note:** Applies to US English, Japanese, and Korean transcription only.
521
521
  #
522
522
  # See [Numeric
523
- # redaction](https://cloud.ibm.com/docs/services/speech-to-text/output.html#redaction).
523
+ # redaction](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#redaction).
524
524
  #
525
525
  # @param processing_metrics [Boolean] If `true`, requests processing metrics about the service's transcription of the
526
526
  # input audio. The service returns processing metrics at the interval specified by
@@ -542,7 +542,7 @@ module IBMWatson
542
542
  # @return [WebSocketClient] Returns a new WebSocketClient object
543
543
  #
544
544
  # See [Audio
545
- # metrics](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-metrics#audio_metrics).
545
+ # metrics](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-metrics#audio_metrics).
546
546
  # @param end_of_phrase_silence_time [Float] Specifies the duration of the pause interval at which the service
547
547
  # splits a transcript into multiple final results. If the service detects pauses or
548
548
  # extended silence before it reaches the end of the audio stream, its response can
@@ -559,7 +559,7 @@ module IBMWatson
559
559
  # Chinese is 0.6 seconds.
560
560
  #
561
561
  # See [End of phrase silence
562
- # time](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-output#silence_time).
562
+ # time](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#silence_time).
563
563
  # @param split_transcript_at_phrase_end [Boolean] If `true`, directs the service to split the transcript into multiple final results
564
564
  # based on semantic features of the input, for example, at the conclusion of
565
565
  # meaningful phrases such as sentences. The service bases its understanding of
@@ -569,7 +569,7 @@ module IBMWatson
569
569
  # interval.
570
570
  #
571
571
  # See [Split transcript at phrase
572
- # end](https://cloud.ibm.com/docs/services/speech-to-text?topic=speech-to-text-output#split_transcript).
572
+ # end](https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-output#split_transcript).
573
573
  # @param speech_detector_sensitivity [Float] The sensitivity of speech activity detection that the service is to perform. Use
574
574
  # the parameter to suppress word insertions from music, coughing, and other
575
575
  # non-speech events. The service biases the audio it passes for speech recognition
@@ -20,18 +20,18 @@
20
20
  # both, for each language. The audio is streamed back to the client with minimal delay.
21
21
  #
22
22
  # For speech synthesis, the service supports a synchronous HTTP Representational State
23
- # Transfer (REST) interface. It also supports a WebSocket interface that provides both
24
- # plain text and SSML input, including the SSML &lt;mark&gt; element and word timings.
25
- # SSML is an XML-based markup language that provides text annotation for speech-synthesis
26
- # applications.
23
+ # Transfer (REST) interface and a WebSocket interface. Both interfaces support plain text
24
+ # and SSML input. SSML is an XML-based markup language that provides text annotation for
25
+ # speech-synthesis applications. The WebSocket interface also supports the SSML
26
+ # <code>&lt;mark&gt;</code> element and word timings.
27
27
  #
28
- # The service also offers a customization interface. You can use the interface to define
29
- # sounds-like or phonetic translations for words. A sounds-like translation consists of
30
- # one or more words that, when combined, sound like the word. A phonetic translation is
31
- # based on the SSML phoneme format for representing a word. You can specify a phonetic
32
- # translation in standard International Phonetic Alphabet (IPA) representation or in the
33
- # proprietary IBM Symbolic Phonetic Representation (SPR). The Arabic, Chinese, Dutch, and
34
- # Korean languages support only IPA.
28
+ # The service offers a customization interface that you can use to define sounds-like or
29
+ # phonetic translations for words. A sounds-like translation consists of one or more words
30
+ # that, when combined, sound like the word. A phonetic translation is based on the SSML
31
+ # phoneme format for representing a word. You can specify a phonetic translation in
32
+ # standard International Phonetic Alphabet (IPA) representation or in the proprietary IBM
33
+ # Symbolic Phonetic Representation (SPR). The Arabic, Chinese, Dutch, and Korean languages
34
+ # support only IPA.
35
35
 
36
36
  require "concurrent"
37
37
  require "erb"
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module IBMWatson
4
- VERSION = "1.4.0"
4
+ VERSION = "1.5.0"
5
5
  end
@@ -325,6 +325,51 @@ module IBMWatson
325
325
  )
326
326
  nil
327
327
  end
328
+
329
+ ##
330
+ # @!method get_model_file(collection_id:, feature:, model_format:)
331
+ # Get a model.
332
+ # Download a model that you can deploy to detect objects in images. The collection
333
+ # must include a generated model, which is indicated in the response for the
334
+ # collection details as `"rscnn_ready": true`. If the value is `false`, train or
335
+ # retrain the collection to generate the model.
336
+ #
337
+ # Currently, the model format is specific to Android apps. For more information
338
+ # about how to deploy the model to your app, see the [Watson Visual Recognition on
339
+ # Android](https://github.com/matt-ny/rscnn) project in GitHub.
340
+ # @param collection_id [String] The identifier of the collection.
341
+ # @param feature [String] The feature for the model.
342
+ # @param model_format [String] The format of the returned model.
343
+ # @return [IBMCloudSdkCore::DetailedResponse] A `IBMCloudSdkCore::DetailedResponse` object representing the response.
344
+ def get_model_file(collection_id:, feature:, model_format:)
345
+ raise ArgumentError.new("collection_id must be provided") if collection_id.nil?
346
+
347
+ raise ArgumentError.new("feature must be provided") if feature.nil?
348
+
349
+ raise ArgumentError.new("model_format must be provided") if model_format.nil?
350
+
351
+ headers = {
352
+ }
353
+ sdk_headers = Common.new.get_sdk_headers("watson_vision_combined", "V4", "get_model_file")
354
+ headers.merge!(sdk_headers)
355
+
356
+ params = {
357
+ "version" => @version,
358
+ "feature" => feature,
359
+ "model_format" => model_format
360
+ }
361
+
362
+ method_url = "/v4/collections/%s/model" % [ERB::Util.url_encode(collection_id)]
363
+
364
+ response = request(
365
+ method: "GET",
366
+ url: method_url,
367
+ headers: headers,
368
+ params: params,
369
+ accept_json: false
370
+ )
371
+ response
372
+ end
328
373
  #########################
329
374
  # Images
330
375
  #########################
@@ -52,6 +52,13 @@ if !ENV["ASSISTANT_APIKEY"].nil? && !ENV["ASSISTANT_URL"].nil?
52
52
  )
53
53
  assert((200..299).cover?(service_response.status))
54
54
 
55
+ service_response = service.message_stateless(
56
+ assistant_id: ENV["ASSISTANT_ASSISTANT_ID"],
57
+ input: { "text" => "Turn on the lights" },
58
+ context: nil
59
+ )
60
+ assert((200..299).cover?(service_response.status))
61
+
55
62
  service.delete_session(
56
63
  assistant_id: ENV["ASSISTANT_ASSISTANT_ID"],
57
64
  session_id: session_id
@@ -68,6 +68,15 @@ if !ENV["VISUAL_RECOGNITION_APIKEY"].nil? && !ENV["VISUAL_RECOGNITION_URL"].nil?
68
68
  ).result
69
69
  refute(result.nil?)
70
70
  end
71
+
72
+ def test_get_model_file
73
+ result = @service.get_model_file(
74
+ collection_id: @collection_id,
75
+ feature: "objects",
76
+ model_format: "rscnn"
77
+ ).result
78
+ refute(result.nil?)
79
+ end
71
80
  end
72
81
  else
73
82
  class VisualRecognitionV4Test < Minitest::Test
@@ -128,4 +128,70 @@ class AssistantV2Test < Minitest::Test
128
128
  )
129
129
  assert_nil(service_response)
130
130
  end
131
+
132
+ def test_message_stateless
133
+ # service.set_default_headers("x-watson-learning-opt-out" => true)
134
+ assistant_id = "f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec"
135
+ message_response = {
136
+ "context" => {
137
+ "conversation_id" => "1b7b67c0-90ed-45dc-8508-9488bc483d5b",
138
+ "system" => {
139
+ "dialog_stack" => ["root"],
140
+ "dialog_turn_counter" => 1,
141
+ "dialog_request_counter" => 1
142
+ }
143
+ },
144
+ "intents" => [],
145
+ "entities" => [],
146
+ "input" => {},
147
+ "output" => {
148
+ "text" => "okay",
149
+ "log_messages" => []
150
+ }
151
+ }
152
+ headers = {
153
+ "Content-Type" => "application/json"
154
+ }
155
+ stub_request(:post, "https://gateway.watsonplatform.net/assistant/api/v2/assistants/f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec/message?version=2018-02-16")
156
+ .with(
157
+ body: "{\"input\":{\"text\":\"Turn on the lights\"}}",
158
+ headers: {
159
+ "Accept" => "application/json",
160
+ "Content-Type" => "application/json",
161
+ "Host" => "gateway.watsonplatform.net"
162
+ }
163
+ ).to_return(status: 200, body: message_response.to_json, headers: headers)
164
+ service_response = service.message_stateless(
165
+ assistant_id: assistant_id,
166
+ input: { "text" => "Turn on the lights" },
167
+ context: nil
168
+ )
169
+ assert_equal(message_response, service_response.result)
170
+
171
+ message_ctx = {
172
+ "context" => {
173
+ "conversation_id" => "1b7b67c0-90ed-45dc-8508-9488bc483d5b",
174
+ "system" => {
175
+ "dialog_stack" => ["root"],
176
+ "dialog_turn_counter" => 2,
177
+ "dialog_request_counter" => 1
178
+ }
179
+ }
180
+ }
181
+ stub_request(:post, "https://gateway.watsonplatform.net/assistant/api/v2/assistants/f8fdbc65-e0bd-4e43-b9f8-2975a366d4ec/message?version=2018-02-16")
182
+ .with(
183
+ body: "{\"input\":{\"text\":\"Turn on the lights\"},\"context\":\"{\\\"conversation_id\\\":\\\"1b7b67c0-90ed-45dc-8508-9488bc483d5b\\\",\\\"system\\\":{\\\"dialog_stack\\\":[\\\"root\\\"],\\\"dialog_turn_counter\\\":2,\\\"dialog_request_counter\\\":1}}\"}",
184
+ headers: {
185
+ "Accept" => "application/json",
186
+ "Content-Type" => "application/json",
187
+ "Host" => "gateway.watsonplatform.net"
188
+ }
189
+ ).to_return(status: 200, body: message_response.to_json, headers: headers)
190
+ service_response = service.message_stateless(
191
+ assistant_id: assistant_id,
192
+ input: { "text" => "Turn on the lights" },
193
+ context: message_ctx["context"].to_json
194
+ )
195
+ assert_equal(message_response, service_response.result)
196
+ end
131
197
  end
@@ -401,4 +401,22 @@ class VisualRecognitionV4Test < Minitest::Test
401
401
  )
402
402
  assert_nil(service_response)
403
403
  end
404
+
405
+ def test_get_model_file
406
+ response = {
407
+ "binary" => []
408
+ }
409
+ stub_request(:get, "https://gateway.watsonplatform.net/visual-recognition/api/v4/collections/collid/model?feature=objects&model_format=rscnn_ready&version=2018-03-19")
410
+ .with(
411
+ headers: {
412
+ "Host" => "gateway.watsonplatform.net"
413
+ }
414
+ ).to_return(status: 200, body: response.to_json, headers: { "Content-Type" => "application/json" })
415
+ service_response = service.get_model_file(
416
+ collection_id: "collid",
417
+ feature: "objects",
418
+ model_format: "rscnn_ready"
419
+ )
420
+ assert_equal(response, service_response.result)
421
+ end
404
422
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: ibm_watson
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.4.0
4
+ version: 1.5.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Max Nussbaum
8
8
  autorequire:
9
9
  bindir: exe
10
10
  cert_chain: []
11
- date: 2020-04-24 00:00:00.000000000 Z
11
+ date: 2020-06-03 00:00:00.000000000 Z
12
12
  dependencies:
13
13
  - !ruby/object:Gem::Dependency
14
14
  name: concurrent-ruby
@@ -346,7 +346,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
346
346
  - !ruby/object:Gem::Version
347
347
  version: '0'
348
348
  requirements: []
349
- rubygems_version: 3.1.2
349
+ rubygems_version: 3.1.4
350
350
  signing_key:
351
351
  specification_version: 4
352
352
  summary: Official client library to use the IBM Watson Services