maritaca-ai 1.0.1 → 1.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 528dad62e33505dbe008d7a4a2829f8d0fb91d7ae621884cf4eb436777785589
-  data.tar.gz: fa2f39f286f091d238aee2ba8da22419c0880867892139a8d2c581f78d10f3f4
+  metadata.gz: afaa9a68f6751d7fdc770f727beb62076789b0a2e1b149e1a15c7973e571f4fb
+  data.tar.gz: 913f22cf572a477d03c2b227a478a67a860e01462f22e8a53cba2d650315823d
 SHA512:
-  metadata.gz: 23cb4d50db7d60142fe05313031c16096200b59ff7d10fb4bc51316b64396085d28a4916f2402bc9ff9ada668beb914e701a0b384297a5fd7b47e68a76112610
-  data.tar.gz: 0b0a0ab8c1d4f3f5d69ac0a95a5e63533419f4fb64ca70ffc882bc9e87d8a3867ae68a4d865b7a2a0546869eb2d479a5404cf8aa01025fd4e7ebe56530cee5fa
+  metadata.gz: 455d565cbf7092b0f2fa72902b78d87e8293a03800755c5020461cc86916724471d2b269d3630822ff433c15258733dcdc93ae3b25f745840306ede7062eb208
+  data.tar.gz: 58f7bd165d66a673110ca317393672ed67fe20aa053f97e83195b8892f63c3c8575494795b1055d774ee616e7dd9964bb3e81af28bc7ede3837a05420948295f
data/Gemfile CHANGED
@@ -7,5 +7,5 @@ gemspec
 group :test, :development do
   gem 'dotenv', '~> 2.8', '>= 2.8.1'
   gem 'pry-byebug', '~> 3.10', '>= 3.10.1'
-  gem 'rubocop', '~> 1.58'
+  gem 'rubocop', '~> 1.60', '>= 1.60.2'
 end
data/Gemfile.lock CHANGED
@@ -1,8 +1,10 @@
 PATH
   remote: .
   specs:
-    maritaca-ai (1.0.1)
+    maritaca-ai (1.2.0)
+      event_stream_parser (~> 1.0)
       faraday (~> 2.9)
+      faraday-typhoeus (~> 1.1)
 
 GEM
   remote: https://rubygems.org/
@@ -11,17 +13,24 @@ GEM
     byebug (11.1.3)
     coderay (1.1.3)
     dotenv (2.8.1)
+    ethon (0.16.0)
+      ffi (>= 1.15.0)
+    event_stream_parser (1.0.0)
     faraday (2.9.0)
       faraday-net_http (>= 2.0, < 3.2)
     faraday-net_http (3.1.0)
       net-http
-    json (2.7.1)
+    faraday-typhoeus (1.1.0)
+      faraday (~> 2.0)
+      typhoeus (~> 1.4)
+    ffi (1.16.3)
+    json (2.7.2)
     language_server-protocol (3.17.0.3)
-    method_source (1.0.0)
+    method_source (1.1.0)
     net-http (0.4.1)
       uri
     parallel (1.24.0)
-    parser (3.3.0.3)
+    parser (3.3.1.0)
       ast (~> 2.4.1)
       racc
     pry (0.14.2)
@@ -32,22 +41,26 @@ GEM
       pry (>= 0.13, < 0.15)
     racc (1.7.3)
     rainbow (3.1.1)
-    regexp_parser (2.9.0)
-    rexml (3.2.6)
-    rubocop (1.59.0)
+    regexp_parser (2.9.2)
+    rexml (3.2.8)
+      strscan (>= 3.0.9)
+    rubocop (1.63.5)
       json (~> 2.3)
       language_server-protocol (>= 3.17.0)
       parallel (~> 1.10)
-      parser (>= 3.2.2.4)
+      parser (>= 3.3.0.2)
       rainbow (>= 2.2.2, < 4.0)
       regexp_parser (>= 1.8, < 3.0)
       rexml (>= 3.2.5, < 4.0)
-      rubocop-ast (>= 1.30.0, < 2.0)
+      rubocop-ast (>= 1.31.1, < 2.0)
       ruby-progressbar (~> 1.7)
       unicode-display_width (>= 2.4.0, < 3.0)
-    rubocop-ast (1.30.0)
-      parser (>= 3.2.1.0)
+    rubocop-ast (1.31.3)
+      parser (>= 3.3.1.0)
     ruby-progressbar (1.13.0)
+    strscan (3.1.0)
+    typhoeus (1.4.1)
+      ethon (>= 0.9.0)
     unicode-display_width (2.5.0)
     uri (0.13.0)
 
@@ -58,7 +71,7 @@ DEPENDENCIES
   dotenv (~> 2.8, >= 2.8.1)
   maritaca-ai!
   pry-byebug (~> 3.10, >= 3.10.1)
-  rubocop (~> 1.58)
+  rubocop (~> 1.60, >= 1.60.2)
 
 BUNDLED WITH
    2.4.22
data/README.md CHANGED
@@ -9,18 +9,19 @@ A Ruby gem for interacting with [MariTalk](https://chat.maritaca.ai) from [Marit
 ## TL;DR and Quick Start
 
 ```ruby
-gem 'maritaca-ai', '~> 1.0.1'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ```ruby
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -28,7 +29,13 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
 ## Index
@@ -36,27 +43,32 @@ Result:
 - [TL;DR and Quick Start](#tldr-and-quick-start)
 - [Index](#index)
 - [Setup](#setup)
-    - [Installing](#installing)
-    - [Credentials](#credentials)
+  - [Installing](#installing)
+  - [Credentials](#credentials)
 - [Usage](#usage)
-    - [Client](#client)
-        - [Custom Address](#custom-address)
-    - [Methods](#methods)
-        - [chat_inference](#chat_inference)
-            - [Chat](#chat)
-            - [Back-and-Forth Conversations](#back-and-forth-conversations)
-            - [Without Chat](#without-chat)
-    - [New Functionalities and APIs](#new-functionalities-and-apis)
-    - [Request Options](#request-options)
-        - [Timeout](#timeout)
-    - [Error Handling](#error-handling)
-        - [Rescuing](#rescuing)
-        - [For Short](#for-short)
-        - [Errors](#errors)
+  - [Client](#client)
+    - [Custom Address](#custom-address)
+  - [Methods](#methods)
+    - [chat_inference](#chat_inference)
+      - [Without Streaming Events](#without-streaming-events)
+        - [Chat](#chat)
+        - [Back-and-Forth Conversations](#back-and-forth-conversations)
+        - [Without Chat](#without-chat)
+      - [Receiving Stream Events](#receiving-stream-events)
+  - [Streaming and Server-Sent Events (SSE)](#streaming-and-server-sent-events-sse)
+    - [Server-Sent Events (SSE) Hang](#server-sent-events-sse-hang)
+  - [New Functionalities and APIs](#new-functionalities-and-apis)
+  - [Request Options](#request-options)
+    - [Adapter](#adapter)
+    - [Timeout](#timeout)
+  - [Error Handling](#error-handling)
+    - [Rescuing](#rescuing)
+    - [For Short](#for-short)
+    - [Errors](#errors)
 - [Development](#development)
-    - [Purpose](#purpose)
-    - [Publish to RubyGems](#publish-to-rubygems)
-    - [Updating the README](#updating-the-readme)
+  - [Purpose](#purpose)
+  - [Publish to RubyGems](#publish-to-rubygems)
+  - [Updating the README](#updating-the-readme)
 - [Resources and References](#resources-and-references)
 - [Disclaimer](#disclaimer)
 
@@ -65,11 +77,11 @@ Result:
 ### Installing
 
 ```sh
-gem install maritaca-ai -v 1.0.1
+gem install maritaca-ai -v 1.2.0
 ```
 
 ```sh
-gem 'maritaca-ai', '~> 1.0.1'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ### Credentials
@@ -93,7 +105,8 @@ Create a new client:
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 ```
 
@@ -116,11 +129,13 @@ client = Maritaca.new(
 
 #### chat_inference
 
-##### Chat
+##### Without Streaming Events
+
+###### Chat
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -128,16 +143,22 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Back-and-Forth Conversations
+###### Back-and-Forth Conversations
 
 To maintain a back-and-forth conversation, you need to append the received responses and build a history for your requests:
 
 ```rb
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [
       { role: 'user', content: 'Oi, meu nome é Tamanduá.' },
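
The hunk context stops partway into the `messages` array, so the unchanged remainder of the example is not shown. Presumably it appends the assistant's reply and a follow-up question, along these lines (a hedged reconstruction; the assistant's wording here is hypothetical):

```ruby
messages: [
  { role: 'user', content: 'Oi, meu nome é Tamanduá.' },
  # Hypothetical assistant reply appended to build the history:
  { role: 'assistant', content: 'Oi Tamanduá, como posso ajudá-lo hoje?' },
  { role: 'user', content: 'Qual é o meu nome?' }
]
```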
@@ -149,28 +170,127 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Seu nome é Tamanduá.' }
+{ 'answer' => ' Seu nome é Tamanduá. É um prazer conhecê-lo! Como posso ajudá-lo hoje?',
+  'usage' => {
+    'completion_tokens' => 35,
+    'prompt_tokens' => 39,
+    'total_tokens' => 74
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Without Chat
+###### Without Chat
 
 You can prompt the model without using chat mode:
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: false,
-    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n" }
+    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n",
+    stopping_tokens: ['.'] }
 )
 ```
 
 Result:
 ```ruby
 { 'answer' =>
-    "As aves, que aqui gorjeiam,\n" \
-    'Não gorjeiam como lá.' }
+    "As aves, que aqui gorjeiam,\n" \
+    'Não gorjeiam como lá.',
+  'usage' => {
+    'completion_tokens' => 21,
+    'prompt_tokens' => 21,
+    'total_tokens' => 42
+  },
+  'model' => 'sabia-2-medium' }
+```
+
+##### Receiving Stream Events
+
+Ensure that you have enabled [Server-Sent Events](#streaming-and-server-sent-events-sse) before using blocks for streaming. You also need to add `stream: true` to your payload:
+
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+Event:
+```ruby
+{ 'text' => ' Oi! Com' }
+```
+
+You can get all the received events at once as an array:
+
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+)
 ```
 
+Result:
+```ruby
+[{ 'text' => ' Oi! Com' },
+ { 'text' => 'o posso a' },
+ { 'text' => 'judar você' },
+ { 'text' => ' hoje?' },
+ { 'completion_tokens' => 15,
+   'prompt_tokens' => 74,
+   'total_tokens' => 89,
+   'model' => 'sabia-2-medium' }]
+```
+
+You can mix both as well:
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
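
For context beyond the diff: a minimal sketch, assuming only the event shapes documented above, of stitching the streamed fragments back into the full answer:

```ruby
require 'maritaca-ai'

client = Maritaca.new(
  credentials: { api_key: ENV['MARITACA_API_KEY'] },
  options: { server_sent_events: true }
)

answer = ''

client.chat_inference(
  { model: 'sabia-2-medium',
    stream: true,
    chat_mode: true,
    messages: [ { role: 'user', content: 'Oi!' } ] }
) do |event, _parsed, _raw|
  # Intermediate events carry a 'text' fragment; the final event
  # carries the token usage instead, so guard for the key.
  answer += event['text'] if event['text']
end

answer # => ' Oi! Como posso ajudar você hoje?'
```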
+
+### Streaming and Server-Sent Events (SSE)
+
+[Server-Sent Events (SSE)](https://en.wikipedia.org/wiki/Server-sent_events) is a technology that allows certain endpoints to offer streaming capabilities, such as creating the impression that "the model is typing along with you," rather than delivering the entire answer all at once.
+
+You can set up the client to use Server-Sent Events (SSE) for all supported endpoints:
+```ruby
+client = Maritaca.new(
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
+)
+```
+
+Or you can decide on a per-request basis:
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] },
+  server_sent_events: true
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+With Server-Sent Events (SSE) enabled, you can use a block to receive partial results via events. This feature is particularly useful for methods that offer streaming capabilities, such as `chat_inference`: [Receiving Stream Events](#receiving-stream-events)
+
+#### Server-Sent Events (SSE) Hang
+
+Method calls will _hang_ until the server-sent events finish, so even without providing a block, you can obtain the final results of the received events: [Receiving Stream Events](#receiving-stream-events)
+
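
To make the hang behavior concrete, a small sketch assuming the result-array shape shown in Receiving Stream Events; the call blocks until the stream finishes, and the final element carries the usage totals:

```ruby
result = client.chat_inference(
  { model: 'sabia-2-medium',
    stream: true,
    chat_mode: true,
    messages: [ { role: 'user', content: 'Oi!' } ] }
)

# Only reached after the server-sent events have finished.
answer = result.filter_map { |event| event['text'] }.join
usage  = result.last # e.g. { 'completion_tokens' => 15, ..., 'model' => 'sabia-2-medium' }
```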
 ### New Functionalities and APIs
 
 Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If that's the case, you may still be able to use it through the `request` method. For example, `chat_inference` is just a wrapper for `api/chat/inference`, which you can call directly like this:
@@ -178,7 +298,7 @@ Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If th
 ```ruby
 result = client.request(
   'api/chat/inference',
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [{ role: 'user', content: 'Oi!' }] },
   request_method: 'POST'
@@ -187,6 +307,21 @@ result = client.request(
 
 ### Request Options
 
+#### Adapter
+
+The gem uses [Faraday](https://github.com/lostisland/faraday) with the [Typhoeus](https://github.com/typhoeus/typhoeus) adapter by default.
+
+You can use a different adapter if you want:
+
+```ruby
+require 'faraday/net_http'
+
+client = Maritaca.new(
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { connection: { adapter: :net_http } }
+)
+```
+
 #### Timeout
 
 You can set the maximum number of seconds to wait for the request to complete with the `timeout` option:
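
The hunk ends at the colon, before the unchanged timeout example. A sketch of the likely shape, assuming the option nests under the connection request options (consistent with `ALLOWED_REQUEST_OPTIONS` and `Faraday.new(request: @request_options)` in the client code further down):

```ruby
client = Maritaca.new(
  credentials: { api_key: ENV['MARITACA_API_KEY'] },
  # :timeout is one of the ALLOWED_REQUEST_OPTIONS
  # (timeout, open_timeout, read_timeout, write_timeout), in seconds.
  options: { connection: { request: { timeout: 5 } } }
)
```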
@@ -225,7 +360,7 @@ require 'maritaca-ai'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -234,7 +369,7 @@ rescue Maritaca::Errors::MaritacaError => error
   puts error.message # 'the server responded with status 500'
 
   puts error.payload
-  # { model: 'maritalk',
+  # { model: 'sabia-2-medium',
   #   chat_mode: true,
   #   ...
   # }
@@ -251,7 +386,7 @@ require 'maritaca-ai/errors'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
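
This hunk is also cut off before the rescue clause. Given the `require 'maritaca-ai/errors'` above, the short form presumably rescues the unnamespaced class, along these lines (a hedged guess, not shown in the diff):

```ruby
rescue MaritacaError => error
  puts error.class # e.g. Maritaca::Errors::RequestError
end
```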
@@ -290,7 +425,7 @@ gem build maritaca-ai.gemspec
 
 gem signin
 
-gem push maritaca-ai-1.0.1.gem
+gem push maritaca-ai-1.2.0.gem
 ```
 
 ### Updating the README
data/controllers/client.rb CHANGED
@@ -1,6 +1,8 @@
 # frozen_string_literal: true
 
+require 'event_stream_parser'
 require 'faraday'
+require 'faraday/typhoeus'
 require 'json'
 
 require_relative '../components/errors'
@@ -12,6 +14,8 @@ module Maritaca
 
   ALLOWED_REQUEST_OPTIONS = %i[timeout open_timeout read_timeout write_timeout].freeze
 
+  DEFAULT_FARADAY_ADAPTER = :typhoeus
+
   def initialize(config)
     @api_key = config.dig(:credentials, :api_key)
     @server_sent_events = config.dig(:options, :server_sent_events)
@@ -35,6 +39,8 @@ module Maritaca
     else
       {}
     end
+
+    @faraday_adapter = config.dig(:options, :connection, :adapter) || DEFAULT_FARADAY_ADAPTER
   end
 
   def chat_inference(payload, server_sent_events: nil, &callback)
@@ -54,9 +60,8 @@
 
     method_to_call = request_method.to_s.strip.downcase.to_sym
 
-    partial_json = ''
-
     response = Faraday.new(request: @request_options) do |faraday|
+      faraday.adapter @faraday_adapter
       faraday.response :raise_error
     end.send(method_to_call) do |request|
       request.url url
@@ -67,6 +72,10 @@ module Maritaca
       request.body = payload.to_json unless payload.nil?
 
       if server_sent_events_enabled
+        parser = EventStreamParser::Parser.new
+
+        partial_json = ''
+
         request.options.on_data = proc do |chunk, bytes, env|
           if env && env.status != 200
             raise_error = Faraday::Response::RaiseError.new
@@ -78,14 +87,33 @@ module Maritaca
           parsed_json = safe_parse_json(partial_json)
 
           if parsed_json
-            result = { event: parsed_json, raw: { chunk:, bytes:, env: } }
+            result = {
+              event: parsed_json,
+              raw: { chunk:, bytes:, env: }
+            }
 
-            callback.call(result[:event], result[:raw]) unless callback.nil?
+            callback.call(result[:event], result[:parsed], result[:raw]) unless callback.nil?
 
             results << result
 
             partial_json = ''
           end
+
+          parser.feed(chunk) do |type, data, id, reconnection_time|
+            parsed_data = safe_parse_json(data)
+
+            unless parsed_data.nil?
+              result = {
+                event: parsed_data,
+                parsed: { type:, data:, id:, reconnection_time: },
+                raw: { chunk:, bytes:, env: }
+              }
+
+              callback.call(result[:event], result[:parsed], result[:raw]) unless callback.nil?
+
+              results << result
+            end
+          end
         end
       end
     end
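
Read together, the changes above define the three-argument contract that streaming blocks receive. A short restatement of what the code already shows, not new behavior: `parsed` is populated only for chunks that arrived as well-formed SSE frames and is `nil` on the plain-JSON fallback path:

```ruby
client.chat_inference(
  { model: 'sabia-2-medium',
    stream: true,
    chat_mode: true,
    messages: [ { role: 'user', content: 'Oi!' } ] }
) do |event, parsed, raw|
  event  # JSON-decoded payload, e.g. { 'text' => ' Oi! Com' }
  parsed # SSE frame details { type:, data:, id:, reconnection_time: },
         # or nil when the chunk was handled as plain JSON instead
  raw    # { chunk:, bytes:, env: } straight from Faraday's on_data hook
end
```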
data/maritaca-ai.gemspec CHANGED
@@ -29,7 +29,9 @@ Gem::Specification.new do |spec|
 
   spec.require_paths = ['ports/dsl']
 
+  spec.add_dependency 'event_stream_parser', '~> 1.0'
   spec.add_dependency 'faraday', '~> 2.9'
+  spec.add_dependency 'faraday-typhoeus', '~> 1.1'
 
   spec.metadata['rubygems_mfa_required'] = 'true'
 end
data/static/gem.rb CHANGED
@@ -3,7 +3,7 @@
 module Maritaca
   GEM = {
     name: 'maritaca-ai',
-    version: '1.0.1',
+    version: '1.2.0',
     author: 'gbaptista',
     summary: 'Interact with Maritaca AI.',
     description: "A Ruby gem for interacting with Maritaca AI's large language models.",
data/tasks/generate-readme.clj CHANGED
@@ -23,7 +23,7 @@
                 (remove nil?))]
     (->> processed-lines
          (map (fn [{:keys [level title link]}]
-                (str (apply str (repeat (* 4 (- level 2)) " "))
+                (str (apply str (repeat (* 2 (- level 2)) " "))
                      "- ["
                      title
                      "](#"
data/template.md CHANGED
@@ -9,18 +9,19 @@ A Ruby gem for interacting with [MariTalk](https://chat.maritaca.ai) from [Marit
 ## TL;DR and Quick Start
 
 ```ruby
-gem 'maritaca-ai', '~> 1.0.1'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ```ruby
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -28,7 +29,13 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
 ## Index
@@ -40,11 +47,11 @@ Result:
 ### Installing
 
 ```sh
-gem install maritaca-ai -v 1.0.1
+gem install maritaca-ai -v 1.2.0
 ```
 
 ```sh
-gem 'maritaca-ai', '~> 1.0.1'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ### Credentials
@@ -68,7 +75,8 @@ Create a new client:
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 ```
 
@@ -91,11 +99,13 @@ client = Maritaca.new(
 
 #### chat_inference
 
-##### Chat
+##### Without Streaming Events
+
+###### Chat
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -103,16 +113,22 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Back-and-Forth Conversations
+###### Back-and-Forth Conversations
 
 To maintain a back-and-forth conversation, you need to append the received responses and build a history for your requests:
 
 ```rb
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [
       { role: 'user', content: 'Oi, meu nome é Tamanduá.' },
@@ -124,28 +140,127 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Seu nome é Tamanduá.' }
+{ 'answer' => ' Seu nome é Tamanduá. É um prazer conhecê-lo! Como posso ajudá-lo hoje?',
+  'usage' => {
+    'completion_tokens' => 35,
+    'prompt_tokens' => 39,
+    'total_tokens' => 74
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Without Chat
+###### Without Chat
 
 You can prompt the model without using chat mode:
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: false,
-    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n" }
+    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n",
+    stopping_tokens: ['.'] }
 )
 ```
 
 Result:
 ```ruby
 { 'answer' =>
-    "As aves, que aqui gorjeiam,\n" \
-    'Não gorjeiam como lá.' }
+    "As aves, que aqui gorjeiam,\n" \
+    'Não gorjeiam como lá.',
+  'usage' => {
+    'completion_tokens' => 21,
+    'prompt_tokens' => 21,
+    'total_tokens' => 42
+  },
+  'model' => 'sabia-2-medium' }
+```
+
+##### Receiving Stream Events
+
+Ensure that you have enabled [Server-Sent Events](#streaming-and-server-sent-events-sse) before using blocks for streaming. You also need to add `stream: true` to your payload:
+
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+Event:
+```ruby
+{ 'text' => ' Oi! Com' }
+```
+
+You can get all the received events at once as an array:
+
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+)
 ```
 
+Result:
+```ruby
+[{ 'text' => ' Oi! Com' },
+ { 'text' => 'o posso a' },
+ { 'text' => 'judar você' },
+ { 'text' => ' hoje?' },
+ { 'completion_tokens' => 15,
+   'prompt_tokens' => 74,
+   'total_tokens' => 89,
+   'model' => 'sabia-2-medium' }]
+```
+
+You can mix both as well:
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+### Streaming and Server-Sent Events (SSE)
+
+[Server-Sent Events (SSE)](https://en.wikipedia.org/wiki/Server-sent_events) is a technology that allows certain endpoints to offer streaming capabilities, such as creating the impression that "the model is typing along with you," rather than delivering the entire answer all at once.
+
+You can set up the client to use Server-Sent Events (SSE) for all supported endpoints:
+```ruby
+client = Maritaca.new(
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
+)
+```
+
+Or you can decide on a per-request basis:
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] },
+  server_sent_events: true
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+With Server-Sent Events (SSE) enabled, you can use a block to receive partial results via events. This feature is particularly useful for methods that offer streaming capabilities, such as `chat_inference`: [Receiving Stream Events](#receiving-stream-events)
+
+#### Server-Sent Events (SSE) Hang
+
+Method calls will _hang_ until the server-sent events finish, so even without providing a block, you can obtain the final results of the received events: [Receiving Stream Events](#receiving-stream-events)
+
 ### New Functionalities and APIs
 
 Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If that's the case, you may still be able to use it through the `request` method. For example, `chat_inference` is just a wrapper for `api/chat/inference`, which you can call directly like this:
@@ -153,7 +268,7 @@ Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If th
 ```ruby
 result = client.request(
   'api/chat/inference',
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [{ role: 'user', content: 'Oi!' }] },
   request_method: 'POST'
@@ -162,6 +277,21 @@ result = client.request(
 
 ### Request Options
 
+#### Adapter
+
+The gem uses [Faraday](https://github.com/lostisland/faraday) with the [Typhoeus](https://github.com/typhoeus/typhoeus) adapter by default.
+
+You can use a different adapter if you want:
+
+```ruby
+require 'faraday/net_http'
+
+client = Maritaca.new(
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { connection: { adapter: :net_http } }
+)
+```
+
 #### Timeout
 
 You can set the maximum number of seconds to wait for the request to complete with the `timeout` option:
@@ -200,7 +330,7 @@ require 'maritaca-ai'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -209,7 +339,7 @@ rescue Maritaca::Errors::MaritacaError => error
   puts error.message # 'the server responded with status 500'
 
   puts error.payload
-  # { model: 'maritalk',
+  # { model: 'sabia-2-medium',
   #   chat_mode: true,
   #   ...
   # }
@@ -226,7 +356,7 @@ require 'maritaca-ai/errors'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -265,7 +395,7 @@ gem build maritaca-ai.gemspec
 
 gem signin
 
-gem push maritaca-ai-1.0.1.gem
+gem push maritaca-ai-1.2.0.gem
 ```
 
 ### Updating the README
metadata CHANGED
@@ -1,15 +1,29 @@
 --- !ruby/object:Gem::Specification
 name: maritaca-ai
 version: !ruby/object:Gem::Version
-  version: 1.0.1
+  version: 1.2.0
 platform: ruby
 authors:
 - gbaptista
-autorequire: 
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-01-13 00:00:00.000000000 Z
+date: 2024-05-19 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: event_stream_parser
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.0'
 - !ruby/object:Gem::Dependency
   name: faraday
   requirement: !ruby/object:Gem::Requirement
@@ -24,8 +38,22 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '2.9'
+- !ruby/object:Gem::Dependency
+  name: faraday-typhoeus
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.1'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.1'
 description: A Ruby gem for interacting with Maritaca AI's large language models.
-email: 
+email:
 executables: []
 extensions: []
 extra_rdoc_files: []
@@ -53,7 +81,7 @@ metadata:
   homepage_uri: https://github.com/gbaptista/maritaca-ai
   source_code_uri: https://github.com/gbaptista/maritaca-ai
   rubygems_mfa_required: 'true'
-post_install_message: 
+post_install_message:
 rdoc_options: []
 require_paths:
 - ports/dsl
@@ -69,7 +97,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     version: '0'
 requirements: []
 rubygems_version: 3.3.3
-signing_key: 
+signing_key:
 specification_version: 4
 summary: Interact with Maritaca AI.
 test_files: []