maritaca-ai 1.1.0 → 1.2.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 6303b41bc438b8e903a651e906e695cca755c34820cb75c66a9f669b2b8bedc4
-  data.tar.gz: cf98f0c7cdd2580885e681077fb57d15c78b68d6908176bc1cff8b737adff174
+  metadata.gz: afaa9a68f6751d7fdc770f727beb62076789b0a2e1b149e1a15c7973e571f4fb
+  data.tar.gz: 913f22cf572a477d03c2b227a478a67a860e01462f22e8a53cba2d650315823d
 SHA512:
-  metadata.gz: e2107ca61810c3ac4c0ef71955cfd998b71451a6dd2c0ef91a3442448a38f0594d4791687c5852bf668736c5f5a3b9f954932f601cd7629c8e871592cc8a7002
-  data.tar.gz: bde4023b33d3afbd36cbe183a60c14fd558135d99f323686a8130b98e601d9910af4e3d81f97a08a49ffd51d1ecbec68704d3772c86c6f2a2fdaa27f982335d1
+  metadata.gz: 455d565cbf7092b0f2fa72902b78d87e8293a03800755c5020461cc86916724471d2b269d3630822ff433c15258733dcdc93ae3b25f745840306ede7062eb208
+  data.tar.gz: 58f7bd165d66a673110ca317393672ed67fe20aa053f97e83195b8892f63c3c8575494795b1055d774ee616e7dd9964bb3e81af28bc7ede3837a05420948295f
data/Gemfile.lock CHANGED
@@ -1,7 +1,8 @@
 PATH
   remote: .
   specs:
-    maritaca-ai (1.1.0)
+    maritaca-ai (1.2.0)
+      event_stream_parser (~> 1.0)
       faraday (~> 2.9)
       faraday-typhoeus (~> 1.1)
 
@@ -14,6 +15,7 @@ GEM
     dotenv (2.8.1)
     ethon (0.16.0)
       ffi (>= 1.15.0)
+    event_stream_parser (1.0.0)
     faraday (2.9.0)
       faraday-net_http (>= 2.0, < 3.2)
     faraday-net_http (3.1.0)
@@ -22,13 +24,13 @@ GEM
       faraday (~> 2.0)
       typhoeus (~> 1.4)
     ffi (1.16.3)
-    json (2.7.1)
+    json (2.7.2)
     language_server-protocol (3.17.0.3)
-    method_source (1.0.0)
+    method_source (1.1.0)
     net-http (0.4.1)
       uri
     parallel (1.24.0)
-    parser (3.3.0.5)
+    parser (3.3.1.0)
       ast (~> 2.4.1)
       racc
     pry (0.14.2)
@@ -39,9 +41,10 @@ GEM
       pry (>= 0.13, < 0.15)
     racc (1.7.3)
     rainbow (3.1.1)
-    regexp_parser (2.9.0)
-    rexml (3.2.6)
-    rubocop (1.60.2)
+    regexp_parser (2.9.2)
+    rexml (3.2.8)
+      strscan (>= 3.0.9)
+    rubocop (1.63.5)
       json (~> 2.3)
       language_server-protocol (>= 3.17.0)
       parallel (~> 1.10)
@@ -49,12 +52,13 @@ GEM
       rainbow (>= 2.2.2, < 4.0)
       regexp_parser (>= 1.8, < 3.0)
       rexml (>= 3.2.5, < 4.0)
-      rubocop-ast (>= 1.30.0, < 2.0)
+      rubocop-ast (>= 1.31.1, < 2.0)
       ruby-progressbar (~> 1.7)
       unicode-display_width (>= 2.4.0, < 3.0)
-    rubocop-ast (1.30.0)
-      parser (>= 3.2.1.0)
+    rubocop-ast (1.31.3)
+      parser (>= 3.3.1.0)
     ruby-progressbar (1.13.0)
+    strscan (3.1.0)
     typhoeus (1.4.1)
       ethon (>= 0.9.0)
     unicode-display_width (2.5.0)
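
The headline change above is the new `event_stream_parser` runtime dependency, which the client code further down uses to parse Server-Sent Events (SSE) streams. A minimal sketch of the parser's API as this release exercises it — the sample chunk is illustrative, not taken from the gem:

```ruby
require 'event_stream_parser'

# An illustrative SSE chunk, formatted the way an HTTP client would
# deliver it: one or more fields, terminated by a blank line.
chunk = "event: message\ndata: {\"text\":\" Oi! Com\"}\n\n"

parser = EventStreamParser::Parser.new

# `feed` buffers partial chunks and yields once per complete event,
# with the same four block arguments the client code below relies on.
parser.feed(chunk) do |type, data, id, reconnection_time|
  puts "type: #{type.inspect}, data: #{data.inspect}"
end
# => type: "message", data: "{\"text\":\" Oi! Com\"}"
```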
data/README.md CHANGED
@@ -9,18 +9,19 @@ A Ruby gem for interacting with [MariTalk](https://chat.maritaca.ai) from [Marit
 ## TL;DR and Quick Start
 
 ```ruby
-gem 'maritaca-ai', '~> 1.1.0'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ```ruby
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -28,7 +29,13 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
 ## Index
@@ -43,9 +50,13 @@ Result:
 - [Custom Address](#custom-address)
 - [Methods](#methods)
   - [chat_inference](#chat_inference)
-    - [Chat](#chat)
-    - [Back-and-Forth Conversations](#back-and-forth-conversations)
-    - [Without Chat](#without-chat)
+    - [Without Streaming Events](#without-streaming-events)
+      - [Chat](#chat)
+      - [Back-and-Forth Conversations](#back-and-forth-conversations)
+      - [Without Chat](#without-chat)
+    - [Receiving Stream Events](#receiving-stream-events)
+  - [Streaming and Server-Sent Events (SSE)](#streaming-and-server-sent-events-sse)
+    - [Server-Sent Events (SSE) Hang](#server-sent-events-sse-hang)
 - [New Functionalities and APIs](#new-functionalities-and-apis)
 - [Request Options](#request-options)
   - [Adapter](#adapter)
@@ -66,11 +77,11 @@ Result:
 ### Installing
 
 ```sh
-gem install maritaca-ai -v 1.1.0
+gem install maritaca-ai -v 1.2.0
 ```
 
 ```sh
-gem 'maritaca-ai', '~> 1.1.0'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ### Credentials
@@ -94,7 +105,8 @@ Create a new client:
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 ```
 
@@ -117,11 +129,13 @@ client = Maritaca.new(
 
 #### chat_inference
 
-##### Chat
+##### Without Streaming Events
+
+###### Chat
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -129,16 +143,22 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Back-and-Forth Conversations
+###### Back-and-Forth Conversations
 
 To maintain a back-and-forth conversation, you need to append the received responses and build a history for your requests:
 
 ```rb
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [
       { role: 'user', content: 'Oi, meu nome é Tamanduá.' },
@@ -150,28 +170,127 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Seu nome é Tamanduá.' }
+{ 'answer' => ' Seu nome é Tamanduá. É um prazer conhecê-lo! Como posso ajudá-lo hoje?',
+  'usage' => {
+    'completion_tokens' => 35,
+    'prompt_tokens' => 39,
+    'total_tokens' => 74
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Without Chat
+###### Without Chat
 
 You can prompt the model without using chat mode:
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: false,
-    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n" }
+    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n",
+    stopping_tokens: ['.'] }
 )
 ```
 
 Result:
 ```ruby
 { 'answer' =>
-    "As aves, que aqui gorjeiam,\n" \
-    'Não gorjeiam como lá.' }
+    "As aves, que aqui gorjeiam,\n" \
+    'Não gorjeiam como lá.',
+  'usage' => {
+    'completion_tokens' => 21,
+    'prompt_tokens' => 21,
+    'total_tokens' => 42
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
+##### Receiving Stream Events
+
+Ensure that you have enabled [Server-Sent Events](#streaming-and-server-sent-events-sse) before using blocks for streaming. You also need to add `stream: true` to your payload:
+
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+Event:
+```ruby
+{ 'text' => ' Oi! Com' }
+```
+
+You can get all the received events at once as an array:
+
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+)
+```
+
+Result:
+```ruby
+[{ 'text' => ' Oi! Com' },
+ { 'text' => 'o posso a' },
+ { 'text' => 'judar você' },
+ { 'text' => ' hoje?' },
+ { 'completion_tokens' => 15,
+   'prompt_tokens' => 74,
+   'total_tokens' => 89,
+   'model' => 'sabia-2-medium' }]
+```
+
+You can mix both as well:
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+### Streaming and Server-Sent Events (SSE)
+
+[Server-Sent Events (SSE)](https://en.wikipedia.org/wiki/Server-sent_events) is a technology that allows certain endpoints to offer streaming capabilities, such as creating the impression that "the model is typing along with you," rather than delivering the entire answer all at once.
+
+You can set up the client to use Server-Sent Events (SSE) for all supported endpoints:
+```ruby
+client = Maritaca.new(
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
+)
+```
+
+Or you can decide on a per-request basis:
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] },
+  server_sent_events: true
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+With Server-Sent Events (SSE) enabled, you can use a block to receive partial results via events. This feature is particularly useful for methods that offer streaming capabilities, such as `chat_inference`: [Receiving Stream Events](#receiving-stream-events)
+
+#### Server-Sent Events (SSE) Hang
+
+Method calls will _hang_ until the server-sent events finish, so even without providing a block, you can obtain the final results of the received events: [Receiving Stream Events](#receiving-stream-events)
+
 ### New Functionalities and APIs
 
 Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If that's the case, you may still be able to use it through the `request` method. For example, `chat_inference` is just a wrapper for `api/chat/inference`, which you can call directly like this:
179
298
  ```ruby
180
299
  result = client.request(
181
300
  'api/chat/inference',
182
- { model: 'maritalk',
301
+ { model: 'sabia-2-medium',
183
302
  chat_mode: true,
184
303
  messages: [{ role: 'user', content: 'Oi!' }] },
185
304
  request_method: 'POST'
@@ -241,7 +360,7 @@ require 'maritaca-ai'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -250,7 +369,7 @@ rescue Maritaca::Errors::MaritacaError => error
 puts error.message # 'the server responded with status 500'
 
 puts error.payload
-# { model: 'maritalk',
+# { model: 'sabia-2-medium',
 #   chat_mode: true,
 #   ...
 # }
@@ -267,7 +386,7 @@ require 'maritaca-ai/errors'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -306,7 +425,7 @@ gem build maritaca-ai.gemspec
 
 gem signin
 
-gem push maritaca-ai-1.1.0.gem
+gem push maritaca-ai-1.2.0.gem
 ```
 
 ### Updating the README
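
The README changes above document streaming blocks with three arguments (`event`, `parsed`, `raw`). As a quick illustration of how the documented events compose, here is a sketch — assuming only the `'text'` fragments and final usage event shown above — that accumulates a streamed answer into a single string:

```ruby
require 'maritaca-ai'

client = Maritaca.new(
  credentials: { api_key: ENV['MARITACA_API_KEY'] },
  options: { server_sent_events: true }
)

answer = ''

client.chat_inference(
  { model: 'sabia-2-medium',
    stream: true,
    chat_mode: true,
    messages: [{ role: 'user', content: 'Oi!' }] }
) do |event, parsed, raw|
  # Intermediate events carry a 'text' fragment; the final event
  # carries token usage instead, so guard for the key.
  answer += event['text'] if event['text']
end

puts answer
# => " Oi! Como posso ajudar você hoje?"
```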
data/controllers/client.rb CHANGED
@@ -1,5 +1,6 @@
 # frozen_string_literal: true
 
+require 'event_stream_parser'
 require 'faraday'
 require 'faraday/typhoeus'
 require 'json'
@@ -59,8 +60,6 @@ module Maritaca
 
         method_to_call = request_method.to_s.strip.downcase.to_sym
 
-        partial_json = ''
-
         response = Faraday.new(request: @request_options) do |faraday|
           faraday.adapter @faraday_adapter
           faraday.response :raise_error
@@ -73,6 +72,10 @@ module Maritaca
 
           request.body = payload.to_json unless payload.nil?
 
           if server_sent_events_enabled
+            parser = EventStreamParser::Parser.new
+
+            partial_json = ''
+
             request.options.on_data = proc do |chunk, bytes, env|
               if env && env.status != 200
                 raise_error = Faraday::Response::RaiseError.new
@@ -84,14 +87,33 @@ module Maritaca
               parsed_json = safe_parse_json(partial_json)
 
               if parsed_json
-                result = { event: parsed_json, raw: { chunk:, bytes:, env: } }
+                result = {
+                  event: parsed_json,
+                  raw: { chunk:, bytes:, env: }
+                }
 
-                callback.call(result[:event], result[:raw]) unless callback.nil?
+                callback.call(result[:event], result[:parsed], result[:raw]) unless callback.nil?
 
                 results << result
 
                 partial_json = ''
               end
+
+              parser.feed(chunk) do |type, data, id, reconnection_time|
+                parsed_data = safe_parse_json(data)
+
+                unless parsed_data.nil?
+                  result = {
+                    event: parsed_data,
+                    parsed: { type:, data:, id:, reconnection_time: },
+                    raw: { chunk:, bytes:, env: }
+                  }
+
+                  callback.call(result[:event], result[:parsed], result[:raw]) unless callback.nil?
+
+                  results << result
+                end
+              end
             end
           end
         end
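
Two details of the client change are worth noting. The callback now always receives three arguments; on the legacy newline-delimited path, `result[:parsed]` is simply `nil`, which keeps the block signature uniform across both paths. And events only reach the callback once `safe_parse_json` returns a value. That helper is defined outside this diff; a hypothetical sketch of the guard it appears to provide:

```ruby
require 'json'

# Hypothetical stand-in for the client's safe_parse_json (defined
# outside this diff): parse when the buffer holds complete JSON,
# return nil otherwise so the caller keeps buffering.
def safe_parse_json(raw)
  JSON.parse(raw)
rescue JSON::ParserError, TypeError
  nil
end

safe_parse_json('{"text":" Oi"')  # => nil (incomplete; keep buffering)
safe_parse_json('{"text":" Oi"}') # => { "text" => " Oi" }
```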
data/maritaca-ai.gemspec CHANGED
@@ -29,6 +29,7 @@ Gem::Specification.new do |spec|
 
   spec.require_paths = ['ports/dsl']
 
+  spec.add_dependency 'event_stream_parser', '~> 1.0'
   spec.add_dependency 'faraday', '~> 2.9'
   spec.add_dependency 'faraday-typhoeus', '~> 1.1'
 
data/static/gem.rb CHANGED
@@ -3,7 +3,7 @@
 module Maritaca
   GEM = {
     name: 'maritaca-ai',
-    version: '1.1.0',
+    version: '1.2.0',
     author: 'gbaptista',
     summary: 'Interact with Maritaca AI.',
     description: "A Ruby gem for interacting with Maritaca AI's large language models.",
data/template.md CHANGED
@@ -9,18 +9,19 @@ A Ruby gem for interacting with [MariTalk](https://chat.maritaca.ai) from [Marit
 ## TL;DR and Quick Start
 
 ```ruby
-gem 'maritaca-ai', '~> 1.1.0'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ```ruby
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -28,7 +29,13 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
 ## Index
@@ -40,11 +47,11 @@ Result:
 ### Installing
 
 ```sh
-gem install maritaca-ai -v 1.1.0
+gem install maritaca-ai -v 1.2.0
 ```
 
 ```sh
-gem 'maritaca-ai', '~> 1.1.0'
+gem 'maritaca-ai', '~> 1.2.0'
 ```
 
 ### Credentials
@@ -68,7 +75,8 @@ Create a new client:
 require 'maritaca-ai'
 
 client = Maritaca.new(
-  credentials: { api_key: ENV['MARITACA_API_KEY'] }
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
 )
 ```
 
@@ -91,11 +99,13 @@ client = Maritaca.new(
 
 #### chat_inference
 
-##### Chat
+##### Without Streaming Events
+
+###### Chat
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [ { role: 'user', content: 'Oi!' } ] }
 )
@@ -103,16 +113,22 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Oi! Como posso ajudá-lo(a) hoje?' }
+{ 'answer' => ' Oi! Como posso ajudar você hoje?',
+  'usage' => {
+    'completion_tokens' => 15,
+    'prompt_tokens' => 3,
+    'total_tokens' => 18
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Back-and-Forth Conversations
+###### Back-and-Forth Conversations
 
 To maintain a back-and-forth conversation, you need to append the received responses and build a history for your requests:
 
 ```rb
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [
       { role: 'user', content: 'Oi, meu nome é Tamanduá.' },
@@ -124,28 +140,127 @@ result = client.chat_inference(
 
 Result:
 ```ruby
-{ 'answer' => 'Seu nome é Tamanduá.' }
+{ 'answer' => ' Seu nome é Tamanduá. É um prazer conhecê-lo! Como posso ajudá-lo hoje?',
+  'usage' => {
+    'completion_tokens' => 35,
+    'prompt_tokens' => 39,
+    'total_tokens' => 74
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
-##### Without Chat
+###### Without Chat
 
 You can prompt the model without using chat mode:
 
 ```ruby
 result = client.chat_inference(
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: false,
-    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n" }
+    messages: "Minha terra tem palmeiras,\nOnde canta o Sabiá;\n",
+    stopping_tokens: ['.'] }
 )
 ```
 
 Result:
 ```ruby
 { 'answer' =>
-    "As aves, que aqui gorjeiam,\n" \
-    'Não gorjeiam como lá.' }
+    "As aves, que aqui gorjeiam,\n" \
+    'Não gorjeiam como lá.',
+  'usage' => {
+    'completion_tokens' => 21,
+    'prompt_tokens' => 21,
+    'total_tokens' => 42
+  },
+  'model' => 'sabia-2-medium' }
 ```
 
+##### Receiving Stream Events
+
+Ensure that you have enabled [Server-Sent Events](#streaming-and-server-sent-events-sse) before using blocks for streaming. You also need to add `stream: true` to your payload:
+
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+Event:
+```ruby
+{ 'text' => ' Oi! Com' }
+```
+
+You can get all the received events at once as an array:
+
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+)
+```
+
+Result:
+```ruby
+[{ 'text' => ' Oi! Com' },
+ { 'text' => 'o posso a' },
+ { 'text' => 'judar você' },
+ { 'text' => ' hoje?' },
+ { 'completion_tokens' => 15,
+   'prompt_tokens' => 74,
+   'total_tokens' => 89,
+   'model' => 'sabia-2-medium' }]
+```
+
+You can mix both as well:
+```ruby
+result = client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] }
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+### Streaming and Server-Sent Events (SSE)
+
+[Server-Sent Events (SSE)](https://en.wikipedia.org/wiki/Server-sent_events) is a technology that allows certain endpoints to offer streaming capabilities, such as creating the impression that "the model is typing along with you," rather than delivering the entire answer all at once.
+
+You can set up the client to use Server-Sent Events (SSE) for all supported endpoints:
+```ruby
+client = Maritaca.new(
+  credentials: { api_key: ENV['MARITACA_API_KEY'] },
+  options: { server_sent_events: true }
+)
+```
+
+Or you can decide on a per-request basis:
+```ruby
+client.chat_inference(
+  { model: 'sabia-2-medium',
+    stream: true,
+    chat_mode: true,
+    messages: [ { role: 'user', content: 'Oi!' } ] },
+  server_sent_events: true
+) do |event, parsed, raw|
+  puts event
+end
+```
+
+With Server-Sent Events (SSE) enabled, you can use a block to receive partial results via events. This feature is particularly useful for methods that offer streaming capabilities, such as `chat_inference`: [Receiving Stream Events](#receiving-stream-events)
+
+#### Server-Sent Events (SSE) Hang
+
+Method calls will _hang_ until the server-sent events finish, so even without providing a block, you can obtain the final results of the received events: [Receiving Stream Events](#receiving-stream-events)
+
 ### New Functionalities and APIs
 
 Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If that's the case, you may still be able to use it through the `request` method. For example, `chat_inference` is just a wrapper for `api/chat/inference`, which you can call directly like this:
@@ -153,7 +268,7 @@ Maritaca may launch a new endpoint that we haven't covered in the Gem yet. If th
 ```ruby
 result = client.request(
   'api/chat/inference',
-  { model: 'maritalk',
+  { model: 'sabia-2-medium',
     chat_mode: true,
     messages: [{ role: 'user', content: 'Oi!' }] },
   request_method: 'POST'
@@ -215,7 +330,7 @@ require 'maritaca-ai'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -224,7 +339,7 @@ rescue Maritaca::Errors::MaritacaError => error
 puts error.message # 'the server responded with status 500'
 
 puts error.payload
-# { model: 'maritalk',
+# { model: 'sabia-2-medium',
 #   chat_mode: true,
 #   ...
 # }
@@ -241,7 +356,7 @@ require 'maritaca-ai/errors'
 
 begin
   client.chat_inference(
-    { model: 'maritalk',
+    { model: 'sabia-2-medium',
       chat_mode: true,
       messages: [ { role: 'user', content: 'Oi!' } ] }
   )
@@ -280,7 +395,7 @@ gem build maritaca-ai.gemspec
 
 gem signin
 
-gem push maritaca-ai-1.1.0.gem
+gem push maritaca-ai-1.2.0.gem
 ```
 
 ### Updating the README
metadata CHANGED
@@ -1,15 +1,29 @@
 --- !ruby/object:Gem::Specification
 name: maritaca-ai
 version: !ruby/object:Gem::Version
-  version: 1.1.0
+  version: 1.2.0
 platform: ruby
 authors:
 - gbaptista
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2024-02-10 00:00:00.000000000 Z
+date: 2024-05-19 00:00:00.000000000 Z
 dependencies:
+- !ruby/object:Gem::Dependency
+  name: event_stream_parser
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.0'
+  type: :runtime
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '1.0'
 - !ruby/object:Gem::Dependency
   name: faraday
   requirement: !ruby/object:Gem::Requirement