ollama-ai 1.0.0 → 1.0.1

data/template.md CHANGED
@@ -1,6 +1,6 @@
  # Ollama AI
 
- A Ruby gem for interacting with [Ollama](https://github.com/jmorganca/ollama)'s API that allows you to run open source AI LLMs (Large Language Models) locally.
+ A Ruby gem for interacting with [Ollama](https://ollama.ai)'s API that allows you to run open source AI LLMs (Large Language Models) locally.
 
  ![The image presents a llama's head merged with a red ruby gemstone against a light beige background. The red facets form both the ruby and the contours of the llama, creating a clever visual fusion.](https://raw.githubusercontent.com/gbaptista/assets/main/ollama-ai/ollama-ai-canvas.png)
 
@@ -9,7 +9,7 @@ A Ruby gem for interacting with [Ollama](https://github.com/jmorganca/ollama)'s
  ## TL;DR and Quick Start
 
  ```ruby
- gem 'ollama-ai', '~> 1.0.0'
+ gem 'ollama-ai', '~> 1.0.1'
  ```
 
  ```ruby
@@ -21,40 +21,40 @@ client = Ollama.new(
  )
 
  result = client.generate(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' }
  )
  ```
 
  Result:
  ```ruby
- [{ 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:21.357816652Z',
+ [{ 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:34:02.088810408Z',
     'response' => 'Hello',
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:21.490053654Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:34:02.419045606Z',
     'response' => '!',
     'done' => false },
-  # ...
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:24.82505599Z',
-    'response' => '.',
+  # ...
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:34:07.680049831Z',
+    'response' => '?',
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:24.956774721Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:34:07.872170352Z',
     'response' => '',
     'done' => true,
     'context' =>
-     [50_296, 10_057,
+     [518, 25_580,
       # ...
-      1037, 13],
-   'total_duration' => 5_702_027_026,
-   'load_duration' => 649_711,
-   'prompt_eval_count' => 25,
-   'prompt_eval_duration' => 2_227_159_000,
-   'eval_count' => 39,
-   'eval_duration' => 3_466_593_000 }]
+      13_563, 29_973],
+   'total_duration' => 11_653_781_127,
+   'load_duration' => 1_186_200_439,
+   'prompt_eval_count' => 22,
+   'prompt_eval_duration' => 5_006_751_000,
+   'eval_count' => 25,
+   'eval_duration' => 5_453_058_000 }]
  ```
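
Since the call streams by default, `result` is an array of events rather than a single string. A minimal sketch (plain Ruby, reusing the `result` above) to reassemble the full response text:

```ruby
# Concatenate the streamed chunks into the complete response text.
puts result.map { |event| event['response'] }.join
```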
 
  ## Index
@@ -66,11 +66,11 @@ Result:
  ### Installing
 
  ```sh
- gem install ollama-ai -v 1.0.0
+ gem install ollama-ai -v 1.0.1
  ```
 
  ```sh
- gem 'ollama-ai', '~> 1.0.0'
+ gem 'ollama-ai', '~> 1.0.1'
  ```
 
  ## Usage
@@ -113,7 +113,7 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#gen
 
  ```ruby
  result = client.generate(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!',
      stream: false }
  )
@@ -121,21 +121,20 @@ result = client.generate(
 
  Result:
  ```ruby
- [{ 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T17:47:26.443128626Z',
-    'response' =>
-      "Hello! How can I assist you today? Do you have any questions or problems that you'd like help with?",
+ [{ 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:35:41.951371247Z',
+    'response' => "Hi there! It's nice to meet you. How are you today?",
     'done' => true,
     'context' =>
-     [50_296, 10_057,
+     [518, 25_580,
       # ...
-      351, 30],
-   'total_duration' => 6_495_278_960,
-   'load_duration' => 1_434_052_851,
-   'prompt_eval_count' => 25,
-   'prompt_eval_duration' => 1_938_861_000,
-   'eval_count' => 23,
-   'eval_duration' => 3_119_030_000 }]
+      9826, 29_973],
+   'total_duration' => 6_981_097_576,
+   'load_duration' => 625_053,
+   'prompt_eval_count' => 22,
+   'prompt_eval_duration' => 4_075_171_000,
+   'eval_count' => 16,
+   'eval_duration' => 2_900_325_000 }]
  ```
 
  ##### Receiving Stream Events
@@ -146,7 +145,7 @@ Ensure that you have enabled [Server-Sent Events](#streaming-and-server-sent-eve
 
  ```ruby
  client.generate(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' }
  ) do |event, raw|
    puts event
@@ -155,8 +154,8 @@ end
 
  Event:
  ```ruby
- { 'model' => 'dolphin-phi',
-   'created_at' => '2024-01-06T17:27:29.366879586Z',
+ { 'model' => 'llama2',
+   'created_at' => '2024-01-07T01:36:30.665245712Z',
    'response' => 'Hello',
    'done' => false }
  ```
@@ -164,46 +163,46 @@ Event:
  You can get all the received events at once as an array:
  ```ruby
  result = client.generate(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' }
  )
  ```
 
  Result:
  ```ruby
- [{ 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:21.357816652Z',
+ [{ 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:36:30.665245712Z',
     'response' => 'Hello',
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:21.490053654Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:36:30.927337136Z',
     'response' => '!',
     'done' => false },
    # ...
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:24.82505599Z',
-    'response' => '.',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:36:37.249416767Z',
+    'response' => '?',
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T16:53:24.956774721Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:36:37.44041283Z',
     'response' => '',
     'done' => true,
     'context' =>
-     [50_296, 10_057,
+     [518, 25_580,
       # ...
-      1037, 13],
-   'total_duration' => 5_702_027_026,
-   'load_duration' => 649_711,
-   'prompt_eval_count' => 25,
-   'prompt_eval_duration' => 2_227_159_000,
-   'eval_count' => 39,
-   'eval_duration' => 3_466_593_000 }]
+      13_563, 29_973],
+   'total_duration' => 10_551_395_645,
+   'load_duration' => 966_631,
+   'prompt_eval_count' => 22,
+   'prompt_eval_duration' => 4_034_990_000,
+   'eval_count' => 25,
+   'eval_duration' => 6_512_954_000 }]
  ```
 
  You can mix both as well:
  ```ruby
  result = client.generate(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' }
  ) do |event, raw|
    puts event
@@ -216,7 +215,7 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#gen
 
  ```ruby
  result = client.chat(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      messages: [
        { role: 'user', content: 'Hi! My name is Purple.' }
      ] }
@@ -227,37 +226,37 @@ end
 
  Event:
  ```ruby
- { 'model' => 'dolphin-phi',
-   'created_at' => '2024-01-06T18:17:22.468231988Z',
-   'message' => { 'role' => 'assistant', 'content' => 'Hello' },
+ { 'model' => 'llama2',
+   'created_at' => '2024-01-07T01:38:01.729897311Z',
+   'message' => { 'role' => 'assistant', 'content' => "\n" },
    'done' => false }
  ```
 
  Result:
  ```ruby
- [{ 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T18:17:22.468231988Z',
-    'message' => { 'role' => 'assistant', 'content' => 'Hello' },
+ [{ 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:38:01.729897311Z',
+    'message' => { 'role' => 'assistant', 'content' => "\n" },
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T18:17:22.594414415Z',
-    'message' => { 'role' => 'assistant', 'content' => ' Purple' },
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:38:02.081494506Z',
+    'message' => { 'role' => 'assistant', 'content' => '*' },
     'done' => false },
    # ...
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T18:17:25.491597233Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:38:17.855905499Z',
     'message' => { 'role' => 'assistant', 'content' => '?' },
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T18:17:25.578463723Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:38:18.07331245Z',
     'message' => { 'role' => 'assistant', 'content' => '' },
     'done' => true,
-   'total_duration' => 5_274_177_696,
-   'load_duration' => 1_565_325,
-   'prompt_eval_count' => 30,
-   'prompt_eval_duration' => 2_284_638_000,
-   'eval_count' => 29,
-   'eval_duration' => 2_983_962_000 }]
+   'total_duration' => 22_494_544_502,
+   'load_duration' => 4_224_600,
+   'prompt_eval_count' => 28,
+   'prompt_eval_duration' => 6_496_583_000,
+   'eval_count' => 61,
+   'eval_duration' => 15_991_728_000 }]
  ```
 
  ##### Back-and-Forth Conversations
@@ -268,11 +267,11 @@ To maintain a back-and-forth conversation, you need to append the received respo
 
  ```ruby
  result = client.chat(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      messages: [
        { role: 'user', content: 'Hi! My name is Purple.' },
        { role: 'assistant',
-         content: "Hi, Purple! It's nice to meet you. I am Dolphin. How can I help you today?" },
+         content: 'Hi, Purple!' },
        { role: 'user', content: "What's my name?" }
      ] }
  ) do |event, raw|
@@ -283,37 +282,41 @@ end
  Event:
 
  ```ruby
- { 'model' => 'dolphin-phi',
-   'created_at' => '2024-01-06T19:07:51.05465997Z',
-   'message' => { 'role' => 'assistant', 'content' => 'Your' },
+ { 'model' => 'llama2',
+   'created_at' => '2024-01-07T01:40:07.352998498Z',
+   'message' => { 'role' => 'assistant', 'content' => ' Pur' },
    'done' => false }
  ```
 
  Result:
  ```ruby
- [{ 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T19:07:51.05465997Z',
+ [{ 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:40:06.562939469Z',
     'message' => { 'role' => 'assistant', 'content' => 'Your' },
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T19:07:51.184476541Z',
-    'message' => { 'role' => 'assistant', 'content' => ' name' },
-    'done' => false },
    # ...
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T19:07:56.526297223Z',
-    'message' => { 'role' => 'assistant', 'content' => '.' },
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:40:07.352998498Z',
+    'message' => { 'role' => 'assistant', 'content' => ' Pur' },
+    'done' => false },
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:40:07.545323584Z',
+    'message' => { 'role' => 'assistant', 'content' => 'ple' },
     'done' => false },
-  { 'model' => 'dolphin-phi',
-    'created_at' => '2024-01-06T19:07:56.667809424Z',
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:40:07.77769408Z',
+    'message' => { 'role' => 'assistant', 'content' => '!' },
+    'done' => false },
+  { 'model' => 'llama2',
+    'created_at' => '2024-01-07T01:40:07.974165849Z',
     'message' => { 'role' => 'assistant', 'content' => '' },
     'done' => true,
-   'total_duration' => 12_169_557_266,
-   'load_duration' => 4_486_689,
-   'prompt_eval_count' => 95,
-   'prompt_eval_duration' => 6_678_566_000,
-   'eval_count' => 40,
-   'eval_duration' => 5_483_133_000 }]
+   'total_duration' => 11_482_012_681,
+   'load_duration' => 4_246_882,
+   'prompt_eval_count' => 57,
+   'prompt_eval_duration' => 10_387_150_000,
+   'eval_count' => 6,
+   'eval_duration' => 1_089_249_000 }]
  ```
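
To make the append step concrete, here is a minimal multi-turn sketch; it relies only on `client.chat` as shown above, and the looped prompts are illustrative:

```ruby
# Keep a running history; append each assistant reply before the next user turn.
messages = []

['Hi! My name is Purple.', "What's my name?"].each do |content|
  messages << { role: 'user', content: content }

  events = client.chat({ model: 'llama2', messages: messages })
  reply = events.map { |event| event.dig('message', 'content') }.join

  messages << { role: 'assistant', content: reply }
end
```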
 
  #### embeddings: Generate Embeddings
@@ -322,7 +325,7 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#gen
 
  ```ruby
  result = client.embeddings(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' }
  )
  ```
@@ -330,11 +333,9 @@ result = client.embeddings(
  Result:
  ```ruby
  [{ 'embedding' =>
-     [1.0372048616409302,
-      1.0635842084884644,
+     [0.6970467567443848, -2.248202085494995,
       # ...
-      -0.5416496396064758,
-      0.051569778472185135] }]
+      -1.5994540452957153, -0.3464218080043793] }]
  ```
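
As an aside, embedding vectors like these are typically compared with cosine similarity; the helper below is a plain-Ruby illustration, not part of the gem:

```ruby
# Cosine similarity: close to 1.0 for vectors pointing in the same direction.
def cosine_similarity(a, b)
  dot = a.zip(b).sum { |x, y| x * y }
  dot / (Math.sqrt(a.sum { |x| x**2 }) * Math.sqrt(b.sum { |x| x**2 }))
end

embedding_a = client.embeddings({ model: 'llama2', prompt: 'Hi!' }).first['embedding']
embedding_b = client.embeddings({ model: 'llama2', prompt: 'Hello!' }).first['embedding']

puts cosine_similarity(embedding_a, embedding_b)
```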
 
  #### Models
@@ -346,7 +347,7 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#cre
  ```ruby
  result = client.create(
    { name: 'mario',
-     modelfile: "FROM dolphin-phi\nSYSTEM You are mario from Super Mario Bros." }
+     modelfile: "FROM llama2\nSYSTEM You are mario from Super Mario Bros." }
  ) do |event, raw|
    puts event
  end
@@ -387,9 +388,7 @@ client.generate(
  end
  ```
 
- > _Hello! I'm Mario, a character from the popular video game series Super Mario Bros. My goal is to rescue Princess Peach from the evil Bowser and his minions, so we can live happily ever after in the Mushroom Kingdom! 🍄🐥_
- >
- > _What brings you here? How can I help you on your journey?_
+ > _Woah! *adjusts sunglasses* It's-a me, Mario! *winks* You must be a new friend I've-a met here in the Mushroom Kingdom. *tips top hat* What brings you to this neck of the woods? Maybe you're looking for-a some help on your adventure? *nods* Just let me know, and I'll do my best to-a assist ya! 😃_
 
  ##### tags: List Local Models
 
@@ -402,28 +401,28 @@ result = client.tags
  Result:
  ```ruby
  [{ 'models' =>
-     [{ 'name' => 'dolphin-phi:latest',
-        'modified_at' => '2024-01-06T12:20:42.778120982-03:00',
-        'size' => 1_602_473_850,
+     [{ 'name' => 'llama2:latest',
+        'modified_at' => '2024-01-06T15:06:23.6349195-03:00',
+        'size' => 3_826_793_677,
        'digest' =>
-         'c5761fc772409945787240af89a5cce01dd39dc52f1b7b80d080a1163e8dbe10',
+         '78e26419b4469263f75331927a00a0284ef6544c1975b826b15abdaef17bb962',
        'details' =>
-         { 'format' => 'gguf',
-           'family' => 'phi2',
-           'families' => ['phi2'],
-           'parameter_size' => '3B',
-           'quantization_level' => 'Q4_0' } },
+         { 'format' => 'gguf',
+           'family' => 'llama',
+           'families' => ['llama'],
+           'parameter_size' => '7B',
+           'quantization_level' => 'Q4_0' } },
      { 'name' => 'mario:latest',
-       'modified_at' => '2024-01-06T16:19:11.340234644-03:00',
-       'size' => 1_602_473_846,
+       'modified_at' => '2024-01-06T22:41:59.495298101-03:00',
+       'size' => 3_826_793_787,
       'digest' =>
-        '582e668feaba3fcb6add3cee26046a1d6a0c940b86a692ea30d5100aec90135f',
+        '291f46d2fa687dfaff45de96a8cb6e32707bc16ec1e1dfe8d65e9634c34c660c',
       'details' =>
-        { 'format' => 'gguf',
-          'family' => 'phi2',
-          'families' => ['phi2'],
-          'parameter_size' => '3B',
-          'quantization_level' => 'Q4_0' } }] }]
+        { 'format' => 'gguf',
+          'family' => 'llama',
+          'families' => ['llama'],
+          'parameter_size' => '7B',
+          'quantization_level' => 'Q4_0' } }] }]
  ```
 
  ##### show: Show Model Information
@@ -432,35 +431,33 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#sho
 
  ```ruby
  result = client.show(
-   { name: 'dolphin-phi' }
+   { name: 'llama2' }
  )
  ```
 
  Result:
  ```ruby
  [{ 'license' =>
-     "MICROSOFT RESEARCH LICENSE TERMS\n" \
+     "LLAMA 2 COMMUNITY LICENSE AGREEMENT\t\n" \
      # ...
-     'It also applies even if Microsoft knew or should have known about the possibility...',
+     "* Reporting violations of the Acceptable Use Policy or unlicensed uses of Llama..." \
+     "\n",
    'modelfile' =>
      "# Modelfile generated by \"ollama show\"\n" \
      # ...
-     'PARAMETER stop "<|im_end|>"',
+     'PARAMETER stop "<</SYS>>"',
    'parameters' =>
-     "stop <|im_start|>\n" \
-     'stop <|im_end|>',
-   'template' =>
-     "<|im_start|>system\n" \
-     "{{ .System }}<|im_end|>\n" \
-     "<|im_start|>user\n" \
-     "{{ .Prompt }}<|im_end|>\n" \
-     "<|im_start|>assistant\n",
-   'system' => 'You are Dolphin, a helpful AI assistant.',
+     "stop [INST]\n" \
+     "stop [/INST]\n" \
+     "stop <<SYS>>\n" \
+     'stop <</SYS>>',
+   'template' =>
+     "[INST] <<SYS>>{{ .System }}<</SYS>>\n\n{{ .Prompt }} [/INST]\n",
    'details' =>
      { 'format' => 'gguf',
-       'family' => 'phi2',
-       'families' => ['phi2'],
-       'parameter_size' => '3B',
+       'family' => 'llama',
+       'families' => ['llama'],
+       'parameter_size' => '7B',
        'quantization_level' => 'Q4_0' } }]
  ```
 
@@ -470,8 +467,8 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#cop
 
  ```ruby
  result = client.copy(
-   { source: 'dolphin-phi',
-     destination: 'dolphin-phi-backup' }
+   { source: 'llama2',
+     destination: 'llama2-backup' }
  )
  ```
 
@@ -508,7 +505,7 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#del
 
  ```ruby
  result = client.delete(
-   { name: 'dolphin-phi' }
+   { name: 'llama2' }
  )
  ```
 
@@ -521,14 +518,14 @@ If the model does not exist:
  ```ruby
  begin
    result = client.delete(
-     { name: 'dolphin-phi' }
+     { name: 'llama2' }
    )
  rescue Ollama::Errors::OllamaError => error
    puts error.class # Ollama::Errors::RequestError
    puts error.message # 'the server responded with status 404'
 
    puts error.payload
-   # { name: 'dolphin-phi',
+   # { name: 'llama2',
    #   ...
    # }
 
@@ -543,7 +540,7 @@ API Documentation: https://github.com/jmorganca/ollama/blob/main/docs/api.md#pul
 
  ```ruby
  result = client.pull(
-   { name: 'dolphin-phi' }
+   { name: 'llama2' }
  ) do |event, raw|
    puts event
  end
@@ -625,6 +622,69 @@ Result:
  { 'status' => 'success' }]
  ```
 
+ ### Modes
+
+ #### Text
+
+ You can use the [generate](#generate-generate-a-completion) or [chat](#chat-generate-a-chat-completion) methods for text.
+
+ #### Image
+
+ ![A black and white image of an old piano. The piano is an upright model, with the keys on the right side of the image. The piano is sitting on a tiled floor. There is a small round object on the top of the piano.](https://raw.githubusercontent.com/gbaptista/assets/main/gemini-ai/piano.jpg)
+
+ > _Courtesy of [Unsplash](https://unsplash.com/photos/greyscale-photo-of-grand-piano-czPs0z3-Ggg)_
+
+ You need to choose a model that supports images, like [LLaVA](https://ollama.ai/library/llava) or [bakllava](https://ollama.ai/library/bakllava), and encode the image as [Base64](https://en.wikipedia.org/wiki/Base64).
+
+ Depending on your hardware, some models that support images can be slow, so you may want to increase the client [timeout](#timeout):
+
+ ```ruby
+ client = Ollama.new(
+   credentials: { address: 'http://localhost:11434' },
+   options: {
+     server_sent_events: true,
+     connection: { request: { timeout: 120, read_timeout: 120 } } }
+ )
+ ```
+
+ Using the `generate` method:
+
+ ```ruby
+ require 'base64'
+
+ client.generate(
+   { model: 'llava',
+     prompt: 'Please describe this image.',
+     images: [Base64.strict_encode64(File.read('piano.jpg'))] }
+ ) do |event, raw|
+   print event['response']
+ end
+ ```
+
+ Output:
+ > _The image is a black and white photo of an old piano, which appears to be in need of maintenance. A chair is situated right next to the piano. Apart from that, there are no other objects or people visible in the scene._
+
+ Using the `chat` method:
+ ```ruby
+ require 'base64'
+
+ result = client.chat(
+   { model: 'llava',
+     messages: [
+       { role: 'user',
+         content: 'Please describe this image.',
+         images: [Base64.strict_encode64(File.read('piano.jpg'))] }
+     ] }
+ ) do |event, raw|
+   puts event
+ end
+ ```
+
+ Output:
+ > _The image displays an old piano, sitting on a wooden floor with black keys. Next to the piano, there is another keyboard in the scene, possibly used for playing music._
+ >
+ > _On top of the piano, there are two mice placed in different locations within its frame. These mice might be meant for controlling the music being played or simply as decorative items. The overall atmosphere seems to be focused on artistic expression through this unique instrument._
+
  ### Streaming and Server-Sent Events (SSE)
 
  [Server-Sent Events (SSE)](https://en.wikipedia.org/wiki/Server-sent_events) is a technology that allows certain endpoints to offer streaming capabilities, such as creating the impression that "the model is typing along with you," rather than delivering the entire answer all at once.
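
As a reminder, SSE can be enabled for all requests when creating the client; a minimal sketch mirroring the constructor shown earlier (the address is the default local server and an assumption here):

```ruby
# Enable Server-Sent Events for every request made by this client.
client = Ollama.new(
  credentials: { address: 'http://localhost:11434' },
  options: { server_sent_events: true }
)
```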
@@ -640,7 +700,7 @@ client = Ollama.new(
  Or, you can decide on a request basis:
  ```ruby
  result = client.generate(
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' },
    server_sent_events: true
  ) do |event, raw|
@@ -661,7 +721,7 @@ Ollama may launch a new endpoint that we haven't covered in the Gem yet. If that
  ```ruby
  result = client.request(
    'api/generate',
-   { model: 'dolphin-phi',
+   { model: 'llama2',
      prompt: 'Hi!' },
    request_method: 'POST', server_sent_events: true
  )
@@ -707,7 +767,7 @@ require 'ollama-ai'
 
  begin
    client.chat_completions(
-     { model: 'dolphin-phi',
+     { model: 'llama2',
        prompt: 'Hi!' }
    )
  rescue Ollama::Errors::OllamaError => error
@@ -715,7 +775,7 @@ rescue Ollama::Errors::OllamaError => error
    puts error.message # 'the server responded with status 500'
 
    puts error.payload
-   # { model: 'dolphin-phi',
+   # { model: 'llama2',
    #   prompt: 'Hi!',
    #   ...
    # }
@@ -732,7 +792,7 @@ require 'ollama-ai/errors'
 
  begin
    client.chat_completions(
-     { model: 'dolphin-phi',
+     { model: 'llama2',
        prompt: 'Hi!' }
    )
  rescue OllamaError => error
@@ -755,6 +815,8 @@ RequestError
  ```bash
  bundle
  rubocop -A
+
+ bundle exec ruby spec/tasks/run-client.rb
  ```
 
  ### Purpose
@@ -768,7 +830,7 @@ gem build ollama-ai.gemspec
 
  gem signin
 
- gem push ollama-ai-1.0.0.gem
+ gem push ollama-ai-1.0.1.gem
  ```
 
  ### Updating the README