ollama-rb 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/.rubocop.yml +13 -0
- data/CHANGELOG.md +5 -0
- data/LICENSE.txt +21 -0
- data/README.md +301 -0
- data/Rakefile +12 -0
- data/lib/ollama/api/blobs.rb +20 -0
- data/lib/ollama/api/chat.rb +22 -0
- data/lib/ollama/api/completion.rb +25 -0
- data/lib/ollama/api/embeddings.rb +15 -0
- data/lib/ollama/api/models.rb +39 -0
- data/lib/ollama/client.rb +41 -0
- data/lib/ollama/connection.rb +84 -0
- data/lib/ollama/response.rb +31 -0
- data/lib/ollama/version.rb +5 -0
- data/lib/ollama.rb +9 -0
- metadata +61 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: bc1be94d75a4db6787115a0cfbdc7b6aa149418a59612e0012f845512f361ec0
+  data.tar.gz: d1a565383613490c54c45a534e5fff1a9060803423b6dd9146f036eb769c15ef
+SHA512:
+  metadata.gz: 6a09fcb88ec6f2a4e41706fad85ca3def0e75b0a7df87d60fa7c167cccb19abc24163a5dc3d9c1bee1a8a88eca06fa0dbeefa8dd9b79c9215d2db564d28ea5e8
+  data.tar.gz: c8ec22503cb81e3dd376b0775930047d685c0ead3c6d699249449a08544d1de2435b18fb4f2a92d2819cd7f25e320ee2a57d23e82e6386a7760bd0074137de6d
data/.rubocop.yml
ADDED
data/CHANGELOG.md
ADDED
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2024 songji.zeng
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,301 @@
+# Ollama Ruby Library
+
+The Ollama Ruby library provides the easiest way to integrate your Ruby project with [Ollama](https://github.com/jmorganca/ollama).
+
+## Index
+
+- [Ollama Ruby Library](#ollama-ruby-library)
+  - [Installation](#installation)
+  - [Usage](#usage)
+    - [Create a Client](#create-a-client)
+    - [Generate a chat completion](#generate-a-chat-completion)
+    - [Generate a completion](#generate-a-completion)
+    - [Create a Model](#create-a-model)
+    - [List Local Models](#list-local-models)
+    - [Show Model Information](#show-model-information)
+    - [Copy a Model](#copy-a-model)
+    - [Delete a Model](#delete-a-model)
+    - [Pull a Model](#pull-a-model)
+    - [Push a Model](#push-a-model)
+    - [Generate Embeddings](#generate-embeddings)
+  - [Development](#development)
+  - [Contributing](#contributing)
+  - [License](#license)
+
+## Installation
+
+Install the gem and add it to the application's Gemfile by executing:
+
+```sh
+bundle add ollama-rb
+```
+
+If bundler is not being used to manage dependencies, install the gem by executing:
+
+```sh
+gem install ollama-rb
+```
+
+## Usage
+
+### Create a Client
+
+```ruby
+require "ollama"
+
+ollama = Ollama::Client.new
+
+# Specify the base URL; defaults to http://localhost:11434
+ollama = Ollama::Client.new(base_url: "http://localhost:11434")
+```
+
+### Generate a chat completion
+
+```ruby
+response = ollama.chat.create(
+  model: "llama2",
+  messages: [
+    { role: "user", content: "Why is the sky blue?" }
+  ]
+)
+# => #<Ollama::Response:0x000000011fa1a840...
+response.ok?
+# => true
+response.result
+# =>
+# {"model"=>"llama2",
+#  "created_at"=>"2024-03-20T06:53:18.298807078Z",
+#  "message"=>
+#   {"role"=>"assistant",
+#    "content"=> ...
+```
+
+**Streaming response**
+
+```ruby
+response = ollama.chat.create(
+  model: "llama2",
+  messages: [
+    { role: "user", content: "Why is the sky blue?" }
+  ]
+) do |chunk|
+  puts chunk
+end
+# =>
+# {"model"=>"llama2", "created_at"=>"2024-03-20T06:57:57.513159464Z", "message"=>{"role"=>"assistant", "content"=>"\n"}, "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T06:57:57.616592691Z", "message"=>{"role"=>"assistant", "content"=>"The"}, "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T06:57:57.70737176Z", "message"=>{"role"=>"assistant", "content"=>" sky"}, "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T06:57:57.796324471Z", "message"=>{"role"=>"assistant", "content"=>" appears"}, "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T06:57:57.884097322Z", "message"=>{"role"=>"assistant", "content"=>" blue"}, "done"=>false}
+# ...
+```
+
+### Generate a completion
+
+```ruby
+response = ollama.completion.create(model: "llama2", prompt: "hello!")
+
+response.result
+# =>
+# {"model"=>"llama2",
+#  "created_at"=>"2024-03-20T08:03:27.910169204Z",
+#  "response"=>"Hello there! It's nice to meet you. Is there something I can help you with or would you like to chat?",
+#  "done"=>true,
+#  "context"=>
+#   [518,
+#    25580,
+#    29962,
+#    3532,
+#    ...
+#    13563,
+#    29973],
+#  "total_duration"=>6212545461,
+#  "load_duration"=>2024921059,
+#  "prompt_eval_count"=>22,
+#  "prompt_eval_duration"=>1815255000,
+#  "eval_count"=>27,
+#  "eval_duration"=>2371725000}
+```
+
+**Streaming response**
+
+```ruby
+ollama.completion.create(model: "llama2", prompt: "hello!") do |chunk|
+  puts chunk
+end
+# =>
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.196291424Z", "response"=>"Hello", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.285639365Z", "response"=>"!", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.3753276Z", "response"=>" It", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.464252328Z", "response"=>"'", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.552918803Z", "response"=>"s", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.641877239Z", "response"=>" nice", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.730397754Z", "response"=>" to", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.819209813Z", "response"=>" meet", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.907875913Z", "response"=>" you", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:01.996684973Z", "response"=>".", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.085516116Z", "response"=>" Is", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.1781973Z", "response"=>" there", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.267609408Z", "response"=>" something", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.357217892Z", "response"=>" I", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.446981087Z", "response"=>" can", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.536783282Z", "response"=>" help", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.645166548Z", "response"=>" you", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.737494769Z", "response"=>" with", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.82763751Z", "response"=>" or", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:02.917220827Z", "response"=>" would", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:03.006731978Z", "response"=>" you", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:03.098463277Z", "response"=>" like", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:03.18839214Z", "response"=>" to", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:03.277780622Z", "response"=>" chat", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:03.367252189Z", "response"=>"?", "done"=>false}
+# {"model"=>"llama2", "created_at"=>"2024-03-20T08:08:03.457281303Z", "response"=>"", "done"=>true, "context"=>[518, 25580, 29962, 3532, 14816, 29903, 29958, 5299, 829, 14816, 29903, 6778, 13, 13, 12199, 29991, 518, 29914, 25580, 29962, 13, 10994, 29991, 739, 29915, 29879, 7575, 304, 5870, 366, 29889, 1317, 727, 1554, 306, 508, 1371, 366, 411, 470, 723, 366, 763, 304, 13563, 29973], "total_duration"=>2432818562, "load_duration"=>1470557, "prompt_eval_duration"=>167895000, "eval_count"=>26, "eval_duration"=>2260824000}
+```
+
+### Create a Model
+
+```ruby
+ollama.models.create(name: "mario", modelfile: "FROM llama2\nSYSTEM You are mario from Super Mario Bros.") do |chunk|
+  puts chunk
+end
+# =>
+# {"status"=>"reading model metadata"}
+# {"status"=>"creating system layer"}
+# {"status"=>"using already created layer sha256:8934d96d3f08982e95922b2b7a2c626a1fe873d7c3b06e8e56d7bc0a1fef9246"}
+# {"status"=>"using already created layer sha256:8c17c2ebb0ea011be9981cc3922db8ca8fa61e828c5d3f44cb6ae342bf80460b"}
+# {"status"=>"using already created layer sha256:7c23fb36d80141c4ab8cdbb61ee4790102ebd2bf7aeff414453177d4f2110e5d"}
+# {"status"=>"using already created layer sha256:2e0493f67d0c8c9c68a8aeacdf6a38a2151cb3c4c1d42accf296e19810527988"}
+# {"status"=>"using already created layer sha256:fa304d6750612c207b8705aca35391761f29492534e90b30575e4980d6ca82f6"}
+# {"status"=>"writing layer sha256:1741cf59ce26ff01ac614d31efc700e21e44dd96aed60a7c91ab3f47e440ef94"}
+# {"status"=>"writing layer sha256:786d77d232711e91aafad74df1bacc01e630525d8e83d57a758693725f08d511"}
+# {"status"=>"writing manifest"}
+# {"status"=>"success"}
+```
+
+### List Local Models
+
+```ruby
+response = ollama.models.list
+response.result
+# =>
+# {"models"=>
+#   [{"name"=>"llama2:latest",
+#     "model"=>"llama2:latest",
+#     "modified_at"=>"2024-03-19T10:37:39.212281917Z",
+#     "size"=>3826793677,
+#     "digest"=>"78e26419b4469263f75331927a00a0284ef6544c1975b826b15abdaef17bb962",
+#     "details"=>{"parent_model"=>"", "format"=>"gguf", "family"=>"llama", "families"=>["llama"], "parameter_size"=>"7B", "quantization_level"=>"Q4_0"}},
+#    {"name"=>"mario:latest",
+#     "model"=>"mario:latest",
+#     "modified_at"=>"2024-03-20T08:21:20.316403821Z",
+#     "size"=>3826793787,
+#     "digest"=>"291f46d2fa687dfaff45de96a8cb6e32707bc16ec1e1dfe8d65e9634c34c660c",
+#     "details"=>{"parent_model"=>"", "format"=>"gguf", "family"=>"llama", "families"=>["llama"], "parameter_size"=>"7B", "quantization_level"=>"Q4_0"}}]}
+```
+
+### Show Model Information
+
+```ruby
+response = ollama.models.show("mario")
+response.result
+# =>
+# {"license"=>
+#   "LLAMA 2 COMMUNITY LICENSE AGREEMENT\t\nLlama 2 Version Release Date: July 18, 2023\n\n\"Agreement\" means the terms and conditions for use, reproduction, distribution and \nmodification of the Llama Materials set forth herein.\n\n\"...",
+#  "modelfile"=>
+#   "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this one, replace the FROM line with:\n# FROM mario:latest\n\nFROM llama2:latest\nTEMPLATE \"\"\"[INST] <<SYS>>{{ .System }}<</SYS>>\n\n{{ .Prompt }} [/INST]\n\"\"\"\nSYSTEM \"\"\"You are mario from Super Mario Bros.\"\"\"\nPARAMETER stop \"[INST]\"\nPARAMETER stop \"[/INST]\"\nPARAMETER stop \"<<SYS>>\"\nPARAMETER stop \"<</SYS>>\"",
+#  "parameters"=>
+#   "stop \"[INST]\"\nstop \"[/INST]\"\nstop \"<<SYS>>\"\nstop \"<</SYS>>\"",
+#  "template"=>"[INST] <<SYS>>{{ .System }}<</SYS>>\n\n{{ .Prompt }} [/INST]\n",
+#  "system"=>"You are mario from Super Mario Bros.",
+#  "details"=>{"parent_model"=>"llama2:latest", "format"=>"gguf", "family"=>"llama", "families"=>["llama"], "parameter_size"=>"7B", "quantization_level"=>"Q4_0"}}
+```
+
+### Copy a Model
+
+```ruby
+response = ollama.models.copy(source: "llama2")
+# Same as
+response = ollama.models.copy(source: "llama2", destination: "llama2-backup")
+response.ok?
+# => true
+
+response = ollama.models.copy(source: "non-existence")
+response.ok?
+# => false
+response.result
+# => {"error"=>"model 'non-existence' not found"}
+```
+
+### Delete a Model
+
+```ruby
+response = ollama.models.delete("llama2-backup")
+response.ok?
+# => true
+
+response = ollama.models.delete("non-existence")
+response.ok?
+# => false
+response.result
+# => {"error"=>"model 'non-existence' not found"}
+```
+
+### Pull a Model
+
+```ruby
+ollama.models.pull(name: "tinyllama") do |chunk|
+  puts chunk
+end
+# =>
+# {"status"=>"pulling manifest"}
+# {"status"=>"pulling 2af3b81862c6", "digest"=>"sha256:2af3b81862c6be03c769683af18efdadb2c33f60ff32ab6f83e42c043d6c7816", "total"=>637699456, "completed"=>637699456}
+# {"status"=>"pulling af0ddbdaaa26", "digest"=>"sha256:af0ddbdaaa26f30d54d727f9dd944b76bdb926fdaf9a58f63f78c532f57c191f", "total"=>70, "completed"=>70}
+# {"status"=>"pulling c8472cd9daed", "digest"=>"sha256:c8472cd9daed5e7c20aa53689e441e10620a002aacd58686aeac2cb188addb5c", "total"=>31, "completed"=>31}
+# {"status"=>"pulling fa956ab37b8c", "digest"=>"sha256:fa956ab37b8c21152f975a7fcdd095c4fee8754674b21d9b44d710435697a00d", "total"=>98, "completed"=>98}
+# {"status"=>"pulling 6331358be52a", "digest"=>"sha256:6331358be52a6ebc2fd0755a51ad1175734fd17a628ab5ea6897109396245362", "total"=>483, "completed"=>483}
+# {"status"=>"verifying sha256 digest"}
+# {"status"=>"writing manifest"}
+# {"status"=>"removing any unused layers"}
+# {"status"=>"success"}
+```
+
+### Push a Model
+
+You need to create an account at https://ollama.ai and add your Public Key at https://ollama.ai/settings/keys to allow you to push models to your namespace.
+
+```ruby
+ollama.models.copy(source: "mario", destination: "your-namespace/mario")
+ollama.models.push(name: "your-namespace/mario") do |chunk|
+  puts chunk
+end
+```
+
+### Generate Embeddings
+
+```ruby
+response = ollama.embeddings.create(model: "llama2", prompt: "Hello!")
+response.result
+# =>
+# {"embedding"=>
+#   [1.3464512825012207,
+#    -1.0983257293701172,
+#    ...
+#    -2.2046988010406494, 0.3163630962371826]}
+```
+
+## Development
+
+After checking out the repo, run `bin/setup` to install dependencies. Then, run `rake test` to run the tests. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and the created tag, and push the `.gem` file to [rubygems.org](https://rubygems.org).
+
+## Contributing
+
+Bug reports and pull requests are welcome on GitHub at https://github.com/songjiz/ollama-rb.
+
+## License
+
+The gem is available as open source under the terms of the [MIT License](https://opensource.org/licenses/MIT).
data/Rakefile
ADDED
data/lib/ollama/api/blobs.rb
ADDED
@@ -0,0 +1,20 @@
+# frozen_string_literal: true
+
+module Ollama
+  module API
+    class Blobs
+      def initialize(client:)
+        @client = client
+      end
+
+      def create(digest)
+        @client.post "/api/blobs/#{digest}"
+      end
+
+      def exists?(digest)
+        response = @client.head "/api/blobs/#{digest}"
+        response.ok?
+      end
+    end
+  end
+end
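The blobs API above is not covered by the README. A minimal usage sketch based only on this file and the Client wiring further down, assuming a local Ollama server at the default address (the digest value is a hypothetical placeholder):

    require "ollama"

    ollama = Ollama::Client.new
    digest = "sha256:0000000000000000000000000000000000000000000000000000000000000000" # hypothetical digest
    ollama.blobs.exists?(digest) # issues HEAD /api/blobs/<digest>; true only on a 200 response
    ollama.blobs.create(digest)  # issues POST /api/blobs/<digest>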
data/lib/ollama/api/chat.rb
ADDED
@@ -0,0 +1,22 @@
+# frozen_string_literal: true
+
+module Ollama
+  module API
+    class Chat
+      def initialize(client:)
+        @client = client
+      end
+
+      def create(model:, messages:, **options, &block)
+        json = {
+          model: model,
+          messages: messages
+        }.merge(options).compact.transform_keys(&:to_sym)
+
+        json[:stream] = block_given?
+
+        @client.post "/api/chat", json: json, &block
+      end
+    end
+  end
+end
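Since Chat#create merges any extra keyword arguments into the request body, Ollama chat parameters beyond model and messages pass straight through. A sketch; the options payload here is an Ollama server parameter, not something this gem defines:

    ollama.chat.create(
      model: "llama2",
      messages: [{ role: "user", content: "Why is the sky blue?" }],
      options: { temperature: 0.1 } # merged into the JSON body sent to /api/chat
    )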
data/lib/ollama/api/completion.rb
ADDED
@@ -0,0 +1,25 @@
+# frozen_string_literal: true
+
+module Ollama
+  module API
+    class Completion
+      def initialize(client:)
+        @client = client
+      end
+
+      def create(model:, prompt:, images: [], **options, &block)
+        json = {
+          model: model,
+          prompt: prompt,
+          images: images
+        }.merge(options).compact.transform_keys(&:to_sym)
+
+        json[:stream] = block_given?
+
+        @client.post "/api/generate", json: json, &block
+      end
+
+      alias generate create
+    end
+  end
+end
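Because of the `alias generate create` above, both spellings perform the same POST to /api/generate:

    ollama.completion.create(model: "llama2", prompt: "hello!")
    ollama.completion.generate(model: "llama2", prompt: "hello!") # identical call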
data/lib/ollama/api/embeddings.rb
ADDED
@@ -0,0 +1,15 @@
+# frozen_string_literal: true
+
+module Ollama
+  module API
+    class Embeddings
+      def initialize(client:)
+        @client = client
+      end
+
+      def create(model:, prompt:, **options)
+        @client.post "/api/embeddings", json: { model: model, prompt: prompt }.merge(options).compact
+      end
+    end
+  end
+end
data/lib/ollama/api/models.rb
ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+module Ollama
+  module API
+    class Models
+      def initialize(client:)
+        @client = client
+      end
+
+      def list
+        @client.get "/api/tags"
+      end
+
+      def create(name:, modelfile: nil, path: nil, &block)
+        @client.post "/api/create", json: { name: name, modelfile: modelfile, path: path, stream: block_given? }.compact, &block
+      end
+
+      def show(name)
+        @client.post "/api/show", json: { name: name }
+      end
+
+      def copy(source:, destination: nil)
+        @client.post "/api/copy", json: { source: source, destination: destination || "#{source}-backup" }
+      end
+
+      def delete(name)
+        @client.delete "/api/delete", json: { name: name }
+      end
+
+      def pull(name:, insecure: nil, &block)
+        @client.post "/api/pull", json: { name: name, insecure: insecure, stream: block_given? }.compact, &block
+      end
+
+      def push(name:, insecure: nil, &block)
+        @client.post "/api/push", json: { name: name, insecure: insecure, stream: block_given? }.compact, &block
+      end
+    end
+  end
+end
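Note that pull and push set `stream: block_given?`, so calling them without a block asks the server for a single non-streaming response; a sketch (the exact shape of the final status object comes from the server, not this gem):

    response = ollama.models.pull(name: "tinyllama") # no block, so stream: false
    response.result # a single status object, e.g. {"status"=>"success"}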
data/lib/ollama/client.rb
ADDED
@@ -0,0 +1,41 @@
+# frozen_string_literal: true
+
+require "forwardable"
+require_relative "connection"
+require_relative "api/blobs"
+require_relative "api/chat"
+require_relative "api/completion"
+require_relative "api/embeddings"
+require_relative "api/models"
+
+module Ollama
+  class Client
+    extend Forwardable
+
+    def_delegators :@connection, :post, :get, :delete, :head
+
+    def initialize(base_url: "http://localhost:11434", logger: nil, **options)
+      @connection = Connection.new(base_url, logger: logger, **options)
+    end
+
+    def chat
+      API::Chat.new client: self
+    end
+
+    def completion
+      API::Completion.new client: self
+    end
+
+    def models
+      API::Models.new client: self
+    end
+
+    def blobs
+      API::Blobs.new client: self
+    end
+
+    def embeddings
+      API::Embeddings.new client: self
+    end
+  end
+end
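The logger option is handed to the Connection, which installs it as Net::HTTP debug output, so raw request/response traffic can be traced; a minimal sketch:

    ollama = Ollama::Client.new(logger: $stdout) # wired through to Net::HTTP#set_debug_output
    ollama.models.list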
data/lib/ollama/connection.rb
ADDED
@@ -0,0 +1,84 @@
+# frozen_string_literal: true
+
+require "net/https"
+require "json"
+require "uri"
+require_relative "response"
+
+module Ollama
+  class Connection
+    def initialize(base_url, logger: nil, **options)
+      @base_uri = URI.parse(base_url.to_s)
+      @logger = logger
+      @options = options
+    end
+
+    def post(path, json: nil, headers: {}, **options)
+      http = new_http
+      request = Net::HTTP::Post.new(path)
+      set_request_headers request, default_headers.merge(headers)
+      request.body = JSON.generate(json) if json
+
+      return Response.new(http.request(request)) unless block_given?
+
+      result = []
+      response = http.request(request) do |http_response|
+        # Raises if response is not successful
+        # http_response.value
+        http_response.read_body do |chunk|
+          parsed = JSON.parse(chunk)
+          result << parsed
+          yield parsed
+        end
+      end
+
+      Response.new(response).tap { |resp| resp.result = result }
+    end
+
+    def get(path, params: {}, headers: {}, **options)
+      http = new_http
+      query = URI.encode_www_form(params)
+      uri = URI.parse(path).tap { |uri| uri.query = query if query.length > 0 }
+      request = Net::HTTP::Get.new(uri.to_s)
+      set_request_headers request, default_headers.merge(headers)
+      Response.new http.request(request)
+    end
+
+    def delete(path, json: nil, headers: {}, **options)
+      http = new_http
+      request = Net::HTTP::Delete.new(path)
+      set_request_headers request, default_headers.merge(headers)
+      request.body = JSON.generate(json) if json
+
+      Response.new http.request(request)
+    end
+
+    def head(path, headers: {}, **options)
+      http = new_http
+      request = Net::HTTP::Head.new(path)
+      set_request_headers request, default_headers.merge(headers)
+      Response.new http.request(request)
+    end
+
+    private
+
+    def new_http
+      Net::HTTP.new(@base_uri.host, @base_uri.port).tap do |http|
+        http.set_debug_output @logger if @logger
+      end
+    end
+
+    def default_headers
+      {
+        "User-Agent" => USER_AGENT
+      }
+    end
+
+    def set_request_headers(request, headers)
+      request.tap do
+        headers.each do |key, value|
+          request[key] = value
+        end
+      end
+    end
+  end
+end
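The Client delegates post/get/delete/head here, but the Connection also works standalone; a sketch, assuming the default local server. Note that the streaming branch calls JSON.parse on each chunk individually, which assumes the server emits one complete JSON document per chunk:

    conn = Ollama::Connection.new("http://localhost:11434")
    conn.get("/api/tags").result # the same request the Models#list helper makes
    conn.post("/api/generate", json: { model: "llama2", prompt: "hi" }) { |chunk| puts chunk }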
data/lib/ollama/response.rb
ADDED
@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+require "forwardable"
+require "json"
+
+module Ollama
+  class Response
+    extend Forwardable
+    def_delegators :@http_response, :code, :body
+
+    def initialize(http_response)
+      @http_response = http_response
+    end
+
+    def ok?
+      code == "200"
+    end
+
+    def error?
+      !ok?
+    end
+
+    def result=(value)
+      @result = value
+    end
+
+    def result
+      @result ||= JSON.parse(body) rescue {}
+    end
+  end
+end
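Response#result parses the body lazily and rescues parse failures to an empty hash, so callers should branch on ok?/error? rather than on the shape of the hash; a sketch reusing the README's error example:

    response = ollama.models.delete("non-existence")
    response.code # delegated to the underlying Net::HTTPResponse, e.g. "404"
    if response.error?
      response.result["error"] # => "model 'non-existence' not found" (nil if the body was not JSON)
    end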
data/lib/ollama/version.rb
ADDED
data/lib/ollama.rb
ADDED
metadata
ADDED
@@ -0,0 +1,61 @@
+--- !ruby/object:Gem::Specification
+name: ollama-rb
+version: !ruby/object:Gem::Version
+  version: 0.1.0
+platform: ruby
+authors:
+- songji.zeng
+autorequire:
+bindir: exe
+cert_chain: []
+date: 2024-03-20 00:00:00.000000000 Z
+dependencies: []
+description: The Ollama Ruby library provides the easiest way to integrate your Ruby
+  project with Ollama.
+email:
+- songji.zeng@outlook.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- ".rubocop.yml"
+- CHANGELOG.md
+- LICENSE.txt
+- README.md
+- Rakefile
+- lib/ollama.rb
+- lib/ollama/api/blobs.rb
+- lib/ollama/api/chat.rb
+- lib/ollama/api/completion.rb
+- lib/ollama/api/embeddings.rb
+- lib/ollama/api/models.rb
+- lib/ollama/client.rb
+- lib/ollama/connection.rb
+- lib/ollama/response.rb
+- lib/ollama/version.rb
+homepage: https://github.com/songjiz/ollama-rb
+licenses:
+- MIT
+metadata:
+  homepage_uri: https://github.com/songjiz/ollama-rb
+  source_code_uri: https://github.com/songjiz/ollama-rb
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: 3.0.0
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.5.6
+signing_key:
+specification_version: 4
+summary: Ollama Ruby library
+test_files: []