tiny_ollama 1.0.0
- checksums.yaml +7 -0
- data/lib/tiny_ollama.rb +101 -0
- metadata +43 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
+---
+SHA256:
+  metadata.gz: 598de0585c4437aec61f9963b09e6f623d7c3988f039440cfb1c6bd6303b7a0b
+  data.tar.gz: e5b5425dee8e809ba501fa0f8bc6951470191e41afd7d3a1c263d00e6ca238aa
+SHA512:
+  metadata.gz: ead24d28f0395d2431403b83401cd7bd0ac59e7df1861c8691be42932283c2f53612d25843b5382652ce30a255ae0a193250a15184e562ec2cab106f616137aa
+  data.tar.gz: e71614054eeecd5512b77748f69c9dddb3cf882d9e259b0cdce1baf47be372ff3c8993b5054d96977059779b93323d64bec3b90fac53500097fcdc3f32afa53d
data/lib/tiny_ollama.rb
ADDED
@@ -0,0 +1,101 @@
+require 'net/http'
+require 'json'
+
+# tiny HTTP client for the /api/generate and /api/chat endpoints of ollama
+# see also: https://ollama.com/
+class TinyOllama
+  # a good rule of thumb would be to have a .tiny_ollama.yml config in your
+  # project, to parse that as YAML, and pass the parsed result into here.
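+  #
+  # For example (a minimal sketch; the config keys shown are assumptions):
+  #
+  #   require 'yaml'
+  #   config = YAML.load_file('.tiny_ollama.yml')  # e.g. { "model" => "llama3" }
+  #   client = TinyOllama.new(**config.transform_keys(&:to_sym))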
+  def initialize(
+    model:,
+    format: nil,
+    host: 'localhost',
+    port: 11434,
+    context_size: 2048,
+    keep_alive: -1,
+    stream: false
+  )
+    @model = model
+    @host = host
+    @port = port
+    @context_size = context_size
+    @keep_alive = keep_alive
+    @stream = stream
+    @format = format
+  end
+
+  # sends a request to POST /api/generate
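+  #
+  # Example (a sketch; the model name is an assumption):
+  #
+  #   client = TinyOllama.new(model: 'llama3')
+  #   client.generate('Why is the sky blue?')  # => the response String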
+  def generate(prompt)
+    # NOTE: format is a top-level API parameter rather than a model option,
+    # so it's merged into the request body itself, and only when set
+    request_body = {
+      model: @model,
+      prompt: prompt,
+      stream: @stream,
+      keep_alive: @keep_alive,
+      options: { num_ctx: @context_size },
+    }.merge(@format ? { format: @format } : {}).to_json
+
+    uri = URI("http://#{@host}:#{@port}/api/generate")
+    headers = { 'Content-Type' => 'application/json' }
+    response = Net::HTTP.post(uri, request_body, headers)
+
+    # Handle potential errors (e.g., non-200 responses)
+    unless response.is_a?(Net::HTTPSuccess)
+      raise TinyOllamaModelError.new("Ollama API Error: #{response.code} - #{response.body}")
+    end
+
+    # NOTE: this parsing assumes stream: false (a single JSON object)
+    JSON.parse(response.body)['response']
+  end
+
+  # sends a request to POST /api/chat
+  #
+  # messages: an array of hashes in the following format:
+  # [
+  #   {
+  #     "role": "system",
+  #     "content": <optional message to override model instructions>,
+  #   },
+  #   {
+  #     "role": "user",
+  #     "content": <the first user message>,
+  #   },
+  #   {
+  #     "role": "assistant",
+  #     "content": <the LLM's first response>,
+  #   },
+  #   {
+  #     "role": "user",
+  #     "content": <the next user message>,
+  #   },
+  # ]
+  #
+  # NOTE: the messages parameter needs to include a system message if you want
+  # to override the model's default instructions
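+  #
+  # Example (a sketch; the message content is illustrative):
+  #
+  #   client.chat([
+  #     { role: 'user', content: 'Hello!' },
+  #   ])  # => the assistant's reply as a String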
+  def chat(messages)
+    # format is only included when set, mirroring #generate
+    request_body = {
+      model: @model,
+      messages: messages,
+      stream: @stream,
+      keep_alive: @keep_alive,
+      options: { num_ctx: @context_size },
+    }.merge(@format ? { format: @format } : {}).to_json
+
+    uri = URI("http://#{@host}:#{@port}/api/chat")
+    headers = { 'Content-Type' => 'application/json' }
+    response = Net::HTTP.post(uri, request_body, headers)
+
+    # Handle potential errors (e.g., non-200 responses)
+    unless response.is_a?(Net::HTTPSuccess)
+      raise TinyOllamaModelError.new("Ollama API Error: #{response.code} - #{response.body}")
+    end
+
+    JSON.parse(response.body)['message']['content']
+  end
+end
+
+# raised when the ollama API returns a non-success response
+# (subclasses StandardError so instances can be raised)
+class TinyOllamaModelError < StandardError; end
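+
+# Example of rescuing API errors (a sketch; `client` and `messages` as above):
+#
+#   begin
+#     client.chat(messages)
+#   rescue TinyOllamaModelError => e
+#     warn e.message
+#   end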
metadata
ADDED
@@ -0,0 +1,43 @@
+--- !ruby/object:Gem::Specification
+name: tiny_ollama
+version: !ruby/object:Gem::Version
+  version: 1.0.0
+platform: ruby
+authors:
+- Jeff Lunt
+autorequire:
+bindir: bin
+cert_chain: []
+date: 2024-06-26 00:00:00.000000000 Z
+dependencies: []
+description: a tiny gem for using ollama's generate and chat endpoints
+email: jefflunt@gmail.com
+executables: []
+extensions: []
+extra_rdoc_files: []
+files:
+- lib/tiny_ollama.rb
+homepage: https://github.com/jefflunt/tiny_ollama
+licenses:
+- MIT
+metadata: {}
+post_install_message:
+rdoc_options: []
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: '0'
+requirements: []
+rubygems_version: 3.4.19
+signing_key:
+specification_version: 4
+summary: a tiny gem for using ollama's generate and chat endpoints
+test_files: []