llamafile 0.4.9 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/llamafile/llama.rb +2 -53
- data/lib/llamafile/version.rb +1 -1
- data/lib/llamafile.rb +78 -47
- data/llamafile.gemspec +1 -1
- metadata +1 -15
checksums.yaml
CHANGED

```diff
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: dd2f077f1692c00a61954000c64dc038004e24fe0df98c4a440316516e0d9a4a
+  data.tar.gz: 7a2dbf3c9191d974fea4fe1f0c2d8c51d35a01bd0c9a48b0adf5d5331e8aa0dc
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 96777f7209ab075189c7f01218a852d73d33c5bb218eb015e8ef9958024a3f371051d3cfc0effab4cdd588a060177cfb599becc7fa61eea94f05139ea4b55094
+  data.tar.gz: 834d4b313d6894b3c612cc2bd696d34d2d719bffa8eca6c6391ccae0b7a974e6c2c620052c8d2e3167cef2aaa4e71811c63661fc3e2cee4a2025f23f46963d46
```
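These published digests can be checked locally. A minimal verification sketch in Ruby, assuming the package has been fetched into the current directory with `gem fetch llamafile -v 0.5.0` (a `.gem` file is a plain tar archive whose `data.tar.gz` and `metadata.gz` members are what checksums.yaml describes):

```ruby
# Sketch: recompute the SHA256 of data.tar.gz inside the fetched package
# and compare it to the value published above.
require 'digest'
require 'rubygems/package'

EXPECTED = '7a2dbf3c9191d974fea4fe1f0c2d8c51d35a01bd0c9a48b0adf5d5331e8aa0dc'

Gem::Package::TarReader.new(File.open('llamafile-0.5.0.gem', 'rb')) do |tar|
  tar.each do |entry|
    next unless entry.full_name == 'data.tar.gz'
    digest = Digest::SHA256.hexdigest(entry.read)
    puts(digest == EXPECTED ? 'data.tar.gz: OK' : "data.tar.gz: MISMATCH (#{digest})")
  end
end
```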
data/lib/llamafile/llama.rb
CHANGED

```diff
@@ -1,4 +1,4 @@
-
+
 module LLAMA
   DEF = {
     stream: false,
@@ -25,19 +25,12 @@ module LLAMA
   class Llama
     include HTTParty
     base_uri ENV['LLAMA']
-    #default_timeout 120
   end
   def self.post h={}, &b
-    #puts %[LLAMA IN: #{h}]
     hh = {
-      headers: {
-        "Content-Type": "application/json",
-        "Connection": 'keep-alive',
-        "Priority": 'u=0'
-      },
+      headers: { "Content-Type": "application/json", "Connection": 'keep-alive', "Priority": 'u=0' },
       body: JSON.generate(DEF.merge(h))
     }
-    #puts %[LLAMA PACK: #{hh}]
     fiber = Fiber.new do |hhh|
       begin
         r = Llama.post('/completion', hhh)
@@ -59,49 +52,5 @@ module LLAMA
     end
     fiber.resume(hh)
   end
-
-  def self.make h={}
-    fiber = Fiber.new do |hh|
-      if r = LLAMA.post(prompt: PROMPT.make(hh))
-        Fiber.yield r
-      else
-        Fiber.yield false
-      end
-    end
-    fiber.resume h
-  end
-
-  def self.if? h={}
-    fiber = Fiber.new do |hh|
-      if LLAMA.post({ n_predict: 4, grammar: GRAMMAR[:bool], prompt: PROMPT.test(hh) }) == 'yes'
-        Fiber.yield true
-      else
-        Fiber.yield false
-      end
-    end
-    fiber.resume h
-  end
-
-  def self.test h={}
-    fiber = Fiber.new do |hh|
-      hh[:grammar] = GRAMMAR[:bool]
-      hh[:n_predict] = 4
-      if LLAMA.post(hh) == 'yes'
-        Fiber.yield true
-      else
-        Fiber.yield false
-      end
-    end
-    fiber.resume h
-  end
-
-  def self.prompt h={}, &b
-    if block_given?
-      fiber = Fiber.new { |hh| Fiber.yield b.call(LLAMA.post(hh)) }
-    else
-      fiber = Fiber.new { |hh| Fiber.yield LLAMA.post(hh) }
-    end
-    fiber.resume h
-  end
 end

```
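After this change, `LLAMA.post` is the only public entry point left in llama.rb; the removed `make`, `if?`, `test`, and `prompt` helpers are superseded by the `Lf` module in llamafile.rb below. A minimal usage sketch, assuming a llamafile server is reachable at the address in `ENV['LLAMA']` (the localhost URL here is an assumption):

```ruby
# Sketch: one /completion round-trip. base_uri reads ENV['LLAMA'] when the
# class body is evaluated, so set it before requiring the gem. post returns
# the cleaned completion text, or false on a non-200 response or an error.
ENV['LLAMA'] ||= 'http://localhost:8080' # assumed local llamafile server
require 'llamafile'

reply = LLAMA.post(prompt: %[User: Say hello in one short sentence.\nLlama: ])
puts reply if reply

# The block form post-processes the response before it is yielded back:
LLAMA.post(prompt: %[User: Say hello.\nLlama: ]) { |r| puts r.upcase }
```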
data/lib/llamafile/version.rb
CHANGED
(the +1 -1 change here is the version-constant bump from 0.4.9 to 0.5.0)
data/lib/llamafile.rb
CHANGED

```diff
@@ -1,19 +1,68 @@
 # frozen_string_literal: true

-require '
+require 'httparty'

 require_relative "llamafile/version"
-
-
-
-
-
+
+module LLAMA
+  DEF = {
+    stream: false,
+    grammar: %[root ::= l+\nl ::= i "\n"\ni ::= [^\n\t{|}]+ [.!?]],
+    n_predict: 2048,
+    n_probs: 0,
+    cache_prompt: true,
+    min_keep: 10,
+    min_p: 0.05,
+    mirostat: 2,
+    mirostat_eta: 0.1,
+    mirostat_tau: 5,
+    repeat_lat_n: 256,
+    repeat_penalty: 2,
+    slot_id: -1,
+    temperature: 0,
+    tfs_z: 1,
+    top_k: 95,
+    top_p: 0.95,
+    typical_p: 1,
+    stop: ['</s>','Llama:','User:']
+  }
+
+  class Llama
+    include HTTParty
+    base_uri ENV['LLAMA']
+  end
+  def self.post h={}, &b
+    hh = {
+      headers: { "Content-Type": "application/json", "Connection": 'keep-alive', "Priority": 'u=0' },
+      body: JSON.generate(DEF.merge(h))
+    }
+    fiber = Fiber.new do |hhh|
+      begin
+        r = Llama.post('/completion', hhh)
+        #puts %[LLAMA CODE: #{r.code}]
+        if r.code == 200
+          rr = r['content'].gsub(/<.+>/, "").gsub(/\s\s+/, " ").gsub(/\n+/, "\n");
+          if block_given?
+            Fiber.yield b.call(rr);
+          else
+            Fiber.yield rr;
+          end
+        else
+          Fiber.yield false
+        end
+      rescue => err
+        puts %[LLAMA POST ERR: #{err}]
+        Fiber.yield false
+      end
+    end
+    fiber.resume(hh)
+  end
+end

 module Lf
   AI = %[a helpful artificial intelligence who responds accurately and truthfully to User.]

   AND_BOOL = %[ Respond yes or no.]
-  AND_WORD = %[ Respond with a single word.]

   @@CHAR = AI
   def self.character
@@ -22,57 +71,39 @@ module Lf
   def self.inspector
     %[#{@@CHAR} #{AND_BOOL}]
   end
-  def self.researcher
-    %[#{@@CHAR} #{AND_WORD}]
-  end

+  @@GRAM = {
+    bool: %[root ::= "yes" | "no"],
+    string: %[root ::= en+ (" " en+)+ "\n"\nen ::= [a-zA-Z] | [.!?]],
+  }
+  def self.grammar
+    @@GRAM
+  end
+
   @@TMPL = {
-
-
-    user: %[AI is <%= Lf.character %>\nUser: <%= @query %>\nAI: ],
-    task: %[AI is <%= Lf.character %>\nUser: <%= @query.join("\n") %>\nAI: ],
+    bool: %[AI is <%= Lf.inspector %>\nUser: Does "<%= @output %>" <%= @condition %> "<%= @input %>"\nAI: ],
+    string: %[AI is <%= Lf.character %>\nUser: <%= [@query].flatten.join("\n") %>\nAI: ],
   }
   def self.template
     @@TMPL
   end

-  def self.erb s
+  def self.erb s
     ERB.new(s).result(binding)
   end

-  def self.does?
-    @
-    @
-    @condition =
-    LLAMA.post(n_predict: 4, grammar:
+  def self.does? output, condition, input
+    @input = input
+    @output = output
+    @condition = condition
+    LLAMA.post(n_predict: 4, grammar: @@GRAM[:bool], prompt: Lf.erb(Lf.template[:bool])) == 'yes';
   end
-
-
-
-    LLAMA.post(grammar: GRAMMAR[:number], prompt: Lf.erb(Lf.template[:user]));
-  end
-  def self.word q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:word], prompt: Lf.erb(Lf.template[:word]));
-  end
-  def self.string q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:string], prompt: Lf.erb(Lf.template[:user]));
-  end
-  def self.list q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:list], prompt: Lf.erb(Lf.template[:user]));
+  def self.prompt input
+    @input = input
+    LLAMA.post(grammar: @@GRAM[:string], prompt: Lf.erb(Lf.template[:string]));
   end
-
-
-
+
+  def llamafile
+    Lf
   end
 end
-
-#require_relative "llamafile/actor"
-
-#VOICE.hear = lambda { |tgt, voice, payload| puts %[VOICE #{tgt}: #{voice} #{payload}]; MIND.publish(%[#{tgt}/], %[Thinking like a #{voice}, #{payload}]) }
-#MIND.input = lambda { |tgt, payload| puts %[THINK INPUT #{tgt}: #{payload}]; VOICE.hear(tgt, payload); }
-#MIND.output = lambda { |tgt, payload| puts %[THINK OUTPUT #{tgt}: #{payload}]; }
-
-#MIND.think!
```
data/llamafile.gemspec
CHANGED

```diff
@@ -33,7 +33,7 @@ Gem::Specification.new do |spec|
   # Uncomment to register a new dependency of your gem
   spec.add_dependency "httparty"
   spec.add_dependency "multi_xml"
-  spec.add_dependency "mqtt"
+  # spec.add_dependency "mqtt"
   spec.add_dependency "csv"
   spec.add_dependency "bigdecimal"
   # For more information and examples about making a new gem, check out our
```
metadata
CHANGED

```diff
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llamafile
 version: !ruby/object:Gem::Version
-  version: 0.4.9
+  version: 0.5.0
 platform: ruby
 authors:
 - Erik Olson
@@ -37,20 +37,6 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
-- !ruby/object:Gem::Dependency
-  name: mqtt
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: csv
   requirement: !ruby/object:Gem::Requirement
```