llamafile 0.4.9 → 0.5.0

This diff shows the changes between publicly released versions of the package as they appear in its public registry, and is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 5045b21fc2338baff2c3035474ca7d3c4788e0ff2aafdeeea5b48a7993bcdacf
-  data.tar.gz: f64deb0fdbe227cf4888d440e69f4deb8bad1ea6f302f464aad7aa52f868db53
+  metadata.gz: dd2f077f1692c00a61954000c64dc038004e24fe0df98c4a440316516e0d9a4a
+  data.tar.gz: 7a2dbf3c9191d974fea4fe1f0c2d8c51d35a01bd0c9a48b0adf5d5331e8aa0dc
 SHA512:
-  metadata.gz: 8b839ab0fdd4b0ac3a37bb3ba9542476d7bc6cd769b6089a5f4db905739ca18b42d511bc1cf691b85de1b02391d41d9f80e56c99500eea0a44599999b154dd59
-  data.tar.gz: d418a352e7c15d2d2910000ba7bae52a8ba24a81a250f47d904b9e33b4384cf62a06d22d602f0c55ad26980b0ba5b05980b4eaa387372d5a4eca08ff14e05dc9
+  metadata.gz: 96777f7209ab075189c7f01218a852d73d33c5bb218eb015e8ef9958024a3f371051d3cfc0effab4cdd588a060177cfb599becc7fa61eea94f05139ea4b55094
+  data.tar.gz: 834d4b313d6894b3c612cc2bd696d34d2d719bffa8eca6c6391ccae0b7a974e6c2c620052c8d2e3167cef2aaa4e71811c63661fc3e2cee4a2025f23f46963d46
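
These checksums can be re-derived locally. A minimal verification sketch, assuming the 0.5.0 gem has been fetched with gem fetch llamafile -v 0.5.0 and unpacked with tar -xf llamafile-0.5.0.gem so that data.tar.gz sits in the current directory:

    require 'digest'

    # SHA256 of data.tar.gz as recorded in the new checksums.yaml above.
    expected = '7a2dbf3c9191d974fea4fe1f0c2d8c51d35a01bd0c9a48b0adf5d5331e8aa0dc'
    actual   = Digest::SHA256.file('data.tar.gz').hexdigest
    puts(actual == expected ? 'data.tar.gz checksum OK' : 'checksum MISMATCH')
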
data/lib/llamafile/llama.rb CHANGED
@@ -1,4 +1,4 @@
-require 'httparty'
+
 module LLAMA
   DEF = {
     stream: false,
@@ -25,19 +25,12 @@ module LLAMA
   class Llama
     include HTTParty
     base_uri ENV['LLAMA']
-    #default_timeout 120
   end
   def self.post h={}, &b
-    #puts %[LLAMA IN: #{h}]
     hh = {
-      headers: {
-        "Content-Type": "application/json",
-        "Connection": 'keep-alive',
-        "Priority": 'u=0'
-      },
+      headers: { "Content-Type": "application/json", "Connection": 'keep-alive', "Priority": 'u=0' },
       body: JSON.generate(DEF.merge(h))
     }
-    #puts %[LLAMA PACK: #{hh}]
     fiber = Fiber.new do |hhh|
       begin
         r = Llama.post('/completion', hhh)
@@ -59,49 +52,5 @@ module LLAMA
       end
       fiber.resume(hh)
     end
-
-  def self.make h={}
-    fiber = Fiber.new do |hh|
-      if r = LLAMA.post(prompt: PROMPT.make(hh))
-        Fiber.yield r
-      else
-        Fiber.yield false
-      end
-    end
-    fiber.resume h
-  end
-
-  def self.if? h={}
-    fiber = Fiber.new do |hh|
-      if LLAMA.post({ n_predict: 4, grammar: GRAMMAR[:bool], prompt: PROMPT.test(hh) }) == 'yes'
-        Fiber.yield true
-      else
-        Fiber.yield false
-      end
-    end
-    fiber.resume h
-  end
-
-  def self.test h={}
-    fiber = Fiber.new do |hh|
-      hh[:grammar] = GRAMMAR[:bool]
-      hh[:n_predict] = 4
-      if LLAMA.post(hh) == 'yes'
-        Fiber.yield true
-      else
-        Fiber.yield false
-      end
-    end
-    fiber.resume h
-  end
-
-  def self.prompt h={}, &b
-    if block_given?
-      fiber = Fiber.new { |hh| Fiber.yield b.call(LLAMA.post(hh)) }
-    else
-      fiber = Fiber.new { |hh| Fiber.yield LLAMA.post(hh) }
-    end
-    fiber.resume h
-  end
-
 end
 
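
Because fiber.resume(hh) runs the fiber immediately, the surviving LLAMA.post helper behaves like a synchronous call: it returns the cleaned completion string, or false on a non-200 response or an exception. A minimal usage sketch, assuming a llama.cpp-style /completion server is reachable at the address in ENV['LLAMA'] (the URL and prompts below are illustrative):

    ENV['LLAMA'] ||= 'http://localhost:8080' # assumed address; read once by base_uri at load time
    require 'llamafile'

    # Plain call: returns the cleaned completion text, or false on failure.
    puts LLAMA.post(prompt: "User: Name one planet.\nAI: ")

    # Block form: the block transforms the cleaned text before it is returned.
    puts LLAMA.post(prompt: "User: Say hello.\nAI: ") { |text| text.upcase }
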
data/lib/llamafile/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Llamafile
-  VERSION = "0.4.9"
+  VERSION = "0.5.0"
 end
data/lib/llamafile.rb CHANGED
@@ -1,19 +1,68 @@
 # frozen_string_literal: true
 
-require 'mqtt'
+require 'httparty'
 
 require_relative "llamafile/version"
-#require_relative "llamafile/mind"
-require_relative "llamafile/llama"
-require_relative "llamafile/grammar"
-#require_relative "llamafile/prompt"
-#require_relative "llamafile/voice"
+
+module LLAMA
+  DEF = {
+    stream: false,
+    grammar: %[root ::= l+\nl ::= i "\n"\ni ::= [^\n\t{|}]+ [.!?]],
+    n_predict: 2048,
+    n_probs: 0,
+    cache_prompt: true,
+    min_keep: 10,
+    min_p: 0.05,
+    mirostat: 2,
+    mirostat_eta: 0.1,
+    mirostat_tau: 5,
+    repeat_lat_n: 256,
+    repeat_penalty: 2,
+    slot_id: -1,
+    temperature: 0,
+    tfs_z: 1,
+    top_k: 95,
+    top_p: 0.95,
+    typical_p: 1,
+    stop: ['</s>','Llama:','User:']
+  }
+
+  class Llama
+    include HTTParty
+    base_uri ENV['LLAMA']
+  end
+  def self.post h={}, &b
+    hh = {
+      headers: { "Content-Type": "application/json", "Connection": 'keep-alive', "Priority": 'u=0' },
+      body: JSON.generate(DEF.merge(h))
+    }
+    fiber = Fiber.new do |hhh|
+      begin
+        r = Llama.post('/completion', hhh)
+        #puts %[LLAMA CODE: #{r.code}]
+        if r.code == 200
+          rr = r['content'].gsub(/<.+>/, "").gsub(/\s\s+/, " ").gsub(/\n+/, "\n");
+          if block_given?
+            Fiber.yield b.call(rr);
+          else
+            Fiber.yield rr;
+          end
+        else
+          Fiber.yield false
+        end
+      rescue => err
+        puts %[LLAMA POST ERR: #{err}]
+        Fiber.yield false
+      end
+    end
+    fiber.resume(hh)
+  end
+end
 
 module Lf
   AI = %[a helpful artificial intelligence who responds accurately and truthfully to User.]
 
   AND_BOOL = %[ Respond yes or no.]
-  AND_WORD = %[ Respond with a single word.]
 
   @@CHAR = AI
   def self.character
@@ -22,57 +71,39 @@ module Lf
   def self.inspector
     %[#{@@CHAR} #{AND_BOOL}]
   end
-  def self.researcher
-    %[#{@@CHAR} #{AND_WORD}]
-  end
 
+  @@GRAM = {
+    bool: %[root ::= "yes" | "no"],
+    string: %[root ::= en+ (" " en+)+ "\n"\nen ::= [a-zA-Z] | [.!?]],
+  }
+  def self.grammar
+    @@GRAM
+  end
+
   @@TMPL = {
-    does: %[AI is <%= Lf.inspector %>\nUser: Does "<%= @response %>" <%= @condition %> "<%= @query %>"\nAI: ],
-    word: %[AI is <%= Lf.researcher %>\nUser: <%= @query %>\nAI: ],
-    user: %[AI is <%= Lf.character %>\nUser: <%= @query %>\nAI: ],
-    task: %[AI is <%= Lf.character %>\nUser: <%= @query.join("\n") %>\nAI: ],
+    bool: %[AI is <%= Lf.inspector %>\nUser: Does "<%= @output %>" <%= @condition %> "<%= @input %>"\nAI: ],
+    string: %[AI is <%= Lf.character %>\nUser: <%= [@query].flatten.join("\n") %>\nAI: ],
   }
   def self.template
     @@TMPL
   end
 
-  def self.erb s, params={}
+  def self.erb s
     ERB.new(s).result(binding)
   end
 
-  def self.does? r, c, q
-    @query = q
-    @response = r
-    @condition = c
-    LLAMA.post(n_predict: 4, grammar: GRAMMAR[:bool], prompt: Lf.erb(Lf.template[:does])) == 'yes';
+  def self.does? output, condition, input
+    @input = input
+    @output = output
+    @condition = condition
+    LLAMA.post(n_predict: 4, grammar: @@GRAM[:bool], prompt: Lf.erb(Lf.template[:bool])) == 'yes';
   end
-
-  def self.number q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:number], prompt: Lf.erb(Lf.template[:user]));
-  end
-  def self.word q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:word], prompt: Lf.erb(Lf.template[:word]));
-  end
-  def self.string q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:string], prompt: Lf.erb(Lf.template[:user]));
-  end
-  def self.list q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:list], prompt: Lf.erb(Lf.template[:user]));
+  def self.prompt input
+    @input = input
+    LLAMA.post(grammar: @@GRAM[:string], prompt: Lf.erb(Lf.template[:string]));
   end
-  def self.task *q
-    @query = q
-    LLAMA.post(grammar: GRAMMAR[:string], prompt: Lf.erb(Lf.template[:task]));
+
+  def llamafile
+    Lf
   end
 end
-
-#require_relative "llamafile/actor"
-
-#VOICE.hear = lambda { |tgt, voice, payload| puts %[VOICE #{tgt}: #{voice} #{payload}]; MIND.publish(%[#{tgt}/], %[Thinking like a #{voice}, #{payload}]) }
-#MIND.input = lambda { |tgt, payload| puts %[THINK INPUT #{tgt}: #{payload}]; VOICE.hear(tgt, payload); }
-#MIND.output = lambda { |tgt, payload| puts %[THINK OUTPUT #{tgt}: #{payload}]; }
-
-#MIND.think!
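
The reworked Lf module reduces the public surface to a yes/no check (does?) and a free-form completion (prompt). A usage sketch under the same assumptions as above; note that, as released, the :string template interpolates @query while Lf.prompt assigns @input, so the text passed to Lf.prompt may not actually reach the rendered prompt:

    ENV['LLAMA'] ||= 'http://localhost:8080' # assumed server address
    require 'llamafile'

    # Yes/no judgment via the :bool grammar (output constrained to "yes" | "no").
    puts Lf.does?("The sky is blue.", "answer", "What color is the sky?")

    # Free-form completion via the :string grammar.
    puts Lf.prompt("Say hello in one short sentence.")
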
data/llamafile.gemspec CHANGED
@@ -33,7 +33,7 @@ Gem::Specification.new do |spec|
   # Uncomment to register a new dependency of your gem
   spec.add_dependency "httparty"
   spec.add_dependency "multi_xml"
-  spec.add_dependency "mqtt"
+  # spec.add_dependency "mqtt"
   spec.add_dependency "csv"
   spec.add_dependency "bigdecimal"
   # For more information and examples about making a new gem, check out our
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llamafile
 version: !ruby/object:Gem::Version
-  version: 0.4.9
+  version: 0.5.0
 platform: ruby
 authors:
 - Erik Olson
@@ -37,20 +37,6 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
-- !ruby/object:Gem::Dependency
-  name: mqtt
-  requirement: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
-  type: :runtime
-  prerelease: false
-  version_requirements: !ruby/object:Gem::Requirement
-    requirements:
-    - - ">="
-      - !ruby/object:Gem::Version
-        version: '0'
 - !ruby/object:Gem::Dependency
   name: csv
   requirement: !ruby/object:Gem::Requirement
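
Once 0.5.0 is installed, the dependency removal can be confirmed with the stock RubyGems API; a quick sketch:

    require 'rubygems'

    # List the declared runtime dependencies of the installed 0.5.0 gem;
    # mqtt should no longer appear.
    spec = Gem::Specification.find_by_name('llamafile', '0.5.0')
    puts spec.runtime_dependencies.map(&:name).sort
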