llamafile 0.1.2 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 2f4c56d4887f772001a9d141b7f66901a2c33d21b064aa754475f619a4d5b35b
- data.tar.gz: 60e47d63ec8430daf21f85b8ec2a5894bf9a7b197dd5f33dab154f810c981714
+ metadata.gz: ed6d4dcd5814cd4cb0c0740ce0cc09173bcf171cb47a1533f7d43af03070281a
+ data.tar.gz: 164ce8232580c07fdc77e08ffad3222104d1ece6d36d5c529482cda2b297aba5
  SHA512:
- metadata.gz: cc2e10c0384169ec2499687ab71e32d99b989dbc7171dc33385e417aacf3f279228385f1ddf61c77ce9751ada9195f0d14f93705fcd8d0ad283ce7f907ddb279
- data.tar.gz: 8921078534377357b9f46f48b2961aaf401037af283bfe3373f5cd9574e12c3fa31fc4686d542ebbfbb5f6c1ccb34f070857d59f0ebd3f8c86b24e18c2b0293e
+ metadata.gz: f159000c5f9274e216d87db2ae7223b23e3f513eb19a916c439890fb3484a081bb961e8db110f5df155f4e4600f2b2184769bfad98d8ed3d3424f29cf4f7c0c3
+ data.tar.gz: b98b44bf5e08c57f975c698c2ee19a5cce46045735a567b418b20430e3a83224a4fd4b772469fc93825aded3a69edcecac8aa2c58cab977aa0b8a1dce33a8a68
data/README.md CHANGED
@@ -19,7 +19,8 @@ gem install llamafile
  ### module
  Pass a raw string to the module and
  ```
- Llamafile.llama("What is the meaning of life?") => { input: "What is the meaning of life?", output: "42." }
+ Llamafile.llama("What is the meaning of life?") => "42."
+ Llamafile << "How can we achieve world peace?" => "I don't know."
  ```
  ### include
  ```
@@ -29,10 +30,8 @@ class X
  @id = k
  @db = My.data[k]
  end
- def what_is_my_name?
-   @prompt = %[My name is #{@db[:name]}.]
-   h = llama(%[#{@prompt} What is my name?])
-   return h[:output]
+ def hello
+   llama(%[Hello, World!])
  end
  end
  ```
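
Taken together, the README change narrows `Llamafile.llama` from a hash return to a bare string and documents a new `<<` shorthand. A minimal usage sketch of the documented surface, assuming a llamafile server is running (the 0.1.5 sources below read its base URI from `ENV['LLAMA']`); note that the README still addresses the `Llamafile` module while `lib/llamafile.rb` in 0.1.5 moves these calls to a new `Lf` module, so the README snippet may lag the implementation:

```
ENV['LLAMA'] ||= 'http://127.0.0.1:8080' # read at load time by the HTTParty class below
require 'llamafile'

# Per the updated README: raw string in, plain string out
# (0.1.2 returned { input: ..., output: ... } instead).
Llamafile.llama("What is the meaning of life?") # => "42."

# The << form; in the 0.1.5 sources it is implemented on LLAMA and Lf:
Lf << "How can we achieve world peace?"         # => "I don't know."
```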
data/lib/llamafile/llama.rb CHANGED
@@ -1,40 +1,67 @@
  require 'httparty'
- module LLAMA
-   class Llama
-     include HTTParty
-     base_uri 'http://127.0.0.1:8080'
-   end
-
-   @@P = {
-     respond: %[Respond simply and directly with as few words possible.],
-     answer: %[Answer questions from the user as honestly and correctly as possible.],
-     friend: %[Respond in a helpful friendly manner.],
-     nanny: %[Construct a story based upon things you are told.],
-     truth: %[Respond truthfully.]
-   }
-
-   def self.prompt
-     @@P
-   end
-
-
-   def self.flagz
-     Llama.get('/flagz')
-   end
-
-   def self.post p, *i
-     h = LLAMA.flagz.to_h
-     h['prompt'] = p
-     h['messages'] = [i].flatten
-     puts %[LLAMA POST #{h}]
-     r = Llama.post('/v1/chat/completions',
-       body: JSON.generate(h),
-       headers: {
-         'Content-Type' => 'application/json',
-         'Accept' => 'application/json',
-         'Prefer' => 'wait'
-       })
-     r['choices'][0]['message']['content']
-   end
+ module LLAMA
+   DEF = {
+     stream: false,
+     grammar: %[root ::= l+\nl ::= i "\n"\ni ::= [^\n\t{|}]+ [.!?]],
+     n_predict: 2048,
+     n_probs: 0,
+     cache_prompt: true,
+     min_keep: 10,
+     min_p: 0.05,
+     mirostat: 2,
+     mirostat_eta: 0.1,
+     mirostat_tau: 5,
+     repeat_lat_n: 256,
+     repeat_penalty: 2,
+     slot_id: -1,
+     temperature: 0,
+     tfs_z: 1,
+     top_k: 95,
+     top_p: 0.95,
+     typical_p: 1,
+     stop: ['</s>','Llama:','User:']
+   }
+
+   class Llama
+     include HTTParty
+     base_uri ENV['LLAMA']
+     #default_timeout 120
+   end
+   def self.post h={}
+     #puts %[LLAMA IN: #{h}]
+     hh = {
+       headers: {
+         "Content-Type": "application/json",
+         "Connection": 'keep-alive',
+         "Priority": 'u=0'
+       },
+       body: JSON.generate(DEF.merge(h))
+     }
+     #puts %[LLAMA PACK: #{hh}]
+     r = Llama.post('/completion', hh)
+     #puts %[LLAMA CODE: #{r.code}]
+     if r.code == 200
+       return r['content'].gsub(/<.+>/, "").gsub(/\s\s+/, " ").gsub(/\n+/, "\n");
+     else
+       return false
+     end
+   end
+
+   # LLAMA.post prompt: PROMPT.make { system: "system prompt", mode: "output mode", output: "previous output", input: "new input" }
+
+   def self.<< i
+     fiber = Fiber.new do |ii|
+       Fiber.yield LLAMA.post(prompt: PROMPT.make(input: ii))
+     end
+     fiber.resume i
+   end
+
+   def self.if? h={}
+     if LLAMA.post({ n_predict: 4, grammar: GRAMMAR[:bool], prompt: PROMPT.test(h) }) == 'yes'
+       return true
+     else
+       return false
+     end
+   end
  end
 
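The rewritten module drops the OpenAI-style `/v1/chat/completions` call in favor of llamafile's native `/completion` endpoint and merges per-call options over the `DEF` defaults; note that `repeat_lat_n` looks like a typo for llama.cpp's `repeat_last_n`, in which case that default is silently ignored by the server. A minimal call sketch, assuming a llamafile server is listening at the address in `ENV['LLAMA']` and that the gem's new `PROMPT`/`GRAMMAR`/`MIND` components (not shown in this diff) load cleanly:

```
ENV['LLAMA'] ||= 'http://127.0.0.1:8080' # must be set before the require
require 'llamafile'

# Any DEF key can be overridden per call; extra keys pass through
# to /completion unchanged.
text = LLAMA.post(prompt: "Q: What is 2 + 2?\nA:", n_predict: 16)
puts text # cleaned completion text, or false on a non-200 response
```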
data/lib/llamafile/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module Llamafile
-   VERSION = "0.1.2"
+   VERSION = "0.1.5"
  end
data/lib/llamafile.rb CHANGED
@@ -1,31 +1,48 @@
  # frozen_string_literal: true

  require_relative "llamafile/version"
-
+ require_relative "llamafile/mind"
  require_relative "llamafile/llama"
+ require_relative "llamafile/grammar"
+ require_relative "llamafile/prompt"
+ require_relative "llamafile/voice"

- module Llamafile
-   class Error < StandardError; end
-
-   attr_accessor :prompt
-
-   def self.prompt
-     LLAMA.prompt
-   end
+ module Lf
+   def self.<< p
+     LLAMA << p
+   end
+   def self.if? h={}
+     LLAMA.if?(h)
+   end
+   def self.prompt h={}
+     LLAMA.post(prompt: PROMPT.make(h))
+   end
+   @@C = Hash.new { |h,k| h[k] = C.new(k) }
+   class C
+     def initialize k
+       @id = k
+       @input = ""
+       @output = ""
+     end
+     def << i
+       @output = Lf.prompt(output: %[User: #{@input}\nLlama: #{@output}], input: i)
+       @input = i
+       return @output
+     end
+   end
+   def self.[] k
+     @@C[k]
+   end
+   def self.keys
+     @@C.keys
+   end
+   def self.delete k
+     @@C.delete(k)
+   end
+ end

-   def prompt
-     Llamafile.prompt
-   end
-
-   def self.llama i
-     LLAMA.post(LLAMA.prompt[:truth], { role: 'user', content: i })
-   end
+ VOICE.hear = lambda { |tgt, voice, payload| puts %[VOICE #{tgt}: #{voice} #{payload}]; MIND.publish(%[#{tgt}/], %[Thinking like a #{voice}, #{payload}]) }
+ MIND.input = lambda { |tgt, payload| puts %[THINK INPUT #{tgt}: #{payload}]; VOICE.hear(tgt, payload); }
+ MIND.output = lambda { |tgt, payload| puts %[THINK OUTPUT #{tgt}: #{payload}]; }

-   def llama i
-     Llamafile.llama i
-   end
-
-   def self.<< i
-     Llamafile.llama i
-   end
- end
+ MIND.think!
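
Besides the module-level helpers, `Lf` now keeps per-key conversation state: `Lf[k]` lazily builds an `Lf::C` whose `<<` folds the previous exchange back into the next prompt. A short usage sketch under the same assumptions (the `MIND`/`VOICE` wiring and `MIND.think!` above come from the new `mind` and `voice` files, whose sources are not part of this diff):

```
ENV['LLAMA'] ||= 'http://127.0.0.1:8080'
require 'llamafile'

chat = Lf[:alice]               # lazily creates the conversation keyed :alice
chat << "Hello!"                # first turn, empty history
chat << "What did I just say?"  # prior input/output fold into the prompt

Lf.keys                         # => [:alice]
Lf.delete(:alice)               # discard the conversation state
```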
data/llamafile.gemspec CHANGED
@@ -32,7 +32,10 @@ Gem::Specification.new do |spec|

  # Uncomment to register a new dependency of your gem
  spec.add_dependency "httparty"
-
+ spec.add_dependency "multi_xml"
+ spec.add_dependency "mqtt"
+ spec.add_dependency "csv"
+ spec.add_dependency "bigdecimal"
  # For more information and examples about making a new gem, check out our
  # guide at: https://bundler.io/guides/creating_gem.html
  end
metadata CHANGED
@@ -1,14 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: llamafile
  version: !ruby/object:Gem::Version
-   version: 0.1.2
+   version: 0.1.5
  platform: ruby
  authors:
  - Erik Olson
- autorequire:
  bindir: exe
  cert_chain: []
- date: 2025-01-30 00:00:00.000000000 Z
+ date: 2025-05-24 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: httparty
@@ -24,6 +23,62 @@ dependencies:
      - - ">="
        - !ruby/object:Gem::Version
          version: '0'
+ - !ruby/object:Gem::Dependency
+   name: multi_xml
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: mqtt
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: csv
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: bigdecimal
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ">="
+       - !ruby/object:Gem::Version
+         version: '0'
  description: Wraps a locally installed llamafile in an a pure ruby object.
  email:
  - xorgnak@gmail.com
@@ -49,7 +104,6 @@ metadata:
    homepage_uri: https://github.com/xorgnak/llamafile
    source_code_uri: https://github.com/xorgnak/llamafile
    changelog_uri: https://github.com/xorgnak/llamafile
- post_install_message:
  rdoc_options: []
  require_paths:
  - lib
@@ -64,8 +118,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
      - !ruby/object:Gem::Version
        version: '0'
  requirements: []
- rubygems_version: 3.4.20
- signing_key:
+ rubygems_version: 3.6.3
  specification_version: 4
  summary: llamafile wrapper
  test_files: []