llamafile 0.2.0 → 0.2.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: aa8d8b3b005c7f195b14adc3c06a6779302862704942399a8d622a0ab82d86c4
-  data.tar.gz: caa2a7f2c3d092b402c37a352f85aed52d1a47472d4a32685f7a0a7bdd4bcdf0
+  metadata.gz: 7b22adb7e909226fefd56fde304cfb5ba59588656c29fa35d38295ce40044d7f
+  data.tar.gz: 5654991e8854d1681e562cc01ecd5d0c4237c271be718a4561bf842d7a77c22f
 SHA512:
-  metadata.gz: 0fd5a0a86e10c80de170d2c4c4bed938036ac317ab9f866080a5c971a035605d38c9e22b711de2e53ea5c6ef7331c61d0b3a1a2d9dd58b943c0dd8451e53ab10
-  data.tar.gz: 75af1fe10bbf5224e76aeda87ff15db852fdbbd43e8f4ef9c9933a36f81593f99763172460720b7ced9f8583a0f343e4354d47a5d088fd5c5b9d864a3e97f8cc
+  metadata.gz: ecc418a654492771d3325b5695296f0723ad19ec57d24e09940c4d64eb1730a8444248d54592ef5bf17ed10e2426ac0260c6953364b10027d37498849115aac1
+  data.tar.gz: 86658abe351a2b3536be1ef2000d019cb7030f4e4c2d9947f7a9411ebf749b48c3058126012766483e976f2f3b6756a4f61f323aa06a29c36361cf0dd99333b2
@@ -47,21 +47,31 @@ module LLAMA
     end
   end
 
-  # LLAMA.post prompt: PROMPT.make { system: "system prompt", mode: "output mode", output: "previous output", input: "new input" }
+  # LLAMA.post prompt: PROMPT.make("prompt template", params={})
 
   def self.<< i
     fiber = Fiber.new do |ii|
-      Fiber.yield LLAMA.post(prompt: PROMPT.make(input: ii))
+      Fiber.yield LLAMA.post(prompt: i)
     end
     fiber.resume i
   end
+
+  def self.make t, h={}
+    fiber = Fiber.new do |tt, hh|
+      Fiber.yield LLAMA.post(prompt: PROMPT.make(tt, hh))
+    end
+    fiber.resume t, h
+  end
 
   def self.if? h={}
-    if LLAMA.post({ n_predict: 4, grammar: GRAMMAR[:bool], prompt: PROMPT.test(h) }) == 'yes'
-      return true
-    else
-      return false
+    fiber = Fiber.new do |hh|
+      if LLAMA.post({ n_predict: 4, grammar: GRAMMAR[:bool], prompt: PROMPT.test(hh) }) == 'yes'
+        Fiber.yield true
+      else
+        Fiber.yield false
+      end
     end
+    fiber.resume h
   end
 end
 
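
The new LLAMA.make and the reworked LLAMA.if? both run the completion inside a Fiber and return the yielded value. A minimal usage sketch, assuming PROMPT.make renders an ERB-style template against the params hash (as the <%= params[...] %> template in llamafile.rb suggests) and that PROMPT.test accepts the key shown; both strings are illustrative, not documented API:

    # hypothetical calls against the 0.2.2 surface above
    answer = LLAMA.make("<%= params[:input] %>", input: "Name one Ruby web server.")
    truthy = LLAMA.if?(input: "Is Ruby dynamically typed?") # true/false, constrained by GRAMMAR[:bool]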
data/lib/llamafile/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Llamafile
-  VERSION = "0.2.0"
+  VERSION = "0.2.2"
 end
data/lib/llamafile.rb CHANGED
@@ -10,19 +10,20 @@ require_relative "llamafile/prompt"
 require_relative "llamafile/voice"
 
 module Lf
+  def self.make t, h={}
+    LLAMA.make t, h
+  end
   def self.<< p
     LLAMA << p
   end
   def self.if? h={}
     LLAMA.if?(h)
   end
-  def self.prompt h={}
-    LLAMA.post(prompt: PROMPT.make(h))
-  end
+
   @@C = Hash.new { |h,k| h[k] = C.new(k) }
   class C
     attr_accessor :character, :actor, :example, :format
-    attr_reader :output, :input
+    attr_reader :output, :input, :history
     def initialize k
       @id = k
       @input = ""
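
Lf.make is a thin passthrough to LLAMA.make and replaces the removed Lf.prompt, so callers now supply the prompt template themselves. The call shape, reusing the template string that C#chain passes below (the params values here are illustrative):

    Lf.make("<%= params[:output] %>\n<%= params[:input] %>", output: "prior context", input: "Hello")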
@@ -31,25 +32,24 @@ module Lf
       @actor = %[an honest person.]
       @format = %[Format your response as properly formatted markdown.]
       @example = %[# this is a heading\nThis is some general text about the heading.\n1. list item one.\n2. list item two.\n3. list item three.]
-      @convo = []
+      @history = ["Hello","Hi."]
     end
-    def pre q, a
-      @convo << [ q, a ]
-    end
-    def convo
-      o = []; @convo[-5..-1].each { |e| o << %[User: #{e[0]}\nLlama: #{e[1]}] };
+    def conversation
+      o = @history.map { |e| o << %[User: #{e[0]}\nLlama: #{e[1]}] };
       return o.join("\n\n")
     end
     def prompt
-      %[Llama is #{@character}\nUser is #{@actor}\n#{@format}\nUse this example to guide your response:\n#{@example}\n#{convo}]
+      %[Llama is #{@character}\nUser is #{@actor}\n#{@format}\nUse this example to guide your response:\n#{@example}\n#{conversation}]
     end
     def << i
       chain i
     end
     def chain *p
       [p].flatten.compact.each { |e|
-        @convo << [ i, Lf.prompt(output: prompt, input: i)]
+        puts %[<-- #{e}]
+        @history << [ e, Lf.make("<%= params[:output] %>\n<%= params[:input] %>", output: prompt, input: e)]
         @output = @convo[-1][1]
+        puts %[--> #{@output}]
         @input = e
       }
       return @output
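
Note that as published, chain still reads @convo (replaced by @history in this release) and conversation appends to o inside its own map block while o is still nil, so both paths raise NoMethodError; reading @history[-1][1] and returning the formatted string from map would be the local fix. The intended flow, using only methods this diff shows:

    bot = Lf::C.new(:demo)                 # instances normally come from @@C
    bot.character = %[a terse librarian.]
    bot << "Recommend one book on fibers." # << -> chain -> Lf.make -> LLAMA.post
    puts bot.output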
@@ -64,11 +64,6 @@ module Lf
   def self.delete k
     @@C.delete(k)
   end
-  def self.chain *p
-    s = {}
-    [p].flatten.compact.each { |e| o = []; s.each_pair {|k,v| o << %[User: #{k}\nLlama: #{v}] }; s[e] = Lf.prompt(output: o.join("\n"), input: e); }
-    return s
-  end
 end
 
 VOICE.hear = lambda { |tgt, voice, payload| puts %[VOICE #{tgt}: #{voice} #{payload}]; MIND.publish(%[#{tgt}/], %[Thinking like a #{voice}, #{payload}]) }
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llamafile
 version: !ruby/object:Gem::Version
-  version: 0.2.0
+  version: 0.2.2
 platform: ruby
 authors:
 - Erik Olson