llm_memory 0.1.2 → 0.1.3

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0615d07f9e76aa0b4c762d027e757bb0cb03cc1448b11d5b5cf684d5a75b381a
-  data.tar.gz: 6aa24ea5ddf2476876ca2d873400018785c3bcc6c385faf35cac3f59de94049a
+  metadata.gz: 85f3330ed767cc28b5a3276a7772678c36726ce04562c184a4e6dcf287d14d1d
+  data.tar.gz: 3e1ae83b2517f7f80bb857de5024b62b886bb6a045428f2d40159e48818cf286
 SHA512:
-  metadata.gz: 7f6ac569b18f28502e3b0e34614498b559dff4b6e085988f3bae2f5160aef9eb6491a7e61f78edc3d5e0c9be0ac8878f4e0747cc2d02358aa389071eea393c0c
-  data.tar.gz: b818a177d945d9e2544f1079f214231d792419772688b4907a50a8ceb2fde2435d5571e4e03307fa1bee8f8d22499e7ddf5821363203b1a1ecbd473d4edbdc2d
+  metadata.gz: dba158d07c4a97a5b2e6bb1a98987173ba427a18a1a06d0a422f366ab2f7335ac168aea0351657b0c7f957352e6ffe747a2d817deb7c8d5454010c2ec535999a
+  data.tar.gz: 1db3ccb501f3b56d6e6dc00997f3f085b74f395521ffbbc1775bc5799bc388410523f6f209cfc185efbbf1437177c07dd5ee1725a49f6a27b207c6fc296067aa
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    llm_memory (0.1.2)
+    llm_memory (0.1.3)
       redis (~> 4.6.0)
       ruby-openai (~> 3.7.0)
       tiktoken_ruby (~> 0.0.4)
data/README.md CHANGED
@@ -85,7 +85,7 @@ related_docs = hippocampus.query(query_str, limit: 3)
 #},,,]
 
 # ERB
-template = <<-TEMPLATE
+prompt = <<-TEMPLATE
 Context information is below.
 ---------------------
 <% related_docs.each do |doc| %>
@@ -98,7 +98,7 @@ Given the context information and not prior knowledge,
 answer the question: <%= query_str %>
 TEMPLATE
 
-broca = LlmMemory::Broca.new(prompt: tempate, model: 'gpt-3.5-turbo')
+broca = LlmMemory::Broca.new(prompt: prompt, model: 'gpt-3.5-turbo')
 messages = broca.respond(query_str: query_str, related_docs: related_docs)
 
 ...
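
The fix above renames the heredoc to `prompt` so that the variable handed to `LlmMemory::Broca.new` actually exists (the old line referenced a misspelled `tempate`). For context, here is a standalone sketch of how such an ERB prompt is rendered with `ERB#result_with_hash`, using only Ruby's standard library; the document hash shape and the sample values are illustrative assumptions, not data from the gem:

require "erb"

# Trimmed-down version of the README template.
prompt = <<-TEMPLATE
Context information is below.
---------------------
<% related_docs.each do |doc| %>
<%= doc[:content] %>
<% end %>
---------------------
Given the context information and not prior knowledge,
answer the question: <%= query_str %>
TEMPLATE

# Hypothetical stand-ins for the output of hippocampus.query.
vars = {
  related_docs: [{content: "LLM Memory stores documents in Redis."}],
  query_str: "Where are documents stored?"
}

# The hash keys become local variables inside the template.
puts ERB.new(prompt).result_with_hash(vars)
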
@@ -20,26 +20,31 @@ module LlmMemory
       @max_token = max_token
     end
 
-    def respond(*args)
-      final_prompt = generate_prompt(*args)
+    def respond(args)
+      final_prompt = generate_prompt(args)
       @messages.push({role: "user", content: final_prompt})
       adjust_token_count
-      response = client.chat(
-        parameters: {
-          model: @model,
-          messages: @messages,
-          temperature: @temperature
-        }
-      )
-      response_conent = response.dig("choices", 0, "message", "content")
-      @messages.push({role: "system", content: response_conent})
-      response_conent
+      begin
+        response = client.chat(
+          parameters: {
+            model: @model,
+            messages: @messages,
+            temperature: @temperature
+          }
+        )
+        response_content = response.dig("choices", 0, "message", "content")
+        @messages.push({role: "system", content: response_content})
+        response_content
+      rescue => e
+        puts e.inspect
+        # @messages = []
+        nil
+      end
     end
 
-    def generate_prompt(*args)
-      merged_args = args.reduce(:merge)
+    def generate_prompt(args)
       erb = ERB.new(@prompt)
-      erb.result_with_hash(merged_args)
+      erb.result_with_hash(args)
     end
 
     def adjust_token_count
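
This hunk (from the Broca class) changes `respond` and `generate_prompt` to take a single Hash instead of splat arguments, fixes the `response_conent` typo, and wraps the OpenAI chat call in begin/rescue so a failed request prints the error and returns nil rather than raising. A minimal caller-side sketch of what that means, reusing the variable names from the README snippet above; the nil guard is an illustrative suggestion, not code from the gem:

# Assumes prompt, query_str, and related_docs are set up as in the README.
broca = LlmMemory::Broca.new(prompt: prompt, model: "gpt-3.5-turbo")

# With respond(args), keyword arguments at the call site collapse into the
# single Hash parameter, and its keys become the locals available to the
# ERB template via result_with_hash.
reply = broca.respond(query_str: query_str, related_docs: related_docs)

# As of 0.1.3 a failed chat request no longer raises out of respond: the
# error is printed and nil is returned, so guard before using the reply.
if reply.nil?
  warn "no response from the model"
else
  puts reply
end
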
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LlmMemory
-  VERSION = "0.1.2"
+  VERSION = "0.1.3"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llm_memory
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.1.3
 platform: ruby
 authors:
 - Shohei Kameda
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-05-09 00:00:00.000000000 Z
+date: 2023-05-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: tiktoken_ruby