llm_memory 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 0615d07f9e76aa0b4c762d027e757bb0cb03cc1448b11d5b5cf684d5a75b381a
-  data.tar.gz: 6aa24ea5ddf2476876ca2d873400018785c3bcc6c385faf35cac3f59de94049a
+  metadata.gz: 072b5fa983000a18bccb6e5d0185663cbb30547a0fd6cc6194e1d4235ae833fb
+  data.tar.gz: 7233ecf0cbbb254b08cbf7d1fe747b8374e1e3baa3b6214a1ba5fa64ecd0360d
 SHA512:
-  metadata.gz: 7f6ac569b18f28502e3b0e34614498b559dff4b6e085988f3bae2f5160aef9eb6491a7e61f78edc3d5e0c9be0ac8878f4e0747cc2d02358aa389071eea393c0c
-  data.tar.gz: b818a177d945d9e2544f1079f214231d792419772688b4907a50a8ceb2fde2435d5571e4e03307fa1bee8f8d22499e7ddf5821363203b1a1ecbd473d4edbdc2d
+  metadata.gz: 9e670ee60c3e780343317d2b2175ea5cf9fefdaa8b4ce81b13ec1b32caa3cbd13ad4363dec63ebaabf803e434897eb34a21d4b4e0b37b60bb4b3ff22b6a9a80c
+  data.tar.gz: e4defb25011469c10b36c6d4b98763dcb56a673eaedb2709741769de0fc888a8d30e4301f5444c88dbb07c31b8a290ae0fbfae584d9d93a21c6c7ce34d96c40b
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    llm_memory (0.1.2)
+    llm_memory (0.1.4)
       redis (~> 4.6.0)
       ruby-openai (~> 3.7.0)
       tiktoken_ruby (~> 0.0.4)
data/README.md CHANGED
@@ -85,7 +85,7 @@ related_docs = hippocampus.query(query_str, limit: 3)
 #},,,]
 
 # ERB
-template = <<-TEMPLATE
+prompt = <<-TEMPLATE
 Context information is below.
 ---------------------
 <% related_docs.each do |doc| %>
@@ -98,7 +98,7 @@ Given the context information and not prior knowledge,
 answer the question: <%= query_str %>
 TEMPLATE
 
-broca = LlmMemory::Broca.new(prompt: tempate, model: 'gpt-3.5-turbo')
+broca = LlmMemory::Broca.new(prompt: prompt, model: 'gpt-3.5-turbo')
 messages = broca.respond(query_str: query_str, related_docs: related_docs)
 
 ...
data/lib/llm_memory/broca.rb CHANGED
@@ -20,26 +20,31 @@ module LlmMemory
       @max_token = max_token
     end
 
-    def respond(*args)
-      final_prompt = generate_prompt(*args)
+    def respond(args)
+      final_prompt = generate_prompt(args)
       @messages.push({role: "user", content: final_prompt})
       adjust_token_count
-      response = client.chat(
-        parameters: {
-          model: @model,
-          messages: @messages,
-          temperature: @temperature
-        }
-      )
-      response_conent = response.dig("choices", 0, "message", "content")
-      @messages.push({role: "system", content: response_conent})
-      response_conent
+      begin
+        response = client.chat(
+          parameters: {
+            model: @model,
+            messages: @messages,
+            temperature: @temperature
+          }
+        )
+        response_content = response.dig("choices", 0, "message", "content")
+        @messages.push({role: "system", content: response_content})
+        response_content
+      rescue => e
+        puts e.inspect
+        # @messages = []
+        nil
+      end
     end
 
-    def generate_prompt(*args)
-      merged_args = args.reduce(:merge)
+    def generate_prompt(args)
       erb = ERB.new(@prompt)
-      erb.result_with_hash(merged_args)
+      erb.result_with_hash(args)
     end
 
     def adjust_token_count
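Note on the signature change: `respond(*args)` previously accepted several hashes and merged them with `args.reduce(:merge)`; `respond(args)` now takes a single hash of ERB template variables. Keyword-style calls like the README's still work, because Ruby passes bare keywords to a method with no keyword parameters as one positional hash. A minimal sketch under that reading, with a hypothetical one-line template (not taken from the gem):

require "llm_memory"

# Hypothetical ERB template; the README builds a longer one with related_docs.
prompt = "Answer briefly: <%= query_str %>"
broca = LlmMemory::Broca.new(prompt: prompt, model: "gpt-3.5-turbo")

# Both calls hand respond(args) the same single hash of template variables.
broca.respond(query_str: "What is LLM Memory?")
broca.respond({query_str: "What is LLM Memory?"})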
data/lib/llm_memory/hippocampus.rb CHANGED
@@ -23,7 +23,7 @@ module LlmMemory
       raise "Store '#{store_name}' not found." unless store_class
       @store = store_class.new(index_name: index_name)
 
-      # word count, not char count
+      # char count, not word count
       @chunk_size = chunk_size
       @chunk_overlap = chunk_overlap
     end
@@ -87,18 +87,14 @@ module LlmMemory
       docs.each do |item|
         content = item[:content]
         metadata = item[:metadata]
-        words = content.split
-
-        if words.length > @chunk_size
+        if content.length > @chunk_size
           start_index = 0
-
-          while start_index < words.length
-            end_index = [start_index + @chunk_size, words.length].min
-            chunk_words = words[start_index...end_index]
-            chunk = chunk_words.join(" ")
+          while start_index < content.length
+            end_index = [start_index + @chunk_size, content.length].min
+            chunk = content[start_index...end_index]
             result << {content: chunk, metadata: metadata}
-
-            start_index += @chunk_size - @chunk_overlap # Move index to create a overlap
+            break if end_index == content.length
+            start_index += @chunk_size - @chunk_overlap
           end
         else
           result << {content: content, metadata: metadata}
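The chunker now measures `@chunk_size` in characters rather than words, and the new `break` stops the loop once a chunk reaches the end of the string, so it no longer emits a trailing fragment that the previous chunk already covers. A standalone sketch of the same loop with toy values (the function name and inputs are illustrative, not the gem's API):

# Character-based chunking with overlap, mirroring the 0.1.4 loop.
def chunk_text(content, chunk_size:, chunk_overlap:)
  return [content] if content.length <= chunk_size
  chunks = []
  start_index = 0
  while start_index < content.length
    end_index = [start_index + chunk_size, content.length].min
    chunks << content[start_index...end_index]
    break if end_index == content.length # guard added in 0.1.4
    start_index += chunk_size - chunk_overlap
  end
  chunks
end

chunk_text("abcdefghij", chunk_size: 4, chunk_overlap: 2)
# => ["abcd", "cdef", "efgh", "ghij"]
# Without the break, a redundant final "ij" chunk would also be emitted.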
data/lib/llm_memory/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LlmMemory
-  VERSION = "0.1.2"
+  VERSION = "0.1.4"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llm_memory
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.1.4
 platform: ruby
 authors:
 - Shohei Kameda
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-05-09 00:00:00.000000000 Z
+date: 2023-05-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: tiktoken_ruby