llm_memory 0.1.2 → 0.1.3
- checksums.yaml +4 -4
- data/Gemfile.lock +1 -1
- data/README.md +2 -2
- data/lib/llm_memory/broca.rb +20 -15
- data/lib/llm_memory/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 85f3330ed767cc28b5a3276a7772678c36726ce04562c184a4e6dcf287d14d1d
+  data.tar.gz: 3e1ae83b2517f7f80bb857de5024b62b886bb6a045428f2d40159e48818cf286
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: dba158d07c4a97a5b2e6bb1a98987173ba427a18a1a06d0a422f366ab2f7335ac168aea0351657b0c7f957352e6ffe747a2d817deb7c8d5454010c2ec535999a
+  data.tar.gz: 1db3ccb501f3b56d6e6dc00997f3f085b74f395521ffbbc1775bc5799bc388410523f6f209cfc185efbbf1437177c07dd5ee1725a49f6a27b207c6fc296067aa
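These digests cover the two archives packed inside the `.gem` file, so the release can be re-verified locally. A minimal Ruby sketch, assuming the gem has been fetched into the current directory with `gem fetch llm_memory -v 0.1.3` (the file name below follows from that assumption):

```ruby
require "digest"
require "rubygems/package"
require "yaml"
require "zlib"

GEM_FILE = "llm_memory-0.1.3.gem" # assumes a prior `gem fetch`

expected = {}
actual = {}

File.open(GEM_FILE, "rb") do |io|
  Gem::Package::TarReader.new(io) do |tar|
    tar.each do |entry|
      case entry.full_name
      when "checksums.yaml.gz"
        # checksums.yaml ships gzipped inside the gem archive
        expected = YAML.safe_load(Zlib.gunzip(entry.read))["SHA256"]
      when "metadata.gz", "data.tar.gz"
        actual[entry.full_name] = Digest::SHA256.hexdigest(entry.read)
      end
    end
  end
end

actual.each do |name, digest|
  puts "#{name}: #{digest == expected[name] ? "OK" : "MISMATCH"}"
end
```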
data/Gemfile.lock
CHANGED
data/README.md
CHANGED
@@ -85,7 +85,7 @@ related_docs = hippocampus.query(query_str, limit: 3)
 #},,,]
 
 # ERB
-
+prompt = <<-TEMPLATE
 Context information is below.
 ---------------------
 <% related_docs.each do |doc| %>
@@ -98,7 +98,7 @@ Given the context information and not prior knowledge,
 answer the question: <%= query_str %>
 TEMPLATE
 
-broca = LlmMemory::Broca.new(prompt:
+broca = LlmMemory::Broca.new(prompt: prompt, model: 'gpt-3.5-turbo')
 messages = broca.respond(query_str: query_str, related_docs: related_docs)
 
 ...
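The heredoc fix above matters because Broca renders this string through ERB (see the broca.rb diff below): every key handed to `respond` becomes a local variable inside the template. A standalone sketch of that rendering, using hypothetical sample values since the README's loop body is elided between the two hunks:

```ruby
require "erb"

# Hypothetical stand-ins for the README's query_str and related_docs;
# in the gem, the real documents come from Hippocampus#query.
query_str = "What is llm_memory?"
related_docs = [{content: "Doc A"}, {content: "Doc B"}]

prompt = <<-TEMPLATE
Context information is below.
---------------------
<% related_docs.each do |doc| %>
<%= doc[:content] %>
<% end %>
---------------------
answer the question: <%= query_str %>
TEMPLATE

# result_with_hash exposes each hash key as a template-local variable,
# mirroring what Broca#generate_prompt does with the respond arguments.
puts ERB.new(prompt).result_with_hash(
  query_str: query_str,
  related_docs: related_docs
)
```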
data/lib/llm_memory/broca.rb
CHANGED
@@ -20,26 +20,31 @@ module LlmMemory
     @max_token = max_token
   end
 
-  def respond(*args)
-    final_prompt = generate_prompt(*args)
+  def respond(args)
+    final_prompt = generate_prompt(args)
     @messages.push({role: "user", content: final_prompt})
     adjust_token_count
-    response = client.chat(
-      parameters: {
-        model: @model,
-        messages: @messages,
-        temperature: @temperature
-      }
-    )
-    response_content = response.dig("choices", 0, "message", "content")
-    @messages.push({role: "system", content: response_content})
-    response_content
+    begin
+      response = client.chat(
+        parameters: {
+          model: @model,
+          messages: @messages,
+          temperature: @temperature
+        }
+      )
+      response_content = response.dig("choices", 0, "message", "content")
+      @messages.push({role: "system", content: response_content})
+      response_content
+    rescue => e
+      puts e.inspect
+      # @messages = []
+      nil
+    end
   end
 
-  def generate_prompt(*args)
-    merged_args = args.reduce(:merge)
+  def generate_prompt(args)
     erb = ERB.new(@prompt)
-    erb.result_with_hash(merged_args)
+    erb.result_with_hash(args)
   end
 
   def adjust_token_count
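Net effect of the change: `respond` now takes a single hash whose keys become ERB locals, and an API failure is caught, printed, and surfaced as `nil` instead of raising. A usage sketch under those assumptions, reusing the README example's names (an OpenAI API key is assumed to be configured):

```ruby
# prompt, query_str and related_docs as in the README example above.
broca = LlmMemory::Broca.new(prompt: prompt, model: "gpt-3.5-turbo")

answer = broca.respond(query_str: query_str, related_docs: related_docs)

# With the new begin/rescue, a failed chat call prints the inspected
# exception and returns nil, so callers should check for that.
if answer.nil?
  warn "LLM call failed; see the exception output above."
else
  puts answer
end
```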
data/lib/llm_memory/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llm_memory
 version: !ruby/object:Gem::Version
-  version: 0.1.2
+  version: 0.1.3
 platform: ruby
 authors:
 - Shohei Kameda
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-05-
+date: 2023-05-10 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: tiktoken_ruby