llm_memory 0.1.10 → 0.1.12

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 5117e4a12dc1c9a3848817dc949038d6c80aa3cfb62b2c10709331fd19baaea8
-  data.tar.gz: 3117e77cd3f0741e94c419cfbc1cb7390aaf84f63aea5df181fdf29df77eb7a4
+  metadata.gz: 7d5876cf6c293ca2371eab3021704be2710c8b3d558e501543bde4acf6898595
+  data.tar.gz: 232d45abb20fb82b09b0f8ba6b22c12168d498cd8e23110d56446ae5265991aa
 SHA512:
-  metadata.gz: 05ea60dbd58ea1aabe974e32b22253febe355701940e16f04da685ecefadcdf58323bd8ab28354af6675cc57f17a9a3cc30e64595684deb1be79d3c8251e1eb0
-  data.tar.gz: da8cf3c76bb436e9665241a135ba1154b45581de08a6cb77a3022066c6c3898ea6beb749d561e7264e19cd2bf900e21c908980c4f03ab0dcdbf52b36036ccb26
+  metadata.gz: 128b6f275d904d2fafb8c7e0c1a26d33462edc015a33b1a3da6b4f1dc6f73ba51ae15f76f25d00221baa167956af06f1d5cd081fe8ab54e54b44c783c7380a98
+  data.tar.gz: 693ec575ca7b4add6b64afb6f3608a3ca6c989274c4f896aaf7520df5e82a0c051d062dd24bd6975a12c585bfaed91b6c4c1a96195e5f1831b05ee3c3679659a
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
 PATH
   remote: .
   specs:
-    llm_memory (0.1.10)
+    llm_memory (0.1.12)
       redis (~> 4.6.0)
       ruby-openai (~> 3.7.0)
       tokenizers (~> 0.3.3)
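Only the gem's own version moves in the lockfile; its runtime dependencies (redis, ruby-openai, tokenizers) are unchanged. An application tracking the gem through Bundler would pick the release up with a standard pin:

```ruby
# Gemfile
gem "llm_memory", "~> 0.1.12"
```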
data/README.md CHANGED
@@ -54,8 +54,10 @@ end
 
 To use LLM Memory, follow these steps:
 
+If you want to use pgvector instead of Redisearch. You can use the plugin. Please check the doc and change the setup steps(2&3)
+
 1. Install the gem: gem install llm_memory
-2. Set up Redis with Redisearch module enabled - Go to [Reids Cloud](https://redis.com/redis-enterprise-cloud/overview/) and get the redis url
+2. Set up Redis with Redisearch module enabled - Go to [Reids Cloud](https://redis.com/redis-enterprise-cloud/overview/) and get the redis url.
 3. Configure LLM Memory to connect to your Redis instance
 4. Use LlmMemory::Wernicke to load data from your external sources
 5. Use LlmMemory::Hippocampus to search for relevant information based on user queries
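For step 3, configuration typically lives in an initializer. A minimal sketch, assuming the configure block exposes `redis_url` and `openai_access_token` settings; the setting names and file path are illustrative, not confirmed by this diff:

```ruby
# config/initializers/llm_memory.rb (illustrative path)
LlmMemory.configure do |c|
  c.redis_url = ENV["REDIS_URL"] # the Redisearch-enabled URL from step 2 (assumed setting name)
  c.openai_access_token = ENV["OPENAI_ACCESS_TOKEN"] # assumed setting name
end
```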
@@ -71,12 +73,13 @@ docs = LlmMemory::Wernicke.load(:file, "/tmp/a_directory")
 # docs = [{
 #   content: "Hi there",
 #   metadata: {
-#     file_name: "a.txt"
+#     file_name: "a.txt",
+#     timestamp: "20201231235959"
 #   }
 # },,,]
 
 hippocampus = LlmMemory::Hippocampus.new
-hippocampus.memorize(docs)
+res = hippocampus.memorize(docs)
 
 query_str = "What is my name?"
 related_docs = hippocampus.query(query_str, limit: 3)
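The README example now captures the return value of `memorize`. This diff does not show what `res` contains, so treat the sketch below as an assumption: the Redis store change further down builds a `result` hash while writing each key, which suggests a per-key report.

```ruby
res = hippocampus.memorize(docs)
# Assumed shape: one entry per stored document, keyed by the generated
# store key (see the key-format change below).
res.each { |key, info| puts "stored #{key}" }
```

The next hunk, in the Broca class, adds a respond_with_schema helper built on OpenAI function calling: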
@@ -43,6 +43,46 @@ module LlmMemory
       end
     end
 
+    def respond_with_schema(context: {}, schema: {})
+      response_content = respond(context)
+      begin
+        response = client.chat(
+          parameters: {
+            model: "gpt-3.5-turbo-0613", # as of July 3, 2023
+            messages: [
+              {
+                role: "user",
+                content: response_content
+              }
+            ],
+            functions: [
+              {
+                name: "broca",
+                description: "Formating the content with the specified schema",
+                parameters: schema
+              }
+            ]
+          }
+        )
+        LlmMemory.logger.debug(response)
+        message = response.dig("choices", 0, "message")
+        if message["role"] == "assistant" && message["function_call"]
+          function_name = message.dig("function_call", "name")
+          args =
+            JSON.parse(
+              message.dig("function_call", "arguments"),
+              {symbolize_names: true}
+            )
+          if function_name == "broca"
+            args
+          end
+        end
+      rescue => e
+        LlmMemory.logger.info(e.inspect)
+        nil
+      end
+    end
+
     def generate_prompt(args)
       erb = ERB.new(@prompt)
       erb.result_with_hash(args)
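`respond_with_schema` first runs the regular `respond` call, then sends the result through a second chat request whose single function definition, named "broca", carries the caller's JSON Schema; the model's function-call arguments come back parsed with symbolized keys, and any failure is logged and swallowed, returning nil. A sketch of a call site follows; Broca's constructor arguments, the prompt, and the schema are illustrative assumptions, not taken from this diff:

```ruby
# Illustrative usage; the constructor arguments are assumptions.
broca = LlmMemory::Broca.new(prompt: "Extract facts: <%= text %>", model: "gpt-3.5-turbo")

schema = {
  type: "object",
  properties: {
    name: {type: "string", description: "The person's name"},
    age: {type: "integer"}
  },
  required: ["name"]
}

res = broca.respond_with_schema(
  context: {text: "My name is Sho and I am 40 years old."},
  schema: schema
)
# => e.g. {name: "Sho", age: 40} — symbol keys, per symbolize_names: true
```

The file loader gains a timestamp alongside the existing file name: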
@@ -14,11 +14,13 @@ module LlmMemory
 
         file_name = File.basename(file_path)
         file_content = File.read(file_path)
+        ctime = File.ctime(file_path)
 
         files_array << {
           content: file_content,
           metadata: {
-            file_name: file_name
+            file_name: file_name,
+            timestamp: ctime.strftime("%Y%m%d%H%M%S") # YYMMDDHHmmss
          }
        }
      end
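Each loaded file now carries its change time (ctime) as a compact timestamp. One nit the diff preserves: the inline comment says `YYMMDDHHmmss`, but `%Y` produces a four-digit year, so the actual format is the 14-character `YYYYMMDDHHMMSS`; standard Ruby strftime shows it:

```ruby
ctime = Time.new(2020, 12, 31, 23, 59, 59)
ctime.strftime("%Y%m%d%H%M%S") # => "20201231235959" (14 chars)
```

That timestamp feeds directly into the Redis store's new key scheme: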
@@ -79,7 +79,12 @@ module LlmMemory
       result = {}
       @client.pipelined do |pipeline|
         data.each_with_index do |d, i|
-          key = "#{@index_name}:#{SecureRandom.uuid.delete("-")}"
+          key = @index_name # index_name:create_time:metadata_timestamp:uuid
+          timestamp = d.dig(:metadata, :timestamp)
+          key += ":#{Time.now.strftime("%Y%m%d%H%M%S")}"
+          key += ":#{timestamp}"
+          key += ":#{SecureRandom.hex(8)}"
+
           meta_json = d[:metadata].nil? ? "" : d[:metadata].to_json # serialize
           vector_value = d[:vector].map(&:to_f).pack("f*")
           pipeline.hset(
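Store keys therefore change from `index_name:<uuid>` to `index_name:<create_time>:<metadata_timestamp>:<hex>`, which makes them sortable by write time. A sketch of the resulting shape, with an illustrative index name; note that a document without `:timestamp` metadata interpolates nil as an empty string, leaving a double colon in the key:

```ruby
require "securerandom"

index_name = "llm_memory" # illustrative
timestamp = "20201231235959" # from the loader metadata above
key = index_name
key += ":#{Time.now.strftime("%Y%m%d%H%M%S")}"
key += ":#{timestamp}"
key += ":#{SecureRandom.hex(8)}"
# => e.g. "llm_memory:20230727120000:20201231235959:9f1c2b3a4d5e6f70"
```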
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module LlmMemory
-  VERSION = "0.1.10"
+  VERSION = "0.1.12"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: llm_memory
 version: !ruby/object:Gem::Version
-  version: 0.1.10
+  version: 0.1.12
 platform: ruby
 authors:
 - Shohei Kameda
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-06-02 00:00:00.000000000 Z
+date: 2023-07-27 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: tokenizers