langchainrb 0.9.1 → 0.9.2

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 4c9d0655d58ddff57b9c9163065908dd17d91c6bffc5b146bf7fc01b4c9fb96d
-  data.tar.gz: ee82c644b7e38503fa0587ade2af0447819863303e9fa3755dce1676d68ad5f7
+  metadata.gz: cdaafd0889d6666c7aa39d8bee71763f8baf86e0566f9e87312fc6033d07709d
+  data.tar.gz: 7acc9f122aed92eab9ab2ac1d50f7ee59c0e24030335dc15878769b7664ffc6b
 SHA512:
-  metadata.gz: 05faddd31c819e6d351ed99e05353e462341ab1744769a1b3a9932c37de4c68907b54f79bcd65f6b652d954d5301e80055c5ee8c57b66a3917256918c51cc61f
-  data.tar.gz: c2fed05da349fdc9ebd9990ea5c2d5c70a68241c491c903c631eb0584bce01da17bab7b04c59fa9fded8282547798219b2a0b951c1c5a8d1062d08f2a930062c
+  metadata.gz: b76f62411f75eccba98371791e63b18f0f22225de1af7449cde40680acbd1dc09c4b5a6b7eeb1ac667dd9b029c52534fbfbe10a5fa465fa0859059ddb32a400c
+  data.tar.gz: 64052a22206ece081a2fd9fbf0233ba47f40539743b1def6d7fc9beeb31102f5559878514cbc0d503fd13681e5efbb34c2755cacbd62b1b01b67876bc9d6237e
data/CHANGELOG.md CHANGED
@@ -1,5 +1,9 @@
 ## [Unreleased]
 
+## [0.9.2]
+- Fix vectorsearch#ask methods
+- Bump cohere-ruby gem
+
 ## [0.9.1]
 - Add support for new OpenAI models
 - Add Ollama#chat method
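For context on the `Fix vectorsearch#ask methods` entry: langchainrb's unified `chat` interface expects a `messages:` array of role/content hashes rather than a bare `prompt:` keyword, and the hunks below update each vectorsearch adapter's `ask` method accordingly. A minimal, hedged usage sketch of the fixed method (Qdrant chosen arbitrarily; the constructor options and environment variables here are illustrative, not prescribed by this diff):

```ruby
require "langchain"

# Illustrative setup; any Langchain::Vectorsearch adapter with an #ask
# method behaves the same way after this release.
llm = Langchain::LLM::OpenAI.new(api_key: ENV["OPENAI_API_KEY"])
db = Langchain::Vectorsearch::Qdrant.new(
  url: ENV["QDRANT_URL"],
  api_key: ENV["QDRANT_API_KEY"],
  index_name: "documents",
  llm: llm
)

# As of 0.9.2, #ask wraps the generated RAG prompt in a single user
# message before calling the LLM:
#   messages = [{role: "user", content: prompt}]
#   llm.chat(messages: messages)
response = db.ask(question: "What do the indexed documents say about X?")
puts response.chat_completion
response.context # => the retrieved context spliced into the RAG prompt
```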
@@ -126,7 +126,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -141,7 +141,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -139,7 +139,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -151,7 +151,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -148,7 +148,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -181,7 +181,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -137,7 +137,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
@@ -137,7 +137,9 @@ module Langchain::Vectorsearch
 
       prompt = generate_rag_prompt(question: question, context: context)
 
-      response = llm.chat(prompt: prompt, &block)
+      messages = [{role: "user", content: prompt}]
+      response = llm.chat(messages: messages, &block)
+
       response.context = context
       response
     end
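The eight hunks above apply the same two-line change to each vectorsearch adapter's `ask` method. Since `ask` still forwards its block to `llm.chat`, streaming callers keep working; a hedged sketch, reusing `db` from the earlier example (the chunk shape shown is an assumption for the OpenAI adapter, and other adapters may yield a different shape):

```ruby
# Hedged streaming sketch: #ask passes &block straight through to llm.chat.
# The block is assumed to receive raw streamed chunk hashes here; dig may
# return nil on terminal chunks, which print handles harmlessly.
db.ask(question: "Summarize the indexed documents") do |chunk|
  print chunk.dig("choices", 0, "delta", "content")
end
```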
lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Langchain
-  VERSION = "0.9.1"
+  VERSION = "0.9.2"
 end
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.9.1
+  version: 0.9.2
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-02-06 00:00:00.000000000 Z
+date: 2024-02-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -240,14 +240,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.7
+        version: 0.9.8
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.9.7
+        version: 0.9.8
 - !ruby/object:Gem::Dependency
   name: docx
   requirement: !ruby/object:Gem::Requirement
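Per the changelog's `Bump cohere-ruby gem` entry, the development-dependency bump above (0.9.7 → 0.9.8) is presumably cohere-ruby; the dependency's `name:` line falls just outside the hunk, so that identification comes from the changelog, not the metadata. For reference, `"~> 0.9.8"` is RubyGems' pessimistic version constraint:

```ruby
# Gemfile sketch; the gem name is assumed from the changelog entry.
# "~> 0.9.8" permits any version >= 0.9.8 and < 0.10.0.
gem "cohere-ruby", "~> 0.9.8"
```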