langchainrb 0.6.6 → 0.6.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: a9949f3ffd0338c90274f13b9862b0a6b9ec7b717b14b7ccaa8b6b8e0115f621
- data.tar.gz: 43ebcb26d51b286278d5098ba50defef0c8bd1a897fa744c4519cfa10bdfdf58
+ metadata.gz: 5e8dc16375bc94045a728b37d6d388a8670699c35af4213bc4d16163fbb0047c
+ data.tar.gz: c9695ac8ce5d6bf351afd3148d281a26733f37a43674fae71c3c81c7530cdc2e
  SHA512:
- metadata.gz: c95f6e104aaa9a8dab30c9e78e342fdf960ccfef332a2737218f3cc186521369e6f03216d5ccd08329d5110cd15ef10e10a3f460caecc02dd50e32b1b60ff8b3
- data.tar.gz: c8c059c760b361975ea7ba8eb8a7aa24c1dd7dde5264d7d8bdf20da4f7ec80fe3f1cf4f60dd16dd8028638f3335b1e1632b655ae6c4bdd01912d33371892b5a3
+ metadata.gz: 67a532ea9c46961a8636f1332590876f8d5d26e67384789c9a4ab7907c4a2150240a7c9d63ae188d19a269e345ff0dfafcf24ab1cb4a987e4eab5cc2439bbf06
+ data.tar.gz: 333888fdd41e2ce21b369fc0844ca3b181f2db603833bf8cbf8a46e59b8049dcb31a216fb4adcb12fe5a69dc33ec21725131e6e8ce8e7e7633814f5dc7c51c7b
data/CHANGELOG.md CHANGED
@@ -1,5 +1,9 @@
  ## [Unreleased]
 
+ ## [0.6.7] - 2023-07-19
+ - Support for OpenAI functions
+ - Streaming vectorsearch ask() responses
+
  ## [0.6.6] - 2023-07-13
  - Langchain::Chunker::RecursiveText
  - Fixes
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
  PATH
    remote: .
    specs:
-     langchainrb (0.6.6)
+     langchainrb (0.6.7)
        baran (~> 0.1.6)
        colorize (~> 0.8.1)
        json-schema (~> 4.0.0)
@@ -133,7 +133,7 @@ GEM
        faraday (>= 1.0)
        faraday_middleware
        graphql-client
-     graphql (2.0.23)
+     graphql (2.0.24)
      graphql-client (0.18.0)
        activesupport (>= 3.0)
        graphql
@@ -264,7 +264,7 @@ GEM
        rubocop (>= 1.7.0, < 2.0)
        rubocop-ast (>= 0.4.0)
      ruby-next-core (0.15.3)
-     ruby-openai (4.0.0)
+     ruby-openai (4.1.0)
        faraday (>= 1)
        faraday-multipart (>= 1)
      ruby-progressbar (1.13.0)
@@ -299,7 +299,7 @@ GEM
      tzinfo (2.0.6)
        concurrent-ruby (~> 1.0)
      unicode-display_width (2.4.2)
-     weaviate-ruby (0.8.3)
+     weaviate-ruby (0.8.4)
        faraday (~> 1)
        faraday_middleware (~> 1)
        graphlient (~> 0.6.0)
@@ -344,7 +344,7 @@ DEPENDENCIES
    roo (~> 2.10.0)
    rspec (~> 3.0)
    rubocop
-   ruby-openai (~> 4.0.0)
+   ruby-openai (~> 4.1.0)
    safe_ruby (~> 1.0.4)
    sequel (~> 5.68.0)
    standardrb
data/README.md CHANGED
@@ -145,6 +145,27 @@ openai.embed(text: "foo bar")
  openai.complete(prompt: "What is the meaning of life?")
  ```
 
+ ##### OpenAI function calls support
+
+ Attach OpenAI function definitions to a conversation:
+
+ ```ruby
+ chat = Langchain::Conversation.new(llm: openai)
+ ```
+ ```ruby
+ chat.set_context("You are the climate bot")
+ chat.set_functions(functions)
+ ```
+
+ Or set them directly on a vectorsearch client's LLM, e.g. Qdrant:
+
+ ```ruby
+ client.llm.functions = functions
+ client.llm.complete_response = true
+ ```
+
+ With `complete_response` enabled, the LLM returns the entire `choices` payload from the GPT response instead of just the message content (see the sketch below).
+
  #### Cohere
  Add `gem "cohere-ruby", "~> 0.9.3"` to your Gemfile.
 
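With the full payload in hand, the function call the model requested has to be extracted manually. A minimal sketch of that step, assuming `response` holds the payload returned by `chat.message(...)` above and follows the standard OpenAI chat-completion shape (`response` and the printing logic are illustrative, not part of the gem):

```ruby
require "json"

# Assumption: `response` is the raw payload returned when
# complete_response is enabled (standard OpenAI chat-completion shape).
message = response.dig("choices", 0, "message")

if (function_call = message["function_call"])
  # OpenAI serializes the function arguments as a JSON string.
  args = JSON.parse(function_call["arguments"])
  puts "Call #{function_call["name"]} with #{args.inspect}"
else
  puts message["content"]
end
```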
data/examples/open_ai_function_calls.rb ADDED
@@ -0,0 +1,39 @@
+ require "langchain"
+ require "dotenv/load"
+
+ functions = [
+   {
+     name: "get_current_weather",
+     description: "Get the current weather in a given location",
+     parameters: {
+       type: :object,
+       properties: {
+         location: {
+           type: :string,
+           description: "The city and state, e.g. San Francisco, CA"
+         },
+         unit: {
+           type: "string",
+           enum: %w[celsius fahrenheit]
+         }
+       },
+       required: ["location"]
+     }
+   }
+ ]
+
+ openai = Langchain::LLM::OpenAI.new(
+   api_key: ENV["OPENAI_API_KEY"],
+   default_options: {
+     chat_completion_model_name: "gpt-3.5-turbo-16k"
+   }
+ )
+
+ chat = Langchain::Conversation.new(llm: openai)
+
+ chat.set_context("You are the climate bot")
+ chat.set_functions(functions)
+
+ user_message = "what's the weather in NYC?"
+
+ puts chat.message(user_message)
data/examples/open_ai_qdrant_function_calls.rb ADDED
@@ -0,0 +1,39 @@
+ require "langchain"
+ require "dotenv/load"
+
+ functions = [
+   {
+     name: "create_rails_controller",
+     description: "gives a command to create a rails controller",
+     parameters: {
+       type: :object,
+       properties: {
+         controller_name: {
+           type: :string,
+           description: "the controller name, e.g. users_controller"
+         }
+       },
+       required: ["controller_name"]
+     }
+   }
+ ]
+
+ openai = Langchain::LLM::OpenAI.new(
+   api_key: ENV["OPENAI_API_KEY"],
+   default_options: {
+     chat_completion_model_name: "gpt-3.5-turbo-16k"
+   }
+ )
+
+ client = Langchain::Vectorsearch::Qdrant.new(
+   url: ENV["QDRANT_URL"],
+   api_key: ENV["QDRANT_API_KEY"],
+   index_name: ENV["QDRANT_INDEX"],
+   llm: openai
+ )
+
+ client.llm.functions = functions
+ client.llm.complete_response = true
+ chat = client.ask(question: "create a users_controller")
+
+ puts chat
data/lib/langchain/conversation.rb CHANGED
@@ -37,6 +37,11 @@ module Langchain
      @block = block
    end
 
+   def set_functions(functions)
+     @llm.functions = functions
+     @llm.complete_response = true
+   end
+
    # Set the context of the conversation. Usually used to set the model's persona.
    # @param message [String] The context of the conversation
    def set_context(message)
data/lib/langchain/llm/openai.rb CHANGED
@@ -19,6 +19,8 @@ module Langchain::LLM
    }.freeze
    LENGTH_VALIDATOR = Langchain::Utils::TokenLength::OpenAIValidator
 
+   attr_accessor :functions, :complete_response
+
    def initialize(api_key:, llm_options: {}, default_options: {})
      depends_on "ruby-openai"
      require "openai"
@@ -115,20 +117,25 @@ module Langchain::LLM
 
      parameters = compose_parameters @defaults[:chat_completion_model_name], options
      parameters[:messages] = compose_chat_messages(prompt: prompt, messages: messages, context: context, examples: examples)
-     parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
+
+     if functions
+       parameters[:functions] = functions
+     else
+       parameters[:max_tokens] = validate_max_tokens(parameters[:messages], parameters[:model])
+     end
 
      if (streaming = block_given?)
        parameters[:stream] = proc do |chunk, _bytesize|
-         yield chunk.dig("choices", 0, "delta", "content")
+         yield chunk if complete_response
+         yield chunk.dig("choices", 0, "delta", "content") if !complete_response
        end
      end
 
      response = client.chat(parameters: parameters)
-
      raise Langchain::LLM::ApiError.new "Chat completion failed: #{response.dig("error", "message")}" if !response.empty? && response.dig("error")
-
      unless streaming
-       response.dig("choices", 0, "message", "content")
+       return response.dig("choices", 0, "message", "content") if !complete_response
+       return response if complete_response
      end
    end
 
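Given the streaming branch above, a block passed to `chat` receives either whole chunks (when `complete_response` is set) or just the delta strings. A minimal token-by-token streaming sketch, reusing the `openai` client from the examples and assuming `complete_response` is left off:

```ruby
# Each yielded value is chunk.dig("choices", 0, "delta", "content"),
# which can be nil for chunks that carry no text (e.g. the role preamble).
openai.chat(prompt: "Write a haiku about Ruby") do |token|
  print(token) unless token.nil?
end
```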
@@ -113,8 +113,9 @@ module Langchain::Vectorsearch
 
    # Ask a question and return the answer
    # @param question [String] The question to ask
+   # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-   def ask(question:)
+   def ask(question:, &block)
      search_results = similarity_search(query: question)
 
      context = search_results.map do |result|
@@ -125,7 +126,7 @@ module Langchain::Vectorsearch
 
      prompt = generate_prompt(question: question, context: context)
 
-     llm.chat(prompt: prompt)
+     llm.chat(prompt: prompt, &block)
    end
 
    private
@@ -136,8 +136,9 @@ module Langchain::Vectorsearch
 
    # Ask a question and return the answer
    # @param question [String] The question to ask
+   # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-   def ask(question:)
+   def ask(question:, &block)
      search_results = similarity_search(query: question)
 
      context = search_results.map do |result|
@@ -147,7 +148,7 @@ module Langchain::Vectorsearch
 
      prompt = generate_prompt(question: question, context: context)
 
-     llm.chat(prompt: prompt)
+     llm.chat(prompt: prompt, &block)
    end
  end
end
data/lib/langchain/vectorsearch/pinecone.rb CHANGED
@@ -139,8 +139,9 @@ module Langchain::Vectorsearch
    # @param question [String] The question to ask
    # @param namespace [String] The namespace to search in
    # @param filter [String] The filter to use
+   # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-   def ask(question:, namespace: "", filter: nil)
+   def ask(question:, namespace: "", filter: nil, &block)
      search_results = similarity_search(query: question, namespace: namespace, filter: filter)
 
      context = search_results.map do |result|
@@ -150,7 +151,7 @@ module Langchain::Vectorsearch
 
      prompt = generate_prompt(question: question, context: context)
 
-     llm.chat(prompt: prompt)
+     llm.chat(prompt: prompt, &block)
    end
 
    # Pinecone index
@@ -112,8 +112,9 @@ module Langchain::Vectorsearch
 
    # Ask a question and return the answer
    # @param question [String] The question to ask
+   # @yield [String] Stream responses back one String at a time
    # @return [String] The answer to the question
-   def ask(question:)
+   def ask(question:, &block)
      search_results = similarity_search(query: question)
 
      context = search_results.map do |result|
@@ -123,7 +124,7 @@ module Langchain::Vectorsearch
 
      prompt = generate_prompt(question: question, context: context)
 
-     llm.chat(prompt: prompt)
+     llm.chat(prompt: prompt, &block)
    end
  end
end
@@ -124,8 +124,9 @@ module Langchain::Vectorsearch
 
    # Ask a question and return the answer
    # @param question [String] The question to ask
+   # @yield [String] Stream responses back one String at a time
    # @return [Hash] The answer
-   def ask(question:)
+   def ask(question:, &block)
      search_results = similarity_search(query: question)
 
      context = search_results.map do |result|
@@ -135,7 +136,7 @@ module Langchain::Vectorsearch
 
      prompt = generate_prompt(question: question, context: context)
 
-     llm.chat(prompt: prompt)
+     llm.chat(prompt: prompt, &block)
    end
 
    private
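Since each `ask` now forwards its block through to `llm.chat`, streaming works end to end from the vectorsearch client. A small usage sketch, reusing the Qdrant `client` from the earlier example:

```ruby
# Stream the answer chunk by chunk instead of waiting for the full reply.
client.ask(question: "create a users_controller") do |chunk|
  print(chunk) unless chunk.nil?
end
```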
data/lib/langchain/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true
 
  module Langchain
-   VERSION = "0.6.6"
+   VERSION = "0.6.7"
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: langchainrb
  version: !ruby/object:Gem::Version
-   version: 0.6.6
+   version: 0.6.7
  platform: ruby
  authors:
  - Andrei Bondarev
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2023-07-14 00:00:00.000000000 Z
+ date: 2023-07-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: baran
@@ -408,14 +408,14 @@ dependencies:
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 4.0.0
+         version: 4.1.0
    type: :development
    prerelease: false
    version_requirements: !ruby/object:Gem::Requirement
      requirements:
      - - "~>"
        - !ruby/object:Gem::Version
-         version: 4.0.0
+         version: 4.1.0
  - !ruby/object:Gem::Dependency
    name: safe_ruby
    requirement: !ruby/object:Gem::Requirement
@@ -493,6 +493,8 @@ files:
  - examples/create_and_manage_prompt_templates.rb
  - examples/create_and_manage_prompt_templates_using_structured_output_parser.rb
  - examples/llama_cpp.rb
+ - examples/open_ai_function_calls.rb
+ - examples/open_ai_qdrant_function_calls.rb
  - examples/pdf_store_and_query_with_chroma.rb
  - examples/store_and_query_with_pinecone.rb
  - examples/store_and_query_with_qdrant.rb
@@ -589,7 +591,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: '0'
  requirements: []
- rubygems_version: 3.2.3
+ rubygems_version: 3.3.7
  signing_key:
  specification_version: 4
  summary: Build LLM-backed Ruby applications with Ruby's LangChain