langchainrb 0.6.14 → 0.6.15
- checksums.yaml +4 -4
- data/CHANGELOG.md +4 -0
- data/README.md +12 -0
- data/lib/langchain/llm/ollama.rb +79 -0
- data/lib/langchain/utils/token_length/ollama_validator.rb +16 -0
- data/lib/langchain/version.rb +1 -1
- metadata +7 -5
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 804ffbb08baabf8d2b0372e6893ca31a8c0933425dcabc78b2b48381b045d0c9
+  data.tar.gz: a53ed993838ab79c343618b445533c285f35e186c3a1f4412f40f7da12b9911b
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: d4aa19658c6c6ffdd5268c6ab83abe3ba17c3bb84b3880a6347bb67fa5c1b4bf0e9304b22c477b27401394450b692d0ee545f5745c6e3a2ec2e5e2ba50779584
+  data.tar.gz: b1c918b8d28e86b11cde99e1b976cbffcca36dbc8ac354e08ce72d9056cc5eafd6ddb601f92edfcd28907687c2247c417477173746405cd1ca2b2ec0fc51df83
data/CHANGELOG.md
CHANGED
data/README.md
CHANGED
@@ -210,6 +210,18 @@ anthropic = Langchain::LLM::Anthropic.new(api_key: ENV["ANTHROPIC_API_KEY"])
 anthropic.complete(prompt: "What is the meaning of life?")
 ```
 
+#### Ollama
+```ruby
+ollama = Langchain::LLM::Ollama.new(url: ENV["OLLAMA_URL"])
+```
+
+```ruby
+ollama.complete(prompt: "What is the meaning of life?")
+```
+```ruby
+ollama.embed(text: "Hello world!")
+```
+
 ### Using Prompts 📋
 
 #### Prompt Templates
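The README snippets above assume `OLLAMA_URL` points at a running Ollama server. A minimal sketch of wiring that up end to end; the `http://localhost:11434` fallback is an assumption (Ollama's usual local default), not something stated in this diff:

```ruby
require "langchain"

# Assumption: an Ollama server is reachable locally; 11434 is Ollama's usual default port.
ollama = Langchain::LLM::Ollama.new(url: ENV.fetch("OLLAMA_URL", "http://localhost:11434"))

ollama.complete(prompt: "What is the meaning of life?") # => String built from the streamed response chunks
ollama.embed(text: "Hello world!")                      # => the "embedding" array returned by api/embeddings
```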
data/lib/langchain/llm/ollama.rb
ADDED
@@ -0,0 +1,79 @@
+# frozen_string_literal: true
+
+module Langchain::LLM
+  # Interface to Ollama API.
+  # Available models: https://ollama.ai/library
+  #
+  # Usage:
+  #     ollama = Langchain::LLM::Ollama.new(url: ENV["OLLAMA_URL"])
+  #
+  class Ollama < Base
+    attr_reader :url
+
+    DEFAULTS = {
+      temperature: 0.0,
+      completion_model_name: "llama2",
+      embeddings_model_name: "llama2"
+    }.freeze
+
+    # Initialize the Ollama client
+    # @param url [String] The URL of the Ollama instance
+    def initialize(url:)
+      @url = url
+    end
+
+    # Generate the completion for a given prompt
+    # @param prompt [String] The prompt to complete
+    # @param model [String] The model to use
+    # @param options [Hash] The options to use (https://github.com/jmorganca/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values)
+    # @return [String] The completed prompt
+    def complete(prompt:, model: nil, **options)
+      response = +""
+
+      client.post("api/generate") do |req|
+        req.body = {}
+        req.body["prompt"] = prompt
+        req.body["model"] = model || DEFAULTS[:completion_model_name]
+
+        req.body["options"] = options if options.any?
+
+        # TODO: Implement streaming support when a &block is passed in
+        req.options.on_data = proc do |chunk, size|
+          json_chunk = JSON.parse(chunk)
+
+          unless json_chunk.dig("done")
+            response.to_s << JSON.parse(chunk).dig("response")
+          end
+        end
+      end
+
+      response
+    end
+
+    # Generate an embedding for a given text
+    # @param text [String] The text to generate an embedding for
+    # @param model [String] The model to use
+    # @param options [Hash] The options to use (
+    def embed(text:, model: nil, **options)
+      response = client.post("api/embeddings") do |req|
+        req.body = {}
+        req.body["prompt"] = text
+        req.body["model"] = model || DEFAULTS[:embeddings_model_name]
+
+        req.body["options"] = options if options.any?
+      end
+
+      response.body.dig("embedding")
+    end
+
+    private
+
+    def client
+      @client ||= Faraday.new(url: url) do |conn|
+        conn.request :json
+        conn.response :json
+        conn.response :raise_error
+      end
+    end
+  end
+end
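Because `complete` forwards its keyword splat into the request's `"options"` field, callers can tune the model per call. A hedged sketch, assuming a reachable Ollama server and that `temperature`/`top_p` are valid runtime options for the chosen model (see the modelfile docs linked in the `@param options` comment):

```ruby
# Sketch only: assumes OLLAMA_URL points at a running Ollama server.
ollama = Langchain::LLM::Ollama.new(url: ENV["OLLAMA_URL"])

# model: overrides DEFAULTS[:completion_model_name]; the remaining keywords are sent
# verbatim as the "options" payload of the api/generate request.
ollama.complete(
  prompt: "Summarize the plot of Hamlet in one sentence.",
  model: "llama2",
  temperature: 0.2,
  top_p: 0.9
)
```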
data/lib/langchain/utils/token_length/ollama_validator.rb
ADDED
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+require "tiktoken_ruby"
+
+module Langchain
+  module Utils
+    module TokenLength
+      #
+      # This class is meant to validate the length of the text passed in to Ollama.
+      # It is used to validate the token length before the API call is made
+      #
+      class OllamaValidator < BaseValidator
+      end
+    end
+  end
+end
data/lib/langchain/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.6.14
+  version: 0.6.15
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2023-09-
+date: 2023-09-22 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: baran
@@ -478,14 +478,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.
+        version: 0.8.7
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 0.8.
+        version: 0.8.7
 - !ruby/object:Gem::Dependency
   name: wikipedia-client
   requirement: !ruby/object:Gem::Requirement
@@ -535,6 +535,7 @@ files:
 - lib/langchain/llm/google_palm.rb
 - lib/langchain/llm/hugging_face.rb
 - lib/langchain/llm/llama_cpp.rb
+- lib/langchain/llm/ollama.rb
 - lib/langchain/llm/openai.rb
 - lib/langchain/llm/prompts/summarize_template.yaml
 - lib/langchain/llm/replicate.rb
@@ -571,6 +572,7 @@ files:
 - lib/langchain/utils/token_length/base_validator.rb
 - lib/langchain/utils/token_length/cohere_validator.rb
 - lib/langchain/utils/token_length/google_palm_validator.rb
+- lib/langchain/utils/token_length/ollama_validator.rb
 - lib/langchain/utils/token_length/openai_validator.rb
 - lib/langchain/utils/token_length/token_limit_exceeded.rb
 - lib/langchain/vectorsearch/base.rb
@@ -606,7 +608,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
     - !ruby/object:Gem::Version
       version: '0'
 requirements: []
-rubygems_version: 3.
+rubygems_version: 3.3.7
 signing_key:
 specification_version: 4
 summary: Build LLM-backed Ruby applications with Ruby's LangChain