langchainrb 0.10.0 → 0.10.3
- checksums.yaml +4 -4
- data/CHANGELOG.md +12 -0
- data/README.md +7 -0
- data/lib/langchain/llm/mistral_ai.rb +68 -0
- data/lib/langchain/llm/ollama.rb +7 -2
- data/lib/langchain/llm/response/mistral_ai_response.rb +39 -0
- data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.rb +30 -34
- data/lib/langchain/vectorsearch/pgvector.rb +7 -0
- data/lib/langchain/version.rb +1 -1
- data/lib/langchain.rb +7 -0
- metadata +23 -7
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 14c9c33c905c2df744cc7006735dbc55b86b767cfff4b655b03103072c804bb1
+  data.tar.gz: e5076e7322ff375b16463ff14e5c504d7b106e05874f6860f9669a2269346d61
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: f590a6f2f2adec60ee777a94886e178abfaf0789c0141bbd278d0a008d9ace75cb9f69d6afdf3ae3d58f7307246acfaf7f0fef43d43065bf8c1d83aa0e51562d
+  data.tar.gz: 3fef869c24edf5a14ea4ddbdae8af919b2082e8f95b676ca36dd9e606ba67812e88dcd6d67f15156b9cce8daaaab26acd9c8602e3c894199dd7eae43d0926443
data/CHANGELOG.md
CHANGED
@@ -1,5 +1,17 @@
 ## [Unreleased]
 
+## [0.10.3]
+- Bump dependencies
+- Ollama#complete fix
+- Misc fixes
+
+## [0.10.2]
+- New Langchain::LLM::MistralAI
+- Drop Ruby 3.0 support
+- Fixes Zeitwerk::NameError
+
+## [0.10.1] - GEM VERSION YANKED
+
 ## [0.10.0]
 - Delete `Langchain::Conversation` class
 
data/README.md
CHANGED
@@ -29,6 +29,7 @@ Available for paid consulting engagements! [Email me](mailto:andrei@sourcelabs.i
 - [Evaluations](#evaluations-evals)
 - [Examples](#examples)
 - [Logging](#logging)
+- [Problems](#problems)
 - [Development](#development)
 - [Discord](#discord)
 
@@ -501,6 +502,12 @@ To show all log messages:
 Langchain.logger.level = :debug
 ```
 
+## Problems
+If you're having issues installing the `unicode` gem required by `pragmatic_segmenter`, try running:
+```bash
+gem install unicode -- --with-cflags="-Wno-incompatible-function-pointer-types"
+```
+
 ## Development
 
 1. `git clone https://github.com/andreibondarev/langchainrb.git`
data/lib/langchain/llm/mistral_ai.rb ADDED
@@ -0,0 +1,68 @@
+# frozen_string_literal: true
+
+module Langchain::LLM
+  # Gem requirements:
+  #     gem "mistral-ai"
+  #
+  # Usage:
+  #     llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])
+  class MistralAI < Base
+    DEFAULTS = {
+      chat_completion_model_name: "mistral-medium",
+      embeddings_model_name: "mistral-embed"
+    }.freeze
+
+    attr_reader :defaults
+
+    def initialize(api_key:, default_options: {})
+      depends_on "mistral-ai"
+
+      @client = Mistral.new(
+        credentials: {api_key: api_key},
+        options: {server_sent_events: true}
+      )
+
+      @defaults = DEFAULTS.merge(default_options)
+    end
+
+    def chat(
+      messages:,
+      model: defaults[:chat_completion_model_name],
+      temperature: nil,
+      top_p: nil,
+      max_tokens: nil,
+      safe_prompt: nil,
+      random_seed: nil
+    )
+      params = {
+        messages: messages,
+        model: model
+      }
+      params[:temperature] = temperature if temperature
+      params[:top_p] = top_p if top_p
+      params[:max_tokens] = max_tokens if max_tokens
+      params[:safe_prompt] = safe_prompt if safe_prompt
+      params[:random_seed] = random_seed if random_seed
+
+      response = client.chat_completions(params)
+
+      Langchain::LLM::MistralAIResponse.new(response.to_h)
+    end
+
+    def embed(
+      text:,
+      model: defaults[:embeddings_model_name],
+      encoding_format: nil
+    )
+      params = {
+        input: text,
+        model: model
+      }
+      params[:encoding_format] = encoding_format if encoding_format
+
+      response = client.embeddings(params)
+
+      Langchain::LLM::MistralAIResponse.new(response.to_h)
+    end
+  end
+end
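The header comment shows the intended entry point; a minimal usage sketch based on it (assuming the `mistral-ai` gem is installed and `MISTRAL_AI_API_KEY` is set; the message content and printed fields are illustrative):

```ruby
require "langchain"

llm = Langchain::LLM::MistralAI.new(api_key: ENV["MISTRAL_AI_API_KEY"])

# chat takes an array of role/content hashes and wraps the raw hash
# in a MistralAIResponse (see the response class below).
response = llm.chat(messages: [{role: "user", content: "Say hello in French."}])
puts response.chat_completion

# embed defaults to the "mistral-embed" model.
embedding = llm.embed(text: "Hello, world")
puts embedding.embedding.first(3).inspect
```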
data/lib/langchain/llm/ollama.rb
CHANGED
@@ -131,9 +131,14 @@ module Langchain::LLM
 
       req.options.on_data = proc do |chunk, size|
         chunk.split("\n").each do |line_chunk|
-          json_chunk = JSON.parse(line_chunk)
+          json_chunk = begin
+            JSON.parse(line_chunk)
+          # In some instances the chunk exceeds the buffer size and the JSON parser fails
+          rescue JSON::ParserError
+            nil
+          end
 
-          response += json_chunk.dig("response")
+          response += json_chunk.dig("response") unless json_chunk.blank?
         end
 
         yield json_chunk, size if block
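The fix wraps each parsed line in `begin/rescue` so a streamed chunk that splits a JSON object mid-line no longer raises; the partial line is skipped and streaming continues. A standalone sketch of the same pattern (the chunk data is fabricated for illustration; the real code additionally uses ActiveSupport's `blank?`):

```ruby
require "json"

# Two streamed chunks; the second JSON object is split across them,
# as happens when a chunk exceeds the buffer size.
chunks = ["{\"response\":\"Hel\"}\n{\"respo", "nse\":\"llo\"}"]

response = +""
chunks.each do |chunk|
  chunk.split("\n").each do |line_chunk|
    json_chunk = begin
      JSON.parse(line_chunk)
    rescue JSON::ParserError
      nil # a partial line is skipped instead of raising
    end

    response += json_chunk["response"] unless json_chunk.nil?
  end
end

puts response # => "Hel" -- the split object is dropped, but the stream survives
```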
data/lib/langchain/llm/response/mistral_ai_response.rb ADDED
@@ -0,0 +1,39 @@
+# frozen_string_literal: true
+
+module Langchain::LLM
+  class MistralAIResponse < BaseResponse
+    def model
+      raw_response["model"]
+    end
+
+    def chat_completion
+      raw_response.dig("choices", 0, "message", "content")
+    end
+
+    def role
+      raw_response.dig("choices", 0, "message", "role")
+    end
+
+    def embedding
+      raw_response.dig("data", 0, "embedding")
+    end
+
+    def prompt_tokens
+      raw_response.dig("usage", "prompt_tokens")
+    end
+
+    def total_tokens
+      raw_response.dig("usage", "total_tokens")
+    end
+
+    def completion_tokens
+      raw_response.dig("usage", "completion_tokens")
+    end
+
+    def created_at
+      if raw_response.dig("created_at")
+        Time.at(raw_response.dig("created_at"))
+      end
+    end
+  end
+end
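Since the wrapper only digs into the raw hash, its accessors can be exercised with a hand-built payload; the values below are fabricated to mirror the Mistral chat-completion shape:

```ruby
raw = {
  "model" => "mistral-medium",
  "choices" => [{"message" => {"role" => "assistant", "content" => "Bonjour!"}}],
  "usage" => {"prompt_tokens" => 5, "completion_tokens" => 3, "total_tokens" => 8}
}

response = Langchain::LLM::MistralAIResponse.new(raw)
response.chat_completion # => "Bonjour!"
response.role            # => "assistant"
response.total_tokens    # => 8
```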
data/lib/langchain/tool/ruby_code_interpreter/ruby_code_interpreter.rb CHANGED
@@ -1,45 +1,41 @@
 # frozen_string_literal: true
 
-#
-NAME = "ruby_code_interpreter"
-ANNOTATIONS_PATH = Langchain.root.join("./langchain/tool/#{NAME}/#{NAME}.json").to_path
+module Langchain::Tool
+  class RubyCodeInterpreter < Base
+    #
+    # A tool that executes Ruby code in a sandboxed environment.
+    #
+    # Gem requirements:
+    #     gem "safe_ruby", "~> 1.0.4"
+    #
+    # Usage:
+    #     interpreter = Langchain::Tool::RubyCodeInterpreter.new
+    #
+    NAME = "ruby_code_interpreter"
+    ANNOTATIONS_PATH = Langchain.root.join("./langchain/tool/#{NAME}/#{NAME}.json").to_path
 
+    description <<~DESC
+      A Ruby code interpreter. Use this to execute ruby expressions. Input should be a valid ruby expression. If you want to see the output of the tool, make sure to return a value.
+    DESC
 
+    def initialize(timeout: 30)
+      depends_on "safe_ruby"
 
+      @timeout = timeout
+    end
 
+    # Executes Ruby code in a sandboxed environment.
+    #
+    # @param input [String] ruby code expression
+    # @return [String] Answer
+    def execute(input:)
+      Langchain.logger.info("Executing \"#{input}\"", for: self.class)
 
+      safe_eval(input)
+    end
 
+    def safe_eval(code)
+      SafeRuby.eval(code, timeout: @timeout)
     end
   end
 end
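With the class reorganized, usage still follows its header comment; a sketch (assuming `safe_ruby` is installed and Ruby < 3.3, per the `loader.ignore` change in `lib/langchain.rb` below; the expression and timeout are illustrative):

```ruby
interpreter = Langchain::Tool::RubyCodeInterpreter.new(timeout: 5)

# The expression's return value becomes the tool's output.
interpreter.execute(input: "(1..10).reduce(:+)") # => 55
```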
data/lib/langchain/vectorsearch/pgvector.rb CHANGED
@@ -89,6 +89,13 @@ module Langchain::Vectorsearch
       upsert_texts(texts: texts, ids: ids)
     end
 
+    # Remove a list of texts from the index
+    # @param ids [Array<Integer>] The ids of the texts to remove from the index
+    # @return [Integer] The number of texts removed from the index
+    def remove_texts(ids:)
+      @db[table_name.to_sym].where(id: ids).delete
+    end
+
     # Create default schema
     def create_default_schema
       db.run "CREATE EXTENSION IF NOT EXISTS vector"
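`remove_texts` deletes rows by primary key through Sequel, so its return value is the deleted-row count. A sketch (the connection URL and index name are placeholders, `llm` is any configured Langchain LLM, and it assumes `add_texts` returns the ids of the inserted rows):

```ruby
client = Langchain::Vectorsearch::Pgvector.new(
  url: ENV["POSTGRES_URL"],
  index_name: "documents",
  llm: llm
)

ids = client.add_texts(texts: ["first doc", "second doc"])
client.remove_texts(ids: ids) # => 2, the number of rows deleted
```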
data/lib/langchain/version.rb
CHANGED
data/lib/langchain.rb
CHANGED
@@ -16,6 +16,8 @@ loader.inflector.inflect(
   "json" => "JSON",
   "jsonl" => "JSONL",
   "llm" => "LLM",
+  "mistral_ai" => "MistralAI",
+  "mistral_ai_response" => "MistralAIResponse",
   "openai" => "OpenAI",
   "openai_validator" => "OpenAIValidator",
   "openai_response" => "OpenAIResponse",

@@ -32,6 +34,11 @@ loader.collapse("#{__dir__}/langchain/tool/google_search")
 loader.collapse("#{__dir__}/langchain/tool/ruby_code_interpreter")
 loader.collapse("#{__dir__}/langchain/tool/weather")
 loader.collapse("#{__dir__}/langchain/tool/wikipedia")
+
+# RubyCodeInterpreter does not work with Ruby 3.3;
+# https://github.com/ukutaht/safe_ruby/issues/4
+loader.ignore("#{__dir__}/langchain/tool/ruby_code_interpreter") if RUBY_VERSION >= "3.3.0"
+
 loader.setup
 
 # Langchain.rb is a library for building LLM-backed Ruby applications. It is an abstraction layer that sits on top of the emerging AI-related tools and makes it easy for developers to consume and string those services together.
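The new inflections are what resolve the `Zeitwerk::NameError` called out in the changelog: without them, Zeitwerk derives the constant `MistralAi` from `mistral_ai.rb`. A minimal standalone illustration of the mechanism (the directory layout is hypothetical):

```ruby
require "zeitwerk"

loader = Zeitwerk::Loader.new
loader.push_dir("#{__dir__}/lib")
# Without this inflection, Zeitwerk expects mistral_ai.rb to define
# MistralAi and raises Zeitwerk::NameError when it finds MistralAI instead.
loader.inflector.inflect("mistral_ai" => "MistralAI")
loader.setup
```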
metadata
CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: langchainrb
 version: !ruby/object:Gem::Version
-  version: 0.10.0
+  version: 0.10.3
 platform: ruby
 authors:
 - Andrei Bondarev
 autorequire:
 bindir: exe
 cert_chain: []
-date: 2024-03-
+date: 2024-03-28 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activesupport

@@ -472,6 +472,20 @@ dependencies:
   - - "~>"
     - !ruby/object:Gem::Version
       version: '2.8'
+- !ruby/object:Gem::Dependency
+  name: mistral-ai
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: '0'
 - !ruby/object:Gem::Dependency
   name: open-weather-ruby-client
   requirement: !ruby/object:Gem::Requirement

@@ -520,14 +534,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '
+        version: '2.0'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: '
+        version: '2.0'
 - !ruby/object:Gem::Dependency
   name: pinecone
   requirement: !ruby/object:Gem::Requirement

@@ -590,14 +604,14 @@ dependencies:
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 6.
+        version: 6.4.0
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
     - - "~>"
       - !ruby/object:Gem::Version
-        version: 6.
+        version: 6.4.0
 - !ruby/object:Gem::Dependency
   name: safe_ruby
   requirement: !ruby/object:Gem::Requirement

@@ -718,6 +732,7 @@ files:
 - lib/langchain/llm/google_vertex_ai.rb
 - lib/langchain/llm/hugging_face.rb
 - lib/langchain/llm/llama_cpp.rb
+- lib/langchain/llm/mistral_ai.rb
 - lib/langchain/llm/ollama.rb
 - lib/langchain/llm/openai.rb
 - lib/langchain/llm/prompts/ollama/summarize_template.yaml

@@ -732,6 +747,7 @@ files:
 - lib/langchain/llm/response/google_vertex_ai_response.rb
 - lib/langchain/llm/response/hugging_face_response.rb
 - lib/langchain/llm/response/llama_cpp_response.rb
+- lib/langchain/llm/response/mistral_ai_response.rb
 - lib/langchain/llm/response/ollama_response.rb
 - lib/langchain/llm/response/openai_response.rb
 - lib/langchain/llm/response/replicate_response.rb

@@ -806,7 +822,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 3.
+      version: 3.1.0
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="