langchainrb 0.16.1 → 0.17.1
This diff shows the content changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- checksums.yaml +4 -4
- data/CHANGELOG.md +11 -0
- data/README.md +6 -1
- data/lib/langchain/assistants/assistant.rb +13 -318
- data/lib/langchain/assistants/llm/adapter.rb +27 -0
- data/lib/langchain/assistants/llm/adapters/_base.rb +21 -0
- data/lib/langchain/assistants/llm/adapters/anthropic.rb +62 -0
- data/lib/langchain/assistants/llm/adapters/google_gemini.rb +62 -0
- data/lib/langchain/assistants/llm/adapters/mistral_ai.rb +65 -0
- data/lib/langchain/assistants/llm/adapters/ollama.rb +57 -0
- data/lib/langchain/assistants/llm/adapters/openai.rb +65 -0
- data/lib/langchain/assistants/messages/base.rb +5 -1
- data/lib/langchain/assistants/messages/mistral_ai_message.rb +30 -8
- data/lib/langchain/assistants/messages/openai_message.rb +37 -8
- data/lib/langchain/llm/anthropic.rb +2 -1
- data/lib/langchain/llm/base.rb +1 -2
- data/lib/langchain/tool/database.rb +1 -1
- data/lib/langchain/vectorsearch/elasticsearch.rb +1 -1
- data/lib/langchain/vectorsearch/milvus.rb +45 -61
- data/lib/langchain/vectorsearch/qdrant.rb +3 -2
- data/lib/langchain/vectorsearch/weaviate.rb +3 -2
- data/lib/langchain/version.rb +1 -1
- metadata +15 -24
- data/lib/langchain/llm/google_palm.rb +0 -177
- data/lib/langchain/llm/response/google_palm_response.rb +0 -40
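
The headline change in this release is the extraction of the Assistant's per-provider message handling into dedicated LLM adapter classes (the new files under data/lib/langchain/assistants/llm/ listed above), which is what shrinks assistant.rb by roughly 300 lines. Below is a minimal sketch of the factory-style dispatch that file layout suggests; the module nesting and method names are inferred from the file paths, not copied from the release:

    # Illustrative sketch only: a factory that picks a provider-specific
    # adapter based on the LLM instance the Assistant was built with.
    module Langchain
      class Assistant
        module LLM
          class Adapter
            def self.build(llm)
              case llm
              when Langchain::LLM::Anthropic then Adapters::Anthropic.new
              when Langchain::LLM::GoogleGemini then Adapters::GoogleGemini.new
              when Langchain::LLM::MistralAI then Adapters::MistralAI.new
              when Langchain::LLM::Ollama then Adapters::Ollama.new
              when Langchain::LLM::OpenAI then Adapters::OpenAI.new
              else raise ArgumentError, "Unsupported LLM type: #{llm.class}"
              end
            end
          end
        end
      end
    end

Each adapter can then own the provider-specific concerns (message formats, tool/function schemas) behind a common interface. The two deleted Google PaLM files are shown in full below.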
--- data/lib/langchain/llm/google_palm.rb
+++ /dev/null
@@ -1,177 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain::LLM
-  #
-  # Wrapper around the Google PaLM (Pathways Language Model) APIs: https://ai.google/build/machine-learning/
-  #
-  # Gem requirements:
-  #     gem "google_palm_api", "~> 0.1.3"
-  #
-  # Usage:
-  #     google_palm = Langchain::LLM::GooglePalm.new(api_key: ENV["GOOGLE_PALM_API_KEY"])
-  #
-  class GooglePalm < Base
-    extend Gem::Deprecate
-
-    DEFAULTS = {
-      temperature: 0.0,
-      dimensions: 768, # This is what the `embedding-gecko-001` model generates
-      completion_model_name: "text-bison-001",
-      chat_completion_model_name: "chat-bison-001",
-      embeddings_model_name: "embedding-gecko-001"
-    }.freeze
-
-    ROLE_MAPPING = {
-      "assistant" => "ai"
-    }
-
-    attr_reader :defaults
-
-    # @deprecated Please use Langchain::LLM::GoogleGemini instead
-    #
-    # @param api_key [String] The API key for the Google PaLM API
-    def initialize(api_key:, default_options: {})
-      depends_on "google_palm_api"
-
-      @client = ::GooglePalmApi::Client.new(api_key: api_key)
-      @defaults = DEFAULTS.merge(default_options)
-    end
-    deprecate :initialize, "Langchain::LLM::GoogleGemini.new(api_key:)", 2024, 10
-
-    #
-    # Generate an embedding for a given text
-    #
-    # @param text [String] The text to generate an embedding for
-    # @return [Langchain::LLM::GooglePalmResponse] Response object
-    #
-    def embed(text:)
-      response = client.embed(text: text)
-
-      Langchain::LLM::GooglePalmResponse.new response,
-        model: @defaults[:embeddings_model_name]
-    end
-
-    #
-    # Generate a completion for a given prompt
-    #
-    # @param prompt [String] The prompt to generate a completion for
-    # @param params extra parameters passed to GooglePalmAPI::Client#generate_text
-    # @return [Langchain::LLM::GooglePalmResponse] Response object
-    #
-    def complete(prompt:, **params)
-      default_params = {
-        prompt: prompt,
-        temperature: @defaults[:temperature],
-        model: @defaults[:completion_model_name]
-      }
-
-      if params[:stop_sequences]
-        default_params[:stop_sequences] = params.delete(:stop_sequences)
-      end
-
-      if params[:max_tokens]
-        default_params[:max_output_tokens] = params.delete(:max_tokens)
-      end
-
-      default_params.merge!(params)
-
-      response = client.generate_text(**default_params)
-
-      Langchain::LLM::GooglePalmResponse.new response,
-        model: default_params[:model]
-    end
-
-    #
-    # Generate a chat completion for a given prompt
-    #
-    # @param prompt [String] The prompt to generate a chat completion for
-    # @param messages [Array<Hash>] The messages that have been sent in the conversation
-    # @param context [String] An initial context to provide as a system message, ie "You are RubyGPT, a helpful chat bot for helping people learn Ruby"
-    # @param examples [Array<Hash>] Examples of messages to provide to the model. Useful for Few-Shot Prompting
-    # @param options [Hash] extra parameters passed to GooglePalmAPI::Client#generate_chat_message
-    # @return [Langchain::LLM::GooglePalmResponse] Response object
-    #
-    def chat(prompt: "", messages: [], context: "", examples: [], **options)
-      raise ArgumentError.new(":prompt or :messages argument is expected") if prompt.empty? && messages.empty?
-
-      default_params = {
-        temperature: @defaults[:temperature],
-        model: @defaults[:chat_completion_model_name],
-        context: context,
-        messages: compose_chat_messages(prompt: prompt, messages: messages),
-        examples: compose_examples(examples)
-      }
-
-      if options[:stop_sequences]
-        default_params[:stop] = options.delete(:stop_sequences)
-      end
-
-      if options[:max_tokens]
-        default_params[:max_output_tokens] = options.delete(:max_tokens)
-      end
-
-      default_params.merge!(options)
-
-      response = client.generate_chat_message(**default_params)
-      raise "GooglePalm API returned an error: #{response}" if response.dig("error")
-
-      Langchain::LLM::GooglePalmResponse.new response,
-        model: default_params[:model]
-      # TODO: Pass in prompt_tokens: prompt_tokens
-    end
-
-    #
-    # Generate a summarization for a given text
-    #
-    # @param text [String] The text to generate a summarization for
-    # @return [String] The summarization
-    #
-    def summarize(text:)
-      prompt_template = Langchain::Prompt.load_from_path(
-        file_path: Langchain.root.join("langchain/llm/prompts/summarize_template.yaml")
-      )
-      prompt = prompt_template.format(text: text)
-
-      complete(
-        prompt: prompt,
-        temperature: @defaults[:temperature],
-        # Most models have a context length of 2048 tokens (except for the newest models, which support 4096).
-        max_tokens: 256
-      )
-    end
-
-    private
-
-    def compose_chat_messages(prompt:, messages:)
-      history = []
-      history.concat transform_messages(messages) unless messages.empty?
-
-      unless prompt.empty?
-        if history.last && history.last[:role] == "user"
-          history.last[:content] += "\n#{prompt}"
-        else
-          history.append({author: "user", content: prompt})
-        end
-      end
-      history
-    end
-
-    def compose_examples(examples)
-      examples.each_slice(2).map do |example|
-        {
-          input: {content: example.first[:content]},
-          output: {content: example.last[:content]}
-        }
-      end
-    end
-
-    def transform_messages(messages)
-      messages.map do |message|
-        {
-          author: ROLE_MAPPING.fetch(message[:role], message[:role]),
-          content: message[:content]
-        }
-      end
-    end
-  end
-end
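
The removed class had carried a deprecation notice pointing to Langchain::LLM::GoogleGemini (see the `deprecate :initialize` call above, scheduled for 2024-10). A rough migration sketch follows; the environment variable name and the exact Gemini message shape are assumptions, so check the GoogleGemini class docs before relying on them:

    # Migration sketch (not taken from the release).
    # Before (0.16.x):
    #   llm = Langchain::LLM::GooglePalm.new(api_key: ENV["GOOGLE_PALM_API_KEY"])
    #   llm.complete(prompt: "Hello")
    # After (0.17.x) -- env var name is illustrative:
    llm = Langchain::LLM::GoogleGemini.new(api_key: ENV["GOOGLE_GEMINI_API_KEY"])
    # Gemini-style message hashes (role + parts), per Google's chat API shape:
    llm.chat(messages: [{role: "user", parts: [{text: "Hello"}]}])
    llm.embed(text: "Hello")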
--- data/lib/langchain/llm/response/google_palm_response.rb
+++ /dev/null
@@ -1,40 +0,0 @@
-# frozen_string_literal: true
-
-module Langchain::LLM
-  class GooglePalmResponse < BaseResponse
-    attr_reader :prompt_tokens
-
-    def initialize(raw_response, model: nil, prompt_tokens: nil)
-      @prompt_tokens = prompt_tokens
-      super(raw_response, model: model)
-    end
-
-    def completion
-      completions&.dig(0, "output")
-    end
-
-    def embedding
-      embeddings.first
-    end
-
-    def completions
-      raw_response.dig("candidates")
-    end
-
-    def chat_completion
-      chat_completions&.dig(0, "content")
-    end
-
-    def chat_completions
-      raw_response.dig("candidates")
-    end
-
-    def embeddings
-      [raw_response.dig("embedding", "value")]
-    end
-
-    def role
-      "assistant"
-    end
-  end
-end
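
For reference, this wrapper mapped PaLM's raw JSON onto the common accessors that `BaseResponse` subclasses expose, so callers never dug into provider-specific JSON themselves. A worked example derived from the accessors above; the raw hashes are hypothetical but use exactly the keys the class digs for:

    # candidates[0]["content"] backs #chat_completion
    raw = {"candidates" => [{"content" => "Hi there!"}]}
    response = Langchain::LLM::GooglePalmResponse.new(raw, model: "chat-bison-001")
    response.chat_completion # => "Hi there!"

    # embedding.value backs #embeddings, and #embedding takes the first entry
    raw = {"embedding" => {"value" => [0.1, 0.2, 0.3]}}
    Langchain::LLM::GooglePalmResponse.new(raw).embedding # => [0.1, 0.2, 0.3]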