last_llm 0.0.4
- checksums.yaml +7 -0
- data/README.md +246 -0
- data/lib/last_llm/client.rb +100 -0
- data/lib/last_llm/completion.rb +19 -0
- data/lib/last_llm/configuration.rb +115 -0
- data/lib/last_llm/extensions/dry_schema_extensions.rb +76 -0
- data/lib/last_llm/provider.rb +152 -0
- data/lib/last_llm/providers/anthropic.rb +124 -0
- data/lib/last_llm/providers/constants.rb +33 -0
- data/lib/last_llm/providers/deepseek.rb +121 -0
- data/lib/last_llm/providers/google_gemini.rb +175 -0
- data/lib/last_llm/providers/ollama.rb +124 -0
- data/lib/last_llm/providers/openai.rb +184 -0
- data/lib/last_llm/providers/test_provider.rb +38 -0
- data/lib/last_llm/railtie.rb +38 -0
- data/lib/last_llm/schema.rb +241 -0
- data/lib/last_llm/structured_output.rb +58 -0
- data/lib/last_llm/tool.rb +110 -0
- data/lib/last_llm/version.rb +5 -0
- data/lib/last_llm.rb +88 -0
- metadata +144 -0
data/lib/last_llm/provider.rb
@@ -0,0 +1,152 @@
# frozen_string_literal: true

require 'faraday'
require 'faraday/typhoeus'
require 'active_support/core_ext/hash/keys'

module LastLLM
  # Base class for all LLM providers
  # Implements common functionality and defines the interface that all providers must implement
  class Provider
    attr_reader :name, :config

    def initialize(name, config = {})
      @name = name
      @config = config

      if instance_of?(Provider)
        raise NotImplementedError, "#{self.class} is an abstract class and cannot be instantiated directly"
      end

      validate_config! unless config[:skip_validation]
    end

    # Generate text from a prompt
    # @param prompt [String] The prompt to generate text from
    # @param options [Hash] Generation options
    # @option options [String] :model The model to use
    # @option options [Float] :temperature (0.7) The temperature to use
    # @option options [Integer] :max_tokens The maximum number of tokens to generate
    # @return [String] The generated text
    # @raise [NotImplementedError] If not implemented by subclass
    def generate_text(prompt, options = {})
      raise NotImplementedError, "#{self.class}##{__method__} must be implemented by subclass"
    end

    # Generate a structured object from a prompt
    # @param prompt [String] The prompt to generate the object from
    # @param schema [Dry::Schema::Params] The schema to validate against
    # @param options [Hash] Generation options
    # @option options [String] :model The model to use
    # @option options [Float] :temperature (0.7) The temperature to use
    # @return [Hash] The generated object
    # @raise [NotImplementedError] If not implemented by subclass
    def generate_object(prompt, schema, options = {})
      raise NotImplementedError, "#{self.class}##{__method__} must be implemented by subclass"
    end

    # Handle request errors
    # @param error [StandardError] The error to handle
    # @raise [ApiError] A standardized API error
    def handle_request_error(error)
      status = nil
      message = "API request failed: #{error.message}"

      case error
      when Faraday::ResourceNotFound
        status = 404
      when Faraday::ConnectionFailed
        status = 503
      when Faraday::TimeoutError
        status = 504
      when Faraday::Error
        if error.respond_to?(:response) && error.response.is_a?(Hash)
          status = error.response[:status]
          body = error.response[:body]

          # Try to extract a more helpful message from the response body
          if body.is_a?(String) && !body.empty?
            begin
              parsed_body = JSON.parse(body)
              message = "API error: #{parsed_body['error']['message'] || parsed_body['error']}" if parsed_body['error']
            rescue JSON::ParserError
              # Use default message if we can't parse the body
            end
          end
        end
      end

      raise LastLLM::ApiError.new(message, status)
    end

    # Parse API response
    # @param response [Faraday::Response] The response to parse
    # @return [Hash] The parsed response
    # @raise [ApiError] If the response is invalid
    def parse_response(response)
      return {} if response.body.nil? || response.body.empty?

      begin
        response.body.deep_symbolize_keys
      rescue JSON::ParserError
        raise LastLLM::ApiError, "Invalid JSON response: #{response.body}"
      end
    end

    private

    # Validate provider configuration
    # @raise [LastLLM::ConfigurationError] If the configuration is invalid
    def validate_config!
      raise LastLLM::ConfigurationError, 'API key is required' unless @config[:api_key]
    end

    def parse_error_body(body)
      return {} if body.nil? || body.empty?

      JSON.parse(body)
    rescue JSON::ParserError
      { 'error' => body }
    end

    def deep_symbolize_keys(hash)
      return hash unless hash.is_a?(Hash)

      hash.each_with_object({}) do |(key, value), result|
        result[key.to_sym] = case value
                             when Hash then deep_symbolize_keys(value)
                             when Array then value.map { |item| deep_symbolize_keys(item) }
                             else value
                             end
      end
    end

    protected

    def connection(base_url)
      Faraday.new(url: base_url) do |f|
        f.request :json
        f.response :json
        f.response :raise_error

        # Try to use Typhoeus if available, otherwise fall back to default adapter
        adapter = begin
          Faraday::Adapter::Typhoeus
        rescue StandardError
          Faraday.default_adapter
        end
        f.adapter adapter

        f.options.timeout = @config[:request_timeout] || 30
        f.options.open_timeout = 10
        f.options.proxy = @config[:proxy] if @config[:proxy]

        if respond_to?(:setup_authorization, true)
          setup_authorization(f)
        elsif @config[:api_key]
          f.headers['Authorization'] = "Bearer #{@config[:api_key]}"
        end
      end
    end
  end
end
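The base class enforces its contract at construction time: direct instantiation raises, subclasses must override #generate_text and #generate_object, and the protected #connection helper hands them a preconfigured Faraday client. A minimal sketch of a conforming subclass — EchoProvider is hypothetical and exists only to illustrate the contract:

require 'last_llm'

# Hypothetical provider used only to illustrate the subclass contract.
class EchoProvider < LastLLM::Provider
  def initialize(config = {})
    # skip_validation bypasses the api_key check; no real endpoint is involved
    super(:echo, config.merge(skip_validation: true))
  end

  def generate_text(prompt, _options = {})
    "echo: #{prompt}"
  end

  def generate_object(_prompt, _schema, _options = {})
    {}
  end
end

EchoProvider.new.generate_text('hi') # => "echo: hi"
LastLLM::Provider.new(:base)         # => NotImplementedError (abstract class)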
data/lib/last_llm/providers/anthropic.rb
@@ -0,0 +1,124 @@
# frozen_string_literal: true

require 'last_llm/providers/constants'

module LastLLM
  module Providers
    # Anthropic provider implementation
    class Anthropic < LastLLM::Provider
      BASE_ENDPOINT = 'https://api.anthropic.com'

      def initialize(config)
        super(:anthropic, config)
        @conn = connection(config[:base_url] || BASE_ENDPOINT)
      end

      def generate_text(prompt, options = {})
        options = options.dup
        messages = format_messages(prompt, options)

        body = {
          model: options[:model] || @config[:model] || 'claude-3-5-haiku-latest',
          messages: messages,
          max_tokens: options[:max_tokens] || 8192,
          temperature: options[:temperature] || 0.2,
          top_p: options[:top_p] || 0.8,
          stream: false
        }

        # Add system parameter if system prompt is provided
        body[:system] = options[:system_prompt] if options[:system_prompt]

        response = @conn.post('/v1/messages') do |req|
          req.body = body.compact
        end

        result = parse_response(response)
        content = result.dig(:content, 0, :text)

        content.to_s
      rescue Faraday::Error => e
        handle_request_error(e)
      end

      def generate_object(prompt, schema, options = {})
        options = options.dup
        system_prompt = 'You are a helpful assistant that responds with valid JSON.'
        formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)

        options[:system_prompt] = system_prompt

        body = {
          model: options[:model] || @config[:model] || 'claude-3-5-haiku-latest',
          messages: [{ role: 'user', content: formatted_prompt }],
          max_tokens: options[:max_tokens] || 8192,
          system: options[:system_prompt],
          temperature: options[:temperature] || 0.2,
          top_p: options[:top_p] || 0.8,
          stream: false
        }.compact

        response = @conn.post('/v1/messages') do |req|
          req.body = body
        end

        result = parse_response(response)
        content = result.dig(:content, 0, :text)

        begin
          JSON.parse(content, symbolize_names: true)
        rescue JSON::ParserError => e
          raise ApiError, "Invalid JSON response: #{e.message}"
        end
      rescue Faraday::Error => e
        handle_request_error(e)
      end

      # Format a tool for Anthropic tools format
      # @param tool [LastLLM::Tool] The tool to format
      # @return [Hash] The tool in Anthropic format
      def self.format_tool(tool)
        {
          name: tool.name,
          description: tool.description,
          input_schema: tool.parameters
        }
      end

      # Execute a tool from an Anthropic response
      # @param tool [LastLLM::Tool] The tool to execute
      # @param response [Hash] The Anthropic response containing tool use information
      # @return [Hash, nil] The result of the function call or nil if the tool wasn't used
      def self.execute_tool(tool, response)
        tool_use = response[:tool_use]
        return nil unless tool_use && tool_use[:name] == tool.name

        tool.call(tool_use[:input])
      end

      private

      def format_messages(prompt, options)
        if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
          # Extract system message if present
          system_messages = prompt.select { |m| m[:role] == 'system' }

          # Set system_prompt if a system message was found
          if system_messages.any? && !options[:system_prompt]
            options[:system_prompt] = system_messages.map { |m| m[:content] }.join("\n")
          end

          # Return only non-system messages
          prompt.reject { |m| m[:role] == 'system' }
        else
          [{ role: 'user', content: prompt.to_s }]
        end
      end

      def setup_authorization(conn)
        conn.headers['x-api-key'] = @config[:api_key]
        conn.headers['anthropic-version'] = '2023-06-01'
      end
    end
  end
end
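Two details worth noting above: system messages in a chat array are hoisted into Anthropic's top-level :system parameter, and .format_tool maps a tool onto the input_schema shape the Messages API expects. A hedged sketch — the key is a placeholder, and FakeTool merely mimics the name/description/parameters interface of LastLLM::Tool, whose constructor lives in tool.rb, outside this hunk:

provider = LastLLM::Providers::Anthropic.new(api_key: ENV['ANTHROPIC_API_KEY'])

# The system message below is stripped from messages and sent as :system.
provider.generate_text(
  [{ role: 'system', content: 'Be terse.' },
   { role: 'user', content: 'Say hello' }],
  max_tokens: 64
)

# Stand-in exposing only the interface .format_tool relies on.
FakeTool = Struct.new(:name, :description, :parameters)
tool = FakeTool.new('get_weather', 'Look up current weather',
                    { type: 'object', properties: { city: { type: 'string' } } })
LastLLM::Providers::Anthropic.format_tool(tool)
# => { name: "get_weather", description: "Look up current weather", input_schema: {...} }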
data/lib/last_llm/providers/constants.rb
@@ -0,0 +1,33 @@
# frozen_string_literal: true

# Constants for provider names
# This module centralizes all provider name definitions to follow DRY principles
module Constants
  OPENAI = :openai
  ANTHROPIC = :anthropic
  GOOGLE_GEMINI = :google_gemini
  DEEPSEEK = :deepseek
  OLLAMA = :ollama
  TEST = :test

  # Returns all available provider names
  # @return [Array<Symbol>] List of all provider names
  def self.all
    [OPENAI, ANTHROPIC, GOOGLE_GEMINI, DEEPSEEK, OLLAMA, TEST]
  end

  # Check if a provider name is valid
  # @param provider_name [Symbol] The provider name to check
  # @return [Boolean] Whether the provider name is valid
  def self.valid?(provider_name)
    all.include?(provider_name)
  end
end

# Also define it in the LastLLM namespace for consistency
module LastLLM
  module Providers
    # Reference to the Constants module defined above
    Constants = ::Constants
  end
end
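Because LastLLM::Providers::Constants is aliased to the top-level module, both spellings resolve to the same object:

Constants.valid?(:anthropic)      # => true
Constants.valid?(:mistral)        # => false
LastLLM::Providers::Constants.all
# => [:openai, :anthropic, :google_gemini, :deepseek, :ollama, :test]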
data/lib/last_llm/providers/deepseek.rb
@@ -0,0 +1,121 @@
# frozen_string_literal: true

require 'last_llm/providers/constants'

# Deepseek provider implementation
class Deepseek < LastLLM::Provider
  BASE_ENDPOINT = 'https://api.deepseek.com'

  def initialize(config)
    super(Constants::DEEPSEEK, config)
    @conn = connection(config[:base_url] || BASE_ENDPOINT)
  end

  def generate_text(prompt, options = {})
    messages = format_messages(prompt, options)

    response = @conn.post('/v1/chat/completions') do |req|
      req.body = {
        model: options[:model] || @config[:model] || 'deepseek-chat',
        messages: messages,
        temperature: options[:temperature] || 0.7,
        top_p: options[:top_p] || 0.8,
        max_tokens: options[:max_tokens],
        stream: false
      }.compact
    end

    result = parse_response(response)
    content = result.dig(:choices, 0, :message, :content)

    content.to_s
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  def generate_object(prompt, schema, options = {})
    system_prompt = 'You are a helpful assistant that responds with valid JSON.'
    formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)

    messages = [
      { role: 'system', content: system_prompt },
      { role: 'user', content: formatted_prompt }
    ]

    response = @conn.post('/v1/chat/completions') do |req|
      req.body = {
        model: options[:model] || @config[:model] || 'deepseek-chat',
        messages: messages,
        temperature: options[:temperature] || 0.2,
        top_p: options[:top_p] || 0.8,
        stream: false
      }.compact
    end

    result = parse_response(response)
    content = result.dig(:choices, 0, :message, :content)

    begin
      JSON.parse(content, symbolize_names: true)
    rescue JSON::ParserError => e
      # Try to clean markdown code blocks and parse again
      content = content.gsub("```json\n", '').gsub("\n```", '')
      begin
        JSON.parse(content, symbolize_names: true)
      rescue JSON::ParserError
        raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
      end
    end
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  private

  def format_messages(prompt, options)
    if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
      prompt
    elsif options[:system_prompt]
      [
        { role: 'system', content: options[:system_prompt] },
        { role: 'user', content: prompt.to_s }
      ]
    else
      [{ role: 'user', content: prompt.to_s }]
    end
  end

  # Format a tool for Deepseek function calling
  # @param tool [LastLLM::Tool] The tool to format
  # @return [Hash] The tool in Deepseek format
  def self.format_tool(tool)
    {
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters
      }
    }
  end

  # Execute a tool from a Deepseek response
  # @param tool [LastLLM::Tool] The tool to execute
  # @param response [Hash] The Deepseek response containing tool call information
  # @return [Hash, nil] The result of the function call or nil if the tool wasn't called
  def self.execute_tool(tool, response)
    tool_call = response.dig(:choices, 0, :message, :tool_calls)&.first
    return nil unless tool_call && tool_call[:function][:name] == tool.name

    arguments = JSON.parse(tool_call[:function][:arguments], symbolize_names: true)
    tool.call(arguments)
  end
end

# Also define it in the LastLLM::Providers namespace for consistency
module LastLLM
  module Providers
    # Reference to the Deepseek class defined above
    Deepseek = ::Deepseek
  end
end
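#generate_object here sends the schema-annotated prompt (built by LastLLM::StructuredOutput.format_prompt) through the OpenAI-compatible chat endpoint and parses the reply as JSON, retrying once after stripping markdown fences. A hedged sketch, assuming format_prompt accepts the dry-schema objects the gem's docs reference (the key is a placeholder):

require 'dry-schema'

person_schema = Dry::Schema.JSON do
  required(:name).filled(:string)
  required(:age).filled(:integer)
end

provider = LastLLM::Providers::Deepseek.new(api_key: ENV['DEEPSEEK_API_KEY'])
provider.generate_object('Invent one fictional person', person_schema)
# => e.g. { name: "Ada", age: 36 } (keys symbolized by JSON.parse)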
data/lib/last_llm/providers/google_gemini.rb
@@ -0,0 +1,175 @@
# frozen_string_literal: true

require 'last_llm/providers/constants'

# Google Gemini provider implementation
class GoogleGemini < LastLLM::Provider
  BASE_ENDPOINT = 'https://generativelanguage.googleapis.com'

  def initialize(config)
    super(Constants::GOOGLE_GEMINI, config)
    @api_key = config[:api_key]
    @conn = connection(config[:base_url] || BASE_ENDPOINT)
  end

  def generate_text(prompt, options = {})
    model = options[:model] || @config[:model] || 'gemini-1.5-flash'
    contents = format_contents(prompt, options)

    response = @conn.post("/v1beta/models/#{model}:generateContent?key=#{@api_key}") do |req|
      req.body = {
        contents: contents,
        generationConfig: {
          maxOutputTokens: options[:max_tokens],
          temperature: options[:temperature] || 0.3,
          topP: options[:top_p] || 0.95,
          topK: options[:top_k] || 40
        }.compact
      }.compact
    end

    # Check for error responses even when they don't raise exceptions
    if response.status != 200
      error = Faraday::Error.new("HTTP #{response.status}")
      error.instance_variable_set(:@response, { status: response.status, body: response.body.to_json })
      return handle_gemini_error(error)
    end

    result = parse_response(response)
    content = result.dig(:candidates, 0, :content, :parts, 0, :text)

    content.to_s
  rescue Faraday::Error => e
    handle_gemini_error(e)
  end

  def generate_object(prompt, schema, options = {})
    model = options[:model] || @config[:model] || 'gemini-1.5-flash'
    contents = format_contents(prompt, options)

    response = @conn.post("/v1beta/models/#{model}:generateContent?key=#{@api_key}") do |req|
      req.body = {
        contents: contents,
        generationConfig: {
          temperature: options[:temperature] || 0.7,
          maxOutputTokens: options[:max_tokens],
          topP: options[:top_p] || 0.95,
          topK: options[:top_k] || 40,
          responseMimeType: 'application/json',
          responseSchema: schema
        }.compact
      }.compact
    end

    # Check for error responses even when they don't raise exceptions
    if response.status != 200
      error = Faraday::Error.new("HTTP #{response.status}")
      error.instance_variable_set(:@response, { status: response.status, body: response.body.to_json })
      return handle_gemini_error(error)
    end

    result = parse_response(response)
    content = result.dig(:candidates, 0, :content, :parts, 0, :text)

    begin
      JSON.parse(content, symbolize_names: true)
    rescue JSON::ParserError => e
      raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
    end
  rescue Faraday::Error => e
    handle_gemini_error(e)
  end

  private

  def connection(endpoint)
    Faraday.new(url: endpoint) do |faraday|
      faraday.request :json
      faraday.response :json, content_type: /\bjson$/
      faraday.adapter Faraday.default_adapter
    end
  end

  def format_contents(prompt, options)
    if prompt.is_a?(Array)
      prompt.map { |m| { role: m[:role], parts: [{ text: m[:content] }] } }
    elsif options[:system_prompt]
      [
        { role: 'user', parts: [{ text: options[:system_prompt] }] },
        { role: 'user', parts: [{ text: prompt.to_s }] }
      ]
    else
      [{ role: 'user', parts: [{ text: prompt.to_s }] }]
    end
  end

  # Format a tool for Google Gemini function calling
  # @param tool [LastLLM::Tool] The tool to format
  # @return [Hash] The tool in Google Gemini format
  def self.format_tool(tool)
    {
      name: tool.name,
      description: tool.description,
      parameters: tool.parameters
    }
  end

  # Execute a tool from a Google Gemini response
  # @param tool [LastLLM::Tool] The tool to execute
  # @param response [Hash] The Google Gemini response containing function call information
  # @return [Hash, nil] The result of the function call or nil if the tool wasn't called
  def self.execute_tool(tool, response)
    function_call = response.dig(:candidates, 0, :content, :parts, 0, :functionCall)
    return nil unless function_call && function_call[:name] == tool.name

    arguments = function_call[:args]
    tool.call(arguments)
  end

  # Custom error handler for Gemini API responses
  def handle_gemini_error(error)
    status = nil
    message = "API request failed: #{error.message}"

    if error.respond_to?(:response) && error.response.is_a?(Hash)
      status = error.response[:status]
      body = error.response[:body]

      if body.is_a?(String) && !body.empty?
        begin
          parsed_body = JSON.parse(body)
          # Handle array response format
          if parsed_body.is_a?(Array) && parsed_body[0] && parsed_body[0]['error']
            error_obj = parsed_body[0]['error']
            message = "API error: #{error_obj['message'] || error_obj}"
          # Handle object response format
          elsif parsed_body['error']
            error_message = parsed_body['error']['message'] || parsed_body['error']
            error_code = parsed_body['error']['code']
            error_status = parsed_body['error']['status']
            message = "API error (#{error_code}): #{error_message}"
            # Handle authentication errors
            if error_code == 401 && error_status == 'UNAUTHENTICATED'
              message = 'Authentication failed: Invalid API key or credentials. Please check your Google API key.'
            elsif error_code == 400 && error_message.include?('API key not valid')
              message = 'Authentication failed: Invalid API key format or credentials. ' \
                        'Please check your Google API key.'
            end
          end
        rescue JSON::ParserError
          # Use default message if we can't parse the body
        end
      end
    end

    raise LastLLM::ApiError.new(message, status)
  end
end

# Also define it in the LastLLM::Providers namespace for consistency
module LastLLM
  module Providers
    # Reference to the GoogleGemini class defined above
    GoogleGemini = ::GoogleGemini
  end
end
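Unlike the other providers, Gemini authenticates with a key query parameter rather than a header, and it overrides #connection without the raise_error middleware, so non-200 responses are converted into errors by hand. A hedged usage sketch (the key is a placeholder):

provider = LastLLM::Providers::GoogleGemini.new(api_key: ENV['GOOGLE_API_KEY'])
provider.generate_text('Say hello', model: 'gemini-1.5-flash', max_tokens: 32)

# A rejected key never raises inside Faraday here; the status check above
# routes it through handle_gemini_error, which re-raises as LastLLM::ApiError:
begin
  LastLLM::Providers::GoogleGemini.new(api_key: 'bad-key').generate_text('hi')
rescue LastLLM::ApiError => e
  e.message # => "Authentication failed: Invalid API key..." (per the handler above)
end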