last_llm 0.0.4 → 0.0.5
This diff shows the changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- checksums.yaml +4 -4
- data/lib/last_llm/providers/anthropic.rb +62 -48
- data/lib/last_llm/providers/deepseek.rb +115 -104
- data/lib/last_llm/providers/google_gemini.rb +169 -150
- data/lib/last_llm/providers/ollama.rb +119 -106
- data/lib/last_llm/providers/openai.rb +184 -176
- data/lib/last_llm/providers/test_provider.rb +51 -28
- data/lib/last_llm/version.rb +1 -1
- metadata +2 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: e8d63b9fe1cfc2adcf7d290b1deee351c2dbdde0922931f6bdc950b47e882884
+  data.tar.gz: 2ce5e77677f5734d6aa300083e1d62345e7da9b57db87dec88b4eb0945f5a35f
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 99ebc99586238fa7bdc6464d01dba3ec0bb933488f2a9b61ca2201f93e97689cca59893628185649524582d4fa2da9be86867ebb7a4373b8b32ca0cc5e1459ea
+  data.tar.gz: ed0ebd17190f1797ff6f45523461fca9813066aedbb05fa13e7e9660cad567aa10d1f082655ab6d5c498116a330347a4f998d6e0118ee5cf940a76d81b3fff3e
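For reference, the recorded digests can be re-checked against a downloaded copy of the release. A minimal sketch using Ruby's standard `digest` library, assuming `last_llm-0.0.5.gem` has already been fetched and untarred (a `.gem` file is a plain tar archive containing `metadata.gz`, `data.tar.gz`, and `checksums.yaml.gz`):

```ruby
# Sketch: recompute the checksums recorded in checksums.yaml for a local
# copy of the 0.0.5 release. Assumes metadata.gz and data.tar.gz have
# already been extracted from last_llm-0.0.5.gem into the current directory.
require 'digest'

%w[metadata.gz data.tar.gz].each do |file|
  puts file
  puts "  SHA256: #{Digest::SHA256.file(file).hexdigest}"
  puts "  SHA512: #{Digest::SHA512.file(file).hexdigest}"
end
```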
data/lib/last_llm/providers/anthropic.rb
CHANGED
@@ -6,7 +6,23 @@ module LastLLM
   module Providers
     # Anthropic provider implementation
     class Anthropic < LastLLM::Provider
+      # API Configuration
       BASE_ENDPOINT = 'https://api.anthropic.com'
+      DEFAULT_MODEL = 'claude-3-5-haiku-latest'
+      API_VERSION = '2023-06-01'
+
+      # LLM Default Parameters
+      DEFAULT_TEMPERATURE = 0.2
+      DEFAULT_TOP_P = 0.8
+      DEFAULT_MAX_TOKENS = 4096
+      DEFAULT_MAX_TOKENS_OBJECT = 8192
+
+      # Response Configuration
+      SUCCESS_STATUS = 200
+
+      # Error Status Codes
+      UNAUTHORIZED_STATUS = 401
+      BAD_REQUEST_STATUS = 400

       def initialize(config)
         super(:anthropic, config)
@@ -14,31 +30,9 @@ module LastLLM
       end

       def generate_text(prompt, options = {})
-        options
-
-
-        body = {
-          model: options[:model] || @config[:model] || 'claude-3-5-haiku-latest',
-          messages: messages,
-          max_tokens: options[:max_tokens] || 8192,
-          temperature: options[:temperature] || 0.2,
-          top_p: options[:top_p] || 0.8,
-          stream: false
-        }
-
-        # Add system parameter if system prompt is provided
-        body[:system] = options[:system_prompt] if options[:system_prompt]
-
-        response = @conn.post('/v1/messages') do |req|
-          req.body = body.compact
+        make_request(prompt, options) do |result|
+          result.dig(:content, 0, :text).to_s
         end
-
-        result = parse_response(response)
-        content = result.dig(:content, 0, :text)
-
-        content.to_s
-      rescue Faraday::Error => e
-        handle_request_error(e)
       end

       def generate_object(prompt, schema, options = {})
@@ -47,31 +41,12 @@ module LastLLM
         formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)

         options[:system_prompt] = system_prompt
+        options[:max_tokens] ||= DEFAULT_MAX_TOKENS_OBJECT

-
-
-
-          max_tokens: options[:max_tokens] || 8192,
-          system: options[:system_prompt],
-          temperature: options[:temperature] || 0.2,
-          top_p: options[:top_p] || 0.8,
-          stream: false
-        }.compact
-
-        response = @conn.post('/v1/messages') do |req|
-          req.body = body
+        make_request(formatted_prompt, options) do |result|
+          content = result.dig(:content, 0, :text)
+          parse_json_response(content)
         end
-
-        result = parse_response(response)
-        content = result.dig(:content, 0, :text)
-
-        begin
-          JSON.parse(content, symbolize_names: true)
-        rescue JSON::ParserError => e
-          raise ApiError, "Invalid JSON response: #{e.message}"
-        end
-      rescue Faraday::Error => e
-        handle_request_error(e)
       end

       # Format a tool for Anthropic tools format
@@ -98,6 +73,31 @@ module LastLLM

       private

+      def make_request(prompt, options = {})
+        messages = format_messages(prompt, options)
+
+        body = {
+          model: options[:model] || @config[:model] || DEFAULT_MODEL,
+          messages: messages,
+          max_tokens: options[:max_tokens] || DEFAULT_MAX_TOKENS,
+          temperature: options[:temperature] || DEFAULT_TEMPERATURE,
+          top_p: options[:top_p] || DEFAULT_TOP_P,
+          stream: false
+        }
+
+        # Add system parameter if system prompt is provided
+        body[:system] = options[:system_prompt] if options[:system_prompt]
+
+        response = @conn.post('/v1/messages') do |req|
+          req.body = body.compact
+        end
+
+        result = parse_response(response)
+        yield(result)
+      rescue Faraday::Error => e
+        handle_request_error(e)
+      end
+
       def format_messages(prompt, options)
         if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
           # Extract system message if present
@@ -115,9 +115,23 @@ module LastLLM
         end
       end

+      def parse_json_response(content)
+        begin
+          JSON.parse(content, symbolize_names: true)
+        rescue JSON::ParserError => e
+          raise ApiError, "Invalid JSON response: #{e.message}"
+        end
+      end
+
       def setup_authorization(conn)
         conn.headers['x-api-key'] = @config[:api_key]
-        conn.headers['anthropic-version'] =
+        conn.headers['anthropic-version'] = API_VERSION
+      end
+
+      def handle_request_error(e)
+        message = "Anthropic API request failed: #{e.message}"
+        status = e.respond_to?(:response) && e.response.respond_to?(:status) ? e.response.status : nil
+        raise LastLLM::ApiError.new(message, status)
       end
     end
   end
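Both public methods now funnel through the new private `make_request` helper: it builds the request body from the class constants, POSTs to `/v1/messages`, runs `parse_response`, and yields the parsed hash to a per-method block, while `handle_request_error` converts Faraday failures into `LastLLM::ApiError`. Note that `generate_text`'s default `max_tokens` drops from the hard-coded 8192 to `DEFAULT_MAX_TOKENS` (4096), while `generate_object` keeps 8192 via `DEFAULT_MAX_TOKENS_OBJECT`. A caller-side sketch of the resulting behaviour, assuming the provider can be constructed directly with a config hash (only `:api_key` and `:model` appear in this diff; everything else below is illustrative):

```ruby
require 'last_llm'

# Hypothetical direct construction; applications may instead obtain the
# provider through a LastLLM client/configuration object.
provider = LastLLM::Providers::Anthropic.new(api_key: ENV['ANTHROPIC_API_KEY'])

# generate_text: make_request yields the parsed response and the block
# pulls content[0][:text]; defaults now come from DEFAULT_MODEL,
# DEFAULT_TEMPERATURE (0.2), DEFAULT_TOP_P (0.8) and DEFAULT_MAX_TOKENS (4096).
text = provider.generate_text('Summarize the change in one sentence.',
                              system_prompt: 'Answer tersely.')

# generate_object: same request path, but max_tokens defaults to
# DEFAULT_MAX_TOKENS_OBJECT (8192) and the block parses the returned text as
# JSON via parse_json_response, raising ApiError on malformed output.
schema = { type: 'object', properties: { keywords: { type: 'array' } } } # illustrative schema shape
keywords = provider.generate_object('List three keywords about HTTP.', schema)

puts text
puts keywords.inspect
```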
data/lib/last_llm/providers/deepseek.rb
CHANGED
@@ -2,120 +2,131 @@

 require 'last_llm/providers/constants'

-
-
-
+module LastLLM
+  module Providers
+    # Deepseek provider implementation
+    class Deepseek < LastLLM::Provider
+      # API Configuration
+      BASE_ENDPOINT = 'https://api.deepseek.com'
+      DEFAULT_MODEL = 'deepseek-chat'
+
+      # LLM Default Parameters
+      DEFAULT_TEMPERATURE = 0.7
+      DEFAULT_TOP_P = 0.8
+      DEFAULT_TEMPERATURE_OBJECT = 0.2
+
+      # Response Configuration
+      SUCCESS_STATUS = 200
+
+      # Error Status Codes
+      UNAUTHORIZED_STATUS = 401
+      BAD_REQUEST_STATUS = 400
+
+      def initialize(config)
+        super(Constants::DEEPSEEK, config)
+        @conn = connection(config[:base_url] || BASE_ENDPOINT)
+      end

-
-
-
-
+      def generate_text(prompt, options = {})
+        make_request(prompt, options) do |result|
+          result.dig(:choices, 0, :message, :content).to_s
+        end
+      end

-
-
-
-        response = @conn.post('/v1/chat/completions') do |req|
-          req.body = {
-            model: options[:model] || @config[:model] || 'deepseek-chat',
-            messages: messages,
-            temperature: options[:temperature] || 0.7,
-            top_p: options[:top_p] || 0.8,
-            max_tokens: options[:max_tokens],
-            stream: false
-          }.compact
-        end
+      def generate_object(prompt, schema, options = {})
+        system_prompt = 'You are a helpful assistant that responds with valid JSON.'
+        formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)

-
-
+        options = options.dup
+        options[:system_prompt] = system_prompt
+        options[:temperature] ||= DEFAULT_TEMPERATURE_OBJECT

-
-
-
-
-
-      def generate_object(prompt, schema, options = {})
-        system_prompt = 'You are a helpful assistant that responds with valid JSON.'
-        formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)
-
-        messages = [
-          { role: 'system', content: system_prompt },
-          { role: 'user', content: formatted_prompt }
-        ]
-
-        response = @conn.post('/v1/chat/completions') do |req|
-          req.body = {
-            model: options[:model] || @config[:model] || 'deepseek-chat',
-            messages: messages,
-            temperature: options[:temperature] || 0.2,
-            top_p: options[:top_p] || 0.8,
-            stream: false
-          }.compact
-        end
+        make_request(formatted_prompt, options) do |result|
+          content = result.dig(:choices, 0, :message, :content)
+          parse_json_response(content)
+        end
+      end

-
-
-
-
-
-
-
-
-
-
-
-
+      # Format a tool for Deepseek function calling
+      # @param tool [LastLLM::Tool] The tool to format
+      # @return [Hash] The tool in Deepseek format
+      def self.format_tool(tool)
+        {
+          type: 'function',
+          function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: tool.parameters
+          }
+        }
       end
-        end
-      rescue Faraday::Error => e
-        handle_request_error(e)
-      end

-
-
-
-
-
-
-
-          { role: 'system', content: options[:system_prompt] },
-          { role: 'user', content: prompt.to_s }
-        ]
-      else
-        [{ role: 'user', content: prompt.to_s }]
-      end
-      end
+      # Execute a tool from a Deepseek response
+      # @param tool [LastLLM::Tool] The tool to execute
+      # @param response [Hash] The Deepseek response containing tool call information
+      # @return [Hash, nil] The result of the function call or nil if the tool wasn't called
+      def self.execute_tool(tool, response)
+        tool_call = response.dig(:choices, 0, :message, :tool_calls)&.first
+        return nil unless tool_call && tool_call[:function][:name] == tool.name

-
-
-
-      def self.format_tool(tool)
-        {
-          type: 'function',
-          function: {
-            name: tool.name,
-            description: tool.description,
-            parameters: tool.parameters
-          }
-        }
-      end
+        arguments = JSON.parse(tool_call[:function][:arguments], symbolize_names: true)
+        tool.call(arguments)
+      end

-
-
-
-
-
-
-
+      private
+
+      def make_request(prompt, options = {})
+        messages = format_messages(prompt, options)
+
+        response = @conn.post('/v1/chat/completions') do |req|
+          req.body = {
+            model: options[:model] || @config[:model] || DEFAULT_MODEL,
+            messages: messages,
+            temperature: options[:temperature] || DEFAULT_TEMPERATURE,
+            top_p: options[:top_p] || DEFAULT_TOP_P,
+            max_tokens: options[:max_tokens],
+            stream: false
+          }.compact
+        end
+
+        result = parse_response(response)
+        yield(result)
+      rescue Faraday::Error => e
+        handle_request_error(e)
+      end

-
-
-
-
+      def format_messages(prompt, options)
+        if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
+          prompt
+        elsif options[:system_prompt]
+          [
+            { role: 'system', content: options[:system_prompt] },
+            { role: 'user', content: prompt.to_s }
+          ]
+        else
+          [{ role: 'user', content: prompt.to_s }]
+        end
+      end

-
-
-
-
-
+      def parse_json_response(content)
+        begin
+          JSON.parse(content, symbolize_names: true)
+        rescue JSON::ParserError => e
+          # Try to clean markdown code blocks and parse again
+          content.gsub!("```json\n", '').gsub!("\n```", '')
+          begin
+            JSON.parse(content, symbolize_names: true)
+          rescue JSON::ParserError
+            raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
+          end
+        end
+      end
+
+      def handle_request_error(error)
+        message = "Deepseek API request failed: #{error.message}"
+        status = error.respond_to?(:response) && error.response.respond_to?(:status) ? error.response.status : nil
+        raise LastLLM::ApiError.new(message, status)
+      end
+    end
   end
 end
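The Deepseek provider gets the same `make_request`/block structure, a fence-stripping `parse_json_response`, and new class-level tool-calling helpers. A sketch of how `format_tool` and `execute_tool` fit together, assuming `require 'last_llm'` loads the providers and using only the interface this diff relies on (`#name`, `#description`, `#parameters`, `#call`); the tool object and response hash below are stand-ins, not part of the gem:

```ruby
require 'json'
require 'last_llm'

# Stand-in for a LastLLM::Tool: format_tool/execute_tool only need an object
# that responds to #name, #description, #parameters and #call.
FakeTool = Struct.new(:name, :description, :parameters) do
  def call(args)
    { city: args[:city], temperature_c: 21 } # pretend lookup
  end
end

weather = FakeTool.new(
  'get_weather',
  'Look up the current weather for a city',
  { type: 'object', properties: { city: { type: 'string' } } }
)

# format_tool wraps the tool in the {type: 'function', function: {...}} shape
# expected by the chat/completions request.
request_payload = LastLLM::Providers::Deepseek.format_tool(weather)

# Hand-built stand-in for a parsed API response that contains a tool call.
response = {
  choices: [{
    message: {
      tool_calls: [{ function: { name: 'get_weather',
                                 arguments: '{"city":"Lisbon"}' } }]
    }
  }]
}

# execute_tool parses the JSON arguments and invokes the tool, or returns nil
# if the response did not call this tool.
result = LastLLM::Providers::Deepseek.execute_tool(weather, response)

puts request_payload.inspect
puts result.inspect # => {:city=>"Lisbon", :temperature_c=>21}
```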