last_llm 0.0.4 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/lib/generators/last_llm/install/install_generator.rb +25 -0
- data/lib/generators/last_llm/install/templates/README.md +20 -0
- data/lib/generators/last_llm/install/templates/initializer.rb +28 -0
- data/lib/generators/last_llm/install/templates/last_llm.yml +29 -0
- data/lib/last_llm/providers/anthropic.rb +62 -48
- data/lib/last_llm/providers/deepseek.rb +115 -104
- data/lib/last_llm/providers/google_gemini.rb +169 -150
- data/lib/last_llm/providers/ollama.rb +119 -106
- data/lib/last_llm/providers/openai.rb +184 -176
- data/lib/last_llm/providers/test_provider.rb +51 -28
- data/lib/last_llm/version.rb +1 -1
- metadata +6 -2
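The four new files under data/lib/generators add a Rails install generator; their bodies are not part of this extract. For orientation only, here is a hypothetical sketch of what such a generator conventionally looks like — the class name, namespace, and destination paths below are guesses from Rails generator conventions, not from this diff:

```ruby
# Hypothetical reconstruction -- the actual install_generator.rb body is not shown in this diff.
require 'rails/generators/base'

module LastLlm
  module Generators
    class InstallGenerator < Rails::Generators::Base
      source_root File.expand_path('templates', __dir__)

      # Copy the bundled templates into the host application (target paths assumed).
      def copy_initializer
        copy_file 'initializer.rb', 'config/initializers/last_llm.rb'
      end

      def copy_config
        copy_file 'last_llm.yml', 'config/last_llm.yml'
      end
    end
  end
end
```

A generator laid out this way would be invoked with `rails generate last_llm:install`.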
data/lib/last_llm/providers/openai.rb CHANGED
@@ -2,183 +2,191 @@
 
 require 'last_llm/providers/constants'
 
-# OpenAI provider implementation
-class OpenAI < LastLLM::Provider
-  BASE_ENDPOINT = 'https://api.openai.com'
-
-  def initialize(config)
-    super(Constants::OPENAI, config)
-    @conn = connection(config[:base_url] || BASE_ENDPOINT)
-  end
-
-  def generate_text(prompt, options = {})
-    request_body = build_completion_request(prompt, options)
-    response = make_completion_request(request_body)
-    extract_content(response)
-  rescue Faraday::Error => e
-    handle_request_error(e)
-  end
-
-  def generate_object(prompt, schema, options = {})
-    request_body = build_json_request(prompt, schema, options)
-    response = make_completion_request(request_body)
-    parse_json_response(response)
-  rescue Faraday::Error => e
-    handle_request_error(e)
-  end
-
-  # Generate embeddings from text
-  # @param text [String] The text to generate embeddings for
-  # @param options [Hash] Options for the embedding generation
-  # @return [Array<Float>] The embedding vector as an array of floats
-  def embeddings(text, options = {})
-    # Ensure text is a string
-    text_str = text.to_s
-
-    response = @conn.post('/v1/embeddings') do |req|
-      req.body = {
-        model: options[:model] || 'text-embedding-ada-002',
-        input: text_str,
-        encoding_format: options[:encoding_format] || 'float'
-      }.compact
-    end
-
-    result = parse_response(response)
-
-    # Extract embeddings from response
-    embeddings = result.dig(:data, 0, :embedding)
-
-    raise LastLLM::ApiError.new('Invalid embeddings response format', nil) unless embeddings.is_a?(Array)
-
-    embeddings
-  rescue Faraday::Error => e
-    handle_request_error(e)
-  end
-
-  private
-
-  def build_completion_request(prompt, options)
-    {
-      model: options[:model] || @config[:model] || 'gpt-4o-mini',
-      messages: format_messages(prompt, options),
-      temperature: options[:temperature] || 0.7,
-      top_p: options[:top_p] || 0.7,
-      max_tokens: options[:max_tokens] || 4096,
-      stream: false
-    }.compact
-  end
-
-  def build_json_request(prompt, schema, options)
-    {
-      model: options[:model] || @config[:model] || 'gpt-4o-mini',
-      messages: format_json_messages(prompt, schema),
-      temperature: options[:temperature] || 0.2,
-      top_p: options[:top_p] || 0.7,
-      max_tokens: options[:max_tokens] || 8_192,
-      response_format: { type: 'json_object' },
-      stream: false
-    }.compact
-  end
-
-  def make_completion_request(body)
-    @conn.post('/v1/chat/completions') do |req|
-      req.body = body
-    end
-  end
-
-  def format_json_messages(prompt, schema)
-    system_prompt = 'You are a helpful assistant that responds with valid JSON.'
-    formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)
-
-    [
-      { role: 'system', content: system_prompt },
-      { role: 'user', content: formatted_prompt }
-    ]
-  end
-
-  def format_messages(prompt, options)
-    if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
-      prompt
-    elsif options[:system_prompt]
-      [
-        { role: 'system', content: options[:system_prompt] },
-        { role: 'user', content: prompt.to_s }
-      ]
-    else
-      [{ role: 'user', content: prompt.to_s }]
-    end
-  end
-
-  def extract_content(response)
-    result = parse_response(response)
-    result.dig(:choices, 0, :message, :content).to_s
-  end
-
-  def parse_json_response(response)
-    content = extract_content(response)
-    parsed_json = JSON.parse(content, symbolize_names: true)
-
-    if parsed_json.key?(:$schema) && parsed_json.key?(:properties)
-      parsed_json[:properties]
-    else
-      parsed_json
-    end
-  rescue JSON::ParserError => e
-    raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
-  end
-
-  def parse_response(response)
-    parsed = if response.body.is_a?(Hash)
-               response.body
-             else
-               JSON.parse(response.body)
-             end
-
-    validate_response(parsed)
-    deep_symbolize_keys(parsed)
-  rescue JSON::ParserError => e
-    raise LastLLM::ApiError.new("Failed to parse OpenAI response: #{e.message}", nil)
-  end
-
-  def validate_response(parsed)
-    if parsed.nil? || (!parsed.is_a?(Hash) && !parsed.respond_to?(:to_h))
-      raise LastLLM::ApiError.new('Invalid response format from OpenAI', nil)
-    end
-
-    raise LastLLM::ApiError.new(parsed[:error][:message], parsed[:error][:code]) if parsed[:error]
-  end
-
-  def handle_request_error(error)
-    message = "OpenAI API request failed: #{error.message}"
-    status = error.respond_to?(:response) && error.response.respond_to?(:status) ? error.response.status : nil
-    raise LastLLM::ApiError.new(message, status)
-  end
-
-  # Format a tool for OpenAI function calling
-  def self.format_tool(tool)
-    {
-      type: 'function',
-      function: {
-        name: tool.name,
-        description: tool.description,
-        parameters: tool.parameters
-      }
-    }
-  end
-
-  # Execute a tool from an OpenAI response
-  def self.execute_tool(tool, response)
-    tool_call = response[:tool_calls]&.first
-    return nil unless tool_call && tool_call[:function][:name] == tool.name
-
-    arguments = JSON.parse(tool_call[:function][:arguments], symbolize_names: true)
-    tool.call(arguments)
-  end
-end
-
-# Also define it in the LastLLM::Providers namespace for consistency
 module LastLLM
   module Providers
-    OpenAI
+    # OpenAI provider implementation
+    class OpenAI < LastLLM::Provider
+      # API Configuration
+      BASE_ENDPOINT = 'https://api.openai.com'
+      DEFAULT_MODEL = 'gpt-4o-mini'
+      EMBEDDINGS_MODEL = 'text-embedding-ada-002'
+
+      # LLM Default Parameters
+      DEFAULT_TEMPERATURE = 0.7
+      DEFAULT_TOP_P = 0.7
+      DEFAULT_MAX_TOKENS = 4096
+      DEFAULT_TEMPERATURE_OBJECT = 0.2
+
+      # Response Configuration
+      SUCCESS_STATUS = 200
+
+      # Error Status Codes
+      UNAUTHORIZED_STATUS = 401
+      BAD_REQUEST_STATUS = 400
+
+      def initialize(config)
+        super(Constants::OPENAI, config)
+        @conn = connection(config[:base_url] || BASE_ENDPOINT)
+      end
+
+      def generate_text(prompt, options = {})
+        make_text_request(prompt, options) do |result|
+          result.dig(:choices, 0, :message, :content).to_s
+        end
+      end
+
+      def generate_object(prompt, schema, options = {})
+        make_object_request(prompt, schema, options) do |content|
+          parsed_json = JSON.parse(content, symbolize_names: true)
+
+          if parsed_json.key?(:$schema) && parsed_json.key?(:properties)
+            parsed_json[:properties]
+          else
+            parsed_json
+          end
+        end
+      end
+
+      # Generate embeddings from text
+      # @param text [String] The text to generate embeddings for
+      # @param options [Hash] Options for the embedding generation
+      # @return [Array<Float>] The embedding vector as an array of floats
+      def embeddings(text, options = {})
+        # Ensure text is a string
+        text_str = text.to_s
+
+        response = @conn.post('/v1/embeddings') do |req|
+          req.body = {
+            model: options[:model] || EMBEDDINGS_MODEL,
+            input: text_str,
+            encoding_format: options[:encoding_format] || 'float'
+          }.compact
+        end
+
+        result = parse_response(response)
+
+        # Extract embeddings from response
+        embeddings = result.dig(:data, 0, :embedding)
+
+        raise LastLLM::ApiError.new('Invalid embeddings response format', nil) unless embeddings.is_a?(Array)
+
+        embeddings
+      rescue Faraday::Error => e
+        handle_request_error(e)
+      end
+
+      # Format a tool for OpenAI function calling
+      # @param tool [LastLLM::Tool] The tool to format
+      # @return [Hash] The tool in OpenAI format
+      def self.format_tool(tool)
+        {
+          type: 'function',
+          function: {
+            name: tool.name,
+            description: tool.description,
+            parameters: tool.parameters
+          }
+        }
+      end
+
+      # Execute a tool from an OpenAI response
+      # @param tool [LastLLM::Tool] The tool to execute
+      # @param response [Hash] The OpenAI response containing tool call information
+      # @return [Hash, nil] The result of the function call or nil if the tool wasn't called
+      def self.execute_tool(tool, response)
+        tool_call = response[:tool_calls]&.first
+        return nil unless tool_call && tool_call[:function][:name] == tool.name
+
+        arguments = JSON.parse(tool_call[:function][:arguments], symbolize_names: true)
+        tool.call(arguments)
+      end
+
+      private
+
+      def make_text_request(prompt, options = {})
+        request_body = build_completion_request(prompt, options)
+        response = make_completion_request(request_body)
+        result = parse_response(response)
+        yield(result)
+      rescue Faraday::Error => e
+        handle_request_error(e)
+      end
+
+      def make_object_request(prompt, schema, options = {})
+        request_body = build_json_request(prompt, schema, options)
+        response = make_completion_request(request_body)
+        result = parse_response(response)
+        content = result.dig(:choices, 0, :message, :content).to_s
+        yield(content)
+      rescue Faraday::Error => e
+        handle_request_error(e)
+      end
+
+      def build_completion_request(prompt, options)
+        {
+          model: options[:model] || @config[:model] || DEFAULT_MODEL,
+          messages: format_messages(prompt, options),
+          temperature: options[:temperature] || DEFAULT_TEMPERATURE,
+          top_p: options[:top_p] || DEFAULT_TOP_P,
+          max_tokens: options[:max_tokens] || DEFAULT_MAX_TOKENS,
+          stream: false
+        }.compact
+      end
+
+      def build_json_request(prompt, schema, options)
+        {
+          model: options[:model] || @config[:model] || DEFAULT_MODEL,
+          messages: format_json_messages(prompt, schema),
+          temperature: options[:temperature] || DEFAULT_TEMPERATURE_OBJECT,
+          top_p: options[:top_p] || DEFAULT_TOP_P,
+          max_tokens: options[:max_tokens] || DEFAULT_MAX_TOKENS,
+          response_format: { type: 'json_object' },
+          stream: false
+        }.compact
+      end
+
+      def make_completion_request(body)
+        @conn.post('/v1/chat/completions') do |req|
+          req.body = body
+        end
+      end
+
+      def format_json_messages(prompt, schema)
+        system_prompt = 'You are a helpful assistant that responds with valid JSON.'
+        formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)
+
+        [
+          { role: 'system', content: system_prompt },
+          { role: 'user', content: formatted_prompt }
+        ]
+      end
+
+      def format_messages(prompt, options)
+        if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
+          prompt
+        elsif options[:system_prompt]
+          [
+            { role: 'system', content: options[:system_prompt] },
+            { role: 'user', content: prompt.to_s }
+          ]
+        else
+          [{ role: 'user', content: prompt.to_s }]
+        end
+      end
+
+      def validate_response(parsed)
+        if parsed.nil? || (!parsed.is_a?(Hash) && !parsed.respond_to?(:to_h))
+          raise LastLLM::ApiError.new('Invalid response format from OpenAI', nil)
+        end
+
+        raise LastLLM::ApiError.new(parsed[:error][:message], parsed[:error][:code]) if parsed[:error]
+      end
+
+      def handle_request_error(error)
+        message = "OpenAI API request failed: #{error.message}"
+        status = error.respond_to?(:response) && error.response.respond_to?(:status) ? error.response.status : nil
+        raise LastLLM::ApiError.new(message, status)
+      end
+    end
   end
 end
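The 0.0.6 revision moves the class inside the LastLLM::Providers namespace (it was previously defined at the top level and merely referenced from the namespace), lifts magic values into named constants, and funnels generate_text and generate_object through shared private request helpers. One behavioral change hides in the constant extraction: build_json_request previously defaulted max_tokens to 8_192 but now shares DEFAULT_MAX_TOKENS (4096). A minimal usage sketch follows; the :api_key key is an assumption based on the parent class validating one, and only methods visible in the diff are called:

```ruby
# Usage sketch only -- config keys other than :model and :base_url are assumptions.
provider = LastLLM::Providers::OpenAI.new(api_key: ENV['OPENAI_API_KEY'])

# Text generation; omitted options fall back to DEFAULT_TEMPERATURE and friends.
text = provider.generate_text('Summarize Ruby in one sentence', temperature: 0.5)

# Structured output; JSON mode is requested and a top-level {$schema, properties}
# envelope is unwrapped before the parsed object is returned.
schema = { type: 'object', properties: { summary: { type: 'string' } } }
object = provider.generate_object('Summarize Ruby', schema)

# Embeddings; returns the embedding vector as an Array<Float>.
vector = provider.embeddings('Ruby is a dynamic, open source language')
```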
data/lib/last_llm/providers/test_provider.rb CHANGED
@@ -2,37 +2,60 @@
 
 require 'last_llm/providers/constants'
 
-
-
-
-
-
-
-
-        @config = config.is_a?(Hash) ? config : {}
-        @name = Constants::TEST
-        @text_response = 'Test response'
-        @object_response = {}
-      end
+module LastLLM
+  module Providers
+    # A provider implementation for testing purposes
+    class TestProvider < LastLLM::Provider
+      # API Configuration (not used for testing but included for consistency)
+      BASE_ENDPOINT = 'http://test.example.com'
+      DEFAULT_MODEL = 'test-model'
 
-
-
-
-      end
+      # Default response values
+      DEFAULT_TEXT_RESPONSE = 'Test response'
+      DEFAULT_OBJECT_RESPONSE = {}
 
-
-        @text_response
-      end
+      attr_accessor :text_response, :object_response
 
-
-
-
-
+      def initialize(config = {})
+        # Skip parent's initialize which checks for API key
+        # Instead implement our own initialization
+        @config = config.is_a?(Hash) ? config : {}
+        @name = Constants::TEST
+        @text_response = DEFAULT_TEXT_RESPONSE
+        @object_response = DEFAULT_OBJECT_RESPONSE
+      end
 
-      #
-
-
-
-
+      # Override validate_config! to not require API key
+      def validate_config!
+        # No validation needed for test provider
+      end
+
+      def generate_text(_prompt, _options = {})
+        @text_response
+      end
+
+      def generate_object(_prompt, _schema, _options = {})
+        @object_response
+      end
+
+      # Format a tool for the test provider
+      # @param tool [LastLLM::Tool] The tool to format
+      # @return [Hash] The tool in test format
+      def self.format_tool(tool)
+        {
+          name: tool.name,
+          description: tool.description,
+          parameters: tool.parameters
+        }
+      end
+
+      # Execute a test tool
+      # @param tool [LastLLM::Tool] The tool to execute
+      # @param _response [Hash] Not used in test provider
+      # @return [Hash, nil] Always returns nil in test provider
+      def self.execute_tool(tool, _response)
+        nil # Test provider doesn't execute tools by default
+      end
+    end
   end
 end
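The rewritten test provider replaces fixed instance variables with attr_accessor, so specs can swap canned responses per example, and overrides validate_config! so no API key is required. A sketch of the intended use (assumed; no such example appears in the diff):

```ruby
# Sketch: stubbing canned responses through the new accessors.
provider = LastLLM::Providers::TestProvider.new

provider.text_response = 'canned answer'
provider.generate_text('any prompt')       # => "canned answer"

provider.object_response = { status: 'ok' }
provider.generate_object('any prompt', {}) # => { status: "ok" }
```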
data/lib/last_llm/version.rb CHANGED
metadata CHANGED
@@ -1,13 +1,13 @@
 --- !ruby/object:Gem::Specification
 name: last_llm
 version: !ruby/object:Gem::Version
-  version: 0.0.4
+  version: 0.0.6
 platform: ruby
 authors:
 - Sam Obukwelu
 bindir: bin
 cert_chain: []
-date: 2025-03-
+date: 2025-03-07 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: dry-schema
@@ -101,6 +101,10 @@ extensions: []
 extra_rdoc_files: []
 files:
 - README.md
+- lib/generators/last_llm/install/install_generator.rb
+- lib/generators/last_llm/install/templates/README.md
+- lib/generators/last_llm/install/templates/initializer.rb
+- lib/generators/last_llm/install/templates/last_llm.yml
 - lib/last_llm.rb
 - lib/last_llm/client.rb
 - lib/last_llm/completion.rb