last_llm 0.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,124 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'last_llm/providers/constants'
4
+
5
# Ollama provider implementation
class Ollama < LastLLM::Provider
  # NOTE(review): 172.17.0.1 is the default Docker bridge gateway; hosts not
  # running the client inside Docker typically need
  # config[:base_url] = 'http://localhost:11434'.
  BASE_ENDPOINT = 'http://172.17.0.1:11434'
  # Fallback model when neither options nor config specify one.
  DEFAULT_MODEL = 'llama3.2:latest'

  def initialize(config)
    super(Constants::OLLAMA, config)
    @conn = connection(config[:base_url] || BASE_ENDPOINT)
  end

  # Generate plain text from a prompt.
  # @param prompt [String, Array<Hash>] A prompt string or pre-formatted messages
  # @param options [Hash] :model, :temperature, :top_p, :max_tokens, :system_prompt
  # @return [String] The generated text (empty string when no content is returned)
  def generate_text(prompt, options = {})
    response = post_chat_completion(
      model: options[:model] || @config[:model] || DEFAULT_MODEL,
      messages: format_messages(prompt, options),
      temperature: options[:temperature] || 0.7,
      top_p: options[:top_p] || 0.7,
      max_tokens: options[:max_tokens] || 24_576,
      stream: false
    )

    result = parse_response(response)
    result.dig(:choices, 0, :message, :content).to_s
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  # Generate a JSON object that should conform to the given schema.
  # @param prompt [String] The prompt describing the desired object
  # @param schema [Object] Schema passed to StructuredOutput.format_prompt
  # @param options [Hash] :model, :temperature
  # @return [Hash] The parsed JSON object (symbolized keys)
  # @raise [LastLLM::ApiError] When the model returns invalid JSON
  def generate_object(prompt, schema, options = {})
    system_prompt = 'You are a helpful assistant that responds with valid JSON.'
    formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)

    messages = [
      { role: 'system', content: system_prompt },
      { role: 'user', content: formatted_prompt }
    ]

    response = post_chat_completion(
      model: options[:model] || @config[:model] || DEFAULT_MODEL,
      messages: messages,
      temperature: options[:temperature] || 0.2,
      stream: false
    )

    result = parse_response(response)
    content = result.dig(:choices, 0, :message, :content)

    begin
      JSON.parse(content, symbolize_names: true)
    rescue JSON::ParserError => e
      raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
    end
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  private

  # POST a chat-completion payload to Ollama's OpenAI-compatible endpoint.
  # nil values are stripped so optional parameters are simply omitted.
  def post_chat_completion(body)
    @conn.post('/v1/chat/completions') do |req|
      req.body = body.compact
    end
  end

  # Normalize the prompt into an OpenAI-style messages array.
  # Accepts a ready-made messages array, a string plus optional system prompt,
  # or any object coercible to String.
  def format_messages(prompt, options)
    if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
      prompt
    elsif options[:system_prompt]
      [
        { role: 'system', content: options[:system_prompt] },
        { role: 'user', content: prompt.to_s }
      ]
    else
      [{ role: 'user', content: prompt.to_s }]
    end
  end

  # Format a tool for Ollama function calling
  # @param tool [LastLLM::Tool] The tool to format
  # @return [Hash] The tool in Ollama format
  def self.format_tool(tool)
    {
      name: tool.name,
      description: tool.description,
      parameters: tool.parameters
    }
  end

  # Execute a tool from an Ollama response
  # @param tool [LastLLM::Tool] The tool to execute
  # @param response [Hash] The Ollama response containing tool call information
  # @return [Hash, nil] The result of the function call or nil if the tool wasn't called
  def self.execute_tool(tool, response)
    # Ollama doesn't have native function calling, so we need to parse from the content
    # This is a simplified implementation that would need to be enhanced for production
    content = response.dig(:message, :content)
    return nil unless content&.include?(tool.name)

    # Escape the tool name so regex metacharacters in it (e.g. '.', '+')
    # cannot corrupt the pattern or match unintended text.
    if content =~ /#{Regexp.escape(tool.name)}\s*\(([^)]+)\)/i
      args_str = ::Regexp.last_match(1)
      begin
        args = JSON.parse("{#{args_str}}", symbolize_names: true)
        return tool.call(args)
      rescue JSON::ParserError
        return nil
      end
    end

    nil
  end
end
117
+
118
# Also define it in the LastLLM::Providers namespace for consistency
module LastLLM
  module Providers
    # Reference to the top-level Ollama class defined above, so callers can
    # resolve the provider as LastLLM::Providers::Ollama.
    Ollama = ::Ollama
  end
end
@@ -0,0 +1,184 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'last_llm/providers/constants'
4
+
5
# OpenAI provider implementation
class OpenAI < LastLLM::Provider
  BASE_ENDPOINT = 'https://api.openai.com'

  def initialize(config)
    super(Constants::OPENAI, config)
    @conn = connection(config[:base_url] || BASE_ENDPOINT)
  end

  # Generate plain text from a prompt.
  # @param prompt [String, Array<Hash>] A prompt string or pre-formatted messages
  # @param options [Hash] :model, :temperature, :top_p, :max_tokens, :system_prompt
  # @return [String] The generated text (empty string when no content present)
  def generate_text(prompt, options = {})
    request_body = build_completion_request(prompt, options)
    response = make_completion_request(request_body)
    extract_content(response)
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  # Generate a structured JSON object conforming to the given schema.
  # @return [Hash] The parsed object (symbolized keys)
  # @raise [LastLLM::ApiError] When the model returns invalid JSON
  def generate_object(prompt, schema, options = {})
    request_body = build_json_request(prompt, schema, options)
    response = make_completion_request(request_body)
    parse_json_response(response)
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  # Generate embeddings from text
  # @param text [String] The text to generate embeddings for
  # @param options [Hash] Options for the embedding generation
  # @return [Array<Float>] The embedding vector as an array of floats
  def embeddings(text, options = {})
    # Ensure text is a string
    text_str = text.to_s

    response = @conn.post('/v1/embeddings') do |req|
      req.body = {
        model: options[:model] || 'text-embedding-ada-002',
        input: text_str,
        encoding_format: options[:encoding_format] || 'float'
      }.compact
    end

    result = parse_response(response)

    # Extract embeddings from response
    embeddings = result.dig(:data, 0, :embedding)

    raise LastLLM::ApiError.new('Invalid embeddings response format', nil) unless embeddings.is_a?(Array)

    embeddings
  rescue Faraday::Error => e
    handle_request_error(e)
  end

  private

  # Build the request body for free-form text completion.
  def build_completion_request(prompt, options)
    {
      model: options[:model] || @config[:model] || 'gpt-4o-mini',
      messages: format_messages(prompt, options),
      temperature: options[:temperature] || 0.7,
      top_p: options[:top_p] || 0.7,
      max_tokens: options[:max_tokens] || 4096,
      stream: false
    }.compact
  end

  # Build the request body for JSON-mode completion (response_format json_object).
  def build_json_request(prompt, schema, options)
    {
      model: options[:model] || @config[:model] || 'gpt-4o-mini',
      messages: format_json_messages(prompt, schema),
      temperature: options[:temperature] || 0.2,
      top_p: options[:top_p] || 0.7,
      max_tokens: options[:max_tokens] || 8_192,
      response_format: { type: 'json_object' },
      stream: false
    }.compact
  end

  def make_completion_request(body)
    @conn.post('/v1/chat/completions') do |req|
      req.body = body
    end
  end

  # System + user messages instructing the model to emit schema-conforming JSON.
  def format_json_messages(prompt, schema)
    system_prompt = 'You are a helpful assistant that responds with valid JSON.'
    formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)

    [
      { role: 'system', content: system_prompt },
      { role: 'user', content: formatted_prompt }
    ]
  end

  # Normalize the prompt into an OpenAI messages array.
  def format_messages(prompt, options)
    if prompt.is_a?(Array) && prompt.all? { |m| m.is_a?(Hash) && m[:role] && m[:content] }
      prompt
    elsif options[:system_prompt]
      [
        { role: 'system', content: options[:system_prompt] },
        { role: 'user', content: prompt.to_s }
      ]
    else
      [{ role: 'user', content: prompt.to_s }]
    end
  end

  def extract_content(response)
    result = parse_response(response)
    result.dig(:choices, 0, :message, :content).to_s
  end

  # Parse JSON-mode content; if the model echoed a full JSON Schema document,
  # unwrap its :properties member.
  def parse_json_response(response)
    content = extract_content(response)
    parsed_json = JSON.parse(content, symbolize_names: true)

    if parsed_json.key?(:$schema) && parsed_json.key?(:properties)
      parsed_json[:properties]
    else
      parsed_json
    end
  rescue JSON::ParserError => e
    raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
  end

  # Decode the HTTP response body into a symbolized Hash.
  # Symbolization happens BEFORE error validation: when Faraday middleware has
  # not already parsed the body, JSON.parse yields string keys and a
  # symbol-keyed :error lookup would silently miss API errors.
  def parse_response(response)
    parsed = if response.body.is_a?(Hash)
               response.body
             else
               JSON.parse(response.body)
             end

    if parsed.nil? || (!parsed.is_a?(Hash) && !parsed.respond_to?(:to_h))
      raise LastLLM::ApiError.new('Invalid response format from OpenAI', nil)
    end

    symbolized = deep_symbolize_keys(parsed)
    validate_response(symbolized)
    symbolized
  rescue JSON::ParserError => e
    raise LastLLM::ApiError.new("Failed to parse OpenAI response: #{e.message}", nil)
  end

  # Raise when the (symbolized) payload carries an API error object.
  def validate_response(parsed)
    raise LastLLM::ApiError.new(parsed[:error][:message], parsed[:error][:code]) if parsed[:error]
  end

  # Wrap transport errors in LastLLM::ApiError, preserving the HTTP status
  # when available. Faraday exposes the failed response as a Hash
  # (error.response[:status]); older/alternate adapters may expose an object
  # responding to #status, so both forms are handled.
  def handle_request_error(error)
    message = "OpenAI API request failed: #{error.message}"
    failed = error.respond_to?(:response) ? error.response : nil
    status = if failed.is_a?(Hash)
               failed[:status]
             elsif failed.respond_to?(:status)
               failed.status
             end
    raise LastLLM::ApiError.new(message, status)
  end

  # Format a tool for OpenAI function calling
  def self.format_tool(tool)
    {
      type: 'function',
      function: {
        name: tool.name,
        description: tool.description,
        parameters: tool.parameters
      }
    }
  end

  # Execute a tool from an OpenAI response
  def self.execute_tool(tool, response)
    tool_call = response[:tool_calls]&.first
    return nil unless tool_call && tool_call[:function][:name] == tool.name

    arguments = JSON.parse(tool_call[:function][:arguments], symbolize_names: true)
    tool.call(arguments)
  end
end
178
+
179
# Also define it in the LastLLM::Providers namespace for consistency
module LastLLM
  module Providers
    # Reference to the top-level OpenAI class defined above, so callers can
    # resolve the provider as LastLLM::Providers::OpenAI.
    OpenAI = ::OpenAI
  end
end
@@ -0,0 +1,38 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'last_llm/providers/constants'
4
+
5
# A provider implementation for testing purposes: returns canned responses
# without touching the network or requiring credentials.
class TestProvider < LastLLM::Provider
  attr_accessor :text_response, :object_response

  # Deliberately does NOT call super: the parent initializer enforces an
  # API key, which a test double should not require.
  def initialize(config = {})
    @config = config.is_a?(Hash) ? config : {}
    @name = Constants::TEST
    @text_response = 'Test response'
    @object_response = {}
  end

  # No-op override: the test provider never needs configuration validation.
  def validate_config!; end

  # Always returns the canned text response, ignoring all arguments.
  def generate_text(_prompt, _options = {})
    text_response
  end

  # Always returns the canned object response, ignoring all arguments.
  def generate_object(_prompt, _schema, _options = {})
    object_response
  end
end
31
+
32
# Also define it in the LastLLM::Providers namespace for consistency
module LastLLM
  module Providers
    # Reference to the top-level TestProvider class defined above, so callers
    # can resolve it as LastLLM::Providers::TestProvider.
    TestProvider = ::TestProvider
  end
end
@@ -0,0 +1,38 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'rails'
4
+
5
# This file defines the Railtie constant directly to be compatible with Zeitwerk
# Rails integration for LastLLM
class Railtie < Rails::Railtie
  # Register the gem's Rails generators (e.g. `rails g last_llm:install`).
  generators do
    require 'generators/last_llm/install/install_generator'
  end

  initializer 'last_llm.configure_rails_initialization' do
    # Load configuration from config/last_llm.yml if it exists
    config_file = Rails.root.join('config', 'last_llm.yml')
    if File.exist?(config_file)
      # NOTE(review): YAML.safe_load_file requires Psych 4 (Ruby 3.1+) —
      # confirm against the gem's minimum supported Ruby version.
      config = YAML.safe_load_file(config_file, symbolize_names: true)

      # Configure LastLLM with the loaded configuration
      LastLLM.configure do |c|
        # Set global configuration
        c.default_provider = config[:default_provider].to_sym if config[:default_provider]

        c.default_model = config[:default_model] if config[:default_model]

        # Configure global settings (arbitrary key/value pairs under :globals)
        config[:globals]&.each do |key, value|
          c.set_global(key.to_sym, value)
        end
      end
    end
  end
end
33
+
34
# Also define it in the LastLLM namespace for backward compatibility
module LastLLM
  # Reference to the top-level Railtie class defined above.
  Railtie = ::Railtie
end
@@ -0,0 +1,241 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'dry-schema'
4
+ require 'json'
5
+
6
module LastLLM
  # Schema utilities for structured data generation.
  # Translates (a subset of) JSON Schema into dry-schema definitions and back.
  class Schema
    # Create a dry-schema from a hash definition
    # @param schema_def [Hash] The schema definition in JSON Schema format;
    #   keys may be symbols or strings — both spellings are checked throughout.
    # @return [Dry::Schema::JSON] The created schema
    #
    # NOTE(review): the block below is instance_eval'ed by Dry::Schema.JSON, so
    # `required`/`optional` are DSL macros there, while `schema_def` is reached
    # via closure. Only one level of object nesting is supported.
    def self.create(schema_def)
      # Convert JSON Schema to dry-schema
      Dry::Schema.JSON do
        # Process properties
        if schema_def[:properties] || schema_def['properties']
          properties = schema_def[:properties] || schema_def['properties']
          required = schema_def[:required] || schema_def['required'] || []

          properties.each do |property_name, property_def|
            property_name = property_name.to_sym
            property_type = property_def[:type] || property_def['type']

            # Handle required properties
            # (JSON Schema's `required` lists names as strings, hence to_s)
            if required.include?(property_name.to_s)
              # Handle different property types
              case property_type
              when 'string'
                required(property_name).filled(:string)
              when 'integer'
                required(property_name).filled(:integer)
              when 'number'
                required(property_name).filled(:float)
              when 'boolean'
                required(property_name).filled(:bool)
              when 'array'
                items_type = property_def.dig(:items, :type) || property_def.dig('items', 'type')
                case items_type
                when 'string'
                  required(property_name).array(:string)
                when 'integer'
                  required(property_name).array(:integer)
                when 'number'
                  required(property_name).array(:float)
                when 'boolean'
                  required(property_name).array(:bool)
                when 'object'
                  # For complex nested objects, we'd need a more sophisticated approach
                  # This is a simplified version
                  required(property_name).array(:hash)
                else
                  required(property_name).array
                end
              when 'object'
                # For nested objects, we can't recursively create a schema here
                # Instead, we'll just create a hash schema
                required(property_name).hash do
                  # Add nested properties if available
                  if property_def[:properties] || property_def['properties']
                    nested_props = property_def[:properties] || property_def['properties']
                    nested_required = property_def[:required] || property_def['required'] || []

                    nested_props.each do |nested_name, nested_def|
                      nested_name = nested_name.to_sym
                      nested_type = nested_def[:type] || nested_def['type']

                      if nested_required.include?(nested_name.to_s)
                        case nested_type
                        when 'string'
                          required(nested_name).filled(:string)
                        when 'integer'
                          required(nested_name).filled(:integer)
                        when 'number'
                          required(nested_name).filled(:float)
                        when 'boolean'
                          required(nested_name).filled(:bool)
                        else
                          # Unknown nested type: require presence without a type check
                          required(nested_name).filled
                        end
                      else
                        optional(nested_name)
                      end
                    end
                  end
                end
              else
                # Unknown type: require presence without a type check
                required(property_name).filled
              end
            else
              # Handle optional properties (maybe(...) also admits nil values)
              case property_type
              when 'string'
                optional(property_name).maybe(:string)
              when 'integer'
                optional(property_name).maybe(:integer)
              when 'number'
                optional(property_name).maybe(:float)
              when 'boolean'
                optional(property_name).maybe(:bool)
              when 'array'
                items_type = property_def.dig(:items, :type) || property_def.dig('items', 'type')
                # NOTE(review): maybe(:array, :string) passes the member type as a
                # second macro argument — confirm this is honored by the
                # dry-schema version in use.
                case items_type
                when 'string'
                  optional(property_name).maybe(:array, :string)
                when 'integer'
                  optional(property_name).maybe(:array, :integer)
                when 'number'
                  optional(property_name).maybe(:array, :float)
                when 'boolean'
                  optional(property_name).maybe(:array, :bool)
                when 'object'
                  optional(property_name).maybe(:array, :hash)
                else
                  optional(property_name).maybe(:array)
                end
              when 'object'
                # For nested objects, we can't recursively create a schema here
                # Instead, we'll just create a hash schema
                optional(property_name).maybe(:hash) do
                  # Add nested properties if available
                  if property_def[:properties] || property_def['properties']
                    nested_props = property_def[:properties] || property_def['properties']
                    nested_required = property_def[:required] || property_def['required'] || []

                    nested_props.each do |nested_name, nested_def|
                      nested_name = nested_name.to_sym
                      nested_type = nested_def[:type] || nested_def['type']

                      if nested_required.include?(nested_name.to_s)
                        case nested_type
                        when 'string'
                          required(nested_name).filled(:string)
                        when 'integer'
                          required(nested_name).filled(:integer)
                        when 'number'
                          required(nested_name).filled(:float)
                        when 'boolean'
                          required(nested_name).filled(:bool)
                        else
                          required(nested_name).filled
                        end
                      else
                        optional(nested_name)
                      end
                    end
                  end
                end
              else
                # Unknown optional type: accept anything (or absence)
                optional(property_name)
              end
            end
          end
        end
      end
    end

    # Convert a JSON schema string to a dry-schema
    # @param json_schema [String] The JSON schema as a string
    # @return [Dry::Schema::JSON] The created schema
    def self.from_json_schema(json_schema)
      schema_def = JSON.parse(json_schema, symbolize_names: true)
      create(schema_def)
    end

    # Convert a dry-schema to a JSON schema string
    # @param schema [Dry::Schema::JSON, Hash] The dry-schema or JSON schema hash to convert
    # @return [String] The JSON schema as a string
    #
    # NOTE(review): the emitted hash mixes symbol keys at the top level
    # (:type, :properties, :required) with string keys inside each property
    # definition ('type', 'items'); JSON.pretty_generate serializes both the
    # same way, but in-memory consumers should not rely on key class.
    def self.to_json_schema(schema)
      # If schema is already a Hash, assume it's a JSON schema
      return JSON.pretty_generate(schema) if schema.is_a?(Hash)

      # If schema has a json_schema method, use it
      return JSON.pretty_generate(schema.json_schema) if schema.respond_to?(:json_schema)

      # Otherwise, extract schema information from dry-schema
      json_schema = {
        type: 'object',
        properties: {},
        required: []
      }

      # Process each rule in the schema
      if schema.respond_to?(:rules) && schema.rules.respond_to?(:each_value)
        schema.rules.each_value do |rule|
          # Skip if rule is not a proper rule object
          next unless rule.respond_to?(:name)

          property_name = rule.name.to_s
          property_def = {}

          # Determine if the property is required
          # NOTE(review): confirm Dry::Schema::Rule::Required exists in the
          # pinned dry-schema version — rule introspection APIs vary by release.
          json_schema[:required] << property_name if rule.is_a?(Dry::Schema::Rule::Required)

          # Determine the property type from the rule's nominal primitive
          if rule.respond_to?(:type) && rule.type.is_a?(Dry::Types::Nominal)
            case rule.type.primitive
            when String
              property_def['type'] = 'string'
            when Integer
              property_def['type'] = 'integer'
            when Float
              property_def['type'] = 'number'
            when TrueClass, FalseClass
              property_def['type'] = 'boolean'
            when Array
              property_def['type'] = 'array'
              # Try to determine the item type
              property_def['items'] = if rule.type.respond_to?(:member) && rule.type.member.respond_to?(:primitive)
                                        case rule.type.member.primitive
                                        when String
                                          { 'type' => 'string' }
                                        when Integer
                                          { 'type' => 'integer' }
                                        when Float
                                          { 'type' => 'number' }
                                        when TrueClass, FalseClass
                                          { 'type' => 'boolean' }
                                        when Hash
                                          { 'type' => 'object' }
                                        else
                                          {}
                                        end
                                      else
                                        {}
                                      end
            when Hash
              property_def['type'] = 'object'
              # For nested objects, we'd need a more sophisticated approach
            else
              property_def['type'] = 'string' # Default to string
            end
          end

          json_schema[:properties][property_name] = property_def
        end
      end

      JSON.pretty_generate(json_schema)
    end
  end
end
@@ -0,0 +1,58 @@
1
+ # frozen_string_literal: true
2
+
3
+ require 'json'
4
+
5
module LastLLM
  # Generates structured (schema-validated) data through an LLM provider.
  class StructuredOutput
    # @param client [LastLLM::Client] The client whose provider performs generation
    def initialize(client)
      @client = client
    end

    # Generate a structured object from a prompt.
    # @param prompt [String] The prompt to generate the object from
    # @param schema [Dry::Schema::JSON] The schema to validate against
    # @param options [Hash] Generation options
    # @option options [String] :model The model to use
    # @option options [Float] :temperature (0.2) The temperature to use
    # @return [Hash] The generated object
    # @raise [ValidationError] If the generated object fails validation
    def generate(prompt, schema, options = {})
      # Embed the schema into the prompt so the model knows the target shape.
      formatted_prompt = self.class.format_prompt(prompt, schema)

      # Structured output wants determinism: default to a low temperature,
      # while letting an explicit caller value win.
      options = { temperature: 0.2 }.merge(options)

      result = @client.provider.generate_object(formatted_prompt, schema, options)

      # Reject anything the schema does not accept.
      validation = schema.call(result)
      unless validation.success?
        raise ValidationError, "Generated object failed validation: #{validation.errors.to_h}"
      end

      result
    end

    # Build the full prompt sent to the model: the user's prompt followed by
    # the JSON Schema the response must satisfy.
    # @param prompt [String] The original prompt
    # @param schema [Dry::Schema::JSON] The schema to include
    # @return [String] The formatted prompt
    def self.format_prompt(prompt, schema)
      schema_json = Schema.to_json_schema(schema)

      <<~PROMPT
        #{prompt}

        Respond with valid JSON that matches the following schema:

        #{schema_json}

        Ensure your response is a valid JSON object that strictly follows this schema.
      PROMPT
    end
  end
end