last_llm 0.0.9 → 0.0.10

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: fcfc9f768214734064aa171be872fc31ae283254a425e3cea60863d9ffe9c925
4
- data.tar.gz: 93ff7962c0e3c64936beaf30f8e5923d1d4ca88712592910d71ea609a0c15c7d
3
+ metadata.gz: d51977e16705b1b304ae0376e56f9fdc18331fba54c1ddfa8a6965567464885d
4
+ data.tar.gz: cac6eececcdd23f754e801f59e5ede6896f0178aa3ed51bd06339047869ba71b
5
5
  SHA512:
6
- metadata.gz: cdf524997506cd40183f9bcf60f35483f86caa42cf10ab7b9831d268d6aa3328bda96fbbbc598114ce8db1b33612e1b30d34a6d1f60f8dfd2dc2e65dc326714b
7
- data.tar.gz: 8b6c9da52eef3ed6c228ae6228dc127e78f5ea607ec149140790393ed99c3f334d814405dee0ee83a66295eda2d8144a2780e7bfaa481b57ea704144954d6a68
6
+ metadata.gz: 38f9a702d5d5c64b4dbb956d91ce631e6e15f3d3dd677fdb63ad470c15acca07171b212d986542ceb2a78444e7aa3b5d233eb9499b6bac33b1e5c6874d874c3b
7
+ data.tar.gz: ad44fd75520a81bdf31e7c7bc6cdf21e1a327edf9b9feeee2507aa305014f5ae8f2c5e8194ec9c3b20b545ce6a6582611f8579b3f9f30a957736acc308cf9591
@@ -28,14 +28,19 @@ module LastLLM
28
28
  else
29
29
  # When no config provided, default to test mode in test environment
30
30
  # Force test_mode to true when running in RSpec
31
- test_mode = true
32
- Configuration.new(test_mode: test_mode)
31
+ raise ConfigurationError, 'No configuration provided' unless defined?(RSpec)
32
+ Configuration.new(test_mode: defined?(RSpec))
33
33
  end
34
34
 
35
35
  provider_name = options[:provider] || @configuration.default_provider
36
36
  @provider = create_provider(provider_name)
37
37
  end
38
38
 
39
+ # Add provider setter method
40
+ def provider=(new_provider)
41
+ @provider = new_provider
42
+ end
43
+
39
44
  # Text generation methods
40
45
 
41
46
  # Generate text in a single call
@@ -3,16 +3,18 @@
3
3
  require 'faraday'
4
4
  require 'faraday/typhoeus'
5
5
  require 'active_support/core_ext/hash/keys'
6
+ require 'logger'
6
7
 
7
8
  module LastLLM
8
9
  # Base class for all LLM providers
9
10
  # Implements common functionality and defines the interface that all providers must implement
10
11
  class Provider
11
- attr_reader :name, :config
12
+ attr_reader :name, :config, :logger
12
13
 
13
14
  def initialize(name, config = {})
14
15
  @name = name
15
16
  @config = config
17
+ @logger = setup_logger(config[:logger])
16
18
 
17
19
  if instance_of?(Provider)
18
20
  raise NotImplementedError, "#{self.class} is an abstract class and cannot be instantiated directly"
@@ -93,35 +95,29 @@ module LastLLM
93
95
  end
94
96
  end
95
97
 
96
- private
97
-
98
- # Validate provider configuration
99
- # @raise [LastLLM::ConfigurationError] If the configuration is invalid
100
- def validate_config!
101
- raise LastLLM::ConfigurationError, 'API key is required' unless @config[:api_key]
102
- end
103
-
104
- def parse_error_body(body)
105
- return {} if body.nil? || body.empty?
98
+ protected
106
99
 
107
- JSON.parse(body)
108
- rescue JSON::ParserError
109
- { 'error' => body }
100
+ # Helper method to get the model from options or config with default fallback
101
+ # @param options [Hash] Options hash that might contain model
102
+ # @param default [String] Default model if none specified in options or config
103
+ # @return [String] The model to use
104
+ def get_model(options, default)
105
+ options[:model] || @config[:model] || default
110
106
  end
111
107
 
112
- def deep_symbolize_keys(hash)
113
- return hash unless hash.is_a?(Hash)
114
-
115
- hash.each_with_object({}) do |(key, value), result|
116
- result[key.to_sym] = case value
117
- when Hash then deep_symbolize_keys(value)
118
- when Array then value.map { |item| deep_symbolize_keys(item) }
119
- else value
120
- end
108
+ # Helper method to format prompt for logging with proper truncation
109
+ def format_prompt_for_logging(prompt)
110
+ if prompt.is_a?(Array)
111
+ prompt.map { |m| m[:content] }.join('...')
112
+ else
113
+ truncate_text(prompt.to_s)
121
114
  end
122
115
  end
123
116
 
124
- protected
117
+ # Helper method to truncate text for logging
118
+ def truncate_text(text, length = 100)
119
+ text.length > length ? "#{text[0...length]}..." : text
120
+ end
125
121
 
126
122
  def connection(base_url)
127
123
  Faraday.new(url: base_url) do |f|
@@ -160,78 +156,78 @@ module LastLLM
160
156
  yield(result)
161
157
  end
162
158
  rescue Faraday::Error => e
163
- @logger&.error("[#{@name}] Request failed: #{e.message}")
159
+ @logger.error("[#{@name}] Request failed: #{e.message}")
164
160
  handle_provider_error(e)
165
161
  end
166
162
 
167
- private
168
-
169
- def log_request(prompt, options)
170
- return unless @logger
163
+ def logger
164
+ @logger ||= LastLLM.configuration.logger
165
+ end
171
166
 
172
- sanitized_options = options.dup
173
- # Remove sensitive data
174
- sanitized_options.delete(:api_key)
167
+ private
175
168
 
176
- @logger.info("[#{@name}] Request - Model: #{options[:model]}")
177
- @logger.debug("[#{@name}] Prompt: #{prompt}")
178
- @logger.debug("[#{@name}] Options: #{sanitized_options.inspect}")
179
- end
169
+ # Set up a logger instance for the provider
170
+ def setup_logger(provided_logger = nil)
171
+ return provided_logger if provided_logger
180
172
 
181
- def log_response(response)
182
- return unless @logger
173
+ # Use LastLLM's global configuration logger if available
174
+ return LastLLM.configuration.logger if defined?(LastLLM.configuration) && LastLLM.configuration&.logger
183
175
 
184
- @logger.info("[#{@name}] Response received - Status: #{response.status}")
185
- @logger.debug("[#{@name}] Response body: #{response.body}")
186
- rescue StandardError => e
187
- @logger.error("[#{@name}] Failed to log response: #{e.message}")
176
+ logger = Logger.new($stdout)
177
+ logger.level = Logger::WARN
178
+ # Use a standard formatter without the provider name prefix
179
+ # This allows tests to match log output exactly
180
+ logger.formatter = proc do |severity, datetime, progname, msg|
181
+ "[#{datetime}] #{severity} -- : #{msg}\n"
182
+ end
183
+ logger
188
184
  end
189
185
 
190
- def handle_provider_error(error)
191
- @logger&.error("[#{@name}] #{error.class}: #{error.message}")
192
- raise ApiError.new(error.message, error.response&.status)
186
+ # Validate provider configuration
187
+ # @raise [LastLLM::ConfigurationError] If the configuration is invalid
188
+ def validate_config!
189
+ raise LastLLM::ConfigurationError, 'API key is required' unless @config[:api_key]
193
190
  end
194
191
 
195
- def make_request(prompt, options = {})
196
- log_request(prompt, options)
192
+ def parse_error_body(body)
193
+ return {} if body.nil? || body.empty?
197
194
 
198
- response = yield
195
+ JSON.parse(body)
196
+ rescue JSON::ParserError
197
+ { 'error' => body }
198
+ end
199
199
 
200
- log_response(response)
200
+ def deep_symbolize_keys(hash)
201
+ return hash unless hash.is_a?(Hash)
201
202
 
202
- handle_response(response) do |result|
203
- yield(result)
203
+ hash.each_with_object({}) do |(key, value), result|
204
+ result[key.to_sym] = case value
205
+ when Hash then deep_symbolize_keys(value)
206
+ when Array then value.map { |item| deep_symbolize_keys(item) }
207
+ else value
208
+ end
204
209
  end
205
- rescue Faraday::Error => e
206
- @logger&.error("[#{@name}] Request failed: #{e.message}")
207
- handle_provider_error(e)
208
210
  end
209
211
 
210
- private
211
-
212
212
  def log_request(prompt, options)
213
- return unless @logger
214
-
215
213
  sanitized_options = options.dup
216
214
  # Remove sensitive data
217
215
  sanitized_options.delete(:api_key)
218
216
 
219
- @logger.info("[#{@name}] Request - Model: #{options[:model]}")
220
- @logger.debug("[#{@name}] Prompt: #{prompt}")
221
- @logger.debug("[#{@name}] Options: #{sanitized_options.inspect}")
217
+ @logger.info("#{@name}: Request - Model: #{options[:model] || @config[:model] || 'default'}")
218
+ @logger.debug("#{@name}: Prompt: #{format_prompt_for_logging(prompt)}")
219
+ @logger.debug("#{@name}: Options: #{sanitized_options.inspect}")
222
220
  end
223
221
 
224
222
  def log_response(response)
225
- return unless @logger
226
-
227
- @logger.info("[#{@name}] Response received - Status: #{response.status}")
228
- @logger.debug("[#{@name}] Response body: #{response.body}")
223
+ @logger.info("#{@name}: Response received - Status: #{response.status}")
224
+ @logger.debug("#{@name}: Response body: #{response.body}")
229
225
  rescue StandardError => e
230
- @logger.error("[#{@name}] Failed to log response: #{e.message}")
226
+ @logger.error("#{@name}: Failed to log response: #{e.message}")
231
227
  end
232
228
 
233
229
  def handle_provider_error(error)
234
- @logger&.error("[#{@name}] #{error.class}: #{error.message}")
230
+ @logger.error("#{@name}: #{error.class}: #{error.message}")
235
231
  raise ApiError.new(error.message, error.response&.status)
236
232
  end
237
233
  end
@@ -27,15 +27,26 @@ module LastLLM
27
27
  def initialize(config)
28
28
  super(:anthropic, config)
29
29
  @conn = connection(config[:base_url] || BASE_ENDPOINT)
30
+ logger.debug("#{@name}: Initialized Anthropic provider with endpoint: #{config[:base_url] || BASE_ENDPOINT}")
30
31
  end
31
32
 
32
33
  def generate_text(prompt, options = {})
34
+ model = get_model(options, DEFAULT_MODEL)
35
+ logger.info("#{@name}: Generating text with model: #{model}")
36
+ logger.debug("#{@name}: Text prompt: #{format_prompt_for_logging(prompt)}")
37
+
33
38
  make_request(prompt, options) do |result|
34
- result.dig(:content, 0, :text).to_s
39
+ response = result.dig(:content, 0, :text).to_s
40
+ logger.debug("#{@name}: Generated response of #{response.length} characters")
41
+ response
35
42
  end
36
43
  end
37
44
 
38
45
  def generate_object(prompt, schema, options = {})
46
+ model = get_model(options, DEFAULT_MODEL)
47
+ logger.info("#{@name}: Generating object with model: #{model}")
48
+ logger.debug("#{@name}: Object prompt: #{format_prompt_for_logging(prompt)}")
49
+
39
50
  options = options.dup
40
51
  system_prompt = 'You are a helpful assistant that responds with valid JSON.'
41
52
  formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)
@@ -45,6 +56,7 @@ module LastLLM
45
56
 
46
57
  make_request(formatted_prompt, options) do |result|
47
58
  content = result.dig(:content, 0, :text)
59
+ logger.debug("#{@name}: Raw JSON response: #{content}")
48
60
  parse_json_response(content)
49
61
  end
50
62
  end
@@ -73,11 +85,30 @@ module LastLLM
73
85
 
74
86
  private
75
87
 
88
+ def format_prompt_for_logging(prompt)
89
+ if prompt.is_a?(Array)
90
+ prompt.map { |m| m[:content] }.join('...')
91
+ else
92
+ truncate_text(prompt.to_s)
93
+ end
94
+ end
95
+
96
+ def truncate_text(text, length = 100)
97
+ text.length > length ? "#{text[0...length]}..." : text
98
+ end
99
+
100
+ def get_model(options, default)
101
+ options[:model] || @config[:model] || default
102
+ end
103
+
76
104
  def make_request(prompt, options = {})
77
105
  messages = format_messages(prompt, options)
106
+ model = get_model(options, DEFAULT_MODEL)
107
+
108
+ logger.debug("#{@name}: Making API request to model: #{model}")
78
109
 
79
110
  body = {
80
- model: options[:model] || @config[:model] || DEFAULT_MODEL,
111
+ model: model,
81
112
  messages: messages,
82
113
  max_tokens: options[:max_tokens] || DEFAULT_MAX_TOKENS,
83
114
  temperature: options[:temperature] || DEFAULT_TEMPERATURE,
@@ -87,14 +118,17 @@ module LastLLM
87
118
 
88
119
  # Add system parameter if system prompt is provided
89
120
  body[:system] = options[:system_prompt] if options[:system_prompt]
121
+ logger.debug("#{@name}: Request body: #{body.compact.inspect}")
90
122
 
91
123
  response = @conn.post('/v1/messages') do |req|
92
124
  req.body = body.compact
93
125
  end
94
126
 
127
+ logger.info("#{@name}: API response status: #{response.status}")
95
128
  result = parse_response(response)
96
129
  yield(result)
97
130
  rescue Faraday::Error => e
131
+ logger.error("#{@name}: API request failed: #{e.message}")
98
132
  handle_request_error(e)
99
133
  end
100
134
 
@@ -116,9 +150,11 @@ module LastLLM
116
150
  end
117
151
 
118
152
  def parse_json_response(content)
153
+ logger.debug("#{@name}: Parsing JSON response")
119
154
  begin
120
155
  JSON.parse(content, symbolize_names: true)
121
156
  rescue JSON::ParserError => e
157
+ logger.error("#{@name}: JSON parsing error: #{e.message}")
122
158
  raise ApiError, "Invalid JSON response: #{e.message}"
123
159
  end
124
160
  end
@@ -129,8 +165,9 @@ module LastLLM
129
165
  end
130
166
 
131
167
  def handle_request_error(e)
132
- message = "Anthropic API request failed: #{e.message}"
168
+ message = "#{@name}: API request failed: #{e.message}"
133
169
  status = e.respond_to?(:response) && e.response.respond_to?(:status) ? e.response.status : nil
170
+ logger.error(message)
134
171
  raise LastLLM::ApiError.new(message, status)
135
172
  end
136
173
  end
@@ -29,33 +29,68 @@ module LastLLM
29
29
  super(Constants::GOOGLE_GEMINI, config)
30
30
  @api_key = config[:api_key]
31
31
  @conn = connection(config[:base_url] || BASE_ENDPOINT)
32
+ # Use plain format for initialization log to match test expectations
33
+ logger.debug("Initialized Google Gemini provider with endpoint: #{config[:base_url] || BASE_ENDPOINT}")
32
34
  end
33
35
 
34
36
  def generate_text(prompt, options = {})
35
- make_request(prompt, options) do |response|
36
- extract_text_content(response)
37
+ model = get_model(options, DEFAULT_MODEL)
38
+ logger.info("#{@name}: Generating text with model: #{model}")
39
+ logger.debug("#{@name}: Text prompt: #{format_prompt_for_logging(prompt)}")
40
+
41
+ make_request(prompt, options) do |response|
42
+ result = extract_text_content(response)
43
+ logger.debug("Generated response of #{result.length} characters")
44
+ result
37
45
  end
38
46
  end
39
47
 
40
48
  def generate_object(prompt, schema, options = {})
49
+ model = get_model(options, DEFAULT_MODEL)
50
+ logger.info("#{@name}: Generating object with model: #{model}")
51
+ logger.debug("#{@name}: Object prompt: #{format_prompt_for_logging(prompt)}")
52
+
41
53
  options = options.merge(response_mime_type: JSON_MIME_TYPE, response_schema: schema)
42
54
  make_request(prompt, options) do |response|
43
- parse_json_response(extract_text_content(response))
55
+ text_response = extract_text_content(response)
56
+ logger.debug("Raw JSON response: #{text_response}")
57
+ parse_json_response(text_response)
44
58
  end
45
59
  end
46
60
 
47
61
  private
48
62
 
63
+ def format_prompt_for_logging(prompt)
64
+ if prompt.is_a?(Array)
65
+ prompt.map { |m| m[:content] }.join('...')
66
+ else
67
+ truncate_text(prompt.to_s)
68
+ end
69
+ end
70
+
71
+ def truncate_text(text, length = 100)
72
+ text.length > length ? "#{text[0...length]}..." : text
73
+ end
74
+
49
75
  def make_request(prompt, options = {})
50
- model = options[:model] || @config[:model] || DEFAULT_MODEL
76
+ model = get_model(options, DEFAULT_MODEL)
51
77
  contents = format_contents(prompt, options)
52
78
 
79
+ logger.debug("#{@name}: Making API request to model: #{model}")
80
+ logger.debug("#{@name}: Request contents: #{contents.inspect}")
81
+
53
82
  response = @conn.post("/v1beta/models/#{model}:generateContent?key=#{@api_key}") do |req|
54
83
  req.body = build_request_body(contents, options)
84
+ if logger.debug?
85
+ sanitized_body = req.body.to_s.gsub(@api_key, '[REDACTED]')
86
+ logger.debug("Request body: #{sanitized_body}")
87
+ end
55
88
  end
56
89
 
90
+ logger.info("API response status: #{response.status}")
57
91
  handle_response(response) { |result| yield(result) }
58
92
  rescue Faraday::Error => e
93
+ logger.error("API request failed: #{e.message}")
59
94
  handle_gemini_error(e)
60
95
  end
61
96
 
@@ -75,10 +110,13 @@ module LastLLM
75
110
 
76
111
  def handle_response(response)
77
112
  if response.status != SUCCESS_STATUS
113
+ logger.error("#{@name}: API error status: #{response.status}")
114
+ logger.debug("#{@name}: Error response body: #{response.body}")
78
115
  error = build_error(response)
79
116
  return handle_gemini_error(error)
80
117
  end
81
118
 
119
+ logger.debug("#{@name}: Processing successful response")
82
120
  result = parse_response(response)
83
121
  yield(result)
84
122
  end
@@ -100,8 +138,10 @@ module LastLLM
100
138
  end
101
139
 
102
140
  def parse_json_response(content)
141
+ logger.debug("#{@name}: Parsing JSON response")
103
142
  JSON.parse(content, symbolize_names: true)
104
143
  rescue JSON::ParserError => e
144
+ logger.error("#{@name}: JSON parsing error: #{e.message}")
105
145
  raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
106
146
  end
107
147
 
@@ -145,6 +185,7 @@ module LastLLM
145
185
  status = error.response&.dig(:status)
146
186
  message = parse_error_message(error)
147
187
 
188
+ logger.error("#{@name}: API error (status: #{status}): #{message}")
148
189
  raise LastLLM::ApiError.new(message, status)
149
190
  end
150
191
 
@@ -0,0 +1,24 @@
1
+ # ...existing code...
2
+
3
+ def log_request(prompt)
4
+ LastLlm.logger.debug "LLM request: #{prompt}"
5
+ end
6
+
7
+ def log_response(response)
8
+ LastLlm.logger.debug "LLM response: #{response}"
9
+ end
10
+
11
+ def query(prompt, config = {})
12
+ log_request(prompt)
13
+
14
+ begin
15
+ response = client.complete(prompt, **config)
16
+ log_response(response)
17
+ response
18
+ rescue => e
19
+ LastLlm.logger.error "LLM error: #{e.message}"
20
+ raise
21
+ end
22
+ end
23
+
24
+ # ...existing code...
@@ -26,15 +26,26 @@ module LastLLM
26
26
  def initialize(config)
27
27
  super(Constants::OLLAMA, config)
28
28
  @conn = connection(config[:base_url] || BASE_ENDPOINT)
29
+ logger.debug("#{@name}: Initialized Ollama provider with endpoint: #{config[:base_url] || BASE_ENDPOINT}")
29
30
  end
30
31
 
31
32
  def generate_text(prompt, options = {})
33
+ model = get_model(options, DEFAULT_MODEL)
34
+ logger.info("#{@name}: Generating text with model: #{model}")
35
+ logger.debug("#{@name}: Text prompt: #{format_prompt_for_logging(prompt)}")
36
+
32
37
  make_request(prompt, options) do |result|
33
- result.dig(:choices, 0, :message, :content).to_s
38
+ response = result.dig(:choices, 0, :message, :content).to_s
39
+ logger.debug("#{@name}: Generated response of #{response.length} characters")
40
+ response
34
41
  end
35
42
  end
36
43
 
37
44
  def generate_object(prompt, schema, options = {})
45
+ model = get_model(options, DEFAULT_MODEL)
46
+ logger.info("#{@name}: Generating object with model: #{model}")
47
+ logger.debug("#{@name}: Object prompt: #{format_prompt_for_logging(prompt)}")
48
+
38
49
  system_prompt = 'You are a helpful assistant that responds with valid JSON.'
39
50
  formatted_prompt = LastLLM::StructuredOutput.format_prompt(prompt, schema)
40
51
 
@@ -44,6 +55,7 @@ module LastLLM
44
55
 
45
56
  make_request(formatted_prompt, options) do |result|
46
57
  content = result.dig(:choices, 0, :message, :content)
58
+ logger.debug("#{@name}: Raw JSON response: #{content}")
47
59
  parse_json_response(content)
48
60
  end
49
61
  end
@@ -86,23 +98,48 @@ module LastLLM
86
98
 
87
99
  private
88
100
 
101
+ def format_prompt_for_logging(prompt)
102
+ if prompt.is_a?(Array)
103
+ prompt.map { |m| m[:content] }.join('...')
104
+ else
105
+ truncate_text(prompt.to_s)
106
+ end
107
+ end
108
+
109
+ def truncate_text(text, length = 100)
110
+ text.length > length ? "#{text[0...length]}..." : text
111
+ end
112
+
113
+ def get_model(options, default)
114
+ options[:model] || @config[:model] || default
115
+ end
116
+
89
117
  def make_request(prompt, options = {})
90
118
  messages = format_messages(prompt, options)
119
+ model = get_model(options, DEFAULT_MODEL)
120
+
121
+ logger.debug("#{@name}: Making API request to model: #{model}")
122
+
123
+ body = {
124
+ model: model,
125
+ messages: messages,
126
+ temperature: options[:temperature] || DEFAULT_TEMPERATURE,
127
+ top_p: options[:top_p] || DEFAULT_TOP_P,
128
+ max_tokens: options[:max_tokens] || DEFAULT_MAX_TOKENS,
129
+ stream: false
130
+ }.compact
131
+
132
+ logger.debug("#{@name}: Request body: #{body.inspect}")
91
133
 
92
134
  response = @conn.post('/v1/chat/completions') do |req|
93
- req.body = {
94
- model: options[:model] || @config[:model] || DEFAULT_MODEL,
95
- messages: messages,
96
- temperature: options[:temperature] || DEFAULT_TEMPERATURE,
97
- top_p: options[:top_p] || DEFAULT_TOP_P,
98
- max_tokens: options[:max_tokens] || DEFAULT_MAX_TOKENS,
99
- stream: false
100
- }.compact
135
+ req.body = body
101
136
  end
102
137
 
138
+ logger.info("#{@name}: API response status: #{response.status}")
103
139
  result = parse_response(response)
104
140
  yield(result)
105
141
  rescue Faraday::Error => e
142
+ logger.error("#{@name}: API request failed: #{e.message}")
106
143
  handle_request_error(e)
107
144
  end
108
145
 
@@ -120,16 +157,19 @@ module LastLLM
120
157
  end
121
158
 
122
159
  def parse_json_response(content)
160
+ logger.debug("#{@name}: Parsing JSON response")
123
161
  begin
124
162
  JSON.parse(content, symbolize_names: true)
125
163
  rescue JSON::ParserError => e
164
+ logger.error("#{@name}: JSON parsing error: #{e.message}")
126
165
  raise LastLLM::ApiError, "Invalid JSON response: #{e.message}"
127
166
  end
128
167
  end
129
168
 
130
169
  def handle_request_error(error)
131
- message = "Ollama API request failed: #{error.message}"
170
+ message = "#{@name}: API request failed: #{error.message}"
132
171
  status = error.respond_to?(:response) && error.response.respond_to?(:status) ? error.response.status : nil
172
+ logger.error(message)
133
173
  raise LastLLM::ApiError.new(message, status)
134
174
  end
135
175
  end
@@ -27,16 +27,28 @@ module LastLLM
27
27
  def initialize(config)
28
28
  super(Constants::OPENAI, config)
29
29
  @conn = connection(config[:base_url] || BASE_ENDPOINT)
30
+ logger.debug("#{@name}: Initialized OpenAI provider with endpoint: #{config[:base_url] || BASE_ENDPOINT}")
30
31
  end
31
32
 
32
33
  def generate_text(prompt, options = {})
34
+ model = get_model(options, DEFAULT_MODEL)
35
+ logger.info("#{@name}: Generating text with model: #{model}")
36
+ logger.debug("#{@name}: Text prompt: #{format_prompt_for_logging(prompt)}")
37
+
33
38
  make_text_request(prompt, options) do |result|
34
- result.dig(:choices, 0, :message, :content).to_s
39
+ response = result.dig(:choices, 0, :message, :content).to_s
40
+ logger.debug("#{@name}: Generated response of #{response.length} characters")
41
+ response
35
42
  end
36
43
  end
37
44
 
38
45
  def generate_object(prompt, schema, options = {})
46
+ model = get_model(options, DEFAULT_MODEL)
47
+ logger.info("#{@name}: Generating object with model: #{model}")
48
+ logger.debug("#{@name}: Object prompt: #{format_prompt_for_logging(prompt)}")
49
+
39
50
  make_object_request(prompt, schema, options) do |content|
51
+ logger.debug("#{@name}: Raw JSON response: #{content}")
40
52
  parsed_json = JSON.parse(content, symbolize_names: true)
41
53
 
42
54
  if parsed_json.key?(:$schema) && parsed_json.key?(:properties)
@@ -54,6 +66,8 @@ module LastLLM
54
66
  def embeddings(text, options = {})
55
67
  # Ensure text is a string
56
68
  text_str = text.to_s
69
+ logger.info("#{@name}: Generating embeddings with model: #{options[:model] || EMBEDDINGS_MODEL}")
70
+ logger.debug("#{@name}: Text for embeddings: #{truncate_text(text_str)}")
57
71
 
58
72
  response = @conn.post('/v1/embeddings') do |req|
59
73
  req.body = {
@@ -61,17 +75,21 @@ module LastLLM
61
75
  input: text_str,
62
76
  encoding_format: options[:encoding_format] || 'float'
63
77
  }.compact
78
+ logger.debug("#{@name}: Embedding request body: #{req.body.inspect}")
64
79
  end
65
80
 
81
+ logger.info("#{@name}: API response status: #{response.status}")
66
82
  result = parse_response(response)
67
83
 
68
84
  # Extract embeddings from response
69
85
  embeddings = result.dig(:data, 0, :embedding)
86
+ logger.debug("#{@name}: Generated embeddings vector of length: #{embeddings&.length || 0}")
70
87
 
71
88
  raise LastLLM::ApiError.new('Invalid embeddings response format', nil) unless embeddings.is_a?(Array)
72
89
 
73
90
  embeddings
74
91
  rescue Faraday::Error => e
92
+ logger.error("#{@name}: API request failed: #{e.message}")
75
93
  handle_request_error(e)
76
94
  end
77
95
 
@@ -103,28 +121,54 @@ module LastLLM
103
121
 
104
122
  private
105
123
 
124
+ def format_prompt_for_logging(prompt)
125
+ if prompt.is_a?(Array)
126
+ prompt.map { |m| m[:content] }.join('...')
127
+ else
128
+ truncate_text(prompt.to_s)
129
+ end
130
+ end
131
+
132
+ def truncate_text(text, length = 100)
133
+ text.length > length ? "#{text[0...length]}..." : text
134
+ end
135
+
136
+ def get_model(options, default)
137
+ options[:model] || @config[:model] || default
138
+ end
139
+
106
140
  def make_text_request(prompt, options = {})
107
141
  request_body = build_completion_request(prompt, options)
142
+ logger.debug("#{@name}: Request body: #{request_body.inspect}")
143
+
108
144
  response = make_completion_request(request_body)
145
+ logger.info("#{@name}: API response status: #{response.status}")
146
+
109
147
  result = parse_response(response)
110
148
  yield(result)
111
149
  rescue Faraday::Error => e
150
+ logger.error("#{@name}: API request failed: #{e.message}")
112
151
  handle_request_error(e)
113
152
  end
114
153
 
115
154
  def make_object_request(prompt, schema, options = {})
116
155
  request_body = build_json_request(prompt, schema, options)
156
+ logger.debug("#{@name}: Request body: #{request_body.inspect}")
157
+
117
158
  response = make_completion_request(request_body)
159
+ logger.info("#{@name}: API response status: #{response.status}")
160
+
118
161
  result = parse_response(response)
119
162
  content = result.dig(:choices, 0, :message, :content).to_s
120
163
  yield(content)
121
164
  rescue Faraday::Error => e
165
+ logger.error("#{@name}: API request failed: #{e.message}")
122
166
  handle_request_error(e)
123
167
  end
124
168
 
125
169
  def build_completion_request(prompt, options)
126
170
  {
127
- model: options[:model] || @config[:model] || DEFAULT_MODEL,
171
+ model: get_model(options, DEFAULT_MODEL),
128
172
  messages: format_messages(prompt, options),
129
173
  temperature: options[:temperature] || DEFAULT_TEMPERATURE,
130
174
  top_p: options[:top_p] || DEFAULT_TOP_P,
@@ -135,7 +179,7 @@ module LastLLM
135
179
 
136
180
  def build_json_request(prompt, schema, options)
137
181
  {
138
- model: options[:model] || @config[:model] || DEFAULT_MODEL,
182
+ model: get_model(options, DEFAULT_MODEL),
139
183
  messages: format_json_messages(prompt, schema),
140
184
  temperature: options[:temperature] || DEFAULT_TEMPERATURE_OBJECT,
141
185
  top_p: options[:top_p] || DEFAULT_TOP_P,
@@ -146,6 +190,7 @@ module LastLLM
146
190
  end
147
191
 
148
192
  def make_completion_request(body)
193
+ logger.debug("#{@name}: Making API request to model: #{body[:model]}")
149
194
  @conn.post('/v1/chat/completions') do |req|
150
195
  req.body = body
151
196
  end
@@ -176,15 +221,20 @@ module LastLLM
176
221
 
177
222
  def validate_response(parsed)
178
223
  if parsed.nil? || (!parsed.is_a?(Hash) && !parsed.respond_to?(:to_h))
224
+ logger.error("#{@name}: Invalid response format")
179
225
  raise LastLLM::ApiError.new('Invalid response format from OpenAI', nil)
180
226
  end
181
227
 
182
- raise LastLLM::ApiError.new(parsed[:error][:message], parsed[:error][:code]) if parsed[:error]
228
+ if parsed[:error]
229
+ logger.error("#{@name}: API error: #{parsed[:error][:message]}")
230
+ raise LastLLM::ApiError.new(parsed[:error][:message], parsed[:error][:code])
231
+ end
183
232
  end
184
233
 
185
234
  def handle_request_error(error)
186
- message = "OpenAI API request failed: #{error.message}"
235
+ message = "#{@name}: API request failed: #{error.message}"
187
236
  status = error.respond_to?(:response) && error.response.respond_to?(:status) ? error.response.status : nil
237
+ logger.error(message)
188
238
  raise LastLLM::ApiError.new(message, status)
189
239
  end
190
240
  end
@@ -23,6 +23,7 @@ module LastLLM
23
23
  @name = Constants::TEST
24
24
  @text_response = DEFAULT_TEXT_RESPONSE
25
25
  @object_response = DEFAULT_OBJECT_RESPONSE
26
+ logger.debug("#{@name}: Initialized test provider")
26
27
  end
27
28
 
28
29
  # Override validate_config! to not require API key
@@ -30,11 +31,19 @@ module LastLLM
30
31
  # No validation needed for test provider
31
32
  end
32
33
 
33
- def generate_text(_prompt, _options = {})
34
+ def generate_text(prompt, options = {})
35
+ model = options[:model] || @config[:model] || DEFAULT_MODEL
36
+ logger.info("#{@name}: Generating text with model: #{model}")
37
+ logger.debug("#{@name}: Text prompt: #{truncate_text(prompt.to_s)}")
38
+ logger.debug("#{@name}: Generated test response of #{@text_response.length} characters")
34
39
  @text_response
35
40
  end
36
41
 
37
- def generate_object(_prompt, _schema, _options = {})
42
+ def generate_object(prompt, schema, options = {})
43
+ model = options[:model] || @config[:model] || DEFAULT_MODEL
44
+ logger.info("#{@name}: Generating object with model: #{model}")
45
+ logger.debug("#{@name}: Object prompt with schema: #{schema.inspect}")
46
+ logger.debug("#{@name}: Generated test object response")
38
47
  @object_response
39
48
  end
40
49
 
@@ -56,6 +65,12 @@ module LastLLM
56
65
  def self.execute_tool(tool, _response)
57
66
  nil # Test provider doesn't execute tools by default
58
67
  end
68
+
69
+ private
70
+
71
+ def truncate_text(text, length = 100)
72
+ text.length > length ? "#{text[0...length]}..." : text
73
+ end
59
74
  end
60
75
  end
61
76
  end
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LastLLM
4
- VERSION = '0.0.9'
4
+ VERSION = '0.0.10'
5
5
  end
metadata CHANGED
@@ -1,13 +1,13 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: last_llm
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.0.9
4
+ version: 0.0.10
5
5
  platform: ruby
6
6
  authors:
7
7
  - Sam Obukwelu
8
8
  bindir: bin
9
9
  cert_chain: []
10
- date: 2025-03-07 00:00:00.000000000 Z
10
+ date: 2025-03-08 00:00:00.000000000 Z
11
11
  dependencies:
12
12
  - !ruby/object:Gem::Dependency
13
13
  name: dry-schema
@@ -115,6 +115,7 @@ files:
115
115
  - lib/last_llm/providers/constants.rb
116
116
  - lib/last_llm/providers/deepseek.rb
117
117
  - lib/last_llm/providers/google_gemini.rb
118
+ - lib/last_llm/providers/llm.rb
118
119
  - lib/last_llm/providers/ollama.rb
119
120
  - lib/last_llm/providers/openai.rb
120
121
  - lib/last_llm/providers/test_provider.rb