boxcars 0.8.3 → 0.8.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -19,10 +19,11 @@ module Boxcars
                           "You should ask targeted questions"

     def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
+      user_id = kwargs.delete(:user_id)
       @groq_params = DEFAULT_PARAMS.merge(kwargs) # Corrected typo here
       @prompts = prompts
       @batch_size = batch_size
-      super(description:, name:)
+      super(description:, name:, user_id:)
     end

     # Renamed from open_ai_client to groq_client for clarity
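Note the pattern in this hunk; it repeats in every engine below. `user_id` is deleted from `kwargs` before the leftover options are merged into the provider params, so it reaches the `Engine` superclass (and observability) without ever leaking into the API payload. A minimal standalone sketch of the idea — `ExampleEngine` is hypothetical, not code from the gem:

    class ExampleEngine
      DEFAULT_PARAMS = { model: "example-model", temperature: 0.1 }.freeze

      attr_reader :params, :user_id

      def initialize(**kwargs)
        @user_id = kwargs.delete(:user_id) # consumed here, never sent to the API
        @params = DEFAULT_PARAMS.merge(kwargs)
      end
    end

    engine = ExampleEngine.new(temperature: 0.7, user_id: "user-42")
    engine.params  # => { model: "example-model", temperature: 0.7 }
    engine.user_id # => "user-42"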
@@ -60,7 +61,8 @@ module Boxcars
       request_context = {
         prompt: current_prompt_object,
         inputs:,
-        conversation_for_api: api_request_params&.dig(:messages)
+        conversation_for_api: api_request_params&.dig(:messages),
+        user_id:
       }
       track_ai_generation(
         duration_ms:,
@@ -74,23 +76,23 @@ module Boxcars
       # If there's an error, raise it to maintain backward compatibility with existing tests
       raise response_data[:error] if response_data[:error]

-      response_data
+      response_data[:parsed_json]
     end

     def run(question, **)
       prompt = Prompt.new(template: question)
-      response_data = client(prompt:, inputs: {}, **)
-      answer = _groq_handle_call_outcome(response_data:)
+      response = client(prompt:, inputs: {}, **)
+      answer = extract_answer(response)
       Boxcars.debug("Answer: #{answer}", :cyan)
       answer
     end

+    private
+
     def default_params
-      @groq_params # Use instance variable
+      @groq_params
     end

-    private
-
     # Helper methods for the client method
     def _prepare_groq_request_params(prompt_object, inputs, current_params)
       messages_hash_from_prompt = prompt_object.as_messages(inputs)
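Two behavioral changes land in this hunk: `client` now returns the parsed response body (`response_data[:parsed_json]`) rather than the whole bookkeeping hash, and `run` switches from the Groq-private `_groq_handle_call_outcome` to the shared `extract_answer` that moves onto the `Engine` base class (final hunk below). A hedged sketch of the new contract, with made-up data:

    # Illustration only, not gem code: internal bookkeeping stays private,
    # and callers of client receive the parsed JSON body alone.
    response_data = {
      response_obj: nil, # the raw transport response would live here
      parsed_json: { "choices" => [{ "message" => { "content" => "42" } }] },
      success: true,
      error: nil
    }

    raise response_data[:error] if response_data[:error]
    response_data[:parsed_json] # what client returns as of 0.8.5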
@@ -17,10 +17,11 @@ module Boxcars
     # @param batch_size [Integer] The number of prompts to send to the Engine at a time.
     # @param kwargs [Hash] Additional parameters to pass to the Engine.
     def initialize(provider:, description:, name:, prompts: [], batch_size: 20, **kwargs)
+      user_id = kwargs.delete(:user_id)
       @provider = provider
       # Start with defaults, merge other kwargs, then explicitly set model if provided in initialize
       @all_params = default_model_params.merge(kwargs)
-      super(description:, name:, prompts:, batch_size:)
+      super(description:, name:, prompts:, batch_size:, user_id:)
     end

     # can be overridden by provider subclass
@@ -68,7 +69,7 @@ module Boxcars

       adapter = adapter(api_key:, params:)
       convo = prompt.as_intelligence_conversation(inputs:)
-      request_context = { prompt: prompt&.as_prompt(inputs:)&.[](:prompt), inputs:, conversation_for_api: convo.to_h }
+      request_context = { user_id:, prompt: prompt&.as_prompt(inputs:)&.[](:prompt), inputs:, conversation_for_api: convo.to_h }
       request = Intelligence::ChatRequest.new(adapter:)

       start_time = Process.clock_gettime(Process::CLOCK_MONOTONIC)
@@ -118,17 +119,6 @@ module Boxcars

     private

-    def extract_answer(response)
-      # Handle different response formats
-      if response["choices"]
-        response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
-      elsif response["candidates"]
-        response["candidates"].map { |c| c.dig("content", "parts", 0, "text") }.join("\n").strip
-      else
-        response["output"] || response.to_s
-      end
-    end
-
     def check_response(response)
       return if response.is_a?(Hash) && response.key?("choices")

@@ -19,10 +19,11 @@ module Boxcars
                           "You should ask targeted questions"

     def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 2, **kwargs)
+      user_id = kwargs.delete(:user_id)
       @ollama_params = DEFAULT_PARAMS.merge(kwargs)
       @prompts = prompts
       @batch_size = batch_size # Retain if used by other methods
-      super(description:, name:)
+      super(description:, name:, user_id:)
     end

     # Renamed from open_ai_client to ollama_client for clarity
@@ -63,7 +64,8 @@ module Boxcars
       request_context = {
         prompt: current_prompt_object,
         inputs:,
-        conversation_for_api: api_request_params&.dig(:messages)
+        conversation_for_api: api_request_params&.dig(:messages),
+        user_id:
       }
       track_ai_generation(
         duration_ms:,
@@ -1,244 +1,256 @@
 # frozen_string_literal: true

-require 'openai'
-require 'json'
-require 'securerandom'
+require "openai"
+require "json"
+require "securerandom"

 module Boxcars
-  # A engine that uses OpenAI's API.
+  # Engine that talks to OpenAIs REST API.
   class Openai < Engine
     include UnifiedObservability
-    attr_reader :prompts, :open_ai_params, :model_kwargs, :batch_size
+
+    CHAT_MODEL_REGEX = /(^gpt-4)|(-turbo\b)|(^o\d)|(gpt-3\.5-turbo)/
+    O_SERIES_REGEX = /^o/

     DEFAULT_PARAMS = {
       model: "gpt-4o-mini",
       temperature: 0.1,
       max_tokens: 4096
     }.freeze
-    DEFAULT_NAME = "OpenAI engine"
-    DEFAULT_DESCRIPTION = "useful for when you need to use AI to answer questions. " \
-                          "You should ask targeted questions"
-
-    def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
-      @open_ai_params = DEFAULT_PARAMS.merge(kwargs)
-      # Special handling for o1-mini model (deprecated?)
-      if @open_ai_params[:model] =~ /^o/ && @open_ai_params[:max_tokens]
-        @open_ai_params[:max_completion_tokens] = @open_ai_params.delete(:max_tokens)
-        @open_ai_params.delete(:temperature) # o1-mini might not support temperature
-      end

-      @prompts = prompts
-      @batch_size = batch_size
-      super(description:, name:)
+    DEFAULT_NAME = "OpenAI engine"
+    DEFAULT_DESCRIPTION = "Useful when you need AI to answer questions. Ask targeted questions."
+
+    attr_reader :prompts, :open_ai_params, :batch_size
+
+    # --------------------------------------------------------------------------
+    # Construction
+    # --------------------------------------------------------------------------
+    def initialize(name: DEFAULT_NAME,
+                   description: DEFAULT_DESCRIPTION,
+                   prompts: [],
+                   batch_size: 20,
+                   **kwargs)
+      user_id = kwargs.delete(:user_id)
+      @open_ai_params = adjust_for_o_series!(DEFAULT_PARAMS.merge(kwargs))
+      @prompts = prompts
+      @batch_size = batch_size
+      super(description:, name:, user_id:)
     end

-    def self.open_ai_client(openai_access_token: nil)
-      access_token = Boxcars.configuration.openai_access_token(openai_access_token:)
-      organization_id = Boxcars.configuration.organization_id
-      # log_errors is good for the gem's own logging
-      ::OpenAI::Client.new(access_token:, organization_id:, log_errors: true)
-    end
+    # --------------------------------------------------------------------------
+    # Public API
+    # --------------------------------------------------------------------------
+    def client(prompt:, inputs: {}, openai_access_token: nil, **kwargs)
+      start_time = Time.now
+      response_data = { response_obj: nil, parsed_json: nil,
+                        success: false, error: nil, status_code: nil }
+      current_params = open_ai_params.merge(kwargs)
+      is_chat_model = chat_model?(current_params[:model])
+      prompt_object = prompt.is_a?(Array) ? prompt.first : prompt
+      api_request = build_api_request(prompt_object, inputs, current_params, chat: is_chat_model)

-    def conversation_model?(model_name)
-      !!(model_name =~ /(^gpt-4)|(-turbo\b)|(^o\d)|(gpt-3\.5-turbo)/) # Added gpt-3.5-turbo
+      begin
+        raw_response = execute_api_call(
+          self.class.open_ai_client(openai_access_token:),
+          is_chat_model,
+          api_request
+        )
+        process_response(raw_response, response_data)
+      rescue ::OpenAI::Error, StandardError => e
+        handle_error(e, response_data)
+      ensure
+        track_openai_observability(
+          {
+            start_time:,
+            prompt_object: prompt_object,
+            inputs: inputs,
+            api_request: api_request,
+            current_params: current_params,
+            is_chat_model: is_chat_model
+          },
+          response_data
+        )
+      end
+
+      handle_call_outcome(response_data:)
     end

-    def _prepare_openai_chat_request(prompt_object, inputs, current_params)
-      get_params(prompt_object, inputs, current_params.dup)
+    # Convenience one-shot helper used by Engine#generate
+    def run(question, **)
+      prompt = Prompt.new(template: question)
+      raw_json = client(prompt:, inputs: {}, **)
+      extract_answer_from_choices(raw_json["choices"]).tap do |ans|
+        Boxcars.debug("Answer: #{ans}", :cyan)
+      end
     end

-    def _prepare_openai_completion_request(prompt_object, inputs, current_params)
-      prompt_text_for_api = prompt_object.as_prompt(inputs:)
-      prompt_text_for_api = prompt_text_for_api[:prompt] if prompt_text_for_api.is_a?(Hash) && prompt_text_for_api.key?(:prompt)
-      { prompt: prompt_text_for_api }.merge(current_params).tap { |p| p.delete(:messages) }
+    # Expose the defaults so callers can introspect or dup/merge them
+    def default_params = open_ai_params
+
+    # --------------------------------------------------------------------------
+    # Class helpers
+    # --------------------------------------------------------------------------
+    def self.open_ai_client(openai_access_token: nil)
+      ::OpenAI::Client.new(
+        access_token: Boxcars.configuration.openai_access_token(openai_access_token:),
+        organization_id: Boxcars.configuration.organization_id,
+        log_errors: true
+      )
     end

-    def _execute_openai_api_call(client, is_chat_model, api_request_params)
-      if is_chat_model
-        log_messages_debug(api_request_params[:messages]) if Boxcars.configuration.log_prompts && api_request_params[:messages]
-        client.chat(parameters: api_request_params)
-      else
-        Boxcars.debug("Prompt after formatting:\n#{api_request_params[:prompt]}", :cyan) if Boxcars.configuration.log_prompts
-        client.completions(parameters: api_request_params)
+    # -- Public helper -------------------------------------------------------------
+    # Some callers outside this class still invoke `check_response` directly.
+    # It simply raises if the JSON body contains an "error" payload.
+    def check_response(response) # rubocop:disable Naming/PredicateMethod
+      if (msg = openai_error_message(response))
+        raise Boxcars::Error, msg
       end
+
+      true
     end

-    def _process_openai_response(raw_response, response_data)
-      response_data[:response_obj] = raw_response
-      response_data[:parsed_json] = raw_response # Already parsed by OpenAI gem
+    private

-      if raw_response && !raw_response["error"]
-        response_data[:success] = true
-        response_data[:status_code] = 200 # Inferred
+    # -- Request construction ---------------------------------------------------
+    def build_api_request(prompt_object, inputs, params, chat:)
+      if chat
+        build_chat_params(prompt_object, inputs, params.dup)
       else
-        response_data[:success] = false
-        err_details = raw_response["error"] if raw_response
-        msg = err_details ? "#{err_details['type']}: #{err_details['message']}" : "Unknown OpenAI API Error"
-        response_data[:error] ||= StandardError.new(msg) # Use ||= to not overwrite existing exception
+        build_completion_params(prompt_object, inputs, params.dup)
       end
     end

-    def _handle_openai_api_error(error, response_data)
-      response_data[:error] = error
-      response_data[:success] = false
-      response_data[:status_code] = error.http_status if error.respond_to?(:http_status)
+    def build_chat_params(prompt_object, inputs, params)
+      po = if prompt_object.is_a?(Boxcars::Prompt)
+             prompt_object
+           else
+             Boxcars::Prompt.new(template: prompt_object.to_s)
+           end
+      formatted = po.as_messages(inputs).merge(params)
+      adjust_for_o_series!(formatted)
     end

-    def _handle_openai_standard_error(error, response_data)
-      response_data[:error] = error
-      response_data[:success] = false
+    def build_completion_params(prompt_object, inputs, params)
+      prompt_txt = prompt_object.as_prompt(inputs:)
+      prompt_txt = prompt_txt[:prompt] if prompt_txt.is_a?(Hash) && prompt_txt.key?(:prompt)
+      { prompt: prompt_txt }.merge(params).tap { |h| h.delete(:messages) }
     end

-    def client(prompt:, inputs: {}, openai_access_token: nil, **kwargs)
-      start_time = Time.now
-      response_data = { response_obj: nil, parsed_json: nil, success: false, error: nil, status_code: nil }
-      current_params = open_ai_params.merge(kwargs)
-      current_prompt_object = prompt.is_a?(Array) ? prompt.first : prompt
-      api_request_params = nil
-      is_chat_model = conversation_model?(current_params[:model])
-
-      begin
-        clnt = Openai.open_ai_client(openai_access_token:)
-        api_request_params = if is_chat_model
-                               _prepare_openai_chat_request(current_prompt_object, inputs, current_params)
-                             else
-                               _prepare_openai_completion_request(current_prompt_object, inputs, current_params)
-                             end
-        raw_response = _execute_openai_api_call(clnt, is_chat_model, api_request_params)
-        _process_openai_response(raw_response, response_data)
-      rescue ::OpenAI::Error => e
-        _handle_openai_api_error(e, response_data)
-      rescue StandardError => e
-        _handle_openai_standard_error(e, response_data)
-      ensure
-        call_context = {
-          start_time:,
-          prompt_object: current_prompt_object,
-          inputs:,
-          api_request_params:,
-          current_params:,
-          is_chat_model:
-        }
-        _track_openai_observability(call_context, response_data)
+    # -- API call / response ----------------------------------------------------
+    def execute_api_call(client, chat_mode, api_request)
+      if chat_mode
+        log_messages_debug(api_request[:messages]) if Boxcars.configuration.log_prompts
+        client.chat(parameters: api_request)
+      else
+        Boxcars.debug("Prompt after formatting:\n#{api_request[:prompt]}", :cyan) if Boxcars.configuration.log_prompts
+        client.completions(parameters: api_request)
       end
-
-      _openai_handle_call_outcome(response_data:)
     end

-    # Called by Engine#generate to check the response from the client.
-    # @param response [Hash] The parsed JSON response from the OpenAI API.
-    # @raise [Boxcars::Error] if the response contains an error.
-    # rubocop:disable Naming/PredicateMethod
-    def check_response(response)
-      if response.is_a?(Hash) && response["error"]
-        err_details = response["error"]
-        msg = err_details ? "#{err_details['type']}: #{err_details['message']}" : "Unknown OpenAI API Error in check_response"
-        raise Boxcars::Error, msg
+    def process_response(raw, data)
+      data[:response_obj] = raw
+      data[:parsed_json] = raw
+
+      if (msg = openai_error_message(raw))
+        data[:success] = false
+        data[:status_code] = raw&.dig("error", "code") || 500
+        data[:error] = StandardError.new(msg)
+      else
+        data[:success] = true
+        data[:status_code] = 200
       end
-      true
     end
-    # rubocop:enable Naming/PredicateMethod

-    def run(question, **)
-      prompt = Prompt.new(template: question)
-      # client now returns the raw JSON response. We need to extract the answer.
-      raw_response = client(prompt:, inputs: {}, **)
-      answer = _extract_openai_answer_from_choices(raw_response["choices"])
-      Boxcars.debug("Answer: #{answer}", :cyan)
-      answer
+    def handle_error(error, data)
+      data[:error] = error
+      data[:success] = false
+      data[:status_code] = error.respond_to?(:http_status) ? error.http_status : 500
     end

-    def default_params
-      open_ai_params
+    def handle_call_outcome(response_data:)
+      return response_data[:parsed_json] if response_data[:success]
+
+      if response_data[:error]
+        raise_api_error(response_data[:error])
+      else
+        raise_body_error(response_data[:response_obj])
+      end
     end

-    private
+    # -- Extraction helpers -----------------------------------------------------
+    def extract_answer_from_choices(choices)
+      raise Error, "OpenAI: No choices found in response" unless choices.is_a?(Array) && choices.any?

-    def log_messages_debug(messages)
-      return unless messages.is_a?(Array)
+      content = choices.map { |c| c.dig("message", "content") }.compact
+      return content.join("\n").strip unless content.empty?
+
+      text = choices.map { |c| c["text"] }.compact
+      return text.join("\n").strip unless text.empty?

-      Boxcars.debug(messages.last(2).map { |p| ">>>>>> Role: #{p[:role]} <<<<<<\n#{p[:content]}" }.join("\n"), :cyan)
+      raise Error, "OpenAI: Could not extract answer from choices"
     end

-    def get_params(prompt_object, inputs, params)
-      # Ensure prompt_object is a Boxcars::Prompt
-      current_prompt_object = if prompt_object.is_a?(Boxcars::Prompt)
-                                prompt_object
-                              else
-                                Boxcars::Prompt.new(template: prompt_object.to_s)
-                              end
+    # -- Utility helpers --------------------------------------------------------
+    def chat_model?(model_name) = CHAT_MODEL_REGEX.match?(model_name)

-      # Use as_messages for chat models
-      formatted_params = current_prompt_object.as_messages(inputs).merge(params)
+    def openai_error_message(json)
+      err = json&.dig("error")
+      return unless err

-      # Handle models like o1-mini that don't support the system role
-      if formatted_params[:model] =~ /^o/ && formatted_params[:messages].first&.fetch(:role)&.to_s == 'system'
-        formatted_params[:messages].first[:role] = :user
-      end
-      # o1-mini specific param adjustments (already in initialize, but good to ensure here if params are rebuilt)
-      if formatted_params[:model] =~ /^o/
-        formatted_params.delete(:response_format)
-        formatted_params.delete(:stop)
-        if formatted_params.key?(:max_tokens) && !formatted_params.key?(:max_completion_tokens)
-          formatted_params[:max_completion_tokens] = formatted_params.delete(:max_tokens)
-        end
-        formatted_params.delete(:temperature)
-      end
-      formatted_params
+      err.is_a?(Hash) ? "#{err['type']}: #{err['message']}" : err.to_s
     end

-    def _handle_openai_error_outcome(error_data)
-      detailed_error_message = error_data.message
-      if error_data.respond_to?(:json_body) && error_data.json_body
-        detailed_error_message += " - Details: #{error_data.json_body}"
+    def adjust_for_o_series!(params)
+      return params unless params[:model] =~ O_SERIES_REGEX
+
+      params[:messages][0][:role] = :user if params.dig(:messages, 0, :role).to_s == "system"
+      params.delete(:response_format)
+      params.delete(:stop)
+      if params.key?(:max_tokens) && !params.key?(:max_completion_tokens)
+        params[:max_completion_tokens] =
+          params.delete(:max_tokens)
       end
-      Boxcars.error("OpenAI Error: #{detailed_error_message} (#{error_data.class.name})", :red)
-      raise error_data
+      params.delete(:temperature)
+      params
     end

-    def _handle_openai_response_body_error(response_obj)
-      err_details = response_obj&.dig("error")
-      msg = err_details ? "#{err_details['type']}: #{err_details['message']}" : "Unknown error from OpenAI API"
-      raise Error, msg
-    end
+    def log_messages_debug(messages)
+      return unless messages.is_a?(Array)

-    def _extract_openai_answer_from_choices(choices)
-      raise Error, "OpenAI: No choices found in response" unless choices.is_a?(Array) && !choices.empty?
+      Boxcars.debug(
+        messages.last(2).map { |m| ">>>>>> Role: #{m[:role]} <<<<<<\n#{m[:content]}" }.join("\n"),
+        :cyan
+      )
+    end

-      if choices.first&.dig("message", "content")
-        choices.map { |c| c.dig("message", "content") }.join("\n").strip
-      elsif choices.first&.dig("text")
-        choices.map { |c| c["text"] }.join("\n").strip
-      else
-        raise Error, "OpenAI: Could not extract answer from choices"
-      end
+    # -- Error raising ----------------------------------------------------------
+    def raise_api_error(err)
+      msg = err.message
+      msg += " - Details: #{err.json_body}" if err.respond_to?(:json_body) && err.json_body
+      Boxcars.error("OpenAI Error: #{msg} (#{err.class})", :red)
+      raise err
     end

-    def _openai_handle_call_outcome(response_data:)
-      if response_data[:error]
-        _handle_openai_error_outcome(response_data[:error])
-      elsif !response_data[:success] # e.g. raw_response["error"] was present
-        _handle_openai_response_body_error(response_data[:response_obj]) # Raises an error
-      else
-        response_data[:parsed_json] # Return the raw parsed JSON for Engine#generate
-      end
+    def raise_body_error(response_obj)
+      raise Error, openai_error_message(response_obj) || "Unknown error from OpenAI API"
     end

-    def _track_openai_observability(call_context, response_data)
-      duration_ms = ((Time.now - call_context[:start_time]) * 1000).round
-      is_chat_model = call_context[:is_chat_model]
-      api_request_params = call_context[:api_request_params] || {}
-      request_context = {
-        prompt: call_context[:prompt_object],
-        inputs: call_context[:inputs],
-        conversation_for_api: is_chat_model ? api_request_params[:messages] : api_request_params[:prompt]
-      }
+    # -- Observability ----------------------------------------------------------
+    def track_openai_observability(call_ctx, response_data)
+      duration_ms = ((Time.now - call_ctx[:start_time]) * 1000).round
+      api_req = call_ctx[:api_request] || {}

       track_ai_generation(
-        duration_ms:,
-        current_params: call_context[:current_params],
-        request_context:,
-        response_data:,
+        duration_ms: duration_ms,
+        current_params: call_ctx[:current_params],
+        request_context: {
+          prompt: call_ctx[:prompt_object],
+          inputs: call_ctx[:inputs],
+          user_id: user_id,
+          conversation_for_api: call_ctx[:is_chat_model] ? api_req[:messages] : api_req[:prompt]
+        },
+        response_data: response_data,
         provider: :openai
       )
     end
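The o-series special-casing that was spread across `initialize` and `get_params` in 0.8.3 is consolidated into `adjust_for_o_series!`, applied both at construction time and when chat params are built. A sketch of its effect on a hypothetical params hash (values invented):

    params = {
      model: "o1-mini",
      temperature: 0.1,
      max_tokens: 4096,
      stop: ["\n"],
      messages: [{ role: :system, content: "You are terse." }]
    }

    # After adjust_for_o_series!(params), per the method body above:
    # - messages[0][:role] becomes :user (o-series models reject the system role)
    # - :response_format, :stop and :temperature are dropped
    # - :max_tokens is renamed to :max_completion_tokens
    # leaving roughly:
    # { model: "o1-mini", max_completion_tokens: 4096,
    #   messages: [{ role: :user, content: "You are terse." }] }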
@@ -20,10 +20,11 @@ module Boxcars
                           "You should ask targeted questions"

     def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
+      user_id = kwargs.delete(:user_id)
       @perplexity_params = DEFAULT_PARAMS.merge(kwargs)
       @prompts = prompts
       @batch_size = batch_size # Retain if used by generate
-      super(description:, name:)
+      super(description:, name:, user_id:)
     end

     # Perplexity models are conversational.
@@ -96,6 +97,7 @@ module Boxcars
       request_context = {
         prompt: current_prompt_object,
         inputs:,
+        user_id:,
         conversation_for_api: api_request_params&.dig(:messages)
       }
       track_ai_generation(
@@ -52,7 +52,8 @@ module Boxcars
         '$ai_latency': duration_seconds,
         '$ai_http_status': extract_status_code(response_data) || (response_data[:success] ? 200 : 500),
         '$ai_base_url': get_base_url_for_provider(provider),
-        '$ai_is_error': !response_data[:success]
+        '$ai_is_error': !response_data[:success],
+        user_id:
       }

       # Add error details if present
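With this hunk, the `$ai_*` event properties built by the shared observability helper carry the `user_id` alongside the latency and error fields. Illustrative values only; the real hash contains more keys than shown here:

    {
      '$ai_latency': 0.42,
      '$ai_http_status': 200,
      '$ai_base_url': "https://api.openai.com/v1",
      '$ai_is_error': false,
      user_id: "user-42"
    }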
@@ -3,18 +3,20 @@
 module Boxcars
   # @abstract
   class Engine
-    attr_reader :prompts, :batch_size
+    attr_reader :prompts, :batch_size, :user_id

     # An Engine is used by Boxcars to generate output from prompts
     # @param name [String] The name of the Engine. Defaults to classname.
     # @param description [String] A description of the Engine.
     # @param prompts [Array<Prompt>] The prompts to use for the Engine.
     # @param batch_size [Integer] The number of prompts to send to the Engine at a time.
-    def initialize(description: 'Engine', name: nil, prompts: [], batch_size: 20)
+    # @param user_id [String, Integer] The ID of the user using this Engine (optional for observability).
+    def initialize(description: 'Engine', name: nil, prompts: [], batch_size: 20, user_id: nil)
       @name = name || self.class.name
       @description = description
       @prompts = prompts
       @batch_size = batch_size
+      @user_id = user_id
     end

     # Get an answer from the Engine.
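Taken together with the engine hunks above, any engine subclass can now be constructed with `user_id:`, which the base class stores and exposes. A hedged usage sketch (assuming the constructors exactly as diffed; the API call itself is commented out):

    require "boxcars"

    engine = Boxcars::Openai.new(user_id: "user-42", temperature: 0.2)
    engine.user_id # => "user-42", via the new attr_reader on Engine
    # engine.run("What is 2 + 2?") # would call OpenAI and track the
    #                              # generation with user_id attached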
@@ -60,14 +62,8 @@ module Boxcars
       sub_prompts.each do |sprompt, inputs|
         client_response = client(prompt: sprompt, inputs:, **params)

-        # Handle different response formats:
-        # - New format: response_data hash with :parsed_json key (Groq, Gemini)
-        # - Legacy format: direct API response hash (OpenAI, others)
-        api_response_hash = if client_response.is_a?(Hash) && client_response.key?(:parsed_json)
-                              client_response[:parsed_json]
-                            else
-                              client_response
-                            end
+        # All engines now return the parsed API response hash directly
+        api_response_hash = client_response

         # Ensure we have a hash to work with
         unless api_response_hash.is_a?(Hash)
@@ -101,6 +97,17 @@ module Boxcars
       end
       EngineResult.new(generations:, engine_output: { token_usage: })
     end
+
+    def extract_answer(response)
+      # Handle different response formats
+      if response["choices"]
+        response["choices"].map { |c| c.dig("message", "content") || c["text"] }.join("\n").strip
+      elsif response["candidates"]
+        response["candidates"].map { |c| c.dig("content", "parts", 0, "text") }.join("\n").strip
+      else
+        response["output"] || response.to_s
+      end
+    end
   end
 end
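`extract_answer` moves from the Intelligence base (where an earlier hunk removed it) up to `Engine`, so every engine's `run` can share one answer-extraction path. A quick sketch of the three response shapes it distinguishes, using invented payloads and assuming the method is public as the hunk suggests:

    engine = Boxcars::Engine.new(description: "demo")

    # OpenAI-style "choices"
    engine.extract_answer("choices" => [{ "message" => { "content" => "4" } }]) # => "4"

    # Gemini-style "candidates"
    engine.extract_answer("candidates" => [{ "content" => { "parts" => [{ "text" => "4" }] } }]) # => "4"

    # Anything else falls back to "output", then to_s
    engine.extract_answer("output" => "4") # => "4"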