boxcars 0.8.4 → 0.8.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA256:
3
- metadata.gz: 40c67f759e97f22570fba131b967c73b871b1f96cfbc97df78b1c0438bdf4291
4
- data.tar.gz: 55f5e64fa79b8035e1bceb95912cbc5a8c587c29fa3b09d89a65ab83fe4375a6
3
+ metadata.gz: c1970a509d4c6eebaeaa06792993f7aaa1ba8d2d030e878138a8d6f821726fcc
4
+ data.tar.gz: 66ae9f13950ab9a0dd2369a1450fd14d4426faa95d8a62da7b4c7deeddbc6b3d
5
5
  SHA512:
6
- metadata.gz: ba4b4434d263c9b4a875f5ab2db607d11a91795592f39c608143c058a38b773e6349ece90946f7a2137a6edd56d375ebd5992195921660ceac2fa5b4262e6ef5
7
- data.tar.gz: c8d58cdfeb28d9a2d6bcba655478d7dd3572db4c2fbd72a566cca6a71c37941d0b7592c02c75f3430e8e7821dcf959942de2d960335864d70da1976e9f7ae63a
6
+ metadata.gz: 7e6d1f2f4788bb24de873a2e7270b52f44ce31017ccd39e1db321b91bd595d1f832c6c9aa4b9ca587626c3390928e70025014a360ba8052b9e870c50566ccb29
7
+ data.tar.gz: 4073745656e6944e8e686eee4502916038470ff32261990c1444e50d951a819288a24b993dc51659168b9246dab7020f40fd20f210b9421ab5890e6ec4402d66
data/Gemfile.lock CHANGED
@@ -1,7 +1,7 @@
1
1
  PATH
2
2
  remote: .
3
3
  specs:
4
- boxcars (0.8.4)
4
+ boxcars (0.8.5)
5
5
  faraday-retry (~> 2.0)
6
6
  google_search_results (~> 2.2)
7
7
  gpt4all (~> 0.0.5)
@@ -47,7 +47,7 @@ GEM
47
47
  protocol-http1 (~> 0.19.0)
48
48
  protocol-http2 (~> 0.16.0)
49
49
  traces (>= 0.10.0)
50
- async-http-faraday (0.21.0)
50
+ async-http-faraday (0.22.0)
51
51
  async-http (~> 0.42)
52
52
  faraday
53
53
  async-io (1.43.2)
@@ -67,7 +67,7 @@ GEM
67
67
  bigdecimal
68
68
  rexml
69
69
  date (3.4.1)
70
- debug (1.10.0)
70
+ debug (1.11.0)
71
71
  irb (~> 1.10)
72
72
  reline (>= 0.3.8)
73
73
  diff-lcs (1.6.2)
@@ -83,11 +83,11 @@ GEM
83
83
  logger
84
84
  faraday-http-cache (2.5.1)
85
85
  faraday (>= 0.8)
86
- faraday-multipart (1.1.0)
86
+ faraday-multipart (1.1.1)
87
87
  multipart-post (~> 2.0)
88
- faraday-net_http (3.4.0)
88
+ faraday-net_http (3.4.1)
89
89
  net-http (>= 0.5.0)
90
- faraday-retry (2.3.1)
90
+ faraday-retry (2.3.2)
91
91
  faraday (~> 2.0)
92
92
  fiber-annotation (0.2.0)
93
93
  fiber-local (1.1.0)
@@ -114,8 +114,8 @@ GEM
114
114
  domain_name (~> 0.5)
115
115
  i18n (1.14.7)
116
116
  concurrent-ruby (~> 1.0)
117
- intelligence (0.8.0)
118
- dynamicschema (~> 1.0.0.beta03)
117
+ intelligence (1.0.0)
118
+ dynamicschema (~> 1.0)
119
119
  faraday (~> 2.7)
120
120
  json-repair (~> 0.2)
121
121
  mime-types (~> 3.6)
@@ -132,7 +132,7 @@ GEM
132
132
  mime-types (3.7.0)
133
133
  logger
134
134
  mime-types-data (~> 3.2025, >= 3.2025.0507)
135
- mime-types-data (3.2025.0603)
135
+ mime-types-data (3.2025.0624)
136
136
  minitest (5.25.5)
137
137
  multi_json (1.15.0)
138
138
  multipart-post (2.4.1)
@@ -186,7 +186,7 @@ GEM
186
186
  racc (1.8.1)
187
187
  rainbow (3.1.1)
188
188
  rake (13.3.0)
189
- rdoc (6.14.0)
189
+ rdoc (6.14.1)
190
190
  erb
191
191
  psych (>= 4.0.0)
192
192
  regexp_parser (2.10.0)
@@ -202,7 +202,7 @@ GEM
202
202
  rspec-core (~> 3.13.0)
203
203
  rspec-expectations (~> 3.13.0)
204
204
  rspec-mocks (~> 3.13.0)
205
- rspec-core (3.13.4)
205
+ rspec-core (3.13.5)
206
206
  rspec-support (~> 3.13.0)
207
207
  rspec-expectations (3.13.5)
208
208
  diff-lcs (>= 1.2.0, < 2.0)
@@ -211,7 +211,7 @@ GEM
211
211
  diff-lcs (>= 1.2.0, < 2.0)
212
212
  rspec-support (~> 3.13.0)
213
213
  rspec-support (3.13.4)
214
- rubocop (1.76.1)
214
+ rubocop (1.77.0)
215
215
  json (~> 2.3)
216
216
  language_server-protocol (~> 3.17.0.2)
217
217
  lint_roller (~> 1.1.0)
@@ -219,7 +219,7 @@ GEM
219
219
  parser (>= 3.3.0.2)
220
220
  rainbow (>= 2.2.2, < 4.0)
221
221
  regexp_parser (>= 2.9.3, < 3.0)
222
- rubocop-ast (>= 1.45.0, < 2.0)
222
+ rubocop-ast (>= 1.45.1, < 2.0)
223
223
  ruby-progressbar (~> 1.7)
224
224
  unicode-display_width (>= 2.4.0, < 4.0)
225
225
  rubocop-ast (1.45.1)
@@ -243,14 +243,14 @@ GEM
243
243
  addressable (>= 2.3.5)
244
244
  faraday (>= 0.17.3, < 3)
245
245
  securerandom (0.4.1)
246
- sqlite3 (2.7.0-aarch64-linux-gnu)
247
- sqlite3 (2.7.0-aarch64-linux-musl)
248
- sqlite3 (2.7.0-arm-linux-gnu)
249
- sqlite3 (2.7.0-arm-linux-musl)
250
- sqlite3 (2.7.0-arm64-darwin)
251
- sqlite3 (2.7.0-x86_64-darwin)
252
- sqlite3 (2.7.0-x86_64-linux-gnu)
253
- sqlite3 (2.7.0-x86_64-linux-musl)
246
+ sqlite3 (2.7.1-aarch64-linux-gnu)
247
+ sqlite3 (2.7.1-aarch64-linux-musl)
248
+ sqlite3 (2.7.1-arm-linux-gnu)
249
+ sqlite3 (2.7.1-arm-linux-musl)
250
+ sqlite3 (2.7.1-arm64-darwin)
251
+ sqlite3 (2.7.1-x86_64-darwin)
252
+ sqlite3 (2.7.1-x86_64-linux-gnu)
253
+ sqlite3 (2.7.1-x86_64-linux-musl)
254
254
  stringio (3.1.7)
255
255
  strings-ansi (0.2.0)
256
256
  timeout (0.4.3)
@@ -1,246 +1,256 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- require 'openai'
4
- require 'json'
5
- require 'securerandom'
3
+ require "openai"
4
+ require "json"
5
+ require "securerandom"
6
6
 
7
7
  module Boxcars
8
- # A engine that uses OpenAI's API.
8
+ # Engine that talks to OpenAI's REST API.
9
9
  class Openai < Engine
10
10
  include UnifiedObservability
11
- attr_reader :prompts, :open_ai_params, :model_kwargs, :batch_size
11
+
12
+ CHAT_MODEL_REGEX = /(^gpt-4)|(-turbo\b)|(^o\d)|(gpt-3\.5-turbo)/
13
+ O_SERIES_REGEX = /^o/
12
14
 
13
15
  DEFAULT_PARAMS = {
14
16
  model: "gpt-4o-mini",
15
17
  temperature: 0.1,
16
18
  max_tokens: 4096
17
19
  }.freeze
18
- DEFAULT_NAME = "OpenAI engine"
19
- DEFAULT_DESCRIPTION = "useful for when you need to use AI to answer questions. " \
20
- "You should ask targeted questions"
21
-
22
- def initialize(name: DEFAULT_NAME, description: DEFAULT_DESCRIPTION, prompts: [], batch_size: 20, **kwargs)
23
- user_id = kwargs.delete(:user_id)
24
- @open_ai_params = DEFAULT_PARAMS.merge(kwargs)
25
- # Special handling for o1-mini model (deprecated?)
26
- if @open_ai_params[:model] =~ /^o/ && @open_ai_params[:max_tokens]
27
- @open_ai_params[:max_completion_tokens] = @open_ai_params.delete(:max_tokens)
28
- @open_ai_params.delete(:temperature) # o1-mini might not support temperature
29
- end
30
20
 
31
- @prompts = prompts
32
- @batch_size = batch_size
21
+ DEFAULT_NAME = "OpenAI engine"
22
+ DEFAULT_DESCRIPTION = "Useful when you need AI to answer questions. Ask targeted questions."
23
+
24
+ attr_reader :prompts, :open_ai_params, :batch_size
25
+
26
+ # --------------------------------------------------------------------------
27
+ # Construction
28
+ # --------------------------------------------------------------------------
29
+ def initialize(name: DEFAULT_NAME,
30
+ description: DEFAULT_DESCRIPTION,
31
+ prompts: [],
32
+ batch_size: 20,
33
+ **kwargs)
34
+ user_id = kwargs.delete(:user_id)
35
+ @open_ai_params = adjust_for_o_series!(DEFAULT_PARAMS.merge(kwargs))
36
+ @prompts = prompts
37
+ @batch_size = batch_size
33
38
  super(description:, name:, user_id:)
34
39
  end
35
40
 
36
- def self.open_ai_client(openai_access_token: nil)
37
- access_token = Boxcars.configuration.openai_access_token(openai_access_token:)
38
- organization_id = Boxcars.configuration.organization_id
39
- # log_errors is good for the gem's own logging
40
- ::OpenAI::Client.new(access_token:, organization_id:, log_errors: true)
41
- end
41
+ # --------------------------------------------------------------------------
42
+ # Public API
43
+ # --------------------------------------------------------------------------
44
+ def client(prompt:, inputs: {}, openai_access_token: nil, **kwargs)
45
+ start_time = Time.now
46
+ response_data = { response_obj: nil, parsed_json: nil,
47
+ success: false, error: nil, status_code: nil }
48
+ current_params = open_ai_params.merge(kwargs)
49
+ is_chat_model = chat_model?(current_params[:model])
50
+ prompt_object = prompt.is_a?(Array) ? prompt.first : prompt
51
+ api_request = build_api_request(prompt_object, inputs, current_params, chat: is_chat_model)
42
52
 
43
- def conversation_model?(model_name)
44
- !!(model_name =~ /(^gpt-4)|(-turbo\b)|(^o\d)|(gpt-3\.5-turbo)/) # Added gpt-3.5-turbo
53
+ begin
54
+ raw_response = execute_api_call(
55
+ self.class.open_ai_client(openai_access_token:),
56
+ is_chat_model,
57
+ api_request
58
+ )
59
+ process_response(raw_response, response_data)
60
+ rescue ::OpenAI::Error, StandardError => e
61
+ handle_error(e, response_data)
62
+ ensure
63
+ track_openai_observability(
64
+ {
65
+ start_time:,
66
+ prompt_object: prompt_object,
67
+ inputs: inputs,
68
+ api_request: api_request,
69
+ current_params: current_params,
70
+ is_chat_model: is_chat_model
71
+ },
72
+ response_data
73
+ )
74
+ end
75
+
76
+ handle_call_outcome(response_data:)
45
77
  end
46
78
 
47
- def _prepare_openai_chat_request(prompt_object, inputs, current_params)
48
- get_params(prompt_object, inputs, current_params.dup)
79
+ # Convenience one-shot helper used by Engine#generate
80
+ def run(question, **)
81
+ prompt = Prompt.new(template: question)
82
+ raw_json = client(prompt:, inputs: {}, **)
83
+ extract_answer_from_choices(raw_json["choices"]).tap do |ans|
84
+ Boxcars.debug("Answer: #{ans}", :cyan)
85
+ end
49
86
  end
50
87
 
51
- def _prepare_openai_completion_request(prompt_object, inputs, current_params)
52
- prompt_text_for_api = prompt_object.as_prompt(inputs:)
53
- prompt_text_for_api = prompt_text_for_api[:prompt] if prompt_text_for_api.is_a?(Hash) && prompt_text_for_api.key?(:prompt)
54
- { prompt: prompt_text_for_api }.merge(current_params).tap { |p| p.delete(:messages) }
88
+ # Expose the defaults so callers can introspect or dup/merge them
89
+ def default_params = open_ai_params
90
+
91
+ # --------------------------------------------------------------------------
92
+ # Class helpers
93
+ # --------------------------------------------------------------------------
94
+ def self.open_ai_client(openai_access_token: nil)
95
+ ::OpenAI::Client.new(
96
+ access_token: Boxcars.configuration.openai_access_token(openai_access_token:),
97
+ organization_id: Boxcars.configuration.organization_id,
98
+ log_errors: true
99
+ )
55
100
  end
56
101
 
57
- def _execute_openai_api_call(client, is_chat_model, api_request_params)
58
- if is_chat_model
59
- log_messages_debug(api_request_params[:messages]) if Boxcars.configuration.log_prompts && api_request_params[:messages]
60
- client.chat(parameters: api_request_params)
61
- else
62
- Boxcars.debug("Prompt after formatting:\n#{api_request_params[:prompt]}", :cyan) if Boxcars.configuration.log_prompts
63
- client.completions(parameters: api_request_params)
102
+ # -- Public helper -------------------------------------------------------------
103
+ # Some callers outside this class still invoke `check_response` directly.
104
+ # It simply raises if the JSON body contains an "error" payload.
105
+ def check_response(response) # rubocop:disable Naming/PredicateMethod
106
+ if (msg = openai_error_message(response))
107
+ raise Boxcars::Error, msg
64
108
  end
109
+
110
+ true
65
111
  end
66
112
 
67
- def _process_openai_response(raw_response, response_data)
68
- response_data[:response_obj] = raw_response
69
- response_data[:parsed_json] = raw_response # Already parsed by OpenAI gem
113
+ private
70
114
 
71
- if raw_response && !raw_response["error"]
72
- response_data[:success] = true
73
- response_data[:status_code] = 200 # Inferred
115
+ # -- Request construction ---------------------------------------------------
116
+ def build_api_request(prompt_object, inputs, params, chat:)
117
+ if chat
118
+ build_chat_params(prompt_object, inputs, params.dup)
74
119
  else
75
- response_data[:success] = false
76
- err_details = raw_response["error"] if raw_response
77
- msg = err_details ? "#{err_details['type']}: #{err_details['message']}" : "Unknown OpenAI API Error"
78
- response_data[:error] ||= StandardError.new(msg) # Use ||= to not overwrite existing exception
120
+ build_completion_params(prompt_object, inputs, params.dup)
79
121
  end
80
122
  end
81
123
 
82
- def _handle_openai_api_error(error, response_data)
83
- response_data[:error] = error
84
- response_data[:success] = false
85
- response_data[:status_code] = error.http_status if error.respond_to?(:http_status)
124
+ def build_chat_params(prompt_object, inputs, params)
125
+ po = if prompt_object.is_a?(Boxcars::Prompt)
126
+ prompt_object
127
+ else
128
+ Boxcars::Prompt.new(template: prompt_object.to_s)
129
+ end
130
+ formatted = po.as_messages(inputs).merge(params)
131
+ adjust_for_o_series!(formatted)
86
132
  end
87
133
 
88
- def _handle_openai_standard_error(error, response_data)
89
- response_data[:error] = error
90
- response_data[:success] = false
134
+ def build_completion_params(prompt_object, inputs, params)
135
+ prompt_txt = prompt_object.as_prompt(inputs:)
136
+ prompt_txt = prompt_txt[:prompt] if prompt_txt.is_a?(Hash) && prompt_txt.key?(:prompt)
137
+ { prompt: prompt_txt }.merge(params).tap { |h| h.delete(:messages) }
91
138
  end
92
139
 
93
- def client(prompt:, inputs: {}, openai_access_token: nil, **kwargs)
94
- start_time = Time.now
95
- response_data = { response_obj: nil, parsed_json: nil, success: false, error: nil, status_code: nil }
96
- current_params = open_ai_params.merge(kwargs)
97
- current_prompt_object = prompt.is_a?(Array) ? prompt.first : prompt
98
- api_request_params = nil
99
- is_chat_model = conversation_model?(current_params[:model])
100
-
101
- begin
102
- clnt = Openai.open_ai_client(openai_access_token:)
103
- api_request_params = if is_chat_model
104
- _prepare_openai_chat_request(current_prompt_object, inputs, current_params)
105
- else
106
- _prepare_openai_completion_request(current_prompt_object, inputs, current_params)
107
- end
108
- raw_response = _execute_openai_api_call(clnt, is_chat_model, api_request_params)
109
- _process_openai_response(raw_response, response_data)
110
- rescue ::OpenAI::Error => e
111
- _handle_openai_api_error(e, response_data)
112
- rescue StandardError => e
113
- _handle_openai_standard_error(e, response_data)
114
- ensure
115
- call_context = {
116
- start_time:,
117
- prompt_object: current_prompt_object,
118
- inputs:,
119
- api_request_params:,
120
- current_params:,
121
- is_chat_model:
122
- }
123
- _track_openai_observability(call_context, response_data)
140
+ # -- API call / response ----------------------------------------------------
141
+ def execute_api_call(client, chat_mode, api_request)
142
+ if chat_mode
143
+ log_messages_debug(api_request[:messages]) if Boxcars.configuration.log_prompts
144
+ client.chat(parameters: api_request)
145
+ else
146
+ Boxcars.debug("Prompt after formatting:\n#{api_request[:prompt]}", :cyan) if Boxcars.configuration.log_prompts
147
+ client.completions(parameters: api_request)
124
148
  end
125
-
126
- _openai_handle_call_outcome(response_data:)
127
149
  end
128
150
 
129
- # Called by Engine#generate to check the response from the client.
130
- # @param response [Hash] The parsed JSON response from the OpenAI API.
131
- # @raise [Boxcars::Error] if the response contains an error.
132
- # rubocop:disable Naming/PredicateMethod
133
- def check_response(response)
134
- if response.is_a?(Hash) && response["error"]
135
- err_details = response["error"]
136
- msg = err_details ? "#{err_details['type']}: #{err_details['message']}" : "Unknown OpenAI API Error in check_response"
137
- raise Boxcars::Error, msg
151
+ def process_response(raw, data)
152
+ data[:response_obj] = raw
153
+ data[:parsed_json] = raw
154
+
155
+ if (msg = openai_error_message(raw))
156
+ data[:success] = false
157
+ data[:status_code] = raw&.dig("error", "code") || 500
158
+ data[:error] = StandardError.new(msg)
159
+ else
160
+ data[:success] = true
161
+ data[:status_code] = 200
138
162
  end
139
- true
140
163
  end
141
- # rubocop:enable Naming/PredicateMethod
142
164
 
143
- def run(question, **)
144
- prompt = Prompt.new(template: question)
145
- # client now returns the raw JSON response. We need to extract the answer.
146
- raw_response = client(prompt:, inputs: {}, **)
147
- answer = _extract_openai_answer_from_choices(raw_response["choices"])
148
- Boxcars.debug("Answer: #{answer}", :cyan)
149
- answer
165
+ def handle_error(error, data)
166
+ data[:error] = error
167
+ data[:success] = false
168
+ data[:status_code] = error.respond_to?(:http_status) ? error.http_status : 500
150
169
  end
151
170
 
152
- def default_params
153
- open_ai_params
171
+ def handle_call_outcome(response_data:)
172
+ return response_data[:parsed_json] if response_data[:success]
173
+
174
+ if response_data[:error]
175
+ raise_api_error(response_data[:error])
176
+ else
177
+ raise_body_error(response_data[:response_obj])
178
+ end
154
179
  end
155
180
 
156
- private
181
+ # -- Extraction helpers -----------------------------------------------------
182
+ def extract_answer_from_choices(choices)
183
+ raise Error, "OpenAI: No choices found in response" unless choices.is_a?(Array) && choices.any?
157
184
 
158
- def log_messages_debug(messages)
159
- return unless messages.is_a?(Array)
185
+ content = choices.map { |c| c.dig("message", "content") }.compact
186
+ return content.join("\n").strip unless content.empty?
187
+
188
+ text = choices.map { |c| c["text"] }.compact
189
+ return text.join("\n").strip unless text.empty?
160
190
 
161
- Boxcars.debug(messages.last(2).map { |p| ">>>>>> Role: #{p[:role]} <<<<<<\n#{p[:content]}" }.join("\n"), :cyan)
191
+ raise Error, "OpenAI: Could not extract answer from choices"
162
192
  end
163
193
 
164
- def get_params(prompt_object, inputs, params)
165
- # Ensure prompt_object is a Boxcars::Prompt
166
- current_prompt_object = if prompt_object.is_a?(Boxcars::Prompt)
167
- prompt_object
168
- else
169
- Boxcars::Prompt.new(template: prompt_object.to_s)
170
- end
194
+ # -- Utility helpers --------------------------------------------------------
195
+ def chat_model?(model_name) = CHAT_MODEL_REGEX.match?(model_name)
171
196
 
172
- # Use as_messages for chat models
173
- formatted_params = current_prompt_object.as_messages(inputs).merge(params)
197
+ def openai_error_message(json)
198
+ err = json&.dig("error")
199
+ return unless err
174
200
 
175
- # Handle models like o1-mini that don't support the system role
176
- if formatted_params[:model] =~ /^o/ && formatted_params[:messages].first&.fetch(:role)&.to_s == 'system'
177
- formatted_params[:messages].first[:role] = :user
178
- end
179
- # o1-mini specific param adjustments (already in initialize, but good to ensure here if params are rebuilt)
180
- if formatted_params[:model] =~ /^o/
181
- formatted_params.delete(:response_format)
182
- formatted_params.delete(:stop)
183
- if formatted_params.key?(:max_tokens) && !formatted_params.key?(:max_completion_tokens)
184
- formatted_params[:max_completion_tokens] = formatted_params.delete(:max_tokens)
185
- end
186
- formatted_params.delete(:temperature)
187
- end
188
- formatted_params
201
+ err.is_a?(Hash) ? "#{err['type']}: #{err['message']}" : err.to_s
189
202
  end
190
203
 
191
- def _handle_openai_error_outcome(error_data)
192
- detailed_error_message = error_data.message
193
- if error_data.respond_to?(:json_body) && error_data.json_body
194
- detailed_error_message += " - Details: #{error_data.json_body}"
204
+ def adjust_for_o_series!(params)
205
+ return params unless params[:model] =~ O_SERIES_REGEX
206
+
207
+ params[:messages][0][:role] = :user if params.dig(:messages, 0, :role).to_s == "system"
208
+ params.delete(:response_format)
209
+ params.delete(:stop)
210
+ if params.key?(:max_tokens) && !params.key?(:max_completion_tokens)
211
+ params[:max_completion_tokens] =
212
+ params.delete(:max_tokens)
195
213
  end
196
- Boxcars.error("OpenAI Error: #{detailed_error_message} (#{error_data.class.name})", :red)
197
- raise error_data
214
+ params.delete(:temperature)
215
+ params
198
216
  end
199
217
 
200
- def _handle_openai_response_body_error(response_obj)
201
- err_details = response_obj&.dig("error")
202
- msg = err_details ? "#{err_details['type']}: #{err_details['message']}" : "Unknown error from OpenAI API"
203
- raise Error, msg
204
- end
218
+ def log_messages_debug(messages)
219
+ return unless messages.is_a?(Array)
205
220
 
206
- def _extract_openai_answer_from_choices(choices)
207
- raise Error, "OpenAI: No choices found in response" unless choices.is_a?(Array) && !choices.empty?
221
+ Boxcars.debug(
222
+ messages.last(2).map { |m| ">>>>>> Role: #{m[:role]} <<<<<<\n#{m[:content]}" }.join("\n"),
223
+ :cyan
224
+ )
225
+ end
208
226
 
209
- if choices.first&.dig("message", "content")
210
- choices.map { |c| c.dig("message", "content") }.join("\n").strip
211
- elsif choices.first&.dig("text")
212
- choices.map { |c| c["text"] }.join("\n").strip
213
- else
214
- raise Error, "OpenAI: Could not extract answer from choices"
215
- end
227
+ # -- Error raising ----------------------------------------------------------
228
+ def raise_api_error(err)
229
+ msg = err.message
230
+ msg += " - Details: #{err.json_body}" if err.respond_to?(:json_body) && err.json_body
231
+ Boxcars.error("OpenAI Error: #{msg} (#{err.class})", :red)
232
+ raise err
216
233
  end
217
234
 
218
- def _openai_handle_call_outcome(response_data:)
219
- if response_data[:error]
220
- _handle_openai_error_outcome(response_data[:error])
221
- elsif !response_data[:success] # e.g. raw_response["error"] was present
222
- _handle_openai_response_body_error(response_data[:response_obj]) # Raises an error
223
- else
224
- response_data[:parsed_json] # Return the raw parsed JSON for Engine#generate
225
- end
235
+ def raise_body_error(response_obj)
236
+ raise Error, openai_error_message(response_obj) || "Unknown error from OpenAI API"
226
237
  end
227
238
 
228
- def _track_openai_observability(call_context, response_data)
229
- duration_ms = ((Time.now - call_context[:start_time]) * 1000).round
230
- is_chat_model = call_context[:is_chat_model]
231
- api_request_params = call_context[:api_request_params] || {}
232
- request_context = {
233
- prompt: call_context[:prompt_object],
234
- inputs: call_context[:inputs],
235
- user_id:,
236
- conversation_for_api: is_chat_model ? api_request_params[:messages] : api_request_params[:prompt]
237
- }
239
+ # -- Observability ----------------------------------------------------------
240
+ def track_openai_observability(call_ctx, response_data)
241
+ duration_ms = ((Time.now - call_ctx[:start_time]) * 1000).round
242
+ api_req = call_ctx[:api_request] || {}
238
243
 
239
244
  track_ai_generation(
240
- duration_ms:,
241
- current_params: call_context[:current_params],
242
- request_context:,
243
- response_data:,
245
+ duration_ms: duration_ms,
246
+ current_params: call_ctx[:current_params],
247
+ request_context: {
248
+ prompt: call_ctx[:prompt_object],
249
+ inputs: call_ctx[:inputs],
250
+ user_id: user_id,
251
+ conversation_for_api: call_ctx[:is_chat_model] ? api_req[:messages] : api_req[:prompt]
252
+ },
253
+ response_data: response_data,
244
254
  provider: :openai
245
255
  )
246
256
  end
@@ -2,5 +2,5 @@
2
2
 
3
3
  module Boxcars
4
4
  # The current version of the gem.
5
- VERSION = "0.8.4"
5
+ VERSION = "0.8.5"
6
6
  end
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: boxcars
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.8.4
4
+ version: 0.8.5
5
5
  platform: ruby
6
6
  authors:
7
7
  - Francis Sullivan