braintrust 0.0.11 → 0.1.0

Files changed (49)
  1. checksums.yaml +4 -4
  2. data/README.md +213 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  7. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  8. data/lib/braintrust/contrib/anthropic/patcher.rb +62 -0
  9. data/lib/braintrust/contrib/context.rb +56 -0
  10. data/lib/braintrust/contrib/integration.rb +160 -0
  11. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  12. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  15. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  16. data/lib/braintrust/contrib/openai/patcher.rb +130 -0
  17. data/lib/braintrust/contrib/patcher.rb +76 -0
  18. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  19. data/lib/braintrust/contrib/registry.rb +107 -0
  20. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  21. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  22. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  23. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  24. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  25. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  26. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  27. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  29. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  30. data/lib/braintrust/contrib/ruby_openai/patcher.rb +85 -0
  31. data/lib/braintrust/contrib/setup.rb +168 -0
  32. data/lib/braintrust/contrib/support/openai.rb +72 -0
  33. data/lib/braintrust/contrib/support/otel.rb +23 -0
  34. data/lib/braintrust/contrib.rb +205 -0
  35. data/lib/braintrust/internal/encoding.rb +40 -0
  36. data/lib/braintrust/internal/env.rb +33 -0
  37. data/lib/braintrust/internal/time.rb +44 -0
  38. data/lib/braintrust/setup.rb +50 -0
  39. data/lib/braintrust/state.rb +5 -0
  40. data/lib/braintrust/trace/attachment.rb +2 -2
  41. data/lib/braintrust/trace.rb +0 -51
  42. data/lib/braintrust/version.rb +1 -1
  43. data/lib/braintrust.rb +10 -1
  44. metadata +39 -7
  45. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  46. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  47. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -560
  48. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  49. data/lib/braintrust/trace/tokens.rb +0 -109
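
The headline change is structural: the single-file, vendor-path-keyed tracing shims removed in items 45–49 are replaced by the per-provider `contrib` tree added in items 4–34 (integration, patcher, and instrumentation modules, plus `deprecated.rb` shims for the old entry points). For reference, the deleted RubyLLM shim's documented entry points, taken from its own usage comments in the removed source below, looked like this — a minimal sketch assuming the 0.0.11 gem, the `ruby_llm` gem, and an already-configured global OpenTelemetry tracer provider:

    # Wrap the class once (affects all future RubyLLM::Chat instances):
    Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap

    # Or wrap a single instance:
    chat = RubyLLM.chat(model: "gpt-4o-mini")
    Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap(chat)
    chat.ask("Hello!")  # complete() is traced as a "ruby_llm.chat" span

Both forms delegated to the same wrapper module; instance wrapping also registered on_tool_call/on_tool_result callbacks so each tool execution got its own span.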
@@ -1,560 +0,0 @@
- # frozen_string_literal: true
-
- require "opentelemetry/sdk"
- require "json"
- require_relative "../../../tokens"
- require_relative "../../../../logger"
-
- module Braintrust
-   module Trace
-     module Contrib
-       module Github
-         module Crmne
-           module RubyLLM
-             # Helper to safely set a JSON attribute on a span
-             # Only sets the attribute if obj is present
-             # @param span [OpenTelemetry::Trace::Span] the span to set attribute on
-             # @param attr_name [String] the attribute name (e.g., "braintrust.output_json")
-             # @param obj [Object] the object to serialize to JSON
-             # @return [void]
-             def self.set_json_attr(span, attr_name, obj)
-               return unless obj
-               span.set_attribute(attr_name, JSON.generate(obj))
-             rescue => e
-               Log.debug("Failed to serialize #{attr_name}: #{e.message}")
-             end
-
-             # Parse usage tokens from RubyLLM response
-             # RubyLLM uses Anthropic-style field naming (input_tokens, output_tokens)
-             # @param usage [Hash, Object] usage object from RubyLLM response
-             # @return [Hash<String, Integer>] metrics hash with normalized names
-             def self.parse_usage_tokens(usage)
-               Braintrust::Trace.parse_anthropic_usage_tokens(usage)
-             end
-
-             # Wrap RubyLLM to automatically create spans for chat requests
-             # Supports both synchronous and streaming requests
-             #
-             # Usage:
-             #   # Wrap the class once (affects all future instances):
-             #   Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap
-             #
-             #   # Or wrap a specific instance:
-             #   chat = RubyLLM.chat(model: "gpt-4o-mini")
-             #   Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap(chat)
-             #
-             # @param chat [RubyLLM::Chat, nil] the RubyLLM chat instance to wrap (if nil, wraps the class)
-             # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
-             def self.wrap(chat = nil, tracer_provider: nil)
-               tracer_provider ||= ::OpenTelemetry.tracer_provider
-
-               # If no chat instance provided, wrap the class globally via initialize hook
-               if chat.nil?
-                 return if defined?(::RubyLLM::Chat) && ::RubyLLM::Chat.instance_variable_defined?(:@braintrust_wrapper_module)
-
-                 # Create module that wraps initialize to auto-wrap each new instance
-                 wrapper_module = Module.new do
-                   define_method(:initialize) do |*args, **kwargs, &block|
-                     super(*args, **kwargs, &block)
-                     # Auto-wrap this instance during initialization
-                     Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap(self, tracer_provider: tracer_provider)
-                     self
-                   end
-                 end
-
-                 # Store reference to wrapper module for cleanup
-                 ::RubyLLM::Chat.instance_variable_set(:@braintrust_wrapper_module, wrapper_module)
-                 ::RubyLLM::Chat.prepend(wrapper_module)
-                 return nil
-               end
-
-               # Check if already wrapped to make this idempotent
-               return chat if chat.instance_variable_get(:@braintrust_wrapped)
-
-               # Create a wrapper module that intercepts chat.complete
-               wrapper = create_wrapper_module(tracer_provider)
-
-               # Mark as wrapped and prepend the wrapper to the chat instance
-               chat.instance_variable_set(:@braintrust_wrapped, true)
-               chat.singleton_class.prepend(wrapper)
-
-               # Register tool callbacks for tool span creation
-               register_tool_callbacks(chat, tracer_provider)
-
-               chat
-             end
-
-             # Register callbacks for tool execution tracing
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
-             def self.register_tool_callbacks(chat, tracer_provider)
-               tracer = tracer_provider.tracer("braintrust")
-
-               # Track tool spans by tool_call_id
-               tool_spans = {}
-
-               # Start tool span when tool is called
-               chat.on_tool_call do |tool_call|
-                 span = tracer.start_span("ruby_llm.tool.#{tool_call.name}")
-                 set_json_attr(span, "braintrust.span_attributes", {type: "tool"})
-                 span.set_attribute("tool.name", tool_call.name)
-                 span.set_attribute("tool.call_id", tool_call.id)
-
-                 # Store tool input
-                 input = {
-                   "name" => tool_call.name,
-                   "arguments" => tool_call.arguments
-                 }
-                 set_json_attr(span, "braintrust.input_json", input)
-
-                 tool_spans[tool_call.id] = span
-               end
-
-               # End tool span when result is received
-               chat.on_tool_result do |result|
-                 # Find the most recent tool span (RubyLLM doesn't pass tool_call_id to on_tool_result)
-                 # The spans are processed in order, so we can use the first unfinished one
-                 tool_call_id, span = tool_spans.find { |_id, s| s }
-                 if span
-                   # Store tool output
-                   set_json_attr(span, "braintrust.output_json", result)
-                   span.finish
-                   tool_spans.delete(tool_call_id)
-                 end
-               end
-             end
-
-             # Unwrap RubyLLM to remove Braintrust tracing
-             # For class-level unwrapping, removes the initialize override from the wrapper module
-             # For instance-level unwrapping, clears the wrapped flag
-             #
-             # @param chat [RubyLLM::Chat, nil] the RubyLLM chat instance to unwrap (if nil, unwraps the class)
-             def self.unwrap(chat = nil)
-               # If no chat instance provided, unwrap the class globally
-               if chat.nil?
-                 if defined?(::RubyLLM::Chat) && ::RubyLLM::Chat.instance_variable_defined?(:@braintrust_wrapper_module)
-                   wrapper_module = ::RubyLLM::Chat.instance_variable_get(:@braintrust_wrapper_module)
-                   # Redefine initialize to just call super (disables auto-wrapping)
-                   # We can't actually remove a prepended module, so we make it a no-op
-                   wrapper_module.module_eval do
-                     define_method(:initialize) do |*args, **kwargs, &block|
-                       super(*args, **kwargs, &block)
-                     end
-                   end
-                   ::RubyLLM::Chat.remove_instance_variable(:@braintrust_wrapper_module)
-                 end
-                 return nil
-               end
-
-               # Unwrap instance
-               chat.remove_instance_variable(:@braintrust_wrapped) if chat.instance_variable_defined?(:@braintrust_wrapped)
-               chat
-             end
-
-             # Wrap the RubyLLM::Chat class globally
-             # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
-             def self.wrap_class(tracer_provider)
-               return unless defined?(::RubyLLM::Chat)
-
-               wrapper = create_wrapper_module(tracer_provider)
-               ::RubyLLM::Chat.prepend(wrapper)
-             end
-
-             # Create the wrapper module that intercepts chat.complete
-             # We wrap complete() instead of ask() because:
-             # - ask() internally calls complete() for the actual API call
-             # - ActiveRecord integration (acts_as_chat) calls complete() directly
-             # - This ensures all LLM calls are traced regardless of entry point
-             #
-             # Important: RubyLLM's complete() calls itself recursively for tool execution.
-             # We only create a span for the outermost call to avoid duplicate spans.
-             # Tool execution is traced separately via on_tool_call/on_tool_result callbacks.
-             #
-             # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
-             # @return [Module] the wrapper module
-             def self.create_wrapper_module(tracer_provider)
-               Module.new do
-                 define_method(:complete) do |&block|
-                   # Check if we're already inside a traced complete() call
-                   # If so, just call super without creating a new span
-                   if @braintrust_in_complete
-                     if block
-                       return super(&block)
-                     else
-                       return super()
-                     end
-                   end
-
-                   tracer = tracer_provider.tracer("braintrust")
-
-                   # Mark that we're inside a complete() call
-                   @braintrust_in_complete = true
-
-                   begin
-                     if block
-                       # Handle streaming request
-                       wrapped_block = proc do |chunk|
-                         block.call(chunk)
-                       end
-                       Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.handle_streaming_complete(self, tracer, block) do |aggregated_chunks|
-                         super(&proc do |chunk|
-                           aggregated_chunks << chunk
-                           wrapped_block.call(chunk)
-                         end)
-                       end
-                     else
-                       # Handle non-streaming request
-                       Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.handle_non_streaming_complete(self, tracer) do
-                         super()
-                       end
-                     end
-                   ensure
-                     @braintrust_in_complete = false
-                   end
-                 end
-               end
-             end
-
-             # Handle streaming complete request with tracing
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @param tracer [OpenTelemetry::Trace::Tracer] the tracer
-             # @param block [Proc] the streaming block
-             def self.handle_streaming_complete(chat, tracer, block)
-               # Start span immediately for accurate timing
-               span = tracer.start_span("ruby_llm.chat")
-
-               aggregated_chunks = []
-
-               # Extract metadata and build input messages
-               # For complete(), messages are already in chat history (no prompt param)
-               metadata = extract_metadata(chat, stream: true)
-               input_messages = build_input_messages(chat, nil)
-
-               # Set input and metadata
-               set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
-               set_json_attr(span, "braintrust.metadata", metadata)
-
-               # Call original method, passing aggregated_chunks to the block
-               begin
-                 result = yield aggregated_chunks
-               rescue => e
-                 span.record_exception(e)
-                 span.status = ::OpenTelemetry::Trace::Status.error("RubyLLM error: #{e.message}")
-                 span.finish
-                 raise
-               end
-
-               # Set output and metrics from aggregated chunks
-               capture_streaming_output(span, aggregated_chunks, result)
-               span.finish
-               result
-             end
-
-             # Handle non-streaming complete request with tracing
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @param tracer [OpenTelemetry::Trace::Tracer] the tracer
-             def self.handle_non_streaming_complete(chat, tracer)
-               # Start span immediately for accurate timing
-               span = tracer.start_span("ruby_llm.chat")
-
-               begin
-                 # Extract metadata and build input messages
-                 # For complete(), messages are already in chat history (no prompt param)
-                 metadata = extract_metadata(chat)
-                 input_messages = build_input_messages(chat, nil)
-                 set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
-
-                 # Remember message count before the call (for tool call detection)
-                 messages_before_count = (chat.respond_to?(:messages) && chat.messages) ? chat.messages.length : 0
-
-                 # Call the original method
-                 response = yield
-
-                 # Capture output and metrics
-                 capture_non_streaming_output(span, chat, response, messages_before_count)
-
-                 # Set metadata
-                 set_json_attr(span, "braintrust.metadata", metadata)
-
-                 response
-               ensure
-                 span.finish
-               end
-             end
-
-             # Extract metadata from chat instance (provider, model, tools, stream flag)
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @param stream [Boolean] whether this is a streaming request
-             # @return [Hash] metadata hash
-             def self.extract_metadata(chat, stream: false)
-               metadata = {"provider" => "ruby_llm"}
-               metadata["stream"] = true if stream
-
-               # Extract model
-               if chat.respond_to?(:model) && chat.model
-                 model = chat.model.respond_to?(:id) ? chat.model.id : chat.model.to_s
-                 metadata["model"] = model
-               end
-
-               # Extract tools (only for non-streaming)
-               if !stream && chat.respond_to?(:tools) && chat.tools&.any?
-                 metadata["tools"] = extract_tools_metadata(chat)
-               end
-
-               metadata
-             end
-
-             # Extract tools metadata from chat instance
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @return [Array<Hash>] array of tool schemas
-             def self.extract_tools_metadata(chat)
-               provider = chat.instance_variable_get(:@provider) if chat.instance_variable_defined?(:@provider)
-
-               chat.tools.map do |_name, tool|
-                 format_tool_schema(tool, provider)
-               end
-             end
-
-             # Format a tool into OpenAI-compatible schema
-             # @param tool [Object] the tool object
-             # @param provider [Object, nil] the provider instance
-             # @return [Hash] tool schema
-             def self.format_tool_schema(tool, provider)
-               tool_schema = nil
-
-               # Use provider-specific tool_for method if available
-               if provider
-                 begin
-                   tool_schema = if provider.is_a?(::RubyLLM::Providers::OpenAI)
-                     ::RubyLLM::Providers::OpenAI::Tools.tool_for(tool)
-                   elsif defined?(::RubyLLM::Providers::Anthropic) && provider.is_a?(::RubyLLM::Providers::Anthropic)
-                     ::RubyLLM::Providers::Anthropic::Tools.tool_for(tool)
-                   elsif tool.respond_to?(:params_schema) && tool.params_schema
-                     build_basic_tool_schema(tool)
-                   else
-                     build_minimal_tool_schema(tool)
-                   end
-                 rescue NameError, ArgumentError => e
-                   # If provider-specific tool_for fails, fall back to basic format
-                   Log.debug("Failed to extract tool schema using provider-specific method: #{e.class.name}: #{e.message}")
-                   tool_schema = (tool.respond_to?(:params_schema) && tool.params_schema) ? build_basic_tool_schema(tool) : build_minimal_tool_schema(tool)
-                 end
-               else
-                 # No provider, use basic format with params_schema if available
-                 tool_schema = (tool.respond_to?(:params_schema) && tool.params_schema) ? build_basic_tool_schema(tool) : build_minimal_tool_schema(tool)
-               end
-
-               # Strip RubyLLM-specific fields to match native OpenAI format
-               # Handle both symbol and string keys
-               function_key = tool_schema&.key?(:function) ? :function : "function"
-               if tool_schema && tool_schema[function_key]
-                 tool_params = tool_schema[function_key][:parameters] || tool_schema[function_key]["parameters"]
-                 if tool_params.is_a?(Hash)
-                   # Create a mutable copy if the hash is frozen
-                   tool_params = tool_params.dup if tool_params.frozen?
-                   tool_params.delete("strict")
-                   tool_params.delete(:strict)
-                   tool_params.delete("additionalProperties")
-                   tool_params.delete(:additionalProperties)
-                   # Assign the modified copy back
-                   params_key = tool_schema[function_key].key?(:parameters) ? :parameters : "parameters"
-                   tool_schema[function_key][params_key] = tool_params
-                 end
-               end
-
-               tool_schema
-             end
-
-             # Build a basic tool schema with parameters
-             # @param tool [Object] the tool object
-             # @return [Hash] tool schema
-             def self.build_basic_tool_schema(tool)
-               {
-                 "type" => "function",
-                 "function" => {
-                   "name" => tool.name.to_s,
-                   "description" => tool.description,
-                   "parameters" => tool.params_schema
-                 }
-               }
-             end
-
-             # Build a minimal tool schema without parameters
-             # @param tool [Object] the tool object
-             # @return [Hash] tool schema
-             def self.build_minimal_tool_schema(tool)
-               {
-                 "type" => "function",
-                 "function" => {
-                   "name" => tool.name.to_s,
-                   "description" => tool.description,
-                   "parameters" => {}
-                 }
-               }
-             end
-
-             # Build input messages array from chat history and prompt
-             # Formats messages to match OpenAI's message format
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @param prompt [String, nil] the user prompt
-             # @return [Array<Hash>] array of message hashes
-             def self.build_input_messages(chat, prompt)
-               input_messages = []
-
-               # Add conversation history, formatting each message to OpenAI format
-               if chat.respond_to?(:messages) && chat.messages&.any?
-                 input_messages = chat.messages.map { |m| format_message_for_input(m) }
-               end
-
-               # Add current prompt
-               input_messages << {"role" => "user", "content" => prompt} if prompt
-
-               input_messages
-             end
-
-             # Format a RubyLLM message to OpenAI-compatible format
-             # @param msg [Object] the RubyLLM message
-             # @return [Hash] OpenAI-formatted message
-             def self.format_message_for_input(msg)
-               formatted = {
-                 "role" => msg.role.to_s
-               }
-
-               # Handle content
-               if msg.respond_to?(:content) && msg.content
-                 # Convert Ruby hash notation to JSON string for tool results
-                 content = msg.content
-                 if msg.role.to_s == "tool" && content.is_a?(String) && content.start_with?("{:")
-                   # Ruby hash string like "{:location=>...}" - try to parse and re-serialize as JSON
-                   begin
-                     # Simple conversion: replace Ruby hash syntax with JSON
-                     content = content.gsub(/(?<=\{|, ):(\w+)=>/, '"\1":').gsub("=>", ":")
-                   rescue
-                     # Keep original if conversion fails
-                   end
-                 end
-                 formatted["content"] = content
-               end
-
-               # Handle tool_calls for assistant messages
-               if msg.respond_to?(:tool_calls) && msg.tool_calls&.any?
-                 formatted["tool_calls"] = format_tool_calls(msg.tool_calls)
-                 formatted["content"] = nil
-               end
-
-               # Handle tool_call_id for tool result messages
-               if msg.respond_to?(:tool_call_id) && msg.tool_call_id
-                 formatted["tool_call_id"] = msg.tool_call_id
-               end
-
-               formatted
-             end
-
-             # Capture streaming output and metrics
-             # @param span [OpenTelemetry::Trace::Span] the span
-             # @param aggregated_chunks [Array] the aggregated chunks
-             # @param result [Object] the result object
-             def self.capture_streaming_output(span, aggregated_chunks, result)
-               return if aggregated_chunks.empty?
-
-               # Aggregate content from chunks
-               aggregated_content = aggregated_chunks.map { |c|
-                 c.respond_to?(:content) ? c.content : c.to_s
-               }.join
-
-               output = [{
-                 role: "assistant",
-                 content: aggregated_content
-               }]
-               set_json_attr(span, "braintrust.output_json", output)
-
-               # Try to extract usage from the result
-               if result.respond_to?(:usage) && result.usage
-                 metrics = parse_usage_tokens(result.usage)
-                 set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
-               end
-             end
-
-             # Capture non-streaming output and metrics
-             # @param span [OpenTelemetry::Trace::Span] the span
-             # @param chat [RubyLLM::Chat] the chat instance
-             # @param response [Object] the response object
-             # @param messages_before_count [Integer] message count before the call
-             def self.capture_non_streaming_output(span, chat, response, messages_before_count)
-               return unless response
-
-               # Build message object from response
-               message = {
-                 "role" => "assistant",
-                 "content" => nil
-               }
-
-               # Add content if it's a simple text response
-               if response.respond_to?(:content) && response.content && !response.content.empty?
-                 message["content"] = response.content
-               end
-
-               # Check if there are tool calls in the messages history
-               # Look at messages added during this complete() call
-               if chat.respond_to?(:messages) && chat.messages
-                 assistant_msg = chat.messages[messages_before_count..].find { |m|
-                   m.role.to_s == "assistant" && m.respond_to?(:tool_calls) && m.tool_calls&.any?
-                 }
-
-                 if assistant_msg&.tool_calls&.any?
-                   message["tool_calls"] = format_tool_calls(assistant_msg.tool_calls)
-                   message["content"] = nil
-                 end
-               end
-
-               # Format as OpenAI choices[] structure
-               output = [{
-                 "index" => 0,
-                 "message" => message,
-                 "finish_reason" => message["tool_calls"] ? "tool_calls" : "stop"
-               }]
-
-               set_json_attr(span, "braintrust.output_json", output)
-
-               # Set metrics (token usage)
-               if response.respond_to?(:to_h)
-                 response_hash = response.to_h
-                 usage = {
-                   "input_tokens" => response_hash[:input_tokens],
-                   "output_tokens" => response_hash[:output_tokens],
-                   "cached_tokens" => response_hash[:cached_tokens],
-                   "cache_creation_tokens" => response_hash[:cache_creation_tokens]
-                 }.compact
-
-                 unless usage.empty?
-                   metrics = parse_usage_tokens(usage)
-                   set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
-                 end
-               end
-             end
-
-             # Format tool calls into OpenAI format
-             # @param tool_calls [Hash, Array] the tool calls
-             # @return [Array<Hash>] formatted tool calls
-             def self.format_tool_calls(tool_calls)
-               tool_calls.map do |_id, tc|
-                 # Ensure arguments is a JSON string (OpenAI format)
-                 args = tc.arguments
-                 args_string = args.is_a?(String) ? args : JSON.generate(args)
-
-                 {
-                   "id" => tc.id,
-                   "type" => "function",
-                   "function" => {
-                     "name" => tc.name,
-                     "arguments" => args_string
-                   }
-                 }
-               end
-             end
-           end
-         end
-       end
-     end
-   end
- end
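
One removed helper worth a note: format_message_for_input rewrote tool-result content that arrived as an inspected Ruby hash (a string like "{:location=>...}") into JSON with two chained gsub calls rather than a real parser. A standalone check of that exact conversion, with a hypothetical sample string:

    require "json"

    content = '{:location=>"NYC", :unit=>"celsius"}'
    # The same two substitutions the deleted code applied:
    content = content.gsub(/(?<=\{|, ):(\w+)=>/, '"\1":').gsub("=>", ":")
    p content             # => "{\"location\":\"NYC\", \"unit\":\"celsius\"}"
    p JSON.parse(content) # => {"location"=>"NYC", "unit"=>"celsius"}

This holds for flat hashes with symbol keys and simple values, but the second gsub rewrites every remaining "=>", including any inside a string value, and the surrounding bare rescue only catches exceptions, not silently mangled output.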