braintrust 0.0.5 → 0.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: 6321acf7b780922ed97ea3cc57dde47a52947a10650a082dcfd9af780056d99a
-  data.tar.gz: 67c181e53537829931de704c7503cc056646652f9c1a61d914bc1ee0b7af69a2
+  metadata.gz: ad055b60c4efb984bce955b1c00c684c760c849dea7a54d49a452f925dbab629
+  data.tar.gz: fe271abfac7810e53ff88efb139bc41b519799ed7dbd196ab5bb64fcbc35b62c
 SHA512:
-  metadata.gz: bb8546fdbf0a448016a1d31ceb8729a40be59e0d8d081ef275f763a11dbb2f5df0134ec52fc3b1c15c41d9dcdf42fbbe6becaf00ab7ac882c8f2f7e173a9a61f
-  data.tar.gz: 41e6d13504302a3b3ec26697cb50ce4736040d910c8e293d37088311922daa77274fc4214c86543ca21209bf56e82b2f6f00d5fbc2d7d0d0baad6f1e77cc48ff
+  metadata.gz: a33fc58073542bf7d7dbf45092d9f3a6669d7f13fa98d3d8753d8fccd456c7e010b8a3ac0be5625e6761e164d2223f20b7fbaa3f02ead978f47398153c6c8ac2
+  data.tar.gz: b24f1377f4ec25f09c7c5e1366e1428c9164d7b3b7bdcf82331aa84d10603b250c61983ea811a13e7be1a1276558ef2995292fbefb20e99a374e59e9eaefb8b1
data/README.md CHANGED
@@ -155,7 +155,7 @@ message = tracer.in_span("chat-message") do |span|
   root_span = span
 
   client.messages.create(
-    model: "claude-3-5-sonnet-20241022",
+    model: "claude-3-haiku-20240307",
     max_tokens: 100,
     system: "You are a helpful assistant.",
     messages: [
@@ -171,6 +171,34 @@ puts "View trace at: #{Braintrust::Trace.permalink(root_span)}"
 OpenTelemetry.tracer_provider.shutdown
 ```
 
+### RubyLLM Tracing
+
+```ruby
+require "braintrust"
+require "ruby_llm"
+
+Braintrust.init
+
+# Wrap RubyLLM globally (wraps all Chat instances)
+Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap
+
+tracer = OpenTelemetry.tracer_provider.tracer("ruby-llm-app")
+root_span = nil
+
+response = tracer.in_span("chat") do |span|
+  root_span = span
+
+  chat = RubyLLM.chat(model: "gpt-4o-mini")
+  chat.ask("Say hello!")
+end
+
+puts "Response: #{response.content}"
+
+puts "View trace at: #{Braintrust::Trace.permalink(root_span)}"
+
+OpenTelemetry.tracer_provider.shutdown
+```
+
 ### Attachments
 
 Attachments allow you to log binary data (images, PDFs, audio, etc.) as part of your traces. This is particularly useful for multimodal AI applications like vision models.
@@ -236,7 +264,9 @@ Check out the [`examples/`](./examples/) directory for complete working examples
 - [eval.rb](./examples/eval.rb) - Create and run evaluations with custom test cases and scoring functions
 - [trace.rb](./examples/trace.rb) - Manual span creation and tracing
 - [openai.rb](./examples/openai.rb) - Automatically trace OpenAI API calls
+- [alexrudall_openai.rb](./examples/alexrudall_openai.rb) - Automatically trace ruby-openai gem API calls
 - [anthropic.rb](./examples/anthropic.rb) - Automatically trace Anthropic API calls
+- [ruby_llm.rb](./examples/ruby_llm.rb) - Automatically trace RubyLLM API calls
 - [trace/trace_attachments.rb](./examples/trace/trace_attachments.rb) - Log attachments (images, PDFs) in traces
 - [eval/dataset.rb](./examples/eval/dataset.rb) - Run evaluations using datasets stored in Braintrust
 - [eval/remote_functions.rb](./examples/eval/remote_functions.rb) - Use remote scoring functions
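
> Note on the new RubyLLM example above: per the `wrap` documentation added later in this diff (see the new `ruby_llm.rb`), wrapping can also be scoped to a single chat instance instead of patching the class globally. A minimal sketch (the model name mirrors the README example):

```ruby
require "braintrust"
require "ruby_llm"

Braintrust.init

# Wrap only this chat instance; other Chat instances stay untraced
chat = RubyLLM.chat(model: "gpt-4o-mini")
Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap(chat)

chat.ask("Say hello!")
```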
@@ -49,6 +49,20 @@ module Braintrust
       )
     end
 
+    # Create a State object directly with explicit parameters
+    # @param api_key [String] Braintrust API key (required)
+    # @param org_name [String, nil] Organization name
+    # @param org_id [String, nil] Organization ID (if provided, skips login - useful for testing)
+    # @param default_project [String, nil] Default project name
+    # @param app_url [String, nil] App URL (default: https://www.braintrust.dev)
+    # @param api_url [String, nil] API URL
+    # @param proxy_url [String, nil] Proxy URL
+    # @param blocking_login [Boolean] Login synchronously (default: false)
+    # @param enable_tracing [Boolean] Enable OpenTelemetry tracing (default: true)
+    # @param tracer_provider [TracerProvider, nil] Optional tracer provider
+    # @param config [Config, nil] Optional config object
+    # @param exporter [Exporter, nil] Optional exporter for testing
+    # @return [State] the created state
     def initialize(api_key: nil, org_name: nil, org_id: nil, default_project: nil, app_url: nil, api_url: nil, proxy_url: nil, blocking_login: false, enable_tracing: true, tracer_provider: nil, config: nil, exporter: nil)
       # Instance-level mutex for thread-safe login
       @login_mutex = Mutex.new
@@ -61,13 +75,17 @@ module Braintrust
       @app_url = app_url || "https://www.braintrust.dev"
       @api_url = api_url
       @proxy_url = proxy_url
-      @logged_in = false
       @config = config
 
-      # Perform login after state setup
-      if blocking_login
+      # If org_id is provided, we're already "logged in" (useful for testing)
+      # Otherwise, perform login to discover org info
+      if org_id
+        @logged_in = true
+      elsif blocking_login
+        @logged_in = false
         login
       else
+        @logged_in = false
         login_in_thread
       end
 
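> The new `org_id` branch above short-circuits login entirely. A minimal sketch of the difference, assuming `Braintrust::State` is the class these hunks modify (the class name is not shown in this diff) and using placeholder credentials:

```ruby
require "braintrust"

# With org_id: marked logged in immediately; no login call or login thread
# (per the new doc comment, useful for testing)
state = Braintrust::State.new(api_key: "sk-placeholder", org_id: "org-123")

# Without org_id: @logged_in starts false and login runs in a background
# thread, or synchronously when blocking_login: true
state = Braintrust::State.new(api_key: "sk-placeholder", blocking_login: true)
```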
@@ -2,6 +2,7 @@
 
 require "opentelemetry/sdk"
 require "json"
+require_relative "../tokens"
 
 module Braintrust
   module Trace
@@ -17,61 +18,11 @@ module Braintrust
         span.set_attribute(attr_name, JSON.generate(obj))
       end
 
-      # Parse usage tokens from Anthropic API response, handling cache tokens
-      # Maps Anthropic field names to Braintrust standard names:
-      # - input_tokens → contributes to prompt_tokens
-      # - cache_creation_input_tokens → prompt_cache_creation_tokens (and adds to prompt_tokens)
-      # - cache_read_input_tokens → prompt_cached_tokens (and adds to prompt_tokens)
-      # - output_tokens → completion_tokens
-      # - total_tokens → tokens (or calculated if missing)
-      #
+      # Parse usage tokens from Anthropic API response
       # @param usage [Hash, Object] usage object from Anthropic response
       # @return [Hash<String, Integer>] metrics hash with normalized names
       def self.parse_usage_tokens(usage)
-        metrics = {}
-        return metrics unless usage
-
-        # Convert to hash if it's an object
-        usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
-
-        # Extract base values for calculation
-        input_tokens = 0
-        cache_creation_tokens = 0
-        cache_read_tokens = 0
-
-        usage_hash.each do |key, value|
-          next unless value.is_a?(Numeric)
-          key_str = key.to_s
-
-          case key_str
-          when "input_tokens"
-            input_tokens = value.to_i
-          when "cache_creation_input_tokens"
-            cache_creation_tokens = value.to_i
-            metrics["prompt_cache_creation_tokens"] = value.to_i
-          when "cache_read_input_tokens"
-            cache_read_tokens = value.to_i
-            metrics["prompt_cached_tokens"] = value.to_i
-          when "output_tokens"
-            metrics["completion_tokens"] = value.to_i
-          when "total_tokens"
-            metrics["tokens"] = value.to_i
-          else
-            # Keep other numeric fields as-is (future-proofing)
-            metrics[key_str] = value.to_i
-          end
-        end
-
-        # Calculate total prompt tokens (input + cache creation + cache read)
-        total_prompt_tokens = input_tokens + cache_creation_tokens + cache_read_tokens
-        metrics["prompt_tokens"] = total_prompt_tokens
-
-        # Calculate total tokens if not provided by Anthropic
-        if !metrics.key?("tokens") && metrics.key?("completion_tokens")
-          metrics["tokens"] = total_prompt_tokens + metrics["completion_tokens"]
-        end
-
-        metrics
+        Braintrust::Trace.parse_anthropic_usage_tokens(usage)
       end
 
       # Wrap an Anthropic::Client to automatically create spans for messages and responses
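
> The parsing logic deleted above now lives in `Braintrust::Trace.parse_anthropic_usage_tokens` (see the new `tokens.rb` later in this diff). A worked example of the normalization it performs, with made-up counts:

```ruby
usage = {
  "input_tokens" => 10,
  "cache_read_input_tokens" => 40,
  "cache_creation_input_tokens" => 5,
  "output_tokens" => 7
}

Braintrust::Trace.parse_anthropic_usage_tokens(usage)
# => {"prompt_tokens"=>55,                # 10 + 40 + 5: cache tokens folded into the prompt total
#     "prompt_cached_tokens"=>40,
#     "prompt_cache_creation_tokens"=>5,
#     "completion_tokens"=>7,
#     "tokens"=>62}                       # 55 + 7
```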
@@ -2,140 +2,134 @@
 
 require "opentelemetry/sdk"
 require "json"
+require_relative "../../../../tokens"
 
 module Braintrust
   module Trace
-    module AlexRudall
-      module RubyOpenAI
-        # Helper to safely set a JSON attribute on a span
-        # Only sets the attribute if obj is present
-        # @param span [OpenTelemetry::Trace::Span] the span to set attribute on
-        # @param attr_name [String] the attribute name (e.g., "braintrust.output_json")
-        # @param obj [Object] the object to serialize to JSON
-        # @return [void]
-        def self.set_json_attr(span, attr_name, obj)
-          return unless obj
-          span.set_attribute(attr_name, JSON.generate(obj))
-        end
+    module Contrib
+      module Github
+        module Alexrudall
+          module RubyOpenAI
+            # Helper to safely set a JSON attribute on a span
+            # Only sets the attribute if obj is present
+            # @param span [OpenTelemetry::Trace::Span] the span to set attribute on
+            # @param attr_name [String] the attribute name (e.g., "braintrust.output_json")
+            # @param obj [Object] the object to serialize to JSON
+            # @return [void]
+            def self.set_json_attr(span, attr_name, obj)
+              return unless obj
+              span.set_attribute(attr_name, JSON.generate(obj))
+            end
 
-        # Parse usage tokens from OpenAI API response, handling nested token_details
-        # Maps OpenAI field names to Braintrust standard names:
-        # - prompt_tokens → prompt_tokens
-        # - completion_tokens → completion_tokens
-        # - total_tokens → tokens
-        #
-        # @param usage [Hash] usage hash from OpenAI response
-        # @return [Hash<String, Integer>] metrics hash with normalized names
-        def self.parse_usage_tokens(usage)
-          metrics = {}
-          return metrics unless usage
-
-          # Basic token counts
-          metrics["prompt_tokens"] = usage["prompt_tokens"].to_i if usage["prompt_tokens"]
-          metrics["completion_tokens"] = usage["completion_tokens"].to_i if usage["completion_tokens"]
-          metrics["total_tokens"] = usage["total_tokens"].to_i if usage["total_tokens"]
-
-          # Rename total_tokens to tokens for consistency
-          metrics["tokens"] = metrics.delete("total_tokens") if metrics["total_tokens"]
-
-          metrics
-        end
+            # Parse usage tokens from OpenAI API response
+            # @param usage [Hash] usage hash from OpenAI response
+            # @return [Hash<String, Integer>] metrics hash with normalized names
+            def self.parse_usage_tokens(usage)
+              Braintrust::Trace.parse_openai_usage_tokens(usage)
+            end
 
-        # Wrap an OpenAI::Client (ruby-openai gem) to automatically create spans
-        # Supports both synchronous and streaming requests
-        # @param client [OpenAI::Client] the OpenAI client to wrap
-        # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
-        def self.wrap(client, tracer_provider: nil)
-          tracer_provider ||= ::OpenTelemetry.tracer_provider
+            # Wrap an OpenAI::Client (ruby-openai gem) to automatically create spans
+            # Supports both synchronous and streaming requests
+            # @param client [OpenAI::Client] the OpenAI client to wrap
+            # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
+            def self.wrap(client, tracer_provider: nil)
+              tracer_provider ||= ::OpenTelemetry.tracer_provider
 
-          # Wrap chat completions
-          wrap_chat(client, tracer_provider)
+              # Wrap chat completions
+              wrap_chat(client, tracer_provider)
 
-          client
-        end
+              client
+            end
 
-        # Wrap chat API
-        # @param client [OpenAI::Client] the OpenAI client
-        # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
-        def self.wrap_chat(client, tracer_provider)
-          # Create a wrapper module that intercepts the chat method
-          wrapper = Module.new do
-            define_method(:chat) do |parameters:|
-              tracer = tracer_provider.tracer("braintrust")
-
-              tracer.in_span("openai.chat.completions.create") do |span|
-                # Initialize metadata hash
-                metadata = {
-                  "provider" => "openai",
-                  "endpoint" => "/v1/chat/completions"
-                }
-
-                # Capture request metadata fields
-                metadata_fields = %w[
-                  model frequency_penalty logit_bias logprobs max_tokens n
-                  presence_penalty response_format seed service_tier stop
-                  stream stream_options temperature top_p top_logprobs
-                  tools tool_choice parallel_tool_calls user functions function_call
-                ]
-
-                metadata_fields.each do |field|
-                  field_sym = field.to_sym
-                  if parameters.key?(field_sym)
-                    # Special handling for stream parameter (it's a Proc)
-                    metadata[field] = if field == "stream"
-                      true # Just mark as streaming
-                    else
-                      parameters[field_sym]
+            # Wrap chat API
+            # @param client [OpenAI::Client] the OpenAI client
+            # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
+            def self.wrap_chat(client, tracer_provider)
+              # Create a wrapper module that intercepts the chat method
+              wrapper = Module.new do
+                define_method(:chat) do |parameters:|
+                  tracer = tracer_provider.tracer("braintrust")
+
+                  tracer.in_span("openai.chat.completions.create") do |span|
+                    # Initialize metadata hash
+                    metadata = {
+                      "provider" => "openai",
+                      "endpoint" => "/v1/chat/completions"
+                    }
+
+                    # Capture request metadata fields
+                    metadata_fields = %w[
+                      model frequency_penalty logit_bias logprobs max_tokens n
+                      presence_penalty response_format seed service_tier stop
+                      stream stream_options temperature top_p top_logprobs
+                      tools tool_choice parallel_tool_calls user functions function_call
+                    ]
+
+                    metadata_fields.each do |field|
+                      field_sym = field.to_sym
+                      if parameters.key?(field_sym)
+                        # Special handling for stream parameter (it's a Proc)
+                        metadata[field] = if field == "stream"
+                          true # Just mark as streaming
+                        else
+                          parameters[field_sym]
+                        end
+                      end
                     end
-                  end
-                end
-
-                # Set input messages as JSON
-                if parameters[:messages]
-                  span.set_attribute("braintrust.input_json", JSON.generate(parameters[:messages]))
-                end
-
-                begin
-                  # Call the original method
-                  response = super(parameters: parameters)
 
-                  # Set output (choices) as JSON
-                  if response && response["choices"]&.any?
-                    span.set_attribute("braintrust.output_json", JSON.generate(response["choices"]))
-                  end
-
-                  # Set metrics (token usage)
-                  if response && response["usage"]
-                    metrics = Braintrust::Trace::AlexRudall::RubyOpenAI.parse_usage_tokens(response["usage"])
-                    span.set_attribute("braintrust.metrics", JSON.generate(metrics)) unless metrics.empty?
-                  end
+                    # Set input messages as JSON
+                    if parameters[:messages]
+                      span.set_attribute("braintrust.input_json", JSON.generate(parameters[:messages]))
+                    end
 
-                  # Add response metadata fields
-                  if response
-                    metadata["id"] = response["id"] if response["id"]
-                    metadata["created"] = response["created"] if response["created"]
-                    metadata["system_fingerprint"] = response["system_fingerprint"] if response["system_fingerprint"]
-                    metadata["service_tier"] = response["service_tier"] if response["service_tier"]
+                    begin
+                      # Call the original method
+                      response = super(parameters: parameters)
+
+                      # Set output (choices) as JSON
+                      if response && response["choices"]&.any?
+                        span.set_attribute("braintrust.output_json", JSON.generate(response["choices"]))
+                      end
+
+                      # Set metrics (token usage)
+                      if response && response["usage"]
+                        metrics = Braintrust::Trace::Contrib::Github::Alexrudall::RubyOpenAI.parse_usage_tokens(response["usage"])
+                        span.set_attribute("braintrust.metrics", JSON.generate(metrics)) unless metrics.empty?
+                      end
+
+                      # Add response metadata fields
+                      if response
+                        metadata["id"] = response["id"] if response["id"]
+                        metadata["created"] = response["created"] if response["created"]
+                        metadata["system_fingerprint"] = response["system_fingerprint"] if response["system_fingerprint"]
+                        metadata["service_tier"] = response["service_tier"] if response["service_tier"]
+                      end
+
+                      # Set metadata ONCE at the end with complete hash
+                      span.set_attribute("braintrust.metadata", JSON.generate(metadata))
+
+                      response
+                    rescue => e
+                      # Record exception in span
+                      span.record_exception(e)
+                      span.status = OpenTelemetry::Trace::Status.error("Exception: #{e.class} - #{e.message}")
+                      raise
+                    end
                   end
-
-                  # Set metadata ONCE at the end with complete hash
-                  span.set_attribute("braintrust.metadata", JSON.generate(metadata))
-
-                  response
-                rescue => e
-                  # Record exception in span
-                  span.record_exception(e)
-                  span.status = OpenTelemetry::Trace::Status.error("Exception: #{e.class} - #{e.message}")
-                  raise
                 end
               end
+
+              # Prepend the wrapper to the client's singleton class
+              client.singleton_class.prepend(wrapper)
             end
           end
-
-          # Prepend the wrapper to the client's singleton class
-          client.singleton_class.prepend(wrapper)
         end
       end
     end
+
+    # Backwards compatibility: this module was originally at Braintrust::Trace::AlexRudall::RubyOpenAI
+    module AlexRudall
+      RubyOpenAI = Contrib::Github::Alexrudall::RubyOpenAI
+    end
   end
 end
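
> The compatibility alias at the bottom of this file means the move into the `Contrib::Github::Alexrudall` namespace is non-breaking: both constants resolve to the same module object. A sketch (`client` stands in for a real `OpenAI::Client`):

```ruby
old_const = Braintrust::Trace::AlexRudall::RubyOpenAI
new_const = Braintrust::Trace::Contrib::Github::Alexrudall::RubyOpenAI
old_const.equal?(new_const) # => true

# Existing call sites keep working unchanged
Braintrust::Trace::AlexRudall::RubyOpenAI.wrap(client)
```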
@@ -0,0 +1,555 @@
+# frozen_string_literal: true
+
+require "opentelemetry/sdk"
+require "json"
+require_relative "../../../tokens"
+require_relative "../../../../logger"
+
+module Braintrust
+  module Trace
+    module Contrib
+      module Github
+        module Crmne
+          module RubyLLM
+            # Helper to safely set a JSON attribute on a span
+            # Only sets the attribute if obj is present
+            # @param span [OpenTelemetry::Trace::Span] the span to set attribute on
+            # @param attr_name [String] the attribute name (e.g., "braintrust.output_json")
+            # @param obj [Object] the object to serialize to JSON
+            # @return [void]
+            def self.set_json_attr(span, attr_name, obj)
+              return unless obj
+              span.set_attribute(attr_name, JSON.generate(obj))
+            rescue => e
+              Log.debug("Failed to serialize #{attr_name}: #{e.message}")
+            end
+
+            # Parse usage tokens from RubyLLM response
+            # RubyLLM uses Anthropic-style field naming (input_tokens, output_tokens)
+            # @param usage [Hash, Object] usage object from RubyLLM response
+            # @return [Hash<String, Integer>] metrics hash with normalized names
+            def self.parse_usage_tokens(usage)
+              Braintrust::Trace.parse_anthropic_usage_tokens(usage)
+            end
+
+            # Wrap RubyLLM to automatically create spans for chat requests
+            # Supports both synchronous and streaming requests
+            #
+            # Usage:
+            #   # Wrap the class once (affects all future instances):
+            #   Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap
+            #
+            #   # Or wrap a specific instance:
+            #   chat = RubyLLM.chat(model: "gpt-4o-mini")
+            #   Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap(chat)
+            #
+            # @param chat [RubyLLM::Chat, nil] the RubyLLM chat instance to wrap (if nil, wraps the class)
+            # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
+            def self.wrap(chat = nil, tracer_provider: nil)
+              tracer_provider ||= ::OpenTelemetry.tracer_provider
+
+              # If no chat instance provided, wrap the class globally via initialize hook
+              if chat.nil?
+                return if defined?(::RubyLLM::Chat) && ::RubyLLM::Chat.instance_variable_defined?(:@braintrust_wrapper_module)
+
+                # Create module that wraps initialize to auto-wrap each new instance
+                wrapper_module = Module.new do
+                  define_method(:initialize) do |*args, **kwargs, &block|
+                    super(*args, **kwargs, &block)
+                    # Auto-wrap this instance during initialization
+                    Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap(self, tracer_provider: tracer_provider)
+                    self
+                  end
+                end
+
+                # Store reference to wrapper module for cleanup
+                ::RubyLLM::Chat.instance_variable_set(:@braintrust_wrapper_module, wrapper_module)
+                ::RubyLLM::Chat.prepend(wrapper_module)
+                return nil
+              end
+
+              # Check if already wrapped to make this idempotent
+              return chat if chat.instance_variable_get(:@braintrust_wrapped)
+
+              # Create a wrapper module that intercepts chat.complete
+              wrapper = create_wrapper_module(tracer_provider)
+
+              # Mark as wrapped and prepend the wrapper to the chat instance
+              chat.instance_variable_set(:@braintrust_wrapped, true)
+              chat.singleton_class.prepend(wrapper)
+
+              # Register tool callbacks for tool span creation
+              register_tool_callbacks(chat, tracer_provider)
+
+              chat
+            end
+
+            # Register callbacks for tool execution tracing
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
+            def self.register_tool_callbacks(chat, tracer_provider)
+              tracer = tracer_provider.tracer("braintrust")
+
+              # Track tool spans by tool_call_id
+              tool_spans = {}
+
+              # Start tool span when tool is called
+              chat.on_tool_call do |tool_call|
+                span = tracer.start_span("ruby_llm.tool.#{tool_call.name}")
+                set_json_attr(span, "braintrust.span_attributes", {type: "tool"})
+                span.set_attribute("tool.name", tool_call.name)
+                span.set_attribute("tool.call_id", tool_call.id)
+
+                # Store tool input
+                input = {
+                  "name" => tool_call.name,
+                  "arguments" => tool_call.arguments
+                }
+                set_json_attr(span, "braintrust.input_json", input)
+
+                tool_spans[tool_call.id] = span
+              end
+
+              # End tool span when result is received
+              chat.on_tool_result do |result|
+                # Find the most recent tool span (RubyLLM doesn't pass tool_call_id to on_tool_result)
+                # The spans are processed in order, so we can use the first unfinished one
+                tool_call_id, span = tool_spans.find { |_id, s| s }
+                if span
+                  # Store tool output
+                  set_json_attr(span, "braintrust.output_json", result)
+                  span.finish
+                  tool_spans.delete(tool_call_id)
+                end
+              end
+            end
+
+            # Unwrap RubyLLM to remove Braintrust tracing
+            # For class-level unwrapping, removes the initialize override from the wrapper module
+            # For instance-level unwrapping, clears the wrapped flag
+            #
+            # @param chat [RubyLLM::Chat, nil] the RubyLLM chat instance to unwrap (if nil, unwraps the class)
+            def self.unwrap(chat = nil)
+              # If no chat instance provided, unwrap the class globally
+              if chat.nil?
+                if defined?(::RubyLLM::Chat) && ::RubyLLM::Chat.instance_variable_defined?(:@braintrust_wrapper_module)
+                  wrapper_module = ::RubyLLM::Chat.instance_variable_get(:@braintrust_wrapper_module)
+                  # Redefine initialize to just call super (disables auto-wrapping)
+                  # We can't actually remove a prepended module, so we make it a no-op
+                  wrapper_module.module_eval do
+                    define_method(:initialize) do |*args, **kwargs, &block|
+                      super(*args, **kwargs, &block)
+                    end
+                  end
+                  ::RubyLLM::Chat.remove_instance_variable(:@braintrust_wrapper_module)
+                end
+                return nil
+              end
+
+              # Unwrap instance
+              chat.remove_instance_variable(:@braintrust_wrapped) if chat.instance_variable_defined?(:@braintrust_wrapped)
+              chat
+            end
+
+            # Wrap the RubyLLM::Chat class globally
+            # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
+            def self.wrap_class(tracer_provider)
+              return unless defined?(::RubyLLM::Chat)
+
+              wrapper = create_wrapper_module(tracer_provider)
+              ::RubyLLM::Chat.prepend(wrapper)
+            end
+
+            # Create the wrapper module that intercepts chat.complete
+            # We wrap complete() instead of ask() because:
+            # - ask() internally calls complete() for the actual API call
+            # - ActiveRecord integration (acts_as_chat) calls complete() directly
+            # - This ensures all LLM calls are traced regardless of entry point
+            #
+            # Important: RubyLLM's complete() calls itself recursively for tool execution.
+            # We only create a span for the outermost call to avoid duplicate spans.
+            # Tool execution is traced separately via on_tool_call/on_tool_result callbacks.
+            #
+            # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider
+            # @return [Module] the wrapper module
+            def self.create_wrapper_module(tracer_provider)
+              Module.new do
+                define_method(:complete) do |&block|
+                  # Check if we're already inside a traced complete() call
+                  # If so, just call super without creating a new span
+                  if @braintrust_in_complete
+                    if block
+                      return super(&block)
+                    else
+                      return super()
+                    end
+                  end
+
+                  tracer = tracer_provider.tracer("braintrust")
+
+                  # Mark that we're inside a complete() call
+                  @braintrust_in_complete = true
+
+                  begin
+                    if block
+                      # Handle streaming request
+                      wrapped_block = proc do |chunk|
+                        block.call(chunk)
+                      end
+                      Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.handle_streaming_complete(self, tracer, block) do |aggregated_chunks|
+                        super(&proc do |chunk|
+                          aggregated_chunks << chunk
+                          wrapped_block.call(chunk)
+                        end)
+                      end
+                    else
+                      # Handle non-streaming request
+                      Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.handle_non_streaming_complete(self, tracer) do
+                        super()
+                      end
+                    end
+                  ensure
+                    @braintrust_in_complete = false
+                  end
+                end
+              end
+            end
+
+            # Handle streaming complete request with tracing
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @param tracer [OpenTelemetry::Trace::Tracer] the tracer
+            # @param block [Proc] the streaming block
+            def self.handle_streaming_complete(chat, tracer, block)
+              # Start span immediately for accurate timing
+              span = tracer.start_span("ruby_llm.chat")
+
+              aggregated_chunks = []
+
+              # Extract metadata and build input messages
+              # For complete(), messages are already in chat history (no prompt param)
+              metadata = extract_metadata(chat, stream: true)
+              input_messages = build_input_messages(chat, nil)
+
+              # Set input and metadata
+              set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
+              set_json_attr(span, "braintrust.metadata", metadata)
+
+              # Call original method, passing aggregated_chunks to the block
+              begin
+                result = yield aggregated_chunks
+              rescue => e
+                span.record_exception(e)
+                span.status = ::OpenTelemetry::Trace::Status.error("RubyLLM error: #{e.message}")
+                span.finish
+                raise
+              end
+
+              # Set output and metrics from aggregated chunks
+              capture_streaming_output(span, aggregated_chunks, result)
+              span.finish
+              result
+            end
+
+            # Handle non-streaming complete request with tracing
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @param tracer [OpenTelemetry::Trace::Tracer] the tracer
+            def self.handle_non_streaming_complete(chat, tracer)
+              # Start span immediately for accurate timing
+              span = tracer.start_span("ruby_llm.chat")
+
+              begin
+                # Extract metadata and build input messages
+                # For complete(), messages are already in chat history (no prompt param)
+                metadata = extract_metadata(chat)
+                input_messages = build_input_messages(chat, nil)
+                set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
+
+                # Remember message count before the call (for tool call detection)
+                messages_before_count = (chat.respond_to?(:messages) && chat.messages) ? chat.messages.length : 0
+
+                # Call the original method
+                response = yield
+
+                # Capture output and metrics
+                capture_non_streaming_output(span, chat, response, messages_before_count)
+
+                # Set metadata
+                set_json_attr(span, "braintrust.metadata", metadata)
+
+                response
+              ensure
+                span.finish
+              end
+            end
+
+            # Extract metadata from chat instance (provider, model, tools, stream flag)
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @param stream [Boolean] whether this is a streaming request
+            # @return [Hash] metadata hash
+            def self.extract_metadata(chat, stream: false)
+              metadata = {"provider" => "ruby_llm"}
+              metadata["stream"] = true if stream
+
+              # Extract model
+              if chat.respond_to?(:model) && chat.model
+                model = chat.model.respond_to?(:id) ? chat.model.id : chat.model.to_s
+                metadata["model"] = model
+              end
+
+              # Extract tools (only for non-streaming)
+              if !stream && chat.respond_to?(:tools) && chat.tools&.any?
+                metadata["tools"] = extract_tools_metadata(chat)
+              end
+
+              metadata
+            end
+
+            # Extract tools metadata from chat instance
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @return [Array<Hash>] array of tool schemas
+            def self.extract_tools_metadata(chat)
+              provider = chat.instance_variable_get(:@provider) if chat.instance_variable_defined?(:@provider)
+
+              chat.tools.map do |_name, tool|
+                format_tool_schema(tool, provider)
+              end
+            end
+
+            # Format a tool into OpenAI-compatible schema
+            # @param tool [Object] the tool object
+            # @param provider [Object, nil] the provider instance
+            # @return [Hash] tool schema
+            def self.format_tool_schema(tool, provider)
+              tool_schema = nil
+
+              # Use provider-specific tool_for method if available
+              if provider
+                begin
+                  tool_schema = if provider.is_a?(::RubyLLM::Providers::OpenAI)
+                    ::RubyLLM::Providers::OpenAI::Tools.tool_for(tool)
+                  elsif defined?(::RubyLLM::Providers::Anthropic) && provider.is_a?(::RubyLLM::Providers::Anthropic)
+                    ::RubyLLM::Providers::Anthropic::Tools.tool_for(tool)
+                  elsif tool.respond_to?(:params_schema) && tool.params_schema
+                    build_basic_tool_schema(tool)
+                  else
+                    build_minimal_tool_schema(tool)
+                  end
+                rescue NameError, ArgumentError => e
+                  # If provider-specific tool_for fails, fall back to basic format
+                  Log.debug("Failed to extract tool schema using provider-specific method: #{e.class.name}: #{e.message}")
+                  tool_schema = (tool.respond_to?(:params_schema) && tool.params_schema) ? build_basic_tool_schema(tool) : build_minimal_tool_schema(tool)
+                end
+              else
+                # No provider, use basic format with params_schema if available
+                tool_schema = (tool.respond_to?(:params_schema) && tool.params_schema) ? build_basic_tool_schema(tool) : build_minimal_tool_schema(tool)
+              end
+
+              # Strip RubyLLM-specific fields to match native OpenAI format
+              # Handle both symbol and string keys
+              function_key = tool_schema&.key?(:function) ? :function : "function"
+              if tool_schema && tool_schema[function_key]
+                tool_params = tool_schema[function_key][:parameters] || tool_schema[function_key]["parameters"]
+                if tool_params.is_a?(Hash)
+                  tool_params.delete("strict")
+                  tool_params.delete(:strict)
+                  tool_params.delete("additionalProperties")
+                  tool_params.delete(:additionalProperties)
+                end
+              end
+
+              tool_schema
+            end
+
+            # Build a basic tool schema with parameters
+            # @param tool [Object] the tool object
+            # @return [Hash] tool schema
+            def self.build_basic_tool_schema(tool)
+              {
+                "type" => "function",
+                "function" => {
+                  "name" => tool.name.to_s,
+                  "description" => tool.description,
+                  "parameters" => tool.params_schema
+                }
+              }
+            end
+
+            # Build a minimal tool schema without parameters
+            # @param tool [Object] the tool object
+            # @return [Hash] tool schema
+            def self.build_minimal_tool_schema(tool)
+              {
+                "type" => "function",
+                "function" => {
+                  "name" => tool.name.to_s,
+                  "description" => tool.description,
+                  "parameters" => {}
+                }
+              }
+            end
+
+            # Build input messages array from chat history and prompt
+            # Formats messages to match OpenAI's message format
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @param prompt [String, nil] the user prompt
+            # @return [Array<Hash>] array of message hashes
+            def self.build_input_messages(chat, prompt)
+              input_messages = []
+
+              # Add conversation history, formatting each message to OpenAI format
+              if chat.respond_to?(:messages) && chat.messages&.any?
+                input_messages = chat.messages.map { |m| format_message_for_input(m) }
+              end
+
+              # Add current prompt
+              input_messages << {"role" => "user", "content" => prompt} if prompt
+
+              input_messages
+            end
+
+            # Format a RubyLLM message to OpenAI-compatible format
+            # @param msg [Object] the RubyLLM message
+            # @return [Hash] OpenAI-formatted message
+            def self.format_message_for_input(msg)
+              formatted = {
+                "role" => msg.role.to_s
+              }
+
+              # Handle content
+              if msg.respond_to?(:content) && msg.content
+                # Convert Ruby hash notation to JSON string for tool results
+                content = msg.content
+                if msg.role.to_s == "tool" && content.is_a?(String) && content.start_with?("{:")
+                  # Ruby hash string like "{:location=>...}" - try to parse and re-serialize as JSON
+                  begin
+                    # Simple conversion: replace Ruby hash syntax with JSON
+                    content = content.gsub(/(?<=\{|, ):(\w+)=>/, '"\1":').gsub("=>", ":")
+                  rescue
+                    # Keep original if conversion fails
+                  end
+                end
+                formatted["content"] = content
+              end
+
+              # Handle tool_calls for assistant messages
+              if msg.respond_to?(:tool_calls) && msg.tool_calls&.any?
+                formatted["tool_calls"] = format_tool_calls(msg.tool_calls)
+                formatted["content"] = nil
+              end
+
+              # Handle tool_call_id for tool result messages
+              if msg.respond_to?(:tool_call_id) && msg.tool_call_id
+                formatted["tool_call_id"] = msg.tool_call_id
+              end
+
+              formatted
+            end
+
+            # Capture streaming output and metrics
+            # @param span [OpenTelemetry::Trace::Span] the span
+            # @param aggregated_chunks [Array] the aggregated chunks
+            # @param result [Object] the result object
+            def self.capture_streaming_output(span, aggregated_chunks, result)
+              return if aggregated_chunks.empty?
+
+              # Aggregate content from chunks
+              aggregated_content = aggregated_chunks.map { |c|
+                c.respond_to?(:content) ? c.content : c.to_s
+              }.join
+
+              output = [{
+                role: "assistant",
+                content: aggregated_content
+              }]
+              set_json_attr(span, "braintrust.output_json", output)
+
+              # Try to extract usage from the result
+              if result.respond_to?(:usage) && result.usage
+                metrics = parse_usage_tokens(result.usage)
+                set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+              end
+            end
+
+            # Capture non-streaming output and metrics
+            # @param span [OpenTelemetry::Trace::Span] the span
+            # @param chat [RubyLLM::Chat] the chat instance
+            # @param response [Object] the response object
+            # @param messages_before_count [Integer] message count before the call
+            def self.capture_non_streaming_output(span, chat, response, messages_before_count)
+              return unless response
+
+              # Build message object from response
+              message = {
+                "role" => "assistant",
+                "content" => nil
+              }
+
+              # Add content if it's a simple text response
+              if response.respond_to?(:content) && response.content && !response.content.empty?
+                message["content"] = response.content
+              end
+
+              # Check if there are tool calls in the messages history
+              # Look at messages added during this complete() call
+              if chat.respond_to?(:messages) && chat.messages
+                assistant_msg = chat.messages[messages_before_count..].find { |m|
+                  m.role.to_s == "assistant" && m.respond_to?(:tool_calls) && m.tool_calls&.any?
+                }
+
+                if assistant_msg&.tool_calls&.any?
+                  message["tool_calls"] = format_tool_calls(assistant_msg.tool_calls)
+                  message["content"] = nil
+                end
+              end
+
+              # Format as OpenAI choices[] structure
+              output = [{
+                "index" => 0,
+                "message" => message,
+                "finish_reason" => message["tool_calls"] ? "tool_calls" : "stop"
+              }]
+
+              set_json_attr(span, "braintrust.output_json", output)
+
+              # Set metrics (token usage)
+              if response.respond_to?(:to_h)
+                response_hash = response.to_h
+                usage = {
+                  "input_tokens" => response_hash[:input_tokens],
+                  "output_tokens" => response_hash[:output_tokens],
+                  "cached_tokens" => response_hash[:cached_tokens],
+                  "cache_creation_tokens" => response_hash[:cache_creation_tokens]
+                }.compact
+
+                unless usage.empty?
+                  metrics = parse_usage_tokens(usage)
+                  set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+                end
+              end
+            end
+
+            # Format tool calls into OpenAI format
+            # @param tool_calls [Hash, Array] the tool calls
+            # @return [Array<Hash>] formatted tool calls
+            def self.format_tool_calls(tool_calls)
+              tool_calls.map do |_id, tc|
+                # Ensure arguments is a JSON string (OpenAI format)
+                args = tc.arguments
+                args_string = args.is_a?(String) ? args : JSON.generate(args)
+
+                {
+                  "id" => tc.id,
+                  "type" => "function",
+                  "function" => {
+                    "name" => tc.name,
+                    "arguments" => args_string
+                  }
+                }
+              end
+            end
+          end
+        end
+      end
+    end
+  end
+end
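
> Putting the `wrap`/`unwrap` API above together, a minimal global round-trip (model name and prompt are illustrative):

```ruby
# Class-level wrap installs an initialize hook, so new Chat instances are auto-wrapped
Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap

chat = RubyLLM.chat(model: "gpt-4o-mini") # auto-wrapped on initialize
chat.ask("What is 2 + 2?")                # traced via the complete() interceptor

# unwrap turns the initialize hook into a pass-through: future instances are not
# wrapped, while already-wrapped instances keep their singleton wrapper
Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.unwrap
```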
@@ -2,6 +2,7 @@
 
 require "opentelemetry/sdk"
 require "json"
+require_relative "../tokens"
 
 module Braintrust
   module Trace
@@ -17,72 +18,11 @@ module Braintrust
         span.set_attribute(attr_name, JSON.generate(obj))
       end
 
-      # Parse usage tokens from OpenAI API response, handling nested token_details
-      # Maps OpenAI field names to Braintrust standard names:
-      # - input_tokens → prompt_tokens
-      # - output_tokens → completion_tokens
-      # - total_tokens → tokens
-      # - *_tokens_details.* → prefix_*
-      #
+      # Parse usage tokens from OpenAI API response
       # @param usage [Hash, Object] usage object from OpenAI response
       # @return [Hash<String, Integer>] metrics hash with normalized names
       def self.parse_usage_tokens(usage)
-        metrics = {}
-        return metrics unless usage
-
-        # Convert to hash if it's an object
-        usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
-
-        usage_hash.each do |key, value|
-          key_str = key.to_s
-
-          # Handle nested *_tokens_details objects
-          if key_str.end_with?("_tokens_details")
-            # Convert to hash if it's an object (OpenAI gem returns objects)
-            details_hash = value.respond_to?(:to_h) ? value.to_h : value
-            next unless details_hash.is_a?(Hash)
-
-            # Extract prefix (e.g., "prompt" from "prompt_tokens_details")
-            prefix = key_str.sub(/_tokens_details$/, "")
-            # Translate "input" → "prompt", "output" → "completion"
-            prefix = translate_metric_prefix(prefix)
-
-            # Process nested fields (e.g., cached_tokens, reasoning_tokens)
-            details_hash.each do |detail_key, detail_value|
-              next unless detail_value.is_a?(Numeric)
-              metrics["#{prefix}_#{detail_key}"] = detail_value.to_i
-            end
-          elsif value.is_a?(Numeric)
-            # Handle top-level token fields
-            case key_str
-            when "input_tokens"
-              metrics["prompt_tokens"] = value.to_i
-            when "output_tokens"
-              metrics["completion_tokens"] = value.to_i
-            when "total_tokens"
-              metrics["tokens"] = value.to_i
-            else
-              # Keep other numeric fields as-is (future-proofing)
-              metrics[key_str] = value.to_i
-            end
-          end
-        end
-
-        metrics
-      end
-
-      # Translate metric prefix to be consistent between different API formats
-      # @param prefix [String] the prefix to translate
-      # @return [String] translated prefix
-      def self.translate_metric_prefix(prefix)
-        case prefix
-        when "input"
-          "prompt"
-        when "output"
-          "completion"
-        else
-          prefix
-        end
+        Braintrust::Trace.parse_openai_usage_tokens(usage)
       end
 
       # Aggregate streaming chunks into a single response structure
@@ -0,0 +1,101 @@
+# frozen_string_literal: true
+
+module Braintrust
+  module Trace
+    # Parse OpenAI usage tokens into normalized Braintrust metrics.
+    # Handles standard fields and *_tokens_details nested objects.
+    # @param usage [Hash, Object] usage object from OpenAI response
+    # @return [Hash<String, Integer>] normalized metrics
+    def self.parse_openai_usage_tokens(usage)
+      metrics = {}
+      return metrics unless usage
+
+      usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
+      return metrics unless usage_hash.is_a?(Hash)
+
+      # Field mappings: OpenAI → Braintrust
+      field_map = {
+        "prompt_tokens" => "prompt_tokens",
+        "completion_tokens" => "completion_tokens",
+        "total_tokens" => "tokens"
+      }
+
+      # Prefix mappings for *_tokens_details
+      prefix_map = {
+        "prompt" => "prompt",
+        "completion" => "completion"
+      }
+
+      usage_hash.each do |key, value|
+        key_str = key.to_s
+
+        if value.is_a?(Numeric)
+          target = field_map[key_str]
+          metrics[target] = value.to_i if target
+        elsif key_str.end_with?("_tokens_details")
+          # Convert to hash if it's an object (OpenAI SDK returns objects)
+          details_hash = value.respond_to?(:to_h) ? value.to_h : value
+          next unless details_hash.is_a?(Hash)
+
+          raw_prefix = key_str.sub(/_tokens_details$/, "")
+          prefix = prefix_map[raw_prefix] || raw_prefix
+          details_hash.each do |detail_key, detail_value|
+            next unless detail_value.is_a?(Numeric)
+            metrics["#{prefix}_#{detail_key}"] = detail_value.to_i
+          end
+        end
+      end
+
+      # Calculate total if missing
+      if !metrics.key?("tokens") && metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
+        metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+      end
+
+      metrics
+    end
+
+    # Parse Anthropic usage tokens into normalized Braintrust metrics.
+    # Accumulates cache tokens into prompt_tokens and calculates total.
+    # @param usage [Hash, Object] usage object from Anthropic response
+    # @return [Hash<String, Integer>] normalized metrics
+    def self.parse_anthropic_usage_tokens(usage)
+      metrics = {}
+      return metrics unless usage
+
+      usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
+      return metrics unless usage_hash.is_a?(Hash)
+
+      # Field mappings: Anthropic → Braintrust
+      # Also handles RubyLLM's simplified cache field names
+      field_map = {
+        "input_tokens" => "prompt_tokens",
+        "output_tokens" => "completion_tokens",
+        "cache_read_input_tokens" => "prompt_cached_tokens",
+        "cache_creation_input_tokens" => "prompt_cache_creation_tokens",
+        # RubyLLM uses simplified names
+        "cached_tokens" => "prompt_cached_tokens",
+        "cache_creation_tokens" => "prompt_cache_creation_tokens"
+      }
+
+      usage_hash.each do |key, value|
+        next unless value.is_a?(Numeric)
+        key_str = key.to_s
+        target = field_map[key_str]
+        metrics[target] = value.to_i if target
+      end
+
+      # Accumulate cache tokens into prompt_tokens (matching TS/Python SDKs)
+      prompt_tokens = (metrics["prompt_tokens"] || 0) +
+                      (metrics["prompt_cached_tokens"] || 0) +
+                      (metrics["prompt_cache_creation_tokens"] || 0)
+      metrics["prompt_tokens"] = prompt_tokens if prompt_tokens > 0
+
+      # Calculate total
+      if metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
+        metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+      end
+
+      metrics
+    end
+  end
+end
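
> A worked example of the OpenAI-side helper above, including a nested `*_tokens_details` object (the counts are made up):

```ruby
usage = {
  "prompt_tokens" => 100,
  "completion_tokens" => 20,
  "total_tokens" => 120,
  "prompt_tokens_details" => {"cached_tokens" => 80},
  "completion_tokens_details" => {"reasoning_tokens" => 5}
}

Braintrust::Trace.parse_openai_usage_tokens(usage)
# => {"prompt_tokens"=>100, "completion_tokens"=>20, "tokens"=>120,
#     "prompt_cached_tokens"=>80, "completion_reasoning_tokens"=>5}
```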
@@ -40,6 +40,23 @@ rescue LoadError
   # Anthropic gem not installed - integration will not be available
 end
 
+# RubyLLM integration is optional - automatically loaded if ruby_llm gem is available
+#
+# Usage:
+#   # Wrap the class once (affects all instances):
+#   Braintrust::Trace::RubyLLM.wrap
+#
+#   # Or wrap a specific instance:
+#   chat = RubyLLM.chat(model: "gpt-4o-mini")
+#   Braintrust::Trace::RubyLLM.wrap(chat)
+#
+begin
+  require "ruby_llm"
+  require_relative "trace/contrib/github.com/crmne/ruby_llm"
+rescue LoadError
+  # RubyLLM gem not installed - integration will not be available
+end
+
 module Braintrust
   module Trace
     # Set up OpenTelemetry tracing with Braintrust
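
> Because the integration above is only loaded when the `ruby_llm` gem is present, application code that may run without the gem can guard on the constant. A sketch; note it uses the fully qualified name from the README, since the shorter `Braintrust::Trace::RubyLLM` spelling in the comment above does not appear as an alias anywhere else in this diff:

```ruby
if defined?(Braintrust::Trace::Contrib::Github::Crmne::RubyLLM)
  Braintrust::Trace::Contrib::Github::Crmne::RubyLLM.wrap
end
```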
@@ -1,5 +1,5 @@
 # frozen_string_literal: true
 
 module Braintrust
-  VERSION = "0.0.5"
+  VERSION = "0.0.7"
 end
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: braintrust
 version: !ruby/object:Gem::Version
-  version: 0.0.5
+  version: 0.0.7
 platform: ruby
 authors:
 - Braintrust
@@ -205,9 +205,11 @@ files:
 - lib/braintrust/trace/attachment.rb
 - lib/braintrust/trace/contrib/anthropic.rb
 - lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb
+- lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb
 - lib/braintrust/trace/contrib/openai.rb
 - lib/braintrust/trace/span_filter.rb
 - lib/braintrust/trace/span_processor.rb
+- lib/braintrust/trace/tokens.rb
 - lib/braintrust/version.rb
 homepage: https://github.com/braintrustdata/braintrust-sdk-ruby
 licenses: