braintrust 0.0.12 → 0.1.0

This diff shows the content of publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +213 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  7. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  8. data/lib/braintrust/contrib/anthropic/patcher.rb +62 -0
  9. data/lib/braintrust/contrib/context.rb +56 -0
  10. data/lib/braintrust/contrib/integration.rb +160 -0
  11. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  12. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  15. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  16. data/lib/braintrust/contrib/openai/patcher.rb +130 -0
  17. data/lib/braintrust/contrib/patcher.rb +76 -0
  18. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  19. data/lib/braintrust/contrib/registry.rb +107 -0
  20. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  21. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  22. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  23. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  24. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  25. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  26. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  27. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  29. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  30. data/lib/braintrust/contrib/ruby_openai/patcher.rb +85 -0
  31. data/lib/braintrust/contrib/setup.rb +168 -0
  32. data/lib/braintrust/contrib/support/openai.rb +72 -0
  33. data/lib/braintrust/contrib/support/otel.rb +23 -0
  34. data/lib/braintrust/contrib.rb +205 -0
  35. data/lib/braintrust/internal/env.rb +33 -0
  36. data/lib/braintrust/internal/time.rb +44 -0
  37. data/lib/braintrust/setup.rb +50 -0
  38. data/lib/braintrust/state.rb +5 -0
  39. data/lib/braintrust/trace.rb +0 -51
  40. data/lib/braintrust/version.rb +1 -1
  41. data/lib/braintrust.rb +10 -1
  42. metadata +38 -7
  43. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  44. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  45. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
  46. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  47. data/lib/braintrust/trace/tokens.rb +0 -109
data/lib/braintrust/contrib/anthropic/instrumentation/common.rb
@@ -0,0 +1,53 @@
+ # frozen_string_literal: true
+
+ module Braintrust
+   module Contrib
+     module Anthropic
+       module Instrumentation
+         # Common utilities for Anthropic SDK instrumentation.
+         module Common
+           # Parse Anthropic SDK usage tokens into normalized Braintrust metrics.
+           # Accumulates cache tokens into prompt_tokens and calculates total.
+           # Works with both Hash objects and SDK response objects (via to_h).
+           # @param usage [Hash, Object] usage object from Anthropic response
+           # @return [Hash<String, Integer>] normalized metrics
+           def self.parse_usage_tokens(usage)
+             metrics = {}
+             return metrics unless usage
+
+             usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
+             return metrics unless usage_hash.is_a?(Hash)
+
+             # Anthropic SDK field mappings → Braintrust metrics
+             field_map = {
+               "input_tokens" => "prompt_tokens",
+               "output_tokens" => "completion_tokens",
+               "cache_read_input_tokens" => "prompt_cached_tokens",
+               "cache_creation_input_tokens" => "prompt_cache_creation_tokens"
+             }
+
+             usage_hash.each do |key, value|
+               next unless value.is_a?(Numeric)
+               key_str = key.to_s
+               target = field_map[key_str]
+               metrics[target] = value.to_i if target
+             end
+
+             # Accumulate cache tokens into prompt_tokens (matching TS/Python SDKs)
+             prompt_tokens = (metrics["prompt_tokens"] || 0) +
+               (metrics["prompt_cached_tokens"] || 0) +
+               (metrics["prompt_cache_creation_tokens"] || 0)
+             metrics["prompt_tokens"] = prompt_tokens if prompt_tokens > 0
+
+             # Calculate total
+             if metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
+               metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+             end
+
+             metrics
+           end
+         end
+       end
+     end
+   end
+ end
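To make the normalization in parse_usage_tokens concrete, here is an illustrative call; the usage hash below is a made-up Anthropic payload, not part of this release:

usage = {
  "input_tokens" => 100,
  "output_tokens" => 40,
  "cache_read_input_tokens" => 25
}

metrics = Braintrust::Contrib::Anthropic::Instrumentation::Common.parse_usage_tokens(usage)
# => {"prompt_tokens"=>125, "completion_tokens"=>40, "prompt_cached_tokens"=>25, "tokens"=>165}
# prompt_tokens folds the 25 cache-read tokens into the 100 input tokens;
# "tokens" is the sum of prompt_tokens and completion_tokens.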
data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb
@@ -0,0 +1,232 @@
+ # frozen_string_literal: true
+
+ require "opentelemetry/sdk"
+ require "json"
+ require_relative "../../support/otel"
+ require_relative "common"
+ require_relative "../../../internal/time"
+
+ module Braintrust
+   module Contrib
+     module Anthropic
+       module Instrumentation
+         # Messages instrumentation for Anthropic.
+         # Wraps create() and stream() methods to create spans.
+         module Messages
+           def self.included(base)
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           module InstanceMethods
+             METADATA_FIELDS = %i[
+               model max_tokens temperature top_p top_k stop_sequences
+               stream tools tool_choice thinking metadata service_tier
+             ].freeze
+
+             # Wrap synchronous messages.create
+             def create(**params)
+               client = instance_variable_get(:@client)
+               tracer = Braintrust::Contrib.tracer_for(client)
+
+               tracer.in_span("anthropic.messages.create") do |span|
+                 metadata = build_metadata(params)
+                 set_input(span, params)
+
+                 response = nil
+                 time_to_first_token = Braintrust::Internal::Time.measure do
+                   response = super(**params)
+                 end
+
+                 set_output(span, response)
+                 set_metrics(span, response, time_to_first_token)
+                 finalize_metadata(span, metadata, response)
+
+                 response
+               end
+             end
+
+             # Wrap streaming messages.stream
+             # Stores context on stream object for span creation during consumption
+             def stream(**params)
+               client = instance_variable_get(:@client)
+               tracer = Braintrust::Contrib.tracer_for(client)
+               metadata = build_metadata(params, stream: true)
+
+               stream_obj = super
+               Braintrust::Contrib::Context.set!(stream_obj,
+                 tracer: tracer,
+                 params: params,
+                 metadata: metadata,
+                 messages_instance: self,
+                 start_time: Braintrust::Internal::Time.measure)
+               stream_obj
+             end
+
+             private
+
+             def finalize_stream_span(span, stream_obj, metadata, time_to_first_token)
+               if stream_obj.respond_to?(:accumulated_message)
+                 begin
+                   msg = stream_obj.accumulated_message
+                   set_output(span, msg)
+                   set_metrics(span, msg, time_to_first_token)
+                   metadata["stop_reason"] = msg.stop_reason if msg.respond_to?(:stop_reason) && msg.stop_reason
+                   metadata["model"] = msg.model if msg.respond_to?(:model) && msg.model
+                 rescue => e
+                   Braintrust::Log.debug("Failed to get accumulated message: #{e.message}")
+                 end
+               end
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+
+             def build_metadata(params, stream: false)
+               metadata = {
+                 "provider" => "anthropic",
+                 "endpoint" => "/v1/messages"
+               }
+               metadata["stream"] = true if stream
+               METADATA_FIELDS.each do |field|
+                 metadata[field.to_s] = params[field] if params.key?(field)
+               end
+               metadata
+             end
+
+             def set_input(span, params)
+               input_messages = []
+
+               if params[:system]
+                 system_content = params[:system]
+                 if system_content.is_a?(Array)
+                   system_text = system_content.map { |blk|
+                     blk.is_a?(Hash) ? blk[:text] : blk
+                   }.join("\n")
+                   input_messages << {role: "system", content: system_text}
+                 else
+                   input_messages << {role: "system", content: system_content}
+                 end
+               end
+
+               if params[:messages]
+                 messages_array = params[:messages].map(&:to_h)
+                 input_messages.concat(messages_array)
+               end
+
+               Support::OTel.set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
+             end
+
+             def set_output(span, response)
+               return unless response.respond_to?(:content) && response.content
+
+               content_array = response.content.map(&:to_h)
+               output = [{
+                 role: response.respond_to?(:role) ? response.role : "assistant",
+                 content: content_array
+               }]
+               Support::OTel.set_json_attr(span, "braintrust.output_json", output)
+             end
+
+             def set_metrics(span, response, time_to_first_token)
+               metrics = {}
+               if response.respond_to?(:usage) && response.usage
+                 metrics = Common.parse_usage_tokens(response.usage)
+               end
+               metrics["time_to_first_token"] = time_to_first_token if time_to_first_token
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+
+             def finalize_metadata(span, metadata, response)
+               metadata["stop_reason"] = response.stop_reason if response.respond_to?(:stop_reason) && response.stop_reason
+               metadata["stop_sequence"] = response.stop_sequence if response.respond_to?(:stop_sequence) && response.stop_sequence
+               metadata["model"] = response.model if response.respond_to?(:model) && response.model
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+           end
+         end
+
+         # MessageStream instrumentation for Anthropic.
+         # Prepended to Anthropic::Helpers::Streaming::MessageStream to create spans on consumption.
+         module MessageStream
+           def self.included(base)
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           module InstanceMethods
+             def each(&block)
+               ctx = Braintrust::Contrib::Context.from(self)
+               return super unless ctx&.[](:tracer) && !ctx[:consumed]
+
+               trace_consumption(ctx) do
+                 super do |*args|
+                   ctx[:time_to_first_token] ||= Braintrust::Internal::Time.measure(ctx[:start_time])
+                   block.call(*args)
+                 end
+               end
+             end
+
+             def text
+               ctx = Braintrust::Contrib::Context.from(self)
+               return super unless ctx&.[](:tracer) && !ctx[:consumed]
+
+               original_text_enum = super
+               Enumerator.new do |output|
+                 trace_consumption(ctx) do
+                   original_text_enum.each do |text_chunk|
+                     ctx[:time_to_first_token] ||= Braintrust::Internal::Time.measure(ctx[:start_time])
+                     output << text_chunk
+                   end
+                 end
+               end
+             end
+
+             def close
+               ctx = Braintrust::Contrib::Context.from(self)
+               if ctx&.[](:tracer) && !ctx[:consumed]
+                 # Stream closed without consumption - create minimal span
+                 ctx[:consumed] = true
+                 tracer = ctx[:tracer]
+                 params = ctx[:params]
+                 metadata = ctx[:metadata]
+                 messages_instance = ctx[:messages_instance]
+
+                 tracer.in_span("anthropic.messages.create") do |span|
+                   messages_instance.send(:set_input, span, params)
+                   Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+                 end
+               end
+               super
+             end
+
+             private
+
+             def trace_consumption(ctx)
+               # Mark as consumed to prevent re-entry (accumulated_message calls each internally)
+               ctx[:consumed] = true
+
+               tracer = ctx[:tracer]
+               params = ctx[:params]
+               metadata = ctx[:metadata]
+               messages_instance = ctx[:messages_instance]
+
+               tracer.in_span("anthropic.messages.create") do |span|
+                 messages_instance.send(:set_input, span, params)
+                 Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+
+                 yield
+
+                 messages_instance.send(:finalize_stream_span, span, self, metadata, ctx[:time_to_first_token])
+               end
+             end
+           end
+         end
+       end
+     end
+   end
+ end
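A minimal sketch of how these wrappers behave once applied. The Braintrust.instrument! call mirrors the one used by the deprecated OpenAI shim later in this diff; the client constructor, model name, and API-key handling are assumptions about the anthropic SDK, not taken from this release:

require "anthropic"
require "braintrust"

client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])  # assumed SDK constructor
Braintrust.instrument!(:anthropic, target: client)                 # instance-level instrumentation

# create() now runs inside an "anthropic.messages.create" span that records
# braintrust.input_json, braintrust.output_json, braintrust.metrics, and braintrust.metadata.
client.messages.create(
  model: "claude-3-5-sonnet-latest",
  max_tokens: 256,
  messages: [{role: "user", content: "Hello"}]
)

# stream() only attaches a Context to the returned stream; the span is created
# lazily when the MessageStream is consumed (each/text) or closed unconsumed.
stream = client.messages.stream(
  model: "claude-3-5-sonnet-latest",
  max_tokens: 256,
  messages: [{role: "user", content: "Hello"}]
)
stream.text.each { |chunk| print chunk }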
data/lib/braintrust/contrib/anthropic/integration.rb
@@ -0,0 +1,53 @@
+ # frozen_string_literal: true
+
+ require_relative "../integration"
+
+ module Braintrust
+   module Contrib
+     module Anthropic
+       # Anthropic integration for automatic instrumentation.
+       # Instruments the anthropic gem (https://github.com/anthropics/anthropic-sdk-ruby).
+       class Integration
+         include Braintrust::Contrib::Integration
+
+         MINIMUM_VERSION = "0.3.0"
+
+         GEM_NAMES = ["anthropic"].freeze
+         REQUIRE_PATHS = ["anthropic"].freeze
+
+         # @return [Symbol] Unique identifier for this integration
+         def self.integration_name
+           :anthropic
+         end
+
+         # @return [Array<String>] Gem names this integration supports
+         def self.gem_names
+           GEM_NAMES
+         end
+
+         # @return [Array<String>] Require paths for auto-instrument detection
+         def self.require_paths
+           REQUIRE_PATHS
+         end
+
+         # @return [String] Minimum compatible version
+         def self.minimum_version
+           MINIMUM_VERSION
+         end
+
+         # @return [Boolean] true if anthropic gem is available
+         def self.loaded?
+           defined?(::Anthropic::Client) ? true : false
+         end
+
+         # Lazy-load the patcher only when actually patching.
+         # This keeps the integration stub lightweight.
+         # @return [Array<Class>] The patcher classes
+         def self.patchers
+           require_relative "patcher"
+           [MessagesPatcher]
+         end
+       end
+     end
+   end
+ end
data/lib/braintrust/contrib/anthropic/patcher.rb
@@ -0,0 +1,62 @@
+ # frozen_string_literal: true
+
+ require_relative "../patcher"
+ require_relative "instrumentation/messages"
+
+ module Braintrust
+   module Contrib
+     module Anthropic
+       # Patcher for Anthropic messages.
+       # Instruments Anthropic::Messages#create and #stream methods.
+       class MessagesPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::Anthropic::Client)
+           end
+
+           def patched?(**options)
+             target_class = get_singleton_class(options[:target]) || ::Anthropic::Resources::Messages
+             Instrumentation::Messages.applied?(target_class)
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             # MessageStream is shared across all clients, so patch at class level.
+             # The instrumentation short-circuits when no context is present,
+             # so uninstrumented clients' streams pass through unaffected.
+             patch_message_stream
+
+             if options[:target]
+               # Instance-level (for only this client instance)
+               raise ArgumentError, "target must be a kind of ::Anthropic::Client" unless options[:target].is_a?(::Anthropic::Client)
+
+               get_singleton_class(options[:target]).include(Instrumentation::Messages)
+             else
+               # Class-level (for all client instances)
+               ::Anthropic::Resources::Messages.include(Instrumentation::Messages)
+             end
+           end
+
+           private
+
+           def get_singleton_class(client)
+             client&.messages&.singleton_class
+           end
+
+           def patch_message_stream
+             return unless defined?(::Anthropic::Helpers::Streaming::MessageStream)
+             return if Instrumentation::MessageStream.applied?(::Anthropic::Helpers::Streaming::MessageStream)
+
+             ::Anthropic::Helpers::Streaming::MessageStream.include(Instrumentation::MessageStream)
+           end
+         end
+       end
+     end
+   end
+ end
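The two patching modes above can also be driven through the patcher directly; a sketch, assuming patch! is supplied by the Braintrust::Contrib::Patcher base class added elsewhere in this release and that the client constructor is as in the anthropic SDK:

# Class-level: every Anthropic::Resources::Messages instance gets the wrapper.
Braintrust::Contrib::Anthropic::MessagesPatcher.patch!

# Instance-level: only this client's messages resource is patched (via its
# singleton class); other clients' streams short-circuit and stay untraced.
client = Anthropic::Client.new(api_key: ENV["ANTHROPIC_API_KEY"])  # assumed SDK constructor
Braintrust::Contrib::Anthropic::MessagesPatcher.patch!(target: client)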
data/lib/braintrust/contrib/context.rb
@@ -0,0 +1,56 @@
+ # frozen_string_literal: true
+
+ module Braintrust
+   module Contrib
+     # Per-instance or per-class configuration context.
+     # Allows attaching generic configuration to specific objects or classes.
+     class Context
+       # Set or update context on a target object.
+       # Creates a new context if one doesn't exist, or updates existing context.
+       # @param target [Object] The object to attach context to
+       # @param options [Hash] Configuration options to store
+       # @return [Context, nil] The existing context if updated, nil if created new or options empty
+       def self.set!(target, **options)
+         return nil if options.empty?
+
+         if (ctx = from(target))
+           # Update existing context
+           options.each { |k, v| ctx[k] = v }
+         else
+           # Create and attach new context
+           target.instance_variable_set(:@braintrust_context, new(**options))
+         end
+
+         ctx
+       end
+
+       # Retrieve context from a target.
+       # @param target [Object] The object to retrieve context from
+       # @return [Context, nil] The context if found, nil otherwise
+       def self.from(target)
+         target&.instance_variable_get(:@braintrust_context)
+       end
+
+       # @param options [Hash] Configuration options
+       def initialize(**options)
+         @options = options
+       end
+
+       def [](key)
+         @options[key]
+       end
+
+       def []=(key, value)
+         @options[key] = value
+       end
+
+       # Get an option value with a default fallback.
+       # @param key [Symbol, String] The option key
+       # @param default [Object] The default value if key not found
+       # @return [Object] The option value, or default if not found
+       def fetch(key, default)
+         @options.fetch(key, default)
+       end
+     end
+   end
+ end
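An illustrative round trip through Context; the target object and keys here are arbitrary stand-ins, and my_tracer is a hypothetical tracer object:

stream_obj = Object.new

Braintrust::Contrib::Context.set!(stream_obj, tracer: my_tracer, consumed: false)
ctx = Braintrust::Contrib::Context.from(stream_obj)

ctx[:consumed]            # => false
ctx[:consumed] = true     # mutate the stored options in place
ctx.fetch(:missing, nil)  # => nil (default fallback)

# A second set! on the same object merges into the existing context.
Braintrust::Contrib::Context.set!(stream_obj, params: {model: "claude"})
ctx[:params]              # => {model: "claude"}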
data/lib/braintrust/contrib/integration.rb
@@ -0,0 +1,160 @@
+ # frozen_string_literal: true
+
+ module Braintrust
+   module Contrib
+     # Base module defining the integration contract.
+     # Include this module in integration classes to define the schema.
+     # Delegates actual patching to a Patcher subclass.
+     module Integration
+       def self.included(base)
+         base.extend(ClassMethods)
+       end
+
+       module ClassMethods
+         # Unique symbol name for this integration (e.g., :openai, :anthropic).
+         # @return [Symbol]
+         def integration_name
+           raise NotImplementedError, "#{self} must implement integration_name"
+         end
+
+         # Array of gem names this integration supports.
+         # @return [Array<String>]
+         def gem_names
+           raise NotImplementedError, "#{self} must implement gem_names"
+         end
+
+         # Require paths for auto-instrument detection.
+         # Default implementation returns gem_names.
+         # @return [Array<String>]
+         def require_paths
+           gem_names
+         end
+
+         # Is the target library available for loading?
+         # @return [Boolean]
+         def available?
+           gem_names.any? { |name| Gem.loaded_specs.key?(name) }
+         end
+
+         # Is the target library loaded?
+         # @return [Boolean]
+         def loaded?
+           raise NotImplementedError, "#{self} must implement loaded?"
+         end
+
+         # Minimum compatible version (optional, inclusive).
+         # @return [String, nil]
+         def minimum_version
+           nil
+         end
+
+         # Maximum compatible version (optional, inclusive).
+         # @return [String, nil]
+         def maximum_version
+           nil
+         end
+
+         # Is the library version compatible?
+         # @return [Boolean]
+         def compatible?
+           return false unless available?
+
+           gem_names.each do |name|
+             spec = Gem.loaded_specs[name]
+             next unless spec
+
+             version = spec.version
+             return false if minimum_version && version < Gem::Version.new(minimum_version)
+             return false if maximum_version && version > Gem::Version.new(maximum_version)
+             return true
+           end
+           false
+         end
+
+         # Array of patcher classes for this integration.
+         # Override to return multiple patchers for version-specific logic.
+         # @return [Array<Class>] Array of patcher classes
+         def patchers
+           [patcher] # Default: single patcher
+         end
+
+         # Convenience method for single patcher (existing pattern).
+         # Override this OR patchers (not both).
+         # @return [Class] The patcher class
+         def patcher
+           raise NotImplementedError, "#{self} must implement patcher or patchers"
+         end
+
+         # Instrument this integration with optional configuration.
+         # If a target is provided, configures the target instance specifically.
+         # Otherwise, applies class-level instrumentation to all instances.
+         #
+         # @param options [Hash] Configuration options
+         # @option options [Object] :target Optional target instance to instrument
+         # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+         # @return [Boolean] true if patching succeeded or was already done
+         #
+         # @example Class-level instrumentation (all clients)
+         #   integration.instrument!(tracer_provider: my_provider)
+         #
+         # @example Instance-level instrumentation (specific client)
+         #   integration.instrument!(target: client, tracer_provider: my_provider)
+         def instrument!(**options)
+           if options.empty?
+             Braintrust::Log.debug("#{integration_name}.instrument! called")
+           else
+             Braintrust::Log.debug("#{integration_name}.instrument! called (#{options.keys.join(", ")})")
+           end
+
+           if options[:target]
+             # Configure the target with provided options (exclude :target from context)
+             context_options = options.except(:target)
+             Contrib::Context.set!(options[:target], **context_options) unless context_options.empty?
+           end
+
+           patch!(**options)
+         end
+
+         # Apply instrumentation (idempotent). Tries all applicable patchers.
+         # This method is typically called by instrument! after configuration.
+         #
+         # @param options [Hash] Configuration options
+         # @option options [Object] :target Optional target instance to patch
+         # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+         # @return [Boolean] true if any patching succeeded or was already done
+         def patch!(**options)
+           unless available?
+             Braintrust::Log.debug("#{integration_name}.patch! skipped: gem not available")
+             return false
+           end
+           unless loaded?
+             Braintrust::Log.debug("#{integration_name}.patch! skipped: library not loaded")
+             return false
+           end
+           unless compatible?
+             Braintrust::Log.debug("#{integration_name}.patch! skipped: version not compatible")
+             return false
+           end
+
+           # Try all applicable patchers
+           success = false
+           patchers.each do |patch|
+             # Check if this patcher is applicable
+             next unless patch.applicable?
+
+             # Attempt to patch (patcher checks applicable? again under lock)
+             success = true if patch.patch!(**options)
+           end
+
+           Braintrust::Log.debug("#{integration_name}.patch! skipped: no applicable patcher") unless success
+           success
+         end
+
+         # Register this integration with the global registry.
+         def register!
+           Registry.instance.register(self)
+         end
+       end
+     end
+   end
+ end
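As a sketch of the contract this module defines, a hypothetical integration might look like the following; the :acme name, acme-sdk gem, Acme::Client constant, and AcmePatcher class are invented for illustration and are not part of this release:

module Braintrust
  module Contrib
    module Acme
      class Integration
        include Braintrust::Contrib::Integration

        def self.integration_name
          :acme
        end

        def self.gem_names
          ["acme-sdk"]
        end

        def self.minimum_version
          "1.0.0"
        end

        def self.loaded?
          defined?(::Acme::Client) ? true : false
        end

        def self.patcher
          AcmePatcher
        end
      end
    end
  end
end

# Registers with the global Registry, then runs the available?/loaded?/compatible?
# checks before delegating to the patcher(s).
Braintrust::Contrib::Acme::Integration.register!
Braintrust::Contrib::Acme::Integration.instrument!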
data/lib/braintrust/contrib/openai/deprecated.rb
@@ -0,0 +1,22 @@
+ # frozen_string_literal: true
+
+ # Backward compatibility shim for the old OpenAI integration API.
+ # This file now just delegates to the new API.
+
+ module Braintrust
+   module Trace
+     module OpenAI
+       # Wrap an OpenAI::Client to automatically create spans for chat completions and responses.
+       # This is the legacy API - delegates to the new contrib framework.
+       #
+       # @param client [OpenAI::Client] the OpenAI client to wrap
+       # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
+       # @return [OpenAI::Client] the wrapped client
+       def self.wrap(client, tracer_provider: nil)
+         Log.warn("Braintrust::Trace::OpenAI.wrap() is deprecated and will be removed in a future version: use Braintrust.instrument!() instead.")
+         Braintrust.instrument!(:openai, target: client, tracer_provider: tracer_provider)
+         client
+       end
+     end
+   end
+ end
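Migration from the legacy wrapper is mechanical; a before/after sketch (the OpenAI client construction is an assumption about the openai gem, not part of this diff):

client = OpenAI::Client.new  # assumed constructor from the openai gem

# braintrust 0.0.x (still works in 0.1.0, but logs a deprecation warning):
Braintrust::Trace::OpenAI.wrap(client)

# braintrust 0.1.0:
Braintrust.instrument!(:openai, target: client)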