braintrust 0.0.12 → 0.1.1

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/README.md +214 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/beta_messages.rb +242 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  7. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  8. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  9. data/lib/braintrust/contrib/anthropic/patcher.rb +145 -0
  10. data/lib/braintrust/contrib/context.rb +56 -0
  11. data/lib/braintrust/contrib/integration.rb +160 -0
  12. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  15. data/lib/braintrust/contrib/openai/instrumentation/moderations.rb +93 -0
  16. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  17. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  18. data/lib/braintrust/contrib/openai/patcher.rb +173 -0
  19. data/lib/braintrust/contrib/patcher.rb +76 -0
  20. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  21. data/lib/braintrust/contrib/registry.rb +107 -0
  22. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  23. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  24. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  25. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  26. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  27. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  29. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  30. data/lib/braintrust/contrib/ruby_openai/instrumentation/moderations.rb +94 -0
  31. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  32. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  33. data/lib/braintrust/contrib/ruby_openai/patcher.rb +120 -0
  34. data/lib/braintrust/contrib/setup.rb +168 -0
  35. data/lib/braintrust/contrib/support/openai.rb +72 -0
  36. data/lib/braintrust/contrib/support/otel.rb +23 -0
  37. data/lib/braintrust/contrib.rb +205 -0
  38. data/lib/braintrust/internal/env.rb +39 -0
  39. data/lib/braintrust/internal/time.rb +44 -0
  40. data/lib/braintrust/setup.rb +50 -0
  41. data/lib/braintrust/state.rb +6 -1
  42. data/lib/braintrust/trace.rb +41 -51
  43. data/lib/braintrust/version.rb +1 -1
  44. data/lib/braintrust.rb +10 -1
  45. metadata +41 -7
  46. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  47. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  48. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
  49. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  50. data/lib/braintrust/trace/tokens.rb +0 -109
data/lib/braintrust/contrib/openai/instrumentation/common.rb
@@ -0,0 +1,134 @@
+ # frozen_string_literal: true
+
+ module Braintrust
+   module Contrib
+     module OpenAI
+       module Instrumentation
+         # Aggregation utilities for official OpenAI SDK instrumentation.
+         # These are specific to the official openai gem's data structures (symbol keys, SDK objects).
+         module Common
+           # Aggregate streaming chunks into a single response structure.
+           # Specific to the official OpenAI SDK, which uses symbol keys and SDK objects.
+           # @param chunks [Array<Hash>] array of chunk hashes from the stream (symbol keys)
+           # @return [Hash] aggregated response with choices, usage, etc. (symbol keys)
+           def self.aggregate_streaming_chunks(chunks)
+             return {} if chunks.empty?
+
+             # Initialize aggregated structure
+             aggregated = {
+               id: nil,
+               created: nil,
+               model: nil,
+               system_fingerprint: nil,
+               choices: [],
+               usage: nil
+             }
+
+             # Track aggregated content and tool_calls for each choice index
+             choice_data = {}
+
+             chunks.each do |chunk|
+               # Capture top-level fields from any chunk that has them
+               aggregated[:id] ||= chunk[:id]
+               aggregated[:created] ||= chunk[:created]
+               aggregated[:model] ||= chunk[:model]
+               aggregated[:system_fingerprint] ||= chunk[:system_fingerprint]
+
+               # Aggregate usage (usually only in the last chunk, when stream_options.include_usage is set)
+               aggregated[:usage] = chunk[:usage] if chunk[:usage]
+
+               # Process choices
+               next unless chunk[:choices].is_a?(Array)
+               chunk[:choices].each do |choice|
+                 index = choice[:index] || 0
+                 choice_data[index] ||= {
+                   index: index,
+                   role: nil,
+                   content: +"",
+                   tool_calls: [],
+                   finish_reason: nil
+                 }
+
+                 delta = choice[:delta] || {}
+
+                 # Aggregate role (set once, from the first delta that has it)
+                 choice_data[index][:role] ||= delta[:role]
+
+                 # Aggregate content
+                 choice_data[index][:content] << delta[:content] if delta[:content]
+
+                 # Aggregate tool_calls
+                 if delta[:tool_calls].is_a?(Array) && delta[:tool_calls].any?
+                   delta[:tool_calls].each do |tool_call_delta|
+                     if tool_call_delta[:id] && !tool_call_delta[:id].empty?
+                       # New tool call (dup strings to avoid mutating input)
+                       choice_data[index][:tool_calls] << {
+                         id: tool_call_delta[:id],
+                         type: tool_call_delta[:type],
+                         function: {
+                           name: +(tool_call_delta.dig(:function, :name) || ""),
+                           arguments: +(tool_call_delta.dig(:function, :arguments) || "")
+                         }
+                       }
+                     elsif choice_data[index][:tool_calls].any?
+                       # Continuation - append arguments to the last tool call
+                       last_tool_call = choice_data[index][:tool_calls].last
+                       if tool_call_delta.dig(:function, :arguments)
+                         last_tool_call[:function][:arguments] << tool_call_delta[:function][:arguments]
+                       end
+                     end
+                   end
+                 end
+
+                 # Capture finish_reason
+                 choice_data[index][:finish_reason] = choice[:finish_reason] if choice[:finish_reason]
+               end
+             end
+
+             # Build final choices array
+             aggregated[:choices] = choice_data.values.sort_by { |c| c[:index] }.map do |choice|
+               message = {
+                 role: choice[:role],
+                 content: choice[:content].empty? ? nil : choice[:content]
+               }
+
+               # Add tool_calls to the message if any
+               message[:tool_calls] = choice[:tool_calls] if choice[:tool_calls].any?
+
+               {
+                 index: choice[:index],
+                 message: message,
+                 finish_reason: choice[:finish_reason]
+               }
+             end
+
+             aggregated
+           end
+
+           # Aggregate Responses API streaming events into a single response structure.
+           # Specific to the official OpenAI SDK, which returns typed event objects.
+           # @param events [Array] array of event objects from the stream
+           # @return [Hash] aggregated response with output, usage, etc.
+           def self.aggregate_responses_events(events)
+             return {} if events.empty?
+
+             # Find the response.completed event, which carries the final response
+             completed_event = events.find { |e| e.respond_to?(:type) && e.type == :"response.completed" }
+
+             if completed_event&.respond_to?(:response)
+               response = completed_event.response
+               return {
+                 id: response.respond_to?(:id) ? response.id : nil,
+                 output: response.respond_to?(:output) ? response.output : nil,
+                 usage: response.respond_to?(:usage) ? response.usage : nil
+               }
+             end
+
+             # Fallback if no completed event found
+             {}
+           end
+         end
+       end
+     end
+   end
+ end
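As a sanity check on the aggregation logic above, here is a hand-worked example. The chunk hashes are written by hand in the symbol-keyed shape the method expects; they are illustrative, not captured from a real API stream:

chunks = [
  {id: "chatcmpl-1", model: "gpt-4o-mini",
   choices: [{index: 0, delta: {role: "assistant", content: "Hel"}}]},
  {id: "chatcmpl-1",
   choices: [{index: 0, delta: {content: "lo"}, finish_reason: "stop"}],
   usage: {prompt_tokens: 5, completion_tokens: 2, total_tokens: 7}}
]

Braintrust::Contrib::OpenAI::Instrumentation::Common.aggregate_streaming_chunks(chunks)
# => {id: "chatcmpl-1", created: nil, model: "gpt-4o-mini", system_fingerprint: nil,
#     choices: [{index: 0, message: {role: "assistant", content: "Hello"},
#                finish_reason: "stop"}],
#     usage: {prompt_tokens: 5, completion_tokens: 2, total_tokens: 7}}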
data/lib/braintrust/contrib/openai/instrumentation/moderations.rb
@@ -0,0 +1,93 @@
+ # frozen_string_literal: true
+
+ require "opentelemetry/sdk"
+ require "json"
+
+ require_relative "../../../internal/time"
+ require_relative "../../support/otel"
+
+ module Braintrust
+   module Contrib
+     module OpenAI
+       module Instrumentation
+         # Moderations API instrumentation for OpenAI.
+         # Wraps create() method to create spans.
+         module Moderations
+           def self.included(base)
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           METADATA_FIELDS = %i[
+             model
+           ].freeze
+
+           module InstanceMethods
+             # Wrap non-streaming create method
+             def create(**params)
+               client = instance_variable_get(:@client)
+               tracer = Braintrust::Contrib.tracer_for(client)
+
+               tracer.in_span("openai.moderations.create") do |span|
+                 metadata = build_metadata(params)
+
+                 set_input(span, params)
+
+                 response = nil
+                 time_to_first_token = Braintrust::Internal::Time.measure do
+                   response = super
+                 end
+
+                 set_output(span, response)
+                 set_metrics(span, time_to_first_token)
+                 finalize_metadata(span, metadata, response)
+
+                 response
+               end
+             end
+
+             private
+
+             def build_metadata(params)
+               metadata = {
+                 "provider" => "openai",
+                 "endpoint" => "/v1/moderations"
+               }
+               Moderations::METADATA_FIELDS.each do |field|
+                 metadata[field.to_s] = params[field] if params.key?(field)
+               end
+               metadata
+             end
+
+             def set_input(span, params)
+               return unless params[:input]
+
+               Support::OTel.set_json_attr(span, "braintrust.input_json", params[:input])
+             end
+
+             def set_output(span, response)
+               return unless response.respond_to?(:results) && response.results
+
+               Support::OTel.set_json_attr(span, "braintrust.output_json", response.results)
+             end
+
+             def set_metrics(span, time_to_first_token)
+               metrics = {}
+               metrics["time_to_first_token"] = time_to_first_token
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+
+             def finalize_metadata(span, metadata, response)
+               metadata["id"] = response.id if response.respond_to?(:id) && response.id
+               metadata["model"] = response.model if response.respond_to?(:model) && response.model
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
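For orientation, a rough usage sketch. This is hypothetical: it assumes the ModerationsPatcher defined later in this diff has already been applied and an OpenTelemetry tracer provider is configured.

client = OpenAI::Client.new(api_key: ENV["OPENAI_API_KEY"])
client.moderations.create(model: "omni-moderation-latest", input: "some text")
# The wrapped create() emits one "openai.moderations.create" span carrying:
#   braintrust.input_json  - params[:input], JSON-encoded
#   braintrust.output_json - response.results, JSON-encoded
#   braintrust.metrics     - {"time_to_first_token" => <elapsed time>}
#   braintrust.metadata    - provider, endpoint, model, plus the response id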
data/lib/braintrust/contrib/openai/instrumentation/responses.rb
@@ -0,0 +1,187 @@
+ # frozen_string_literal: true
+
+ require "opentelemetry/sdk"
+ require "json"
+
+ require_relative "common"
+ require_relative "../../../internal/time"
+ require_relative "../../support/otel"
+ require_relative "../../support/openai"
+
+ module Braintrust
+   module Contrib
+     module OpenAI
+       module Instrumentation
+         # Responses API instrumentation for OpenAI.
+         # Wraps create() and stream() methods to create spans.
+         module Responses
+           def self.included(base)
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           METADATA_FIELDS = %i[
+             model instructions modalities tools parallel_tool_calls
+             tool_choice temperature max_tokens top_p frequency_penalty
+             presence_penalty seed user metadata store response_format
+             reasoning previous_response_id truncation
+           ].freeze
+
+           module InstanceMethods
+             # Wrap non-streaming create method
+             def create(**params)
+               client = instance_variable_get(:@client)
+               tracer = Braintrust::Contrib.tracer_for(client)
+
+               tracer.in_span("openai.responses.create") do |span|
+                 metadata = build_metadata(params)
+
+                 set_input(span, params)
+
+                 response = nil
+                 time_to_first_token = Braintrust::Internal::Time.measure do
+                   response = super
+                 end
+
+                 set_output(span, response)
+                 set_metrics(span, response, time_to_first_token)
+                 finalize_metadata(span, metadata, response)
+
+                 response
+               end
+             end
+
+             # Wrap streaming method
+             # Stores context on stream object for span creation during consumption
+             def stream(**params)
+               client = instance_variable_get(:@client)
+               tracer = Braintrust::Contrib.tracer_for(client)
+               metadata = build_metadata(params, stream: true)
+
+               stream_obj = super
+
+               Braintrust::Contrib::Context.set!(stream_obj,
+                 tracer: tracer,
+                 params: params,
+                 metadata: metadata,
+                 responses_instance: self)
+               stream_obj
+             end
+
+             private
+
+             def build_metadata(params, stream: false)
+               metadata = {
+                 "provider" => "openai",
+                 "endpoint" => "/v1/responses"
+               }
+               metadata["stream"] = true if stream
+               Responses::METADATA_FIELDS.each do |field|
+                 metadata[field.to_s] = params[field] if params.key?(field)
+               end
+               metadata
+             end
+
+             def set_input(span, params)
+               return unless params[:input]
+
+               Support::OTel.set_json_attr(span, "braintrust.input_json", params[:input])
+             end
+
+             def set_output(span, response)
+               return unless response.respond_to?(:output) && response.output
+
+               Support::OTel.set_json_attr(span, "braintrust.output_json", response.output)
+             end
+
+             def set_metrics(span, response, time_to_first_token)
+               metrics = {}
+               if response.respond_to?(:usage) && response.usage
+                 metrics = Support::OpenAI.parse_usage_tokens(response.usage)
+               end
+               metrics["time_to_first_token"] = time_to_first_token
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+
+             def finalize_metadata(span, metadata, response)
+               metadata["id"] = response.id if response.respond_to?(:id) && response.id
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+           end
+         end
+
+         # Instrumentation for ResponseStream (returned by stream())
+         # Aggregates events and creates span lazily when consumed
+         module ResponseStream
+           def self.included(base)
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           module InstanceMethods
+             def each(&block)
+               ctx = Braintrust::Contrib::Context.from(self)
+               return super unless ctx&.[](:tracer) && !ctx[:consumed]
+
+               ctx[:consumed] = true
+
+               tracer = ctx[:tracer]
+               params = ctx[:params]
+               metadata = ctx[:metadata]
+               responses_instance = ctx[:responses_instance]
+               aggregated_events = []
+               start_time = Braintrust::Internal::Time.measure
+               time_to_first_token = nil
+
+               tracer.in_span("openai.responses.create") do |span|
+                 responses_instance.send(:set_input, span, params)
+                 Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+
+                 begin
+                   super do |event|
+                     time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
+                     aggregated_events << event
+                     block&.call(event)
+                   end
+                 rescue => e
+                   span.record_exception(e)
+                   span.status = ::OpenTelemetry::Trace::Status.error("Streaming error: #{e.message}")
+                   raise
+                 end
+
+                 finalize_stream_span(span, aggregated_events, time_to_first_token, metadata)
+               end
+             end
+
+             private
+
+             def finalize_stream_span(span, aggregated_events, time_to_first_token, metadata)
+               return if aggregated_events.empty?
+
+               aggregated_output = Common.aggregate_responses_events(aggregated_events)
+               Support::OTel.set_json_attr(span, "braintrust.output_json", aggregated_output[:output]) if aggregated_output[:output]
+
+               # Set metrics
+               metrics = {}
+               if aggregated_output[:usage]
+                 metrics = Support::OpenAI.parse_usage_tokens(aggregated_output[:usage])
+               end
+               metrics["time_to_first_token"] = time_to_first_token
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+
+               # Update metadata with response fields
+               metadata["id"] = aggregated_output[:id] if aggregated_output[:id]
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
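The stream()/each() split above is a deferred-span pattern: stream() only stashes context on the stream object, and the span is opened when the consumer first iterates, so it brackets the actual streaming time. A minimal standalone sketch of the same idea, with illustrative names rather than the gem's API:

require "opentelemetry/sdk"

# Sketch: the producer attaches tracing context to the stream; the span
# wraps consumption rather than creation. A consumed flag prevents
# opening a second span if each is called again.
class LazySpanStream
  def initialize(events, tracer)
    @events = events   # stand-in for the underlying SSE stream
    @tracer = tracer
    @consumed = false
  end

  def each(&block)
    return @events.each(&block) if @consumed
    @consumed = true

    start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    @tracer.in_span("stream.consume") do |span|
      ttft = nil
      @events.each do |event|
        ttft ||= Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
        block.call(event)
      end
      span.set_attribute("time_to_first_token", ttft) if ttft
    end
  end
end

Here tracer is any OpenTelemetry tracer, e.g. OpenTelemetry.tracer_provider.tracer("demo").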
data/lib/braintrust/contrib/openai/integration.rb
@@ -0,0 +1,58 @@
+ # frozen_string_literal: true
+
+ require_relative "../integration"
+ require_relative "deprecated"
+
+ module Braintrust
+   module Contrib
+     module OpenAI
+       # OpenAI integration for automatic instrumentation.
+       # Instruments the official openai gem (not ruby-openai).
+       class Integration
+         include Braintrust::Contrib::Integration
+
+         MINIMUM_VERSION = "0.1.0"
+
+         GEM_NAMES = ["openai"].freeze
+         REQUIRE_PATHS = ["openai"].freeze
+
+         # @return [Symbol] Unique identifier for this integration
+         def self.integration_name
+           :openai
+         end
+
+         # @return [Array<String>] Gem names this integration supports
+         def self.gem_names
+           GEM_NAMES
+         end
+
+         # @return [Array<String>] Require paths for auto-instrument detection
+         def self.require_paths
+           REQUIRE_PATHS
+         end
+
+         # @return [String] Minimum compatible version
+         def self.minimum_version
+           MINIMUM_VERSION
+         end
+
+         # @return [Boolean] true if the official openai gem is available
+         def self.loaded?
+           # Check if the official openai gem is loaded (not ruby-openai).
+           # The ruby-openai gem also uses "require 'openai'", so we need to distinguish them.
+
+           # This module is defined ONLY in the official OpenAI gem
+           defined?(::OpenAI::Internal) ? true : false
+         end
+
+         # Lazy-load the patchers only when actually patching.
+         # This keeps the integration stub lightweight.
+         # @return [Array<Class>] The patcher classes
+         def self.patchers
+           require_relative "patcher"
+           [ChatPatcher, ResponsesPatcher, ModerationsPatcher]
+         end
+       end
+     end
+   end
+ end
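The loaded? check hinges on a namespace collision: both the official gem and ruby-openai are loaded via require "openai", so a constant that only one of them defines is the cheapest discriminator. Roughly (a heuristic, not an official contract of either gem):

defined?(::OpenAI::Internal)
# => truthy only under the official openai gem
defined?(::OpenAI::Client) && !defined?(::OpenAI::Internal)
# => suggests ruby-openai, which defines OpenAI::Client but not OpenAI::Internal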
data/lib/braintrust/contrib/openai/patcher.rb
@@ -0,0 +1,173 @@
+ # frozen_string_literal: true
+
+ require_relative "../patcher"
+ require_relative "instrumentation/chat"
+ require_relative "instrumentation/responses"
+ require_relative "instrumentation/moderations"
+
+ module Braintrust
+   module Contrib
+     module OpenAI
+       # Patcher for the OpenAI Chat Completions API - implements class-level patching.
+       # All new OpenAI::Client instances created after patch! will be automatically instrumented.
+       class ChatPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::OpenAI::Client)
+           end
+
+           def patched?(**options)
+             # Use the target's singleton class if provided, otherwise check the base class.
+             target_class = get_singleton_class(options[:target]) || ::OpenAI::Resources::Chat::Completions
+
+             Instrumentation::Chat::Completions.applied?(target_class)
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             # Stream classes are shared across all clients, patch at class level.
+             # The instrumentation short-circuits when no context is present,
+             # so uninstrumented clients' streams pass through unaffected.
+             patch_stream_classes
+
+             if options[:target]
+               # Instance-level (for only this client)
+               raise ArgumentError, "target must be a kind of ::OpenAI::Client" unless options[:target].is_a?(::OpenAI::Client)
+
+               get_singleton_class(options[:target]).include(Instrumentation::Chat::Completions)
+             else
+               # Class-level (for all clients)
+               ::OpenAI::Resources::Chat::Completions.include(Instrumentation::Chat::Completions)
+             end
+           end
+
+           def patch_stream_classes
+             # Patch ChatCompletionStream for stream() method
+             if defined?(::OpenAI::Helpers::Streaming::ChatCompletionStream)
+               unless Instrumentation::Chat::ChatCompletionStream.applied?(::OpenAI::Helpers::Streaming::ChatCompletionStream)
+                 ::OpenAI::Helpers::Streaming::ChatCompletionStream.include(Instrumentation::Chat::ChatCompletionStream)
+               end
+             end
+
+             # Patch Internal::Stream for stream_raw() method
+             if defined?(::OpenAI::Internal::Stream)
+               unless Instrumentation::Chat::InternalStream.applied?(::OpenAI::Internal::Stream)
+                 ::OpenAI::Internal::Stream.include(Instrumentation::Chat::InternalStream)
+               end
+             end
+           end
+
+           private
+
+           def get_singleton_class(client)
+             client&.chat&.completions&.singleton_class
+           end
+         end
+       end
+
+       # Patcher for the OpenAI Responses API - implements class-level patching.
+       # All new OpenAI::Client instances created after patch! will be automatically instrumented.
+       class ResponsesPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::OpenAI::Client) && ::OpenAI::Client.instance_methods.include?(:responses)
+           end
+
+           def patched?(**options)
+             # Use the target's singleton class if provided, otherwise check the base class.
+             target_class = get_singleton_class(options[:target]) || ::OpenAI::Resources::Responses
+
+             Instrumentation::Responses.applied?(target_class)
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             # Stream class is shared across all clients, patch at class level.
+             # The instrumentation short-circuits when no context is present,
+             # so uninstrumented clients' streams pass through unaffected.
+             patch_response_stream
+
+             if options[:target]
+               # Instance-level (for only this client)
+               raise ArgumentError, "target must be a kind of ::OpenAI::Client" unless options[:target].is_a?(::OpenAI::Client)
+
+               get_singleton_class(options[:target]).include(Instrumentation::Responses)
+             else
+               # Class-level (for all clients)
+               ::OpenAI::Resources::Responses.include(Instrumentation::Responses)
+             end
+           end
+
+           def patch_response_stream
+             # Patch ResponseStream for stream() method
+             if defined?(::OpenAI::Helpers::Streaming::ResponseStream)
+               unless Instrumentation::ResponseStream.applied?(::OpenAI::Helpers::Streaming::ResponseStream)
+                 ::OpenAI::Helpers::Streaming::ResponseStream.include(Instrumentation::ResponseStream)
+               end
+             end
+           end
+
+           private
+
+           def get_singleton_class(client)
+             client&.responses&.singleton_class
+           end
+         end
+       end
+
+       # Patcher for the OpenAI Moderations API - implements class-level patching.
+       # All new OpenAI::Client instances created after patch! will be automatically instrumented.
+       class ModerationsPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::OpenAI::Client) && ::OpenAI::Client.instance_methods.include?(:moderations)
+           end
+
+           def patched?(**options)
+             # Use the target's singleton class if provided, otherwise check the base class.
+             target_class = get_singleton_class(options[:target]) || ::OpenAI::Resources::Moderations
+
+             Instrumentation::Moderations.applied?(target_class)
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             if options[:target]
+               # Instance-level (for only this client)
+               raise ArgumentError, "target must be a kind of ::OpenAI::Client" unless options[:target].is_a?(::OpenAI::Client)
+
+               get_singleton_class(options[:target]).include(Instrumentation::Moderations)
+             else
+               # Class-level (for all clients)
+               ::OpenAI::Resources::Moderations.include(Instrumentation::Moderations)
+             end
+           end
+
+           private
+
+           def get_singleton_class(client)
+             client&.moderations&.singleton_class
+           end
+         end
+       end
+     end
+   end
+ end
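The instance-vs-class branch in each perform_patch above leans on a standard Ruby mechanism: mixing a module into one object's singleton class affects only that object, while mixing it into the class itself affects every instance (the instrumentation modules arrange the actual prepend via their included hooks). A self-contained illustration with toy classes, not the gem's API:

module Shout
  def greet
    super.upcase
  end
end

class Greeter
  def greet
    "hello"
  end
end

one = Greeter.new
one.singleton_class.prepend(Shout)  # instance-level, like patching one client
one.greet                           # => "HELLO"
Greeter.new.greet                   # => "hello" (other instances untouched)

Greeter.prepend(Shout)              # class-level, like patching all clients
Greeter.new.greet                   # => "HELLO"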