braintrust 0.0.12 → 0.1.0

Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +213 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  7. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  8. data/lib/braintrust/contrib/anthropic/patcher.rb +62 -0
  9. data/lib/braintrust/contrib/context.rb +56 -0
  10. data/lib/braintrust/contrib/integration.rb +160 -0
  11. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  12. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  15. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  16. data/lib/braintrust/contrib/openai/patcher.rb +130 -0
  17. data/lib/braintrust/contrib/patcher.rb +76 -0
  18. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  19. data/lib/braintrust/contrib/registry.rb +107 -0
  20. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  21. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  22. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  23. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  24. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  25. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  26. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  27. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  29. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  30. data/lib/braintrust/contrib/ruby_openai/patcher.rb +85 -0
  31. data/lib/braintrust/contrib/setup.rb +168 -0
  32. data/lib/braintrust/contrib/support/openai.rb +72 -0
  33. data/lib/braintrust/contrib/support/otel.rb +23 -0
  34. data/lib/braintrust/contrib.rb +205 -0
  35. data/lib/braintrust/internal/env.rb +33 -0
  36. data/lib/braintrust/internal/time.rb +44 -0
  37. data/lib/braintrust/setup.rb +50 -0
  38. data/lib/braintrust/state.rb +5 -0
  39. data/lib/braintrust/trace.rb +0 -51
  40. data/lib/braintrust/version.rb +1 -1
  41. data/lib/braintrust.rb +10 -1
  42. metadata +38 -7
  43. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  44. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  45. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
  46. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  47. data/lib/braintrust/trace/tokens.rb +0 -109

data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb
@@ -0,0 +1,149 @@
+ # frozen_string_literal: true
+
+ require "opentelemetry/sdk"
+ require "json"
+
+ require_relative "common"
+ require_relative "../../support/otel"
+ require_relative "../../support/openai"
+ require_relative "../../../internal/time"
+
+ module Braintrust
+   module Contrib
+     module RubyOpenAI
+       module Instrumentation
+         # Chat completions instrumentation for ruby-openai.
+         # Provides module that can be prepended to OpenAI::Client to instrument the chat method.
+         module Chat
+           def self.included(base)
+             # Guard against double-wrapping: Check if patch is already in the ancestor chain.
+             # This prevents double instrumentation if class-level patching was already applied,
+             # and this patch is being applied to a singleton-class. (Special case.)
+             #
+             # Ruby's prepend() doesn't check the full inheritance chain, so without this guard,
+             # the instrumentation could be added twice.
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           METADATA_FIELDS = %i[
+             model frequency_penalty logit_bias logprobs max_tokens n
+             presence_penalty response_format seed service_tier stop
+             stream stream_options temperature top_p top_logprobs
+             tools tool_choice parallel_tool_calls user functions function_call
+           ].freeze
+
+           module InstanceMethods
+             # Wrap chat method for ruby-openai gem
+             # ruby-openai API: client.chat(parameters: {...})
+             def chat(parameters:)
+               tracer = Braintrust::Contrib.tracer_for(self)
+
+               tracer.in_span("Chat Completion") do |span|
+                 is_streaming = streaming?(parameters)
+                 metadata = build_metadata(parameters)
+                 set_input(span, parameters)
+
+                 aggregated_chunks = []
+                 time_to_first_token = nil
+                 response = nil
+                 response_data = {}
+
+                 if is_streaming
+                   # Setup a time measurement for the first chunk from the stream
+                   start_time = nil
+                   parameters = wrap_stream_callback(parameters, aggregated_chunks) do
+                     time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
+                   end
+                   start_time = Braintrust::Internal::Time.measure
+
+                   # Then initiate the stream
+                   response = super(parameters: parameters)
+
+                   if !aggregated_chunks.empty?
+                     response_data = Common.aggregate_streaming_chunks(aggregated_chunks)
+                   end
+                 else
+                   # Make a time measurement synchronously around the API call
+                   time_to_first_token = Braintrust::Internal::Time.measure do
+                     response = super(parameters: parameters)
+                     response_data = response if response
+                   end
+                 end
+
+                 set_output(span, response_data)
+                 set_metrics(span, response_data, time_to_first_token)
+                 finalize_metadata(span, metadata, response_data)
+
+                 response
+               end
+             end
+
+             private
+
+             def streaming?(parameters)
+               parameters.key?(:stream) && parameters[:stream].is_a?(Proc)
+             end
+
+             def wrap_stream_callback(parameters, aggregated_chunks)
+               original_stream_proc = parameters[:stream]
+               parameters = parameters.dup
+
+               parameters[:stream] = proc do |chunk, bytesize|
+                 yield if aggregated_chunks.empty?
+                 aggregated_chunks << chunk
+                 original_stream_proc.call(chunk, bytesize)
+               end
+
+               parameters
+             end
+
+             def build_metadata(parameters)
+               metadata = {
+                 "provider" => "openai",
+                 "endpoint" => "/v1/chat/completions"
+               }
+
+               Chat::METADATA_FIELDS.each do |field|
+                 next unless parameters.key?(field)
+                 # Stream param is a Proc - just mark as true
+                 metadata[field.to_s] = (field == :stream) ? true : parameters[field]
+               end
+
+               metadata
+             end
+
+             def set_input(span, parameters)
+               return unless parameters[:messages]
+               Support::OTel.set_json_attr(span, "braintrust.input_json", parameters[:messages])
+             end
+
+             def set_output(span, response_data)
+               choices = response_data[:choices] || response_data["choices"]
+               return unless choices&.any?
+               Support::OTel.set_json_attr(span, "braintrust.output_json", choices)
+             end
+
+             def set_metrics(span, response_data, time_to_first_token)
+               usage = response_data[:usage] || response_data["usage"]
+               metrics = usage ? Support::OpenAI.parse_usage_tokens(usage) : {}
+               metrics["time_to_first_token"] = time_to_first_token || 0.0
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+
+             def finalize_metadata(span, metadata, response_data)
+               %w[id created model system_fingerprint service_tier].each do |field|
+                 value = response_data[field.to_sym] || response_data[field]
+                 metadata[field] = value if value
+               end
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
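
For orientation, here is a hypothetical caller-side view of what this wrapper intercepts. The `client.chat(parameters: {...})` shape and the `(chunk, bytesize)` stream-proc signature come from the ruby-openai gem as referenced in the code above; the model name, messages, and API-key handling are illustrative assumptions only.

require "openai"  # alexrudall/ruby-openai

client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

client.chat(
  parameters: {
    model: "gpt-4o-mini",                           # illustrative model name
    messages: [{ role: "user", content: "Hello" }],
    # ruby-openai streams via a Proc. The instrumentation dups `parameters`,
    # wraps this proc so it can record time-to-first-token and collect every
    # chunk for aggregation, then forwards each (chunk, bytesize) pair here.
    stream: proc { |chunk, _bytesize| print chunk.dig("choices", 0, "delta", "content") }
  }
)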

data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb
@@ -0,0 +1,138 @@
+ # frozen_string_literal: true
+
+ module Braintrust
+   module Contrib
+     module RubyOpenAI
+       module Instrumentation
+         # Aggregation utilities for ruby-openai gem instrumentation.
+         # These are specific to the ruby-openai gem's data structures (string keys, plain hashes).
+         module Common
+           # Aggregate streaming chunks into a single response structure.
+           # Specific to ruby-openai gem which uses string keys and plain hashes.
+           # @param chunks [Array<Hash>] array of chunk hashes from stream (string keys)
+           # @return [Hash] aggregated response with choices, usage, etc. (string keys)
+           def self.aggregate_streaming_chunks(chunks)
+             return {} if chunks.empty?
+
+             # Initialize aggregated structure
+             aggregated = {
+               "id" => nil,
+               "created" => nil,
+               "model" => nil,
+               "system_fingerprint" => nil,
+               "usage" => nil,
+               "choices" => []
+             }
+
+             # Track aggregated content and tool_calls for each choice index
+             choice_data = {}
+
+             chunks.each do |chunk|
+               # Capture top-level fields from any chunk that has them
+               aggregated["id"] ||= chunk["id"]
+               aggregated["created"] ||= chunk["created"]
+               aggregated["model"] ||= chunk["model"]
+               aggregated["system_fingerprint"] ||= chunk["system_fingerprint"]
+
+               # Aggregate usage (usually only in last chunk if stream_options.include_usage is set)
+               aggregated["usage"] = chunk["usage"] if chunk["usage"]
+
+               # Process choices
+               choices = chunk["choices"]
+               next unless choices.is_a?(Array)
+
+               choices.each do |choice|
+                 index = choice["index"] || 0
+                 choice_data[index] ||= {
+                   "index" => index,
+                   "role" => nil,
+                   "content" => +"",
+                   "tool_calls" => [],
+                   "finish_reason" => nil
+                 }
+
+                 delta = choice["delta"] || {}
+
+                 # Aggregate role (set once from first delta that has it)
+                 choice_data[index]["role"] ||= delta["role"]
+
+                 # Aggregate content
+                 choice_data[index]["content"] << delta["content"] if delta["content"]
+
+                 # Aggregate tool_calls
+                 tool_calls = delta["tool_calls"]
+                 if tool_calls.is_a?(Array) && tool_calls.any?
+                   tool_calls.each do |tool_call_delta|
+                     tc_id = tool_call_delta["id"]
+                     if tc_id && !tc_id.empty?
+                       # New tool call
+                       choice_data[index]["tool_calls"] << {
+                         "id" => tc_id,
+                         "type" => tool_call_delta["type"],
+                         "function" => {
+                           "name" => +(tool_call_delta.dig("function", "name") || ""),
+                           "arguments" => +(tool_call_delta.dig("function", "arguments") || "")
+                         }
+                       }
+                     elsif choice_data[index]["tool_calls"].any?
+                       # Continuation - append arguments to last tool call
+                       last_tool_call = choice_data[index]["tool_calls"].last
+                       if tool_call_delta.dig("function", "arguments")
+                         last_tool_call["function"]["arguments"] << tool_call_delta["function"]["arguments"]
+                       end
+                     end
+                   end
+                 end
+
+                 # Capture finish_reason
+                 choice_data[index]["finish_reason"] = choice["finish_reason"] if choice["finish_reason"]
+               end
+             end
+
+             # Build final choices array
+             aggregated["choices"] = choice_data.values.sort_by { |c| c["index"] }.map do |choice|
+               message = {
+                 "role" => choice["role"],
+                 "content" => choice["content"].empty? ? nil : choice["content"]
+               }
+
+               # Add tool_calls to message if any
+               message["tool_calls"] = choice["tool_calls"] if choice["tool_calls"].any?
+
+               {
+                 "index" => choice["index"],
+                 "message" => message,
+                 "finish_reason" => choice["finish_reason"]
+               }
+             end
+
+             aggregated
+           end
+
+           # Aggregate responses streaming chunks into a single response structure.
+           # Specific to ruby-openai gem which uses string keys and plain hashes.
+           # @param chunks [Array<Hash>] array of chunk hashes from stream (string keys)
+           # @return [Hash] aggregated response with output, usage, id (string keys)
+           def self.aggregate_responses_chunks(chunks)
+             return {} if chunks.empty?
+
+             # Find the response.completed event which has the final response
+             completed_chunk = chunks.find { |c| c["type"] == "response.completed" }
+
+             if completed_chunk && completed_chunk["response"]
+               response = completed_chunk["response"]
+               return {
+                 "id" => response["id"],
+                 "output" => response["output"],
+                 "usage" => response["usage"]
+               }
+             end
+
+             # Fallback if no completed event found
+             {}
+           end
+         end
+       end
+     end
+   end
+ end
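
A quick sketch of what `aggregate_streaming_chunks` produces. The chunk hashes below are hand-written samples in the OpenAI streaming shape (string keys, as ruby-openai delivers them), not captured output; the result follows directly from the code above.

chunks = [
  { "id" => "chatcmpl-1", "model" => "gpt-4o-mini",
    "choices" => [{ "index" => 0, "delta" => { "role" => "assistant", "content" => "Hel" } }] },
  { "id" => "chatcmpl-1",
    "choices" => [{ "index" => 0, "delta" => { "content" => "lo" }, "finish_reason" => "stop" }] },
  { "id" => "chatcmpl-1", "choices" => [],
    "usage" => { "prompt_tokens" => 5, "completion_tokens" => 2, "total_tokens" => 7 } }
]

Braintrust::Contrib::RubyOpenAI::Instrumentation::Common.aggregate_streaming_chunks(chunks)
# => { "id" => "chatcmpl-1", "created" => nil, "model" => "gpt-4o-mini",
#      "system_fingerprint" => nil,
#      "usage" => { "prompt_tokens" => 5, "completion_tokens" => 2, "total_tokens" => 7 },
#      "choices" => [{ "index" => 0,
#                      "message" => { "role" => "assistant", "content" => "Hello" },
#                      "finish_reason" => "stop" }] }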

data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb
@@ -0,0 +1,146 @@
+ # frozen_string_literal: true
+
+ require "opentelemetry/sdk"
+ require "json"
+
+ require_relative "common"
+ require_relative "../../support/otel"
+ require_relative "../../support/openai"
+ require_relative "../../../internal/time"
+
+ module Braintrust
+   module Contrib
+     module RubyOpenAI
+       module Instrumentation
+         # Responses API instrumentation for ruby-openai.
+         # Provides module that can be prepended to OpenAI::Responses to instrument the create method.
+         module Responses
+           def self.included(base)
+             # Guard against double-wrapping: Check if patch is already in the ancestor chain.
+             # This prevents double instrumentation if class-level patching was already applied,
+             # and this patch is being applied to a singleton-class. (Special case.)
+             #
+             # Ruby's prepend() doesn't check the full inheritance chain, so without this guard,
+             # the instrumentation could be added twice.
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           METADATA_FIELDS = %i[
+             model instructions modalities tools parallel_tool_calls
+             tool_choice temperature max_tokens top_p frequency_penalty
+             presence_penalty seed user metadata store response_format
+             reasoning previous_response_id truncation
+           ].freeze
+
+           module InstanceMethods
+             # Wrap create method for ruby-openai responses API
+             # ruby-openai API: client.responses.create(parameters: {...})
+             def create(parameters:)
+               client = instance_variable_get(:@client)
+               tracer = Braintrust::Contrib.tracer_for(client)
+
+               tracer.in_span("openai.responses.create") do |span|
+                 is_streaming = streaming?(parameters)
+                 metadata = build_metadata(parameters)
+                 set_input(span, parameters)
+
+                 aggregated_chunks = []
+                 time_to_first_token = nil
+                 response = nil
+                 response_data = {}
+
+                 if is_streaming
+                   # Setup a time measurement for the first chunk from the stream
+                   start_time = nil
+                   parameters = wrap_stream_callback(parameters, aggregated_chunks) do
+                     time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
+                   end
+                   start_time = Braintrust::Internal::Time.measure
+
+                   # Then initiate the stream
+                   response = super(parameters: parameters)
+
+                   if !aggregated_chunks.empty?
+                     response_data = Common.aggregate_responses_chunks(aggregated_chunks)
+                   end
+                 else
+                   # Make a time measurement synchronously around the API call
+                   time_to_first_token = Braintrust::Internal::Time.measure do
+                     response = super(parameters: parameters)
+                     response_data = response if response
+                   end
+                 end
+
+                 set_output(span, response_data)
+                 set_metrics(span, response_data, time_to_first_token)
+                 finalize_metadata(span, metadata, response_data)
+
+                 response
+               end
+             end
+
+             private
+
+             def streaming?(parameters)
+               parameters.key?(:stream) && parameters[:stream].is_a?(Proc)
+             end
+
+             def wrap_stream_callback(parameters, aggregated_chunks)
+               original_stream_proc = parameters[:stream]
+               parameters = parameters.dup
+
+               parameters[:stream] = proc do |chunk, event|
+                 yield if aggregated_chunks.empty?
+                 aggregated_chunks << chunk
+                 original_stream_proc.call(chunk, event)
+               end
+
+               parameters
+             end
+
+             def build_metadata(parameters)
+               metadata = {
+                 "provider" => "openai",
+                 "endpoint" => "/v1/responses"
+               }
+
+               Responses::METADATA_FIELDS.each do |field|
+                 metadata[field.to_s] = parameters[field] if parameters.key?(field)
+               end
+
+               metadata["stream"] = true if streaming?(parameters)
+               metadata
+             end
+
+             def set_input(span, parameters)
+               return unless parameters[:input]
+               Support::OTel.set_json_attr(span, "braintrust.input_json", parameters[:input])
+             end
+
+             def set_output(span, response_data)
+               output = response_data["output"]
+               return unless output
+               Support::OTel.set_json_attr(span, "braintrust.output_json", output)
+             end
+
+             def set_metrics(span, response_data, time_to_first_token)
+               usage = response_data["usage"]
+               metrics = usage ? Support::OpenAI.parse_usage_tokens(usage) : {}
+               metrics["time_to_first_token"] = time_to_first_token || 0.0
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+
+             def finalize_metadata(span, metadata, response_data)
+               metadata["id"] = response_data["id"] if response_data["id"]
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
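
As with chat, a hypothetical caller-side sketch: the `client.responses.create(parameters: {...})` shape and the `(chunk, event)` proc signature match the code above, while the model and input values are illustrative assumptions.

client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

client.responses.create(
  parameters: {
    model: "gpt-4o-mini",                                 # illustrative
    input: "Summarize this changelog in one sentence.",
    # The wrapper times the first event, collects every chunk, and pulls the
    # final id/output/usage out of the "response.completed" event via
    # Common.aggregate_responses_chunks before forwarding (chunk, event) here.
    stream: proc { |chunk, _event| puts chunk["type"] }
  }
)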

data/lib/braintrust/contrib/ruby_openai/integration.rb
@@ -0,0 +1,58 @@
+ # frozen_string_literal: true
+
+ require_relative "../integration"
+ require_relative "deprecated"
+
+ module Braintrust
+   module Contrib
+     module RubyOpenAI
+       # RubyOpenAI integration for automatic instrumentation.
+       # Instruments the alexrudall ruby-openai gem (not the official openai gem).
+       class Integration
+         include Braintrust::Contrib::Integration
+
+         MINIMUM_VERSION = "7.0.0"
+
+         GEM_NAMES = ["ruby-openai"].freeze
+         REQUIRE_PATHS = ["openai"].freeze
+
+         # @return [Symbol] Unique identifier for this integration
+         def self.integration_name
+           :ruby_openai
+         end
+
+         # @return [Array<String>] Gem names this integration supports
+         def self.gem_names
+           GEM_NAMES
+         end
+
+         # @return [Array<String>] Require paths for auto-instrument detection
+         def self.require_paths
+           REQUIRE_PATHS
+         end
+
+         # @return [String] Minimum compatible version
+         def self.minimum_version
+           MINIMUM_VERSION
+         end
+
+         # @return [Boolean] true if ruby-openai gem is available (not official openai gem)
+         def self.loaded?
+           # Check if ruby-openai gem is loaded (not the official openai gem).
+           # Both gems use "require 'openai'", so we need to distinguish them.
+           #
+           # OpenAI::Internal is defined ONLY in the official OpenAI gem
+           (defined?(::OpenAI::Client) && !defined?(::OpenAI::Internal)) ? true : false
+         end
+
+         # Lazy-load the patcher only when actually patching.
+         # This keeps the integration stub lightweight.
+         # @return [Array<Class>] The patcher classes
+         def self.patchers
+           require_relative "patcher"
+           [ChatPatcher, ResponsesPatcher]
+         end
+       end
+     end
+   end
+ end
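
A minimal sketch of the disambiguation that `loaded?` performs, assuming the alexrudall ruby-openai gem (rather than the official openai gem) has been required:

require "openai"  # both gems claim this require path; see REQUIRE_PATHS above

# ruby-openai defines OpenAI::Client but not the official SDK's
# OpenAI::Internal namespace, so the integration reports itself as loaded.
Braintrust::Contrib::RubyOpenAI::Integration.loaded?          # => true
Braintrust::Contrib::RubyOpenAI::Integration.integration_name # => :ruby_openai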

data/lib/braintrust/contrib/ruby_openai/patcher.rb
@@ -0,0 +1,85 @@
+ # frozen_string_literal: true
+
+ require_relative "../patcher"
+ require_relative "instrumentation/chat"
+ require_relative "instrumentation/responses"
+
+ module Braintrust
+   module Contrib
+     module RubyOpenAI
+       # Patcher for ruby-openai chat completions.
+       # Instruments OpenAI::Client#chat method.
+       class ChatPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::OpenAI::Client)
+           end
+
+           def patched?(**options)
+             target_class = options[:target]&.singleton_class || ::OpenAI::Client
+             Instrumentation::Chat.applied?(target_class)
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             if options[:target]
+               # Instance-level (for only this client)
+               raise ArgumentError, "target must be a kind of ::OpenAI::Client" unless options[:target].is_a?(::OpenAI::Client)
+
+               options[:target].singleton_class.include(Instrumentation::Chat)
+             else
+               # Class-level (for all clients)
+               ::OpenAI::Client.include(Instrumentation::Chat)
+             end
+           end
+         end
+       end
+
+       # Patcher for ruby-openai responses API.
+       # Instruments OpenAI::Responses#create method.
+       class ResponsesPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::OpenAI::Client) && ::OpenAI::Client.method_defined?(:responses)
+           end
+
+           def patched?(**options)
+             if options[:target]
+               responses_obj = options[:target].responses
+               Instrumentation::Responses.applied?(responses_obj.singleton_class)
+             else
+               # For class-level, check if the responses class is patched
+               defined?(::OpenAI::Responses) && Instrumentation::Responses.applied?(::OpenAI::Responses)
+             end
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             if options[:target]
+               # Instance-level (for only this client)
+               raise ArgumentError, "target must be a kind of ::OpenAI::Client" unless options[:target].is_a?(::OpenAI::Client)
+
+               responses_obj = options[:target].responses
+               responses_obj.singleton_class.include(Instrumentation::Responses)
+             else
+               # Class-level (for all clients)
+               ::OpenAI::Responses.include(Instrumentation::Responses)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
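
Rough sketch of the two patching modes this file supports. In practice these patchers are presumably driven by the contrib registry/setup; calling `perform_patch` directly here is for illustration only, and the client construction is hypothetical.

client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

# Instance-level: only this client's singleton class gains the Chat wrapper.
Braintrust::Contrib::RubyOpenAI::ChatPatcher.perform_patch(target: client)
Braintrust::Contrib::RubyOpenAI::ChatPatcher.patched?(target: client)  # => true
Braintrust::Contrib::RubyOpenAI::ChatPatcher.patched?                  # => false (class not touched)

# Class-level: every OpenAI::Client instance is instrumented.
Braintrust::Contrib::RubyOpenAI::ChatPatcher.perform_patch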