braintrust 0.0.12 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/README.md +214 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/beta_messages.rb +242 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  7. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  8. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  9. data/lib/braintrust/contrib/anthropic/patcher.rb +145 -0
  10. data/lib/braintrust/contrib/context.rb +56 -0
  11. data/lib/braintrust/contrib/integration.rb +160 -0
  12. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  15. data/lib/braintrust/contrib/openai/instrumentation/moderations.rb +93 -0
  16. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  17. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  18. data/lib/braintrust/contrib/openai/patcher.rb +173 -0
  19. data/lib/braintrust/contrib/patcher.rb +76 -0
  20. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  21. data/lib/braintrust/contrib/registry.rb +107 -0
  22. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  23. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  24. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  25. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  26. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  27. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  29. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  30. data/lib/braintrust/contrib/ruby_openai/instrumentation/moderations.rb +94 -0
  31. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  32. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  33. data/lib/braintrust/contrib/ruby_openai/patcher.rb +120 -0
  34. data/lib/braintrust/contrib/setup.rb +168 -0
  35. data/lib/braintrust/contrib/support/openai.rb +72 -0
  36. data/lib/braintrust/contrib/support/otel.rb +23 -0
  37. data/lib/braintrust/contrib.rb +205 -0
  38. data/lib/braintrust/internal/env.rb +39 -0
  39. data/lib/braintrust/internal/time.rb +44 -0
  40. data/lib/braintrust/setup.rb +50 -0
  41. data/lib/braintrust/state.rb +6 -1
  42. data/lib/braintrust/trace.rb +41 -51
  43. data/lib/braintrust/version.rb +1 -1
  44. data/lib/braintrust.rb +10 -1
  45. metadata +41 -7
  46. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  47. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  48. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
  49. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  50. data/lib/braintrust/trace/tokens.rb +0 -109
@@ -0,0 +1,242 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "opentelemetry/sdk"
4
+ require "json"
5
+ require_relative "../../support/otel"
6
+ require_relative "common"
7
+ require_relative "../../../internal/time"
8
+
9
module Braintrust
  module Contrib
    module Anthropic
      module Instrumentation
        # Span instrumentation for the Anthropic *beta* Messages API.
        # Wraps client.beta.messages.create() and stream() to emit spans.
        #
        # @note Beta APIs are experimental and may change between SDK
        #   versions, so every instrumentation step is defensive: failures
        #   are logged at debug level and never block the API call itself.
        module BetaMessages
          # Prepend the wrapper methods exactly once per target class.
          def self.included(base)
            base.prepend(InstanceMethods) unless applied?(base)
          end

          # @return [Boolean] whether the wrappers are already in base's ancestor chain
          def self.applied?(base)
            base.ancestors.include?(InstanceMethods)
          end

          module InstanceMethods
            # Request fields mirrored into span metadata (shared with the stable API).
            METADATA_FIELDS = %i[
              model max_tokens temperature top_p top_k stop_sequences
              stream tools tool_choice thinking metadata service_tier
            ].freeze

            # Request fields that exist only on the beta surface.
            BETA_METADATA_FIELDS = %i[
              betas output_format
            ].freeze

            # Wrap synchronous beta.messages.create in a span.
            # Instrumentation errors are swallowed; API errors propagate.
            def create(**params)
              client = instance_variable_get(:@client)
              tracer = Braintrust::Contrib.tracer_for(client)

              tracer.in_span("anthropic.messages.create") do |span|
                # Pre-call capture (never raises out of this block)
                request_metadata = nil
                begin
                  request_metadata = build_metadata(params)
                  set_input(span, params)
                rescue => e
                  Braintrust::Log.debug("Beta API: Failed to capture request: #{e.message}")
                  request_metadata ||= {"provider" => "anthropic", "api_version" => "beta"}
                end

                # The actual API call — errors propagate to the caller.
                response = nil
                ttft = Braintrust::Internal::Time.measure do
                  response = super(**params)
                end

                # Post-call capture (never raises out of this block)
                begin
                  set_output(span, response)
                  set_metrics(span, response, ttft)
                  finalize_metadata(span, request_metadata, response)
                rescue => e
                  Braintrust::Log.debug("Beta API: Failed to capture response: #{e.message}")
                end

                response
              end
            end

            # Wrap streaming beta.messages.stream.
            # No span is opened here; instead, context is attached to the
            # returned stream object so a span can be created lazily when
            # the stream is actually consumed.
            def stream(**params)
              client = instance_variable_get(:@client)
              tracer = Braintrust::Contrib.tracer_for(client)

              stream_metadata = nil
              begin
                stream_metadata = build_metadata(params, stream: true)
              rescue => e
                Braintrust::Log.debug("Beta API: Failed to build stream metadata: #{e.message}")
                stream_metadata = {"provider" => "anthropic", "api_version" => "beta", "stream" => true}
              end

              # The actual API call — errors propagate to the caller.
              stream_obj = super

              begin
                Braintrust::Contrib::Context.set!(stream_obj,
                  tracer: tracer,
                  params: params,
                  metadata: stream_metadata,
                  messages_instance: self,
                  # NOTE(review): Time.measure with no block presumably
                  # returns a monotonic "now" — confirm against internal/time.rb
                  start_time: Braintrust::Internal::Time.measure)
              rescue => e
                Braintrust::Log.debug("Beta API: Failed to set stream context: #{e.message}")
              end

              stream_obj
            end

            private

            # Attach accumulated output/metrics after a stream has been fully
            # consumed, then flush metadata onto the span.
            def finalize_stream_span(span, stream_obj, metadata, time_to_first_token)
              if stream_obj.respond_to?(:accumulated_message)
                begin
                  msg = stream_obj.accumulated_message
                  set_output(span, msg)
                  set_metrics(span, msg, time_to_first_token)
                  metadata["stop_reason"] = msg.stop_reason if msg.respond_to?(:stop_reason) && msg.stop_reason
                  metadata["model"] = msg.model if msg.respond_to?(:model) && msg.model
                rescue => e
                  Braintrust::Log.debug("Beta API: Failed to get accumulated message: #{e.message}")
                end
              end
              Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
            end

            # Build the metadata hash from request params.
            # Falls back to a minimal provider hash if anything goes wrong.
            def build_metadata(params, stream: false)
              metadata = {
                "provider" => "anthropic",
                "endpoint" => "/v1/messages",
                "api_version" => "beta"
              }
              metadata["stream"] = true if stream

              # Standard fields shared with the stable API
              METADATA_FIELDS.each do |field|
                metadata[field.to_s] = params[field] if params.key?(field)
              end

              # Beta-only fields, captured defensively
              capture_beta_fields(metadata, params)

              metadata
            rescue => e
              Braintrust::Log.debug("Beta API: Failed to build metadata: #{e.message}")
              {"provider" => "anthropic", "api_version" => "beta"}
            end

            # Copy beta-only params (betas list, output_format) into metadata.
            def capture_beta_fields(metadata, params)
              # betas array (e.g. ["structured-outputs-2025-11-13"]); scalars are boxed
              if params.key?(:betas)
                betas = params[:betas]
                metadata["betas"] = betas.is_a?(Array) ? betas : [betas]
              end

              return unless params.key?(:output_format)

              # output_format for structured outputs; stringify if to_h blows up
              output_format = params[:output_format]
              metadata["output_format"] = begin
                output_format.respond_to?(:to_h) ? output_format.to_h : output_format
              rescue
                output_format.to_s
              end
            end

            # Record the system prompt plus messages as braintrust.input_json.
            def set_input(span, params)
              input_messages = []

              begin
                if params[:system]
                  system_content = params[:system]
                  if system_content.is_a?(Array)
                    # Content-block arrays are flattened to a single text blob
                    joined = system_content.map { |blk| blk.is_a?(Hash) ? blk[:text] : blk }.join("\n")
                    input_messages << {role: "system", content: joined}
                  else
                    input_messages << {role: "system", content: system_content}
                  end
                end

                if params[:messages]
                  input_messages.concat(params[:messages].map { |m| m.respond_to?(:to_h) ? m.to_h : m })
                end

                Support::OTel.set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
              rescue => e
                Braintrust::Log.debug("Beta API: Failed to capture input: #{e.message}")
              end
            end

            # Record the assistant reply as braintrust.output_json.
            def set_output(span, response)
              return unless response

              begin
                return unless response.respond_to?(:content) && response.content

                blocks = response.content.map { |c| c.respond_to?(:to_h) ? c.to_h : c }
                payload = [{
                  role: response.respond_to?(:role) ? response.role : "assistant",
                  content: blocks
                }]
                Support::OTel.set_json_attr(span, "braintrust.output_json", payload)
              rescue => e
                Braintrust::Log.debug("Beta API: Failed to capture output: #{e.message}")
              end
            end

            # Record token usage plus time-to-first-token as braintrust.metrics.
            def set_metrics(span, response, time_to_first_token)
              metrics = {}

              begin
                if response.respond_to?(:usage) && response.usage
                  metrics = Common.parse_usage_tokens(response.usage)
                end
                metrics["time_to_first_token"] = time_to_first_token if time_to_first_token
                Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
              rescue => e
                Braintrust::Log.debug("Beta API: Failed to capture metrics: #{e.message}")
              end
            end

            # Merge response-derived fields into metadata and write it to the span.
            def finalize_metadata(span, metadata, response)
              begin
                metadata["stop_reason"] = response.stop_reason if response.respond_to?(:stop_reason) && response.stop_reason
                metadata["stop_sequence"] = response.stop_sequence if response.respond_to?(:stop_sequence) && response.stop_sequence
                metadata["model"] = response.model if response.respond_to?(:model) && response.model
              rescue => e
                Braintrust::Log.debug("Beta API: Failed to finalize metadata: #{e.message}")
              end

              Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,53 @@
1
+ # frozen_string_literal: true
2
+
3
module Braintrust
  module Contrib
    module Anthropic
      module Instrumentation
        # Shared helpers for Anthropic SDK instrumentation.
        module Common
          # Normalize an Anthropic usage payload into Braintrust metric names.
          # Cache-read and cache-creation tokens are folded into prompt_tokens
          # (matching the TS/Python SDKs), and a combined "tokens" total is
          # added when both prompt and completion counts are present.
          # Accepts plain Hashes as well as SDK objects responding to #to_h.
          #
          # @param usage [Hash, #to_h, nil] usage object from an Anthropic response
          # @return [Hash{String => Integer}] normalized metrics (possibly empty)
          def self.parse_usage_tokens(usage)
            return {} unless usage

            usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
            return {} unless usage_hash.is_a?(Hash)

            # Anthropic field names → Braintrust metric names
            field_map = {
              "input_tokens" => "prompt_tokens",
              "output_tokens" => "completion_tokens",
              "cache_read_input_tokens" => "prompt_cached_tokens",
              "cache_creation_input_tokens" => "prompt_cache_creation_tokens"
            }

            metrics = usage_hash.each_with_object({}) do |(key, value), acc|
              next unless value.is_a?(Numeric)

              target = field_map[key.to_s]
              acc[target] = value.to_i if target
            end

            # Fold cache tokens into prompt_tokens
            prompt_total = metrics.fetch("prompt_tokens", 0) +
              metrics.fetch("prompt_cached_tokens", 0) +
              metrics.fetch("prompt_cache_creation_tokens", 0)
            metrics["prompt_tokens"] = prompt_total if prompt_total > 0

            # Grand total, only when both sides were reported
            if metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
              metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
            end

            metrics
          end
        end
      end
    end
  end
end
@@ -0,0 +1,232 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "opentelemetry/sdk"
4
+ require "json"
5
+ require_relative "../../support/otel"
6
+ require_relative "common"
7
+ require_relative "../../../internal/time"
8
+
9
module Braintrust
  module Contrib
    module Anthropic
      module Instrumentation
        # Span instrumentation for the stable Anthropic Messages API.
        # Wraps create() and stream() to emit spans.
        module Messages
          # Prepend the wrapper methods exactly once per target class.
          def self.included(base)
            base.prepend(InstanceMethods) unless applied?(base)
          end

          # @return [Boolean] whether the wrappers are already in base's ancestor chain
          def self.applied?(base)
            base.ancestors.include?(InstanceMethods)
          end

          module InstanceMethods
            # Request fields mirrored into span metadata.
            METADATA_FIELDS = %i[
              model max_tokens temperature top_p top_k stop_sequences
              stream tools tool_choice thinking metadata service_tier
            ].freeze

            # Wrap synchronous messages.create in a span.
            def create(**params)
              client = instance_variable_get(:@client)
              tracer = Braintrust::Contrib.tracer_for(client)

              tracer.in_span("anthropic.messages.create") do |span|
                request_metadata = build_metadata(params)
                set_input(span, params)

                response = nil
                ttft = Braintrust::Internal::Time.measure do
                  response = super(**params)
                end

                set_output(span, response)
                set_metrics(span, response, ttft)
                finalize_metadata(span, request_metadata, response)

                response
              end
            end

            # Wrap streaming messages.stream.
            # No span is opened here; context is attached to the returned
            # stream object so a span can be created when it is consumed.
            def stream(**params)
              client = instance_variable_get(:@client)
              tracer = Braintrust::Contrib.tracer_for(client)
              stream_metadata = build_metadata(params, stream: true)

              stream_obj = super
              Braintrust::Contrib::Context.set!(stream_obj,
                tracer: tracer,
                params: params,
                metadata: stream_metadata,
                messages_instance: self,
                # NOTE(review): Time.measure with no block presumably
                # returns a monotonic "now" — confirm against internal/time.rb
                start_time: Braintrust::Internal::Time.measure)
              stream_obj
            end

            private

            # Attach accumulated output/metrics after a stream has been fully
            # consumed, then flush metadata onto the span.
            def finalize_stream_span(span, stream_obj, metadata, time_to_first_token)
              if stream_obj.respond_to?(:accumulated_message)
                begin
                  msg = stream_obj.accumulated_message
                  set_output(span, msg)
                  set_metrics(span, msg, time_to_first_token)
                  metadata["stop_reason"] = msg.stop_reason if msg.respond_to?(:stop_reason) && msg.stop_reason
                  metadata["model"] = msg.model if msg.respond_to?(:model) && msg.model
                rescue => e
                  Braintrust::Log.debug("Failed to get accumulated message: #{e.message}")
                end
              end
              Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
            end

            # Build the metadata hash from request params.
            def build_metadata(params, stream: false)
              metadata = {
                "provider" => "anthropic",
                "endpoint" => "/v1/messages"
              }
              metadata["stream"] = true if stream
              METADATA_FIELDS.each do |field|
                metadata[field.to_s] = params[field] if params.key?(field)
              end
              metadata
            end

            # Record the system prompt plus messages as braintrust.input_json.
            def set_input(span, params)
              input_messages = []

              if params[:system]
                system_content = params[:system]
                if system_content.is_a?(Array)
                  # Content-block arrays are flattened to a single text blob
                  joined = system_content.map { |blk| blk.is_a?(Hash) ? blk[:text] : blk }.join("\n")
                  input_messages << {role: "system", content: joined}
                else
                  input_messages << {role: "system", content: system_content}
                end
              end

              input_messages.concat(params[:messages].map(&:to_h)) if params[:messages]

              Support::OTel.set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
            end

            # Record the assistant reply as braintrust.output_json.
            def set_output(span, response)
              return unless response.respond_to?(:content) && response.content

              payload = [{
                role: response.respond_to?(:role) ? response.role : "assistant",
                content: response.content.map(&:to_h)
              }]
              Support::OTel.set_json_attr(span, "braintrust.output_json", payload)
            end

            # Record token usage plus time-to-first-token as braintrust.metrics.
            def set_metrics(span, response, time_to_first_token)
              metrics = {}
              if response.respond_to?(:usage) && response.usage
                metrics = Common.parse_usage_tokens(response.usage)
              end
              metrics["time_to_first_token"] = time_to_first_token if time_to_first_token
              Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
            end

            # Merge response-derived fields into metadata and write it to the span.
            def finalize_metadata(span, metadata, response)
              metadata["stop_reason"] = response.stop_reason if response.respond_to?(:stop_reason) && response.stop_reason
              metadata["stop_sequence"] = response.stop_sequence if response.respond_to?(:stop_sequence) && response.stop_sequence
              metadata["model"] = response.model if response.respond_to?(:model) && response.model
              Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
            end
          end
        end

        # Stream instrumentation for Anthropic.
        # Prepended to Anthropic::Helpers::Streaming::MessageStream so a span
        # is created at consumption time (each/text) or on close.
        module MessageStream
          # Prepend the wrapper methods exactly once per target class.
          def self.included(base)
            base.prepend(InstanceMethods) unless applied?(base)
          end

          # @return [Boolean] whether the wrappers are already in base's ancestor chain
          def self.applied?(base)
            base.ancestors.include?(InstanceMethods)
          end

          module InstanceMethods
            # Span-wrapped iteration over raw stream events.
            def each(&block)
              ctx = Braintrust::Contrib::Context.from(self)
              return super unless ctx && ctx[:tracer] && !ctx[:consumed]

              trace_consumption(ctx) do
                super do |*args|
                  # First yielded event marks time-to-first-token
                  ctx[:time_to_first_token] ||= Braintrust::Internal::Time.measure(ctx[:start_time])
                  block.call(*args)
                end
              end
            end

            # Span-wrapped lazy text enumerator.
            def text
              ctx = Braintrust::Contrib::Context.from(self)
              return super unless ctx && ctx[:tracer] && !ctx[:consumed]

              inner = super
              Enumerator.new do |yielder|
                trace_consumption(ctx) do
                  inner.each do |chunk|
                    ctx[:time_to_first_token] ||= Braintrust::Internal::Time.measure(ctx[:start_time])
                    yielder << chunk
                  end
                end
              end
            end

            # If the stream is closed without ever being consumed, emit a
            # minimal span capturing only the request side.
            def close
              ctx = Braintrust::Contrib::Context.from(self)
              if ctx && ctx[:tracer] && !ctx[:consumed]
                ctx[:consumed] = true
                tracer = ctx[:tracer]
                params = ctx[:params]
                metadata = ctx[:metadata]
                messages_instance = ctx[:messages_instance]

                tracer.in_span("anthropic.messages.create") do |span|
                  messages_instance.send(:set_input, span, params)
                  Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
                end
              end
              super
            end

            private

            # Open a span around the consuming block, then finalize with the
            # accumulated message. Marks the context consumed up front because
            # accumulated_message re-enters each internally.
            def trace_consumption(ctx)
              ctx[:consumed] = true

              tracer = ctx[:tracer]
              params = ctx[:params]
              metadata = ctx[:metadata]
              messages_instance = ctx[:messages_instance]

              tracer.in_span("anthropic.messages.create") do |span|
                messages_instance.send(:set_input, span, params)
                Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)

                yield

                messages_instance.send(:finalize_stream_span, span, self, metadata, ctx[:time_to_first_token])
              end
            end
          end
        end
      end
    end
  end
end
@@ -0,0 +1,53 @@
1
+ # frozen_string_literal: true
2
+
3
+ require_relative "../integration"
4
+
5
module Braintrust
  module Contrib
    module Anthropic
      # Integration descriptor for the official `anthropic` gem
      # (https://github.com/anthropics/anthropic-sdk-ruby).
      # Declares detection metadata only; the actual monkey-patching lives
      # in the patcher classes, which are loaded lazily via .patchers.
      class Integration
        include Braintrust::Contrib::Integration

        MINIMUM_VERSION = "0.3.0"

        GEM_NAMES = ["anthropic"].freeze
        REQUIRE_PATHS = ["anthropic"].freeze

        # @return [Symbol] unique identifier for this integration
        def self.integration_name
          :anthropic
        end

        # @return [Array<String>] gem names this integration supports
        def self.gem_names
          GEM_NAMES
        end

        # @return [Array<String>] require paths used for auto-instrument detection
        def self.require_paths
          REQUIRE_PATHS
        end

        # @return [String] minimum compatible gem version
        def self.minimum_version
          MINIMUM_VERSION
        end

        # @return [Boolean] true once the SDK's client class has been defined
        def self.loaded?
          defined?(::Anthropic::Client) ? true : false
        end

        # Lazy-load the patcher classes only when patching is requested,
        # keeping this integration stub lightweight at boot.
        # @return [Array<Class>] the patcher classes to apply
        def self.patchers
          require_relative "patcher"
          [MessagesPatcher, BetaMessagesPatcher]
        end
      end
    end
  end
end