braintrust 0.0.12 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +213 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  7. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  8. data/lib/braintrust/contrib/anthropic/patcher.rb +62 -0
  9. data/lib/braintrust/contrib/context.rb +56 -0
  10. data/lib/braintrust/contrib/integration.rb +160 -0
  11. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  12. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  15. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  16. data/lib/braintrust/contrib/openai/patcher.rb +130 -0
  17. data/lib/braintrust/contrib/patcher.rb +76 -0
  18. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  19. data/lib/braintrust/contrib/registry.rb +107 -0
  20. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  21. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  22. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  23. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  24. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  25. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  26. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  27. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  29. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  30. data/lib/braintrust/contrib/ruby_openai/patcher.rb +85 -0
  31. data/lib/braintrust/contrib/setup.rb +168 -0
  32. data/lib/braintrust/contrib/support/openai.rb +72 -0
  33. data/lib/braintrust/contrib/support/otel.rb +23 -0
  34. data/lib/braintrust/contrib.rb +205 -0
  35. data/lib/braintrust/internal/env.rb +33 -0
  36. data/lib/braintrust/internal/time.rb +44 -0
  37. data/lib/braintrust/setup.rb +50 -0
  38. data/lib/braintrust/state.rb +5 -0
  39. data/lib/braintrust/trace.rb +0 -51
  40. data/lib/braintrust/version.rb +1 -1
  41. data/lib/braintrust.rb +10 -1
  42. metadata +38 -7
  43. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  44. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  45. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
  46. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  47. data/lib/braintrust/trace/tokens.rb +0 -109
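
The headline change in 0.1.0 is structural: the monolithic wrappers under `data/lib/braintrust/trace/contrib/` (items 43–47 above) are deleted and replaced by a registry-based `contrib` framework (items 4–34), with deprecation shims keeping the old entry points alive. The excerpts below cover the new RubyLLM integration (items 21–24) and the ruby-openai deprecation shim (item 25). A hedged sketch of the resulting API, grounded in those shims — the `:ruby_llm` and `:ruby_openai` symbols come straight from the source below, while other integration symbols and option defaults are assumptions:

```ruby
require "braintrust"

# 0.0.x style (still works, but warns and delegates):
#   Braintrust::Trace::AlexRudall::RubyOpenAI.wrap(client)

# 0.1.0 style - enable integrations through the contrib registry:
Braintrust.instrument!(:ruby_llm)                     # class-level: all RubyLLM::Chat instances
Braintrust.instrument!(:ruby_openai, target: client)  # instance-level: one wrapped client
```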

data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb
@@ -0,0 +1,464 @@
+ # frozen_string_literal: true
+
+ require "opentelemetry/sdk"
+ require "json"
+
+ require_relative "common"
+ require_relative "../../support/otel"
+ require_relative "../../../internal/encoding"
+ require_relative "../../../internal/time"
+
+ module Braintrust
+   module Contrib
+     module RubyLLM
+       module Instrumentation
+         # Chat instrumentation for RubyLLM.
+         # Wraps complete() and execute_tool() methods to create spans.
+         module Chat
+           def self.included(base)
+             # Guard against double-wrapping
+             base.prepend(InstanceMethods) unless applied?(base)
+           end
+
+           def self.applied?(base)
+             base.ancestors.include?(InstanceMethods)
+           end
+
+           module InstanceMethods
+             # Wrap complete() to trace chat completions.
+             # Each call creates a span - recursive calls from tool execution
+             # create nested spans (each is a separate API call).
+             def complete(&block)
+               return block ? super : super() unless tracing_enabled?
+
+               tracer = Braintrust::Contrib.tracer_for(self)
+
+               tracer.in_span("ruby_llm.chat") do |span|
+                 if block
+                   # Streaming: pass a block that calls super() with the wrapper
+                   handle_streaming_complete(span, block) do |&wrapper|
+                     super(&wrapper)
+                   end
+                 else
+                   # Non-streaming: pass a block that calls super()
+                   handle_non_streaming_complete(span) do
+                     super()
+                   end
+                 end
+               end
+             end
+
+             private
+
+             # Wrap execute_tool() to trace tool executions.
+             # This is a private method in RubyLLM - wrapping it avoids
+             # conflicting with user-registered on_tool_call/on_tool_result callbacks.
+             def execute_tool(tool_call)
+               return super unless tracing_enabled?
+
+               tracer = Braintrust::Contrib.tracer_for(self)
+
+               tracer.in_span("ruby_llm.tool.#{tool_call.name}") do |span|
+                 Support::OTel.set_json_attr(span, "braintrust.span_attributes", {type: "tool"})
+                 span.set_attribute("tool.name", tool_call.name)
+                 span.set_attribute("tool.call_id", tool_call.id)
+
+                 Support::OTel.set_json_attr(span, "braintrust.input_json", {
+                   "name" => tool_call.name,
+                   "arguments" => tool_call.arguments
+                 })
+
+                 result = super
+
+                 Support::OTel.set_json_attr(span, "braintrust.output_json", result)
+                 result
+               end
+             end
+
+             # DEPRECATED: Support legacy unwrap()
+             # Checks Context for enabled: false on instance or class.
+             # This will be removed in a future version.
+             def tracing_enabled?
+               ctx = Braintrust::Contrib.context_for(self)
+               class_ctx = Braintrust::Contrib.context_for(self.class)
+               ctx&.[](:enabled) != false && class_ctx&.[](:enabled) != false
+             end
+
+             # Handle streaming complete request with tracing.
+             # Calls the provided block with a wrapper that aggregates chunks.
+             # @param span [OpenTelemetry::Trace::Span] the span to record to
+             # @param user_block [Proc] the streaming block from user
+             # @param super_caller [Proc] block that calls super(&wrapper)
+             def handle_streaming_complete(span, user_block, &super_caller)
+               aggregated_chunks = []
+               metadata = extract_metadata(stream: true)
+               input_messages = build_input_messages
+               start_time = Braintrust::Internal::Time.measure
+               time_to_first_token = nil
+
+               Support::OTel.set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
+               Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+
+               # Wrapper block that RubyLLM calls once per chunk.
+               # Aggregates chunks for span recording and forwards to user's block.
+               wrapper = proc do |chunk|
+                 time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
+                 aggregated_chunks << chunk
+                 user_block.call(chunk)
+               end
+
+               begin
+                 result = super_caller.call(&wrapper)
+
+                 capture_streaming_output(span, aggregated_chunks, result, time_to_first_token)
+                 result
+               rescue => e
+                 span.record_exception(e)
+                 span.status = ::OpenTelemetry::Trace::Status.error("RubyLLM error: #{e.message}")
+                 raise
+               end
+             end
+
+             # Handle non-streaming complete request with tracing.
+             # Calls the provided block to invoke super() and returns the response.
+             # @param span [OpenTelemetry::Trace::Span] the span to record to
+             # @param super_caller [Proc] block that calls super()
+             def handle_non_streaming_complete(span, &super_caller)
+               metadata = extract_metadata
+               input_messages = build_input_messages
+               Support::OTel.set_json_attr(span, "braintrust.input_json", input_messages) if input_messages.any?
+
+               messages_before_count = messages&.length || 0
+
+               begin
+                 response = nil
+                 time_to_first_token = Braintrust::Internal::Time.measure do
+                   response = super_caller.call
+                 end
+
+                 capture_non_streaming_output(span, response, messages_before_count, time_to_first_token)
+                 Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
+
+                 response
+               rescue => e
+                 span.record_exception(e)
+                 span.status = ::OpenTelemetry::Trace::Status.error("RubyLLM error: #{e.message}")
+                 raise
+               end
+             end
+
+             # Extract metadata from chat instance (provider, model, tools, stream flag)
+             def extract_metadata(stream: false)
+               metadata = {"provider" => "ruby_llm"}
+               metadata["stream"] = true if stream
+
+               # Extract model
+               if respond_to?(:model) && model
+                 model_id = model.respond_to?(:id) ? model.id : model.to_s
+                 metadata["model"] = model_id
+               end
+
+               # Extract tools (only for non-streaming)
+               if !stream && respond_to?(:tools) && tools&.any?
+                 metadata["tools"] = extract_tools_metadata
+               end
+
+               metadata
+             end
+
+             # Extract tools metadata from chat instance
+             def extract_tools_metadata
+               provider = instance_variable_get(:@provider) if instance_variable_defined?(:@provider)
+
+               tools.map do |_name, tool|
+                 format_tool_schema(tool, provider)
+               end
+             end
+
+             # Format a tool into OpenAI-compatible schema
+             def format_tool_schema(tool, provider)
+               tool_schema = nil
+
+               # Use provider-specific tool_for method if available
+               if provider
+                 begin
+                   tool_schema = if provider.is_a?(::RubyLLM::Providers::OpenAI)
+                     ::RubyLLM::Providers::OpenAI::Tools.tool_for(tool)
+                   elsif defined?(::RubyLLM::Providers::Anthropic) && provider.is_a?(::RubyLLM::Providers::Anthropic)
+                     ::RubyLLM::Providers::Anthropic::Tools.tool_for(tool)
+                   elsif tool.respond_to?(:params_schema) && tool.params_schema
+                     build_basic_tool_schema(tool)
+                   else
+                     build_minimal_tool_schema(tool)
+                   end
+                 rescue NameError, ArgumentError => e
+                   Braintrust::Log.debug("Failed to extract tool schema using provider-specific method: #{e.class.name}: #{e.message}")
+                   tool_schema = (tool.respond_to?(:params_schema) && tool.params_schema) ? build_basic_tool_schema(tool) : build_minimal_tool_schema(tool)
+                 end
+               else
+                 tool_schema = (tool.respond_to?(:params_schema) && tool.params_schema) ? build_basic_tool_schema(tool) : build_minimal_tool_schema(tool)
+               end
+
+               # Strip RubyLLM-specific fields to match native OpenAI format
+               function_key = tool_schema&.key?(:function) ? :function : "function"
+               if tool_schema && tool_schema[function_key]
+                 tool_params = tool_schema[function_key][:parameters] || tool_schema[function_key]["parameters"]
+                 if tool_params.is_a?(Hash)
+                   tool_params = tool_params.dup if tool_params.frozen?
+                   tool_params.delete("strict")
+                   tool_params.delete(:strict)
+                   tool_params.delete("additionalProperties")
+                   tool_params.delete(:additionalProperties)
+                   params_key = tool_schema[function_key].key?(:parameters) ? :parameters : "parameters"
+                   tool_schema[function_key][params_key] = tool_params
+                 end
+               end
+
+               tool_schema
+             end
+
+             # Build a basic tool schema with parameters
+             def build_basic_tool_schema(tool)
+               {
+                 "type" => "function",
+                 "function" => {
+                   "name" => tool.name.to_s,
+                   "description" => tool.description,
+                   "parameters" => tool.params_schema
+                 }
+               }
+             end
+
+             # Build a minimal tool schema without parameters
+             def build_minimal_tool_schema(tool)
+               {
+                 "type" => "function",
+                 "function" => {
+                   "name" => tool.name.to_s,
+                   "description" => tool.description,
+                   "parameters" => {}
+                 }
+               }
+             end
+
+             # Build input messages array from chat history
+             def build_input_messages
+               return [] unless respond_to?(:messages) && messages&.any?
+
+               messages.map { |m| format_message_for_input(m) }
+             end
+
+             # Format a RubyLLM message to OpenAI-compatible format
+             def format_message_for_input(msg)
+               formatted = {"role" => msg.role.to_s}
+
+               # Handle content
+               if msg.respond_to?(:content) && msg.content
+                 raw_content = msg.content
+
+                 # Check if content is a Content object with attachments (issue #71)
+                 formatted["content"] = if raw_content.respond_to?(:text) && raw_content.respond_to?(:attachments) && raw_content.attachments&.any?
+                   format_multipart_content(raw_content)
+                 else
+                   format_simple_content(raw_content, msg.role.to_s)
+                 end
+               end
+
+               # Handle tool_calls for assistant messages
+               if msg.respond_to?(:tool_calls) && msg.tool_calls&.any?
+                 formatted["tool_calls"] = format_tool_calls(msg.tool_calls)
+                 formatted["content"] = nil
+               end
+
+               # Handle tool_call_id for tool result messages
+               if msg.respond_to?(:tool_call_id) && msg.tool_call_id
+                 formatted["tool_call_id"] = msg.tool_call_id
+               end
+
+               formatted
+             end
+
+             # Format multipart content with text and attachments
+             # @param content_obj [Object] Content object with text and attachments
+             # @return [Array<Hash>] array of content parts
+             def format_multipart_content(content_obj)
+               content_parts = []
+
+               # Add text part
+               content_parts << {"type" => "text", "text" => content_obj.text} if content_obj.text
+
+               # Add attachment parts (convert to Braintrust format)
+               content_obj.attachments.each do |attachment|
+                 content_parts << format_attachment_for_input(attachment)
+               end
+
+               content_parts
+             end
+
+             # Format simple text content
+             # @param raw_content [Object] String or Content object with text
+             # @param role [String] the message role
+             # @return [String] formatted text content
+             def format_simple_content(raw_content, role)
+               content = raw_content
+               content = content.text if content.respond_to?(:text)
+
+               # Convert Ruby hash string to JSON for tool results
+               if role == "tool" && content.is_a?(String) && content.start_with?("{:")
+                 begin
+                   content = content.gsub(/(?<=\{|, ):(\w+)=>/, '"\1":').gsub("=>", ":")
+                 rescue
+                   # Keep original if conversion fails
+                 end
+               end
+
+               content
+             end
+
+             # Format a RubyLLM attachment to OpenAI-compatible format
+             # @param attachment [Object] the RubyLLM attachment
+             # @return [Hash] OpenAI image_url format for consistency with other integrations
+             def format_attachment_for_input(attachment)
+               # RubyLLM Attachment has: source (Pathname), filename, mime_type
+               if attachment.respond_to?(:source) && attachment.source
+                 begin
+                   data = File.binread(attachment.source.to_s)
+                   encoded = Braintrust::Internal::Encoding::Base64.strict_encode64(data)
+                   mime_type = attachment.respond_to?(:mime_type) ? attachment.mime_type : "application/octet-stream"
+
+                   # Use OpenAI's image_url format for consistency
+                   {
+                     "type" => "image_url",
+                     "image_url" => {
+                       "url" => "data:#{mime_type};base64,#{encoded}"
+                     }
+                   }
+                 rescue => e
+                   Braintrust::Log.debug("Failed to read attachment file: #{e.message}")
+                   # Return a placeholder if we can't read the file
+                   {"type" => "text", "text" => "[attachment: #{attachment.respond_to?(:filename) ? attachment.filename : "unknown"}]"}
+                 end
+               elsif attachment.respond_to?(:to_h)
+                 # Try to use attachment's own serialization
+                 attachment.to_h
+               else
+                 {"type" => "text", "text" => "[attachment]"}
+               end
+             end
+
+             # Format tool calls into OpenAI format
+             def format_tool_calls(tool_calls)
+               tool_calls.map do |_id, tc|
+                 args = tc.arguments
+                 args_string = args.is_a?(String) ? args : JSON.generate(args)
+
+                 {
+                   "id" => tc.id,
+                   "type" => "function",
+                   "function" => {
+                     "name" => tc.name,
+                     "arguments" => args_string
+                   }
+                 }
+               end
+             end
+
+             # Capture streaming output and metrics
+             def capture_streaming_output(span, aggregated_chunks, result, time_to_first_token)
+               return if aggregated_chunks.empty?
+
+               # Aggregate content from chunks
+               # Extract text from Content objects if present (issue #71)
+               aggregated_content = aggregated_chunks.map { |c|
+                 content = c.respond_to?(:content) ? c.content : c.to_s
+                 content = content.text if content.respond_to?(:text)
+                 content
+               }.join
+
+               output = [{
+                 role: "assistant",
+                 content: aggregated_content
+               }]
+               Support::OTel.set_json_attr(span, "braintrust.output_json", output)
+
+               # Set metrics (token usage + time_to_first_token)
+               # RubyLLM stores tokens directly in the response hash, not in a nested usage object
+               metrics = {}
+               if result.respond_to?(:to_h)
+                 result_hash = result.to_h
+                 usage = {
+                   "input_tokens" => result_hash[:input_tokens],
+                   "output_tokens" => result_hash[:output_tokens],
+                   "cached_tokens" => result_hash[:cached_tokens],
+                   "cache_creation_tokens" => result_hash[:cache_creation_tokens]
+                 }.compact
+
+                 unless usage.empty?
+                   metrics = Common.parse_usage_tokens(usage)
+                 end
+               end
+               metrics["time_to_first_token"] = time_to_first_token if time_to_first_token
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+
+             # Capture non-streaming output and metrics
+             def capture_non_streaming_output(span, response, messages_before_count, time_to_first_token)
+               return unless response
+
+               message = {
+                 "role" => "assistant",
+                 "content" => nil
+               }
+
+               # Add content if it's a simple text response
+               # Extract text from Content objects if present (issue #71)
+               if response.respond_to?(:content) && response.content && !response.content.empty?
+                 content = response.content
+                 content = content.text if content.respond_to?(:text)
+                 message["content"] = content
+               end
+
+               # Check if there are tool calls in the messages history
+               if respond_to?(:messages) && messages
+                 assistant_msg = messages[messages_before_count..]&.find { |m|
+                   m.role.to_s == "assistant" && m.respond_to?(:tool_calls) && m.tool_calls&.any?
+                 }
+
+                 if assistant_msg&.tool_calls&.any?
+                   message["tool_calls"] = format_tool_calls(assistant_msg.tool_calls)
+                   message["content"] = nil
+                 end
+               end
+
+               output = [{
+                 "index" => 0,
+                 "message" => message,
+                 "finish_reason" => message["tool_calls"] ? "tool_calls" : "stop"
+               }]
+
+               Support::OTel.set_json_attr(span, "braintrust.output_json", output)
+
+               # Set metrics (token usage + time_to_first_token)
+               metrics = {}
+               if response.respond_to?(:to_h)
+                 response_hash = response.to_h
+                 usage = {
+                   "input_tokens" => response_hash[:input_tokens],
+                   "output_tokens" => response_hash[:output_tokens],
+                   "cached_tokens" => response_hash[:cached_tokens],
+                   "cache_creation_tokens" => response_hash[:cache_creation_tokens]
+                 }.compact
+
+                 unless usage.empty?
+                   metrics = Common.parse_usage_tokens(usage)
+                 end
+               end
+               metrics["time_to_first_token"] = time_to_first_token if time_to_first_token
+               Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
+             end
+           end
+         end
+       end
+     end
+   end
+ end
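
To make the span structure concrete, here is an illustrative usage sketch. It assumes class-level patching has been enabled via `Braintrust.instrument!(:ruby_llm)` and that RubyLLM's public `ask` API drives `complete` internally (true in current RubyLLM releases, but not guaranteed by this diff):

```ruby
require "braintrust"
require "ruby_llm"

Braintrust.instrument!(:ruby_llm)

chat = RubyLLM.chat(model: "gpt-4o-mini")

# Non-streaming: one "ruby_llm.chat" span carrying braintrust.input_json,
# braintrust.output_json, and token metrics.
chat.ask("What is 2 + 2?")

# Streaming: the wrapper proc records time_to_first_token on the first chunk
# and aggregates chunk content before forwarding each chunk to this block.
chat.ask("Tell me a story") { |chunk| print chunk.content }

# Tools: each execution appears as a nested "ruby_llm.tool.<name>" span, and
# the follow-up completion triggered by the tool result becomes its own nested
# "ruby_llm.chat" span (each is a separate API call).
```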

data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb
@@ -0,0 +1,58 @@
+ # frozen_string_literal: true
+
+ module Braintrust
+   module Contrib
+     module RubyLLM
+       module Instrumentation
+         # Common utilities for RubyLLM instrumentation.
+         module Common
+           # Parse RubyLLM usage tokens into normalized Braintrust metrics.
+           # RubyLLM normalizes token fields from all providers (OpenAI, Anthropic, etc.)
+           # into a consistent format:
+           #   - input_tokens: prompt tokens sent
+           #   - output_tokens: completion tokens received
+           #   - cached_tokens: tokens read from cache
+           #   - cache_creation_tokens: tokens written to cache
+           #
+           # @param usage [Hash, Object] usage object from RubyLLM response
+           # @return [Hash<String, Integer>] normalized metrics for Braintrust
+           def self.parse_usage_tokens(usage)
+             metrics = {}
+             return metrics unless usage
+
+             usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
+             return metrics unless usage_hash.is_a?(Hash)
+
+             # RubyLLM normalized field mappings → Braintrust metrics
+             field_map = {
+               "input_tokens" => "prompt_tokens",
+               "output_tokens" => "completion_tokens",
+               "cached_tokens" => "prompt_cached_tokens",
+               "cache_creation_tokens" => "prompt_cache_creation_tokens"
+             }
+
+             usage_hash.each do |key, value|
+               next unless value.is_a?(Numeric)
+               key_str = key.to_s
+               target = field_map[key_str]
+               metrics[target] = value.to_i if target
+             end
+
+             # Accumulate cache tokens into prompt_tokens (matching TS/Python SDKs)
+             prompt_tokens = (metrics["prompt_tokens"] || 0) +
+               (metrics["prompt_cached_tokens"] || 0) +
+               (metrics["prompt_cache_creation_tokens"] || 0)
+             metrics["prompt_tokens"] = prompt_tokens if prompt_tokens > 0
+
+             # Calculate total
+             if metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
+               metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+             end
+
+             metrics
+           end
+         end
+       end
+     end
+   end
+ end
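
A worked example of the normalization above: cache tokens are reported separately and also folded into `prompt_tokens` before the total is computed.

```ruby
usage = {"input_tokens" => 12, "output_tokens" => 40, "cached_tokens" => 8}

Braintrust::Contrib::RubyLLM::Instrumentation::Common.parse_usage_tokens(usage)
# => {
#      "prompt_tokens"        => 20,  # 12 input + 8 cached, folded together
#      "completion_tokens"    => 40,
#      "prompt_cached_tokens" => 8,
#      "tokens"               => 60   # prompt_tokens + completion_tokens
#    }
```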

data/lib/braintrust/contrib/ruby_llm/integration.rb
@@ -0,0 +1,54 @@
+ # frozen_string_literal: true
+
+ require_relative "../integration"
+ require_relative "deprecated"
+
+ module Braintrust
+   module Contrib
+     module RubyLLM
+       # RubyLLM integration for automatic instrumentation.
+       # Instruments the crmne/ruby_llm gem.
+       class Integration
+         include Braintrust::Contrib::Integration
+
+         MINIMUM_VERSION = "1.8.0"
+
+         GEM_NAMES = ["ruby_llm"].freeze
+         REQUIRE_PATHS = ["ruby_llm"].freeze
+
+         # @return [Symbol] Unique identifier for this integration
+         def self.integration_name
+           :ruby_llm
+         end
+
+         # @return [Array<String>] Gem names this integration supports
+         def self.gem_names
+           GEM_NAMES
+         end
+
+         # @return [Array<String>] Require paths for auto-instrument detection
+         def self.require_paths
+           REQUIRE_PATHS
+         end
+
+         # @return [String] Minimum compatible version
+         def self.minimum_version
+           MINIMUM_VERSION
+         end
+
+         # @return [Boolean] true if ruby_llm gem is available
+         def self.loaded?
+           defined?(::RubyLLM::Chat) ? true : false
+         end
+
+         # Lazy-load the patcher only when actually patching.
+         # This keeps the integration stub lightweight.
+         # @return [Array<Class>] The patcher classes
+         def self.patchers
+           require_relative "patcher"
+           [ChatPatcher]
+         end
+       end
+     end
+   end
+ end
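
These class methods form the contract the contrib registry consumes (the registry itself lives in `data/lib/braintrust/contrib/registry.rb`, which is not excerpted here). Exercising them directly:

```ruby
integration = Braintrust::Contrib::RubyLLM::Integration

integration.integration_name  # => :ruby_llm
integration.minimum_version   # => "1.8.0"
integration.loaded?           # => true once ruby_llm has defined RubyLLM::Chat
integration.patchers          # lazily requires patcher.rb, => [Braintrust::Contrib::RubyLLM::ChatPatcher]
```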

data/lib/braintrust/contrib/ruby_llm/patcher.rb
@@ -0,0 +1,44 @@
+ # frozen_string_literal: true
+
+ require_relative "../patcher"
+ require_relative "instrumentation/chat"
+
+ module Braintrust
+   module Contrib
+     module RubyLLM
+       # Patcher for RubyLLM chat completions.
+       # Instruments RubyLLM::Chat#complete and #execute_tool methods.
+       class ChatPatcher < Braintrust::Contrib::Patcher
+         class << self
+           def applicable?
+             defined?(::RubyLLM::Chat)
+           end
+
+           def patched?(**options)
+             target_class = options[:target]&.singleton_class || ::RubyLLM::Chat
+             Instrumentation::Chat.applied?(target_class)
+           end
+
+           # Perform the actual patching.
+           # @param options [Hash] Configuration options passed from integration
+           # @option options [Object] :target Optional target instance to patch
+           # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
+           # @return [void]
+           def perform_patch(**options)
+             return unless applicable?
+
+             if options[:target]
+               # Instance-level (for only this chat instance)
+               raise ArgumentError, "target must be a kind of ::RubyLLM::Chat" unless options[:target].is_a?(::RubyLLM::Chat)
+
+               options[:target].singleton_class.include(Instrumentation::Chat)
+             else
+               # Class-level (for all chat instances)
+               ::RubyLLM::Chat.include(Instrumentation::Chat)
+             end
+           end
+         end
+       end
+     end
+   end
+ end
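
The two patching modes can be exercised directly, assuming the base `Patcher` leaves `perform_patch` callable (in normal use `Braintrust.instrument!` routes through the integration instead):

```ruby
patcher = Braintrust::Contrib::RubyLLM::ChatPatcher

# Class-level: every RubyLLM::Chat instance is instrumented.
patcher.perform_patch

# ...or instance-level: only this chat's singleton class gains the module.
chat = RubyLLM.chat
patcher.perform_patch(target: chat)
patcher.patched?(target: chat)  # => true (applied? checks the singleton class's ancestors)
```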

data/lib/braintrust/contrib/ruby_openai/deprecated.rb
@@ -0,0 +1,24 @@
+ # frozen_string_literal: true
+
+ # Backward compatibility shim for the old ruby-openai integration API.
+ # This file now just delegates to the new API.
+
+ module Braintrust
+   module Trace
+     module AlexRudall
+       module RubyOpenAI
+         # Wrap an OpenAI::Client (ruby-openai gem) to automatically create spans.
+         # This is the legacy API - delegates to the new contrib framework.
+         #
+         # @param client [OpenAI::Client] the OpenAI client to wrap
+         # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
+         # @return [OpenAI::Client] the wrapped client
+         def self.wrap(client, tracer_provider: nil)
+           Log.warn("Braintrust::Trace::AlexRudall::RubyOpenAI.wrap() is deprecated and will be removed in a future version: use Braintrust.instrument!() instead.")
+           Braintrust.instrument!(:ruby_openai, target: client, tracer_provider: tracer_provider)
+           client
+         end
+       end
+     end
+   end
+ end
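
Migration for callers is mechanical; the deprecation warning names the replacement the shim delegates to. Something like:

```ruby
require "openai"  # ruby-openai gem

client = OpenAI::Client.new(access_token: ENV["OPENAI_API_KEY"])

# Before (0.0.x) - still works, but logs a deprecation warning:
Braintrust::Trace::AlexRudall::RubyOpenAI.wrap(client)

# After (0.1.0) - the call the shim delegates to:
Braintrust.instrument!(:ruby_openai, target: client)
```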