braintrust 0.0.12 → 0.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +214 -180
- data/exe/braintrust +143 -0
- data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
- data/lib/braintrust/contrib/anthropic/instrumentation/beta_messages.rb +242 -0
- data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
- data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
- data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
- data/lib/braintrust/contrib/anthropic/patcher.rb +145 -0
- data/lib/braintrust/contrib/context.rb +56 -0
- data/lib/braintrust/contrib/integration.rb +160 -0
- data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
- data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
- data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
- data/lib/braintrust/contrib/openai/instrumentation/moderations.rb +93 -0
- data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
- data/lib/braintrust/contrib/openai/integration.rb +58 -0
- data/lib/braintrust/contrib/openai/patcher.rb +173 -0
- data/lib/braintrust/contrib/patcher.rb +76 -0
- data/lib/braintrust/contrib/rails/railtie.rb +16 -0
- data/lib/braintrust/contrib/registry.rb +107 -0
- data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
- data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
- data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
- data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
- data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
- data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
- data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
- data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
- data/lib/braintrust/contrib/ruby_openai/instrumentation/moderations.rb +94 -0
- data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
- data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
- data/lib/braintrust/contrib/ruby_openai/patcher.rb +120 -0
- data/lib/braintrust/contrib/setup.rb +168 -0
- data/lib/braintrust/contrib/support/openai.rb +72 -0
- data/lib/braintrust/contrib/support/otel.rb +23 -0
- data/lib/braintrust/contrib.rb +205 -0
- data/lib/braintrust/internal/env.rb +39 -0
- data/lib/braintrust/internal/time.rb +44 -0
- data/lib/braintrust/setup.rb +50 -0
- data/lib/braintrust/state.rb +6 -1
- data/lib/braintrust/trace.rb +41 -51
- data/lib/braintrust/version.rb +1 -1
- data/lib/braintrust.rb +10 -1
- metadata +41 -7
- data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
- data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
- data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
- data/lib/braintrust/trace/contrib/openai.rb +0 -611
- data/lib/braintrust/trace/tokens.rb +0 -109
|
@@ -0,0 +1,145 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "../patcher"
|
|
4
|
+
require_relative "instrumentation/messages"
|
|
5
|
+
require_relative "instrumentation/beta_messages"
|
|
6
|
+
|
|
7
|
+
module Braintrust
  module Contrib
    module Anthropic
      # Patcher for Anthropic messages.
      # Instruments Anthropic::Messages#create and #stream methods.
      class MessagesPatcher < Braintrust::Contrib::Patcher
        class << self
          # Whether the Anthropic SDK is present so this patcher can apply.
          # @return [Boolean]
          def applicable?
            # `defined?` returns the string "constant" or nil; coerce to a real
            # boolean so this predicate is consistent with
            # BetaMessagesPatcher.applicable?, which returns true/false.
            !defined?(::Anthropic::Client).nil?
          end

          # Has instrumentation already been applied?
          # @param options [Hash] may contain :target (a specific client instance)
          # @return [Boolean]
          def patched?(**options)
            # Instance-level target takes precedence; fall back to the class.
            target_class = get_singleton_class(options[:target]) || ::Anthropic::Resources::Messages
            Instrumentation::Messages.applied?(target_class)
          end

          # Perform the actual patching.
          # @param options [Hash] Configuration options passed from integration
          # @option options [Object] :target Optional target instance to patch
          # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
          # @return [void]
          # @raise [ArgumentError] if :target is given but is not an ::Anthropic::Client
          def perform_patch(**options)
            return unless applicable?

            # MessageStream is shared across all clients, so patch at class level.
            # The instrumentation short-circuits when no context is present,
            # so uninstrumented clients' streams pass through unaffected.
            patch_message_stream

            if options[:target]
              # Instance-level (for only this client instance)
              raise ArgumentError, "target must be a kind of ::Anthropic::Client" unless options[:target].is_a?(::Anthropic::Client)

              get_singleton_class(options[:target]).include(Instrumentation::Messages)
            else
              # Class-level (for all client instances)
              ::Anthropic::Resources::Messages.include(Instrumentation::Messages)
            end
          end

          private

          # Singleton class of the client's messages resource, or nil when no
          # client is given (safe navigation handles a nil target).
          def get_singleton_class(client)
            client&.messages&.singleton_class
          end

          # Idempotently include the MessageStream instrumentation at class level.
          def patch_message_stream
            return unless defined?(::Anthropic::Helpers::Streaming::MessageStream)
            return if Instrumentation::MessageStream.applied?(::Anthropic::Helpers::Streaming::MessageStream)

            ::Anthropic::Helpers::Streaming::MessageStream.include(Instrumentation::MessageStream)
          end
        end
      end

      # Patcher for Anthropic beta messages API.
      # Instruments client.beta.messages.create and stream methods.
      #
      # @note Beta APIs are experimental and subject to change between SDK versions.
      #   Braintrust will make reasonable efforts to maintain compatibility, but
      #   breaking changes may require SDK updates.
      #
      # @see https://docs.anthropic.com/en/docs/build-with-claude/structured-outputs
      #   for structured outputs documentation
      class BetaMessagesPatcher < Braintrust::Contrib::Patcher
        # Version constraints for beta patcher.
        # Set MAXIMUM_VERSION when a breaking change is discovered to disable
        # beta instrumentation on incompatible versions until a fix is released.
        # Currently nil = rely on class existence check only.
        MAXIMUM_VERSION = nil

        class << self
          # Whether the Anthropic SDK (including the beta namespace) is present
          # and within the supported version range.
          # @return [Boolean]
          def applicable?
            return false unless defined?(::Anthropic::Client)
            return false unless defined?(::Anthropic::Resources::Beta::Messages)
            return false if MAXIMUM_VERSION && !version_compatible?
            true
          end

          # Has beta instrumentation already been applied?
          # @param options [Hash] may contain :target (a specific client instance)
          # @return [Boolean]
          def patched?(**options)
            target_class = get_singleton_class(options[:target]) || ::Anthropic::Resources::Beta::Messages
            Instrumentation::BetaMessages.applied?(target_class)
          end

          # Perform the actual patching.
          # @param options [Hash] Configuration options passed from integration
          # @option options [Object] :target Optional target instance to patch
          # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
          # @return [void]
          # @raise [ArgumentError] if :target is given but is not an ::Anthropic::Client
          def perform_patch(**options)
            return unless applicable?

            Braintrust::Log.debug("Instrumenting Anthropic beta.messages API (experimental)")

            # MessageStream is shared with stable API - already patched by MessagesPatcher.
            # The BetaMessages instrumentation sets api_version: "beta" in context,
            # which MessageStream uses to include in metadata.
            patch_message_stream

            if options[:target]
              # Instance-level (for only this client instance)
              raise ArgumentError, "target must be a kind of ::Anthropic::Client" unless options[:target].is_a?(::Anthropic::Client)

              get_singleton_class(options[:target]).include(Instrumentation::BetaMessages)
            else
              # Class-level (for all client instances)
              ::Anthropic::Resources::Beta::Messages.include(Instrumentation::BetaMessages)
            end
          end

          private

          # True when the loaded anthropic gem is at or below MAXIMUM_VERSION.
          # Returns true when no ceiling is set or the gem spec cannot be found.
          def version_compatible?
            return true unless MAXIMUM_VERSION

            spec = Gem.loaded_specs["anthropic"]
            return true unless spec

            spec.version <= Gem::Version.new(MAXIMUM_VERSION)
          end

          # Singleton class of the client's beta messages resource, or nil.
          def get_singleton_class(client)
            client&.beta&.messages&.singleton_class
          rescue
            # Defensive: beta namespace may not exist or may have changed
            nil
          end

          # Idempotently include the MessageStream instrumentation at class level.
          def patch_message_stream
            return unless defined?(::Anthropic::Helpers::Streaming::MessageStream)
            return if Instrumentation::MessageStream.applied?(::Anthropic::Helpers::Streaming::MessageStream)

            ::Anthropic::Helpers::Streaming::MessageStream.include(Instrumentation::MessageStream)
          end
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Braintrust
  module Contrib
    # Per-instance or per-class configuration context.
    # Allows attaching generic configuration to specific objects or classes
    # via a @braintrust_context instance variable on the target.
    class Context
      # Set or update context on a target object.
      # Creates a new context if one doesn't exist, or updates existing context.
      # @param target [Object] The object to attach context to
      # @param options [Hash] Configuration options to store
      # @return [Context, nil] The existing context if updated, nil if created new or options empty
      def self.set!(target, **options)
        return nil if options.empty?

        existing = from(target)
        if existing.nil?
          # No context yet: attach a fresh one. Returns nil by contract.
          target.instance_variable_set(:@braintrust_context, new(**options))
          nil
        else
          # Merge the new options into the already-attached context.
          options.each_pair { |key, value| existing[key] = value }
          existing
        end
      end

      # Retrieve context from a target.
      # @param target [Object] The object to retrieve context from
      # @return [Context, nil] The context if found, nil otherwise
      def self.from(target)
        return nil if target.nil?

        target.instance_variable_get(:@braintrust_context)
      end

      # @param options [Hash] Configuration options
      def initialize(**options)
        @options = options
      end

      # Read a stored option.
      def [](key)
        @options[key]
      end

      # Write a stored option.
      def []=(key, value)
        @options[key] = value
      end

      # Get an option value with a default fallback.
      # @param key [Symbol, String] The option key
      # @param default [Object] The default value if key not found
      # @return [Object] The option value, or default if not found
      def fetch(key, default)
        @options.key?(key) ? @options[key] : default
      end
    end
  end
end
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Braintrust
  module Contrib
    # Base module defining the integration contract.
    # Include this module in integration classes to define the schema.
    # Delegates actual patching to a Patcher subclass.
    module Integration
      def self.included(base)
        base.extend(ClassMethods)
      end

      module ClassMethods
        # Unique symbol name for this integration (e.g., :openai, :anthropic).
        # @return [Symbol]
        def integration_name
          raise NotImplementedError, "#{self} must implement integration_name"
        end

        # Array of gem names this integration supports.
        # @return [Array<String>]
        def gem_names
          raise NotImplementedError, "#{self} must implement gem_names"
        end

        # Require paths for auto-instrument detection.
        # Default implementation returns gem_names.
        # @return [Array<String>]
        def require_paths
          gem_names
        end

        # Is the target library available for loading?
        # @return [Boolean]
        def available?
          gem_names.any? { |gem_name| Gem.loaded_specs.key?(gem_name) }
        end

        # Is the target library loaded?
        # @return [Boolean]
        def loaded?
          raise NotImplementedError, "#{self} must implement loaded?"
        end

        # Minimum compatible version (optional, inclusive).
        # @return [String, nil]
        def minimum_version
          nil
        end

        # Maximum compatible version (optional, inclusive).
        # @return [String, nil]
        def maximum_version
          nil
        end

        # Is the library version compatible?
        # Checks the first loaded gem spec against the optional version bounds.
        # @return [Boolean]
        def compatible?
          return false unless available?

          # The first gem from gem_names with a loaded spec decides compatibility.
          spec = gem_names.filter_map { |gem_name| Gem.loaded_specs[gem_name] }.first
          return false unless spec

          version = spec.version
          return false if minimum_version && version < Gem::Version.new(minimum_version)
          return false if maximum_version && version > Gem::Version.new(maximum_version)

          true
        end

        # Array of patcher classes for this integration.
        # Override to return multiple patchers for version-specific logic.
        # @return [Array<Class>] Array of patcher classes
        def patchers
          [patcher] # Default: single patcher
        end

        # Convenience method for single patcher (existing pattern).
        # Override this OR patchers (not both).
        # @return [Class] The patcher class
        def patcher
          raise NotImplementedError, "#{self} must implement patcher or patchers"
        end

        # Instrument this integration with optional configuration.
        # If a target is provided, configures the target instance specifically.
        # Otherwise, applies class-level instrumentation to all instances.
        #
        # @param options [Hash] Configuration options
        # @option options [Object] :target Optional target instance to instrument
        # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
        # @return [Boolean] true if patching succeeded or was already done
        #
        # @example Class-level instrumentation (all clients)
        #   integration.instrument!(tracer_provider: my_provider)
        #
        # @example Instance-level instrumentation (specific client)
        #   integration.instrument!(target: client, tracer_provider: my_provider)
        def instrument!(**options)
          suffix = options.empty? ? "" : " (#{options.keys.join(", ")})"
          Braintrust::Log.debug("#{integration_name}.instrument! called#{suffix}")

          target = options[:target]
          if target
            # Configure the target with provided options (exclude :target from context)
            context_options = options.reject { |key, _| key == :target }
            Contrib::Context.set!(target, **context_options) unless context_options.empty?
          end

          patch!(**options)
        end

        # Apply instrumentation (idempotent). Tries all applicable patchers.
        # This method is typically called by instrument! after configuration.
        #
        # @param options [Hash] Configuration options
        # @option options [Object] :target Optional target instance to patch
        # @option options [OpenTelemetry::SDK::Trace::TracerProvider] :tracer_provider Optional tracer provider
        # @return [Boolean] true if any patching succeeded or was already done
        def patch!(**options)
          skip_reason =
            if !available?
              "gem not available"
            elsif !loaded?
              "library not loaded"
            elsif !compatible?
              "version not compatible"
            end

          if skip_reason
            Braintrust::Log.debug("#{integration_name}.patch! skipped: #{skip_reason}")
            return false
          end

          # Try every applicable patcher; success if any of them patched.
          # (The patcher re-checks applicable? under its own lock.)
          results = patchers.select(&:applicable?).map { |patcher_class| patcher_class.patch!(**options) }
          success = results.any?

          Braintrust::Log.debug("#{integration_name}.patch! skipped: no applicable patcher") unless success
          success
        end

        # Register this integration with the global registry.
        def register!
          Registry.instance.register(self)
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,22 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
# Backward compatibility shim for the old OpenAI integration API.
|
|
4
|
+
# This file now just delegates to the new API.
|
|
5
|
+
|
|
6
|
+
module Braintrust
  module Trace
    module OpenAI
      # Wrap an OpenAI::Client to automatically create spans for chat completions and responses.
      # This is the legacy API - delegates to the new contrib framework.
      #
      # @deprecated Use Braintrust.instrument! instead.
      # @param client [OpenAI::Client] the OpenAI client to wrap
      # @param tracer_provider [OpenTelemetry::SDK::Trace::TracerProvider] the tracer provider (defaults to global)
      # @return [OpenAI::Client] the wrapped client
      def self.wrap(client, tracer_provider: nil)
        Log.warn("Braintrust::Trace::OpenAI.wrap() is deprecated and will be removed in a future version: use Braintrust.instrument!() instead.")
        # Delegate to the contrib framework, then hand the same client back.
        client.tap do |c|
          Braintrust.instrument!(:openai, target: c, tracer_provider: tracer_provider)
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,298 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require "opentelemetry/sdk"
|
|
4
|
+
require "json"
|
|
5
|
+
|
|
6
|
+
require_relative "common"
|
|
7
|
+
require_relative "../../../internal/time"
|
|
8
|
+
require_relative "../../support/otel"
|
|
9
|
+
require_relative "../../support/openai"
|
|
10
|
+
|
|
11
|
+
module Braintrust
  module Contrib
    module OpenAI
      module Instrumentation
        # Chat completions instrumentation for OpenAI.
        # Wraps create(), stream(), and stream_raw() methods to create spans.
        module Chat
          # Instrumentation for the chat completions resource.
          # Included modules prepend InstanceMethods so `super` reaches the SDK.
          module Completions
            # Prepend InstanceMethods onto the target (idempotent).
            def self.included(base)
              base.prepend(InstanceMethods) unless applied?(base)
            end

            # @return [Boolean] whether InstanceMethods is already in the ancestor chain
            def self.applied?(base)
              base.ancestors.include?(InstanceMethods)
            end

            # Request params copied verbatim into span metadata when present.
            METADATA_FIELDS = %i[
              model frequency_penalty logit_bias logprobs max_tokens n
              presence_penalty response_format seed service_tier stop
              stream stream_options temperature top_p top_logprobs
              tools tool_choice parallel_tool_calls user functions function_call
            ].freeze

            module InstanceMethods
              # Wrap create method for non-streaming completions.
              # Opens a span, records input/output/metrics/metadata, returns the response.
              def create(**params)
                # @client is the SDK resource's back-reference to its client;
                # used to look up a per-client tracer (instance-level config).
                client = instance_variable_get(:@client)
                tracer = Braintrust::Contrib.tracer_for(client)

                tracer.in_span("Chat Completion") do |span|
                  metadata = build_metadata(params)

                  set_input(span, params)

                  response = nil
                  # NOTE(review): for the non-streaming path this measures total
                  # request duration; the "time_to_first_token" name is inherited
                  # from the streaming path — confirm intended.
                  time_to_first_token = Braintrust::Internal::Time.measure do
                    response = super
                  end

                  set_output(span, response)
                  set_metrics(span, response, time_to_first_token)
                  finalize_metadata(span, metadata, response)

                  response
                end
              end

              # Wrap stream_raw for streaming chat completions (returns Internal::Stream)
              # Stores context on stream object for span creation during consumption
              def stream_raw(**params)
                client = instance_variable_get(:@client)
                tracer = Braintrust::Contrib.tracer_for(client)
                metadata = build_metadata(params, stream: true)

                stream_obj = super
                # Span is created lazily when the stream is consumed (see InternalStream).
                Braintrust::Contrib::Context.set!(stream_obj,
                  tracer: tracer,
                  params: params,
                  metadata: metadata,
                  completions_instance: self,
                  stream_type: :raw)
                stream_obj
              end

              # Wrap stream for streaming chat completions (returns ChatCompletionStream)
              # Stores context on stream object for span creation during consumption
              def stream(**params)
                client = instance_variable_get(:@client)
                tracer = Braintrust::Contrib.tracer_for(client)
                metadata = build_metadata(params, stream: true)

                stream_obj = super
                # Span is created lazily when the stream is consumed (see ChatCompletionStream).
                Braintrust::Contrib::Context.set!(stream_obj,
                  tracer: tracer,
                  params: params,
                  metadata: metadata,
                  completions_instance: self,
                  stream_type: :chat_completion)
                stream_obj
              end

              private

              # Build the initial span metadata hash from request params.
              # @param stream [Boolean] mark the request as streaming
              def build_metadata(params, stream: false)
                metadata = {
                  "provider" => "openai",
                  "endpoint" => "/v1/chat/completions"
                }
                metadata["stream"] = true if stream
                Completions::METADATA_FIELDS.each do |field|
                  metadata[field.to_s] = params[field] if params.key?(field)
                end
                metadata
              end

              # Record the request messages as the span's input (JSON attribute).
              def set_input(span, params)
                return unless params[:messages]

                messages_array = params[:messages].map(&:to_h)
                Support::OTel.set_json_attr(span, "braintrust.input_json", messages_array)
              end

              # Record the response choices as the span's output (JSON attribute).
              def set_output(span, response)
                return unless response.respond_to?(:choices) && response.choices&.any?

                choices_array = response.choices.map(&:to_h)
                Support::OTel.set_json_attr(span, "braintrust.output_json", choices_array)
              end

              # Record token usage plus time-to-first-token as span metrics.
              def set_metrics(span, response, time_to_first_token)
                metrics = {}
                if response.respond_to?(:usage) && response.usage
                  metrics = Support::OpenAI.parse_usage_tokens(response.usage)
                end
                metrics["time_to_first_token"] = time_to_first_token
                Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?
              end

              # Merge response identity fields into metadata and write it to the span.
              # respond_to? guards keep this safe across SDK response shapes.
              def finalize_metadata(span, metadata, response)
                metadata["id"] = response.id if response.respond_to?(:id) && response.id
                metadata["created"] = response.created if response.respond_to?(:created) && response.created
                metadata["model"] = response.model if response.respond_to?(:model) && response.model
                metadata["system_fingerprint"] = response.system_fingerprint if response.respond_to?(:system_fingerprint) && response.system_fingerprint
                metadata["service_tier"] = response.service_tier if response.respond_to?(:service_tier) && response.service_tier
                Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
              end
            end
          end

          # Instrumentation for ChatCompletionStream (returned by stream())
          # Uses current_completion_snapshot for accumulated output
          module ChatCompletionStream
            # Prepend InstanceMethods onto the target (idempotent).
            def self.included(base)
              base.prepend(InstanceMethods) unless applied?(base)
            end

            # @return [Boolean] whether InstanceMethods is already in the ancestor chain
            def self.applied?(base)
              base.ancestors.include?(InstanceMethods)
            end

            module InstanceMethods
              # Consume the stream inside a span. Short-circuits to the original
              # implementation when no tracing context is attached or the stream
              # was already consumed (consumed flag prevents double spans).
              def each(&block)
                ctx = Braintrust::Contrib::Context.from(self)
                return super unless ctx&.[](:tracer) && !ctx[:consumed]

                trace_consumption(ctx) { super(&block) }
              end

              # Traced variant of text: wraps the SDK's text enumerator so the
              # span covers lazy consumption of the returned Enumerator.
              def text
                ctx = Braintrust::Contrib::Context.from(self)
                return super unless ctx&.[](:tracer) && !ctx[:consumed]

                original_enum = super
                Enumerator.new do |y|
                  trace_consumption(ctx) do
                    original_enum.each { |t| y << t }
                  end
                end
              end

              private

              # Open the span, record input/metadata, run the consumption block,
              # then finalize with the accumulated snapshot. Marks the context
              # consumed up front so nested/duplicate consumption is not re-traced.
              def trace_consumption(ctx)
                ctx[:consumed] = true

                tracer = ctx[:tracer]
                params = ctx[:params]
                metadata = ctx[:metadata]
                completions_instance = ctx[:completions_instance]
                start_time = Braintrust::Internal::Time.measure

                tracer.in_span("Chat Completion") do |span|
                  # set_input is private on the completions resource; send is intentional.
                  completions_instance.send(:set_input, span, params)
                  Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)

                  yield

                  finalize_stream_span(span, start_time, metadata, completions_instance)
                end
              end

              # Record output/metrics/metadata from the SDK's accumulated snapshot.
              # Best-effort: snapshot access failures are logged, never raised.
              # NOTE(review): completions_instance is currently unused here.
              def finalize_stream_span(span, start_time, metadata, completions_instance)
                # NOTE(review): measured after full consumption, so this is really
                # total stream duration rather than time to the first token.
                time_to_first_token = Braintrust::Internal::Time.measure(start_time)

                begin
                  snapshot = current_completion_snapshot
                  return unless snapshot

                  # Set output from accumulated choices
                  if snapshot.choices&.any?
                    choices_array = snapshot.choices.map(&:to_h)
                    Support::OTel.set_json_attr(span, "braintrust.output_json", choices_array)
                  end

                  # Set metrics
                  metrics = {}
                  if snapshot.usage
                    metrics = Support::OpenAI.parse_usage_tokens(snapshot.usage)
                  end
                  metrics["time_to_first_token"] = time_to_first_token
                  Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?

                  # Update metadata with response fields
                  metadata["id"] = snapshot.id if snapshot.respond_to?(:id) && snapshot.id
                  metadata["created"] = snapshot.created if snapshot.respond_to?(:created) && snapshot.created
                  metadata["model"] = snapshot.model if snapshot.respond_to?(:model) && snapshot.model
                  metadata["system_fingerprint"] = snapshot.system_fingerprint if snapshot.respond_to?(:system_fingerprint) && snapshot.system_fingerprint
                  metadata["service_tier"] = snapshot.service_tier if snapshot.respond_to?(:service_tier) && snapshot.service_tier
                  Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
                rescue => e
                  Braintrust::Log.debug("Failed to get completion snapshot: #{e.message}")
                end
              end
            end
          end

          # Instrumentation for Internal::Stream (returned by stream_raw())
          # Aggregates chunks manually since Internal::Stream has no built-in accumulation
          module InternalStream
            # Prepend InstanceMethods onto the target (idempotent).
            def self.included(base)
              base.prepend(InstanceMethods) unless applied?(base)
            end

            # @return [Boolean] whether InstanceMethods is already in the ancestor chain
            def self.applied?(base)
              base.ancestors.include?(InstanceMethods)
            end

            module InstanceMethods
              # Consume the raw stream inside a span, aggregating chunks as they
              # pass through and forwarding each one to the caller's block.
              # NOTE(review): always passes a block to super, so calling each
              # without a block consumes the stream immediately instead of
              # returning an Enumerator — confirm intended.
              def each(&block)
                ctx = Braintrust::Contrib::Context.from(self)
                # Only trace if context present and is for chat completions (not other endpoints)
                return super unless ctx&.[](:tracer) && !ctx[:consumed] && ctx[:stream_type] == :raw

                ctx[:consumed] = true

                tracer = ctx[:tracer]
                params = ctx[:params]
                metadata = ctx[:metadata]
                completions_instance = ctx[:completions_instance]
                aggregated_chunks = []
                start_time = Braintrust::Internal::Time.measure
                time_to_first_token = nil

                tracer.in_span("Chat Completion") do |span|
                  # set_input is private on the completions resource; send is intentional.
                  completions_instance.send(:set_input, span, params)
                  Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)

                  super do |chunk|
                    # ||= captures the latency of the first chunk only.
                    time_to_first_token ||= Braintrust::Internal::Time.measure(start_time)
                    aggregated_chunks << chunk.to_h
                    block&.call(chunk)
                  end

                  finalize_stream_span(span, aggregated_chunks, time_to_first_token, metadata)
                end
              end

              private

              # Record output/metrics/metadata from the manually aggregated chunks.
              # No-op when nothing was consumed.
              def finalize_stream_span(span, aggregated_chunks, time_to_first_token, metadata)
                return if aggregated_chunks.empty?

                aggregated_output = Common.aggregate_streaming_chunks(aggregated_chunks)
                Support::OTel.set_json_attr(span, "braintrust.output_json", aggregated_output[:choices])

                # Set metrics
                metrics = {}
                if aggregated_output[:usage]
                  metrics = Support::OpenAI.parse_usage_tokens(aggregated_output[:usage])
                end
                metrics["time_to_first_token"] = time_to_first_token
                Support::OTel.set_json_attr(span, "braintrust.metrics", metrics) unless metrics.empty?

                # Update metadata with response fields
                metadata["id"] = aggregated_output[:id] if aggregated_output[:id]
                metadata["created"] = aggregated_output[:created] if aggregated_output[:created]
                metadata["model"] = aggregated_output[:model] if aggregated_output[:model]
                metadata["system_fingerprint"] = aggregated_output[:system_fingerprint] if aggregated_output[:system_fingerprint]
                metadata["service_tier"] = aggregated_output[:service_tier] if aggregated_output[:service_tier]
                Support::OTel.set_json_attr(span, "braintrust.metadata", metadata)
              end
            end
          end
        end
      end
    end
  end
end
|