braintrust 0.0.4 → 0.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +31 -1
- data/lib/braintrust/state.rb +21 -3
- data/lib/braintrust/trace/contrib/anthropic.rb +85 -208
- data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +135 -0
- data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +447 -0
- data/lib/braintrust/trace/contrib/openai.rb +121 -68
- data/lib/braintrust/trace/tokens.rb +101 -0
- data/lib/braintrust/trace.rb +38 -3
- data/lib/braintrust/version.rb +1 -1
- metadata +6 -3
|
@@ -0,0 +1,101 @@
|
|
|
1
|
+
# frozen_string_literal: true

module Braintrust
  module Trace
    # Normalize an OpenAI usage payload into Braintrust metric names.
    # Recognizes the standard top-level counters as well as nested
    # *_tokens_details objects (e.g. prompt_tokens_details.cached_tokens).
    # @param usage [Hash, Object] usage object from an OpenAI response
    # @return [Hash<String, Integer>] normalized metrics
    def self.parse_openai_usage_tokens(usage)
      out = {}
      return out unless usage

      # The official SDK hands back response objects; coerce to a Hash first.
      data = usage.respond_to?(:to_h) ? usage.to_h : usage
      return out unless data.is_a?(Hash)

      # OpenAI field name → Braintrust metric name
      renames = {
        "prompt_tokens" => "prompt_tokens",
        "completion_tokens" => "completion_tokens",
        "total_tokens" => "tokens"
      }

      # Prefix normalization for "<prefix>_tokens_details" keys
      prefixes = {
        "prompt" => "prompt",
        "completion" => "completion"
      }

      data.each do |raw_key, raw_value|
        name = raw_key.to_s

        if raw_value.is_a?(Numeric)
          mapped = renames[name]
          out[mapped] = raw_value.to_i unless mapped.nil?
        elsif name.end_with?("_tokens_details")
          # Detail sub-objects may also be SDK objects rather than Hashes.
          details = raw_value.respond_to?(:to_h) ? raw_value.to_h : raw_value
          next unless details.is_a?(Hash)

          base = name.sub(/_tokens_details$/, "")
          base = prefixes.fetch(base, base)
          details.each do |detail_key, detail_value|
            next unless detail_value.is_a?(Numeric)
            out["#{base}_#{detail_key}"] = detail_value.to_i
          end
        end
      end

      # Derive the grand total when the provider omitted it.
      if !out.key?("tokens") && out.key?("prompt_tokens") && out.key?("completion_tokens")
        out["tokens"] = out["prompt_tokens"] + out["completion_tokens"]
      end

      out
    end

    # Normalize an Anthropic usage payload into Braintrust metric names.
    # Cache read/creation tokens are folded into prompt_tokens (matching
    # the TS/Python SDKs) and a grand total is derived when possible.
    # @param usage [Hash, Object] usage object from an Anthropic response
    # @return [Hash<String, Integer>] normalized metrics
    def self.parse_anthropic_usage_tokens(usage)
      out = {}
      return out unless usage

      data = usage.respond_to?(:to_h) ? usage.to_h : usage
      return out unless data.is_a?(Hash)

      # Anthropic field name → Braintrust metric name.
      # RubyLLM reports the cache counters under simplified names, so both
      # spellings map to the same targets.
      renames = {
        "input_tokens" => "prompt_tokens",
        "output_tokens" => "completion_tokens",
        "cache_read_input_tokens" => "prompt_cached_tokens",
        "cache_creation_input_tokens" => "prompt_cache_creation_tokens",
        "cached_tokens" => "prompt_cached_tokens",
        "cache_creation_tokens" => "prompt_cache_creation_tokens"
      }

      data.each do |raw_key, raw_value|
        next unless raw_value.is_a?(Numeric)
        mapped = renames[raw_key.to_s]
        out[mapped] = raw_value.to_i if mapped
      end

      # Fold cache tokens into prompt_tokens (matching TS/Python SDKs).
      combined = %w[prompt_tokens prompt_cached_tokens prompt_cache_creation_tokens]
                 .sum { |field| out[field] || 0 }
      out["prompt_tokens"] = combined if combined.positive?

      # Derive the grand total when both sides are present.
      if out.key?("prompt_tokens") && out.key?("completion_tokens")
        out["tokens"] = out["prompt_tokens"] + out["completion_tokens"]
      end

      out
    end
  end
end
|
data/lib/braintrust/trace.rb
CHANGED
|
@@ -6,12 +6,30 @@ require_relative "trace/span_processor"
|
|
|
6
6
|
require_relative "trace/span_filter"
|
|
7
7
|
require_relative "logger"
|
|
8
8
|
|
|
9
|
-
# OpenAI
|
|
9
|
+
# OpenAI integrations - both ruby-openai and openai gems use require "openai"
|
|
10
|
+
# so we detect which one actually loaded the code and require the appropriate integration
|
|
10
11
|
begin
|
|
11
12
|
require "openai"
|
|
12
|
-
|
|
13
|
+
|
|
14
|
+
# Check which OpenAI gem's code is actually loaded by inspecting $LOADED_FEATURES
|
|
15
|
+
# (both gems can be in Gem.loaded_specs, but only one's code can be loaded)
|
|
16
|
+
openai_load_path = $LOADED_FEATURES.find { |f| f.end_with?("/openai.rb") }
|
|
17
|
+
|
|
18
|
+
if openai_load_path&.include?("ruby-openai")
|
|
19
|
+
# alexrudall/ruby-openai gem (path contains "ruby-openai-X.Y.Z")
|
|
20
|
+
require_relative "trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai"
|
|
21
|
+
elsif openai_load_path&.include?("/openai-")
|
|
22
|
+
# Official openai gem (path contains "openai-X.Y.Z")
|
|
23
|
+
require_relative "trace/contrib/openai"
|
|
24
|
+
elsif Gem.loaded_specs["ruby-openai"]
|
|
25
|
+
# Fallback: ruby-openai in loaded_specs (for unusual installation paths)
|
|
26
|
+
require_relative "trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai"
|
|
27
|
+
elsif Gem.loaded_specs["openai"]
|
|
28
|
+
# Fallback: official openai in loaded_specs (for unusual installation paths)
|
|
29
|
+
require_relative "trace/contrib/openai"
|
|
30
|
+
end
|
|
13
31
|
rescue LoadError
|
|
14
|
-
# OpenAI gem
|
|
32
|
+
# No OpenAI gem installed - integration will not be available
|
|
15
33
|
end
|
|
16
34
|
|
|
17
35
|
# Anthropic integration is optional - automatically loaded if anthropic gem is available
|
|
@@ -22,6 +40,23 @@ rescue LoadError
|
|
|
22
40
|
# Anthropic gem not installed - integration will not be available
|
|
23
41
|
end
|
|
24
42
|
|
|
43
|
+
# RubyLLM integration is optional - automatically loaded if ruby_llm gem is available
|
|
44
|
+
#
|
|
45
|
+
# Usage:
|
|
46
|
+
# # Wrap the class once (affects all instances):
|
|
47
|
+
# Braintrust::Trace::RubyLLM.wrap
|
|
48
|
+
#
|
|
49
|
+
# # Or wrap a specific instance:
|
|
50
|
+
# chat = RubyLLM.chat(model: "gpt-4o-mini")
|
|
51
|
+
# Braintrust::Trace::RubyLLM.wrap(chat)
|
|
52
|
+
#
|
|
53
|
+
begin
|
|
54
|
+
require "ruby_llm"
|
|
55
|
+
require_relative "trace/contrib/github.com/crmne/ruby_llm"
|
|
56
|
+
rescue LoadError
|
|
57
|
+
# RubyLLM gem not installed - integration will not be available
|
|
58
|
+
end
|
|
59
|
+
|
|
25
60
|
module Braintrust
|
|
26
61
|
module Trace
|
|
27
62
|
# Set up OpenTelemetry tracing with Braintrust
|
data/lib/braintrust/version.rb
CHANGED
metadata
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
|
2
2
|
name: braintrust
|
|
3
3
|
version: !ruby/object:Gem::Version
|
|
4
|
-
version: 0.0.4
|
|
4
|
+
version: 0.0.6
|
|
5
5
|
platform: ruby
|
|
6
6
|
authors:
|
|
7
7
|
- Braintrust
|
|
@@ -15,14 +15,14 @@ dependencies:
|
|
|
15
15
|
requirements:
|
|
16
16
|
- - "~>"
|
|
17
17
|
- !ruby/object:Gem::Version
|
|
18
|
-
version: '1.
|
|
18
|
+
version: '1.3'
|
|
19
19
|
type: :runtime
|
|
20
20
|
prerelease: false
|
|
21
21
|
version_requirements: !ruby/object:Gem::Requirement
|
|
22
22
|
requirements:
|
|
23
23
|
- - "~>"
|
|
24
24
|
- !ruby/object:Gem::Version
|
|
25
|
-
version: '1.
|
|
25
|
+
version: '1.3'
|
|
26
26
|
- !ruby/object:Gem::Dependency
|
|
27
27
|
name: opentelemetry-exporter-otlp
|
|
28
28
|
requirement: !ruby/object:Gem::Requirement
|
|
@@ -204,9 +204,12 @@ files:
|
|
|
204
204
|
- lib/braintrust/trace.rb
|
|
205
205
|
- lib/braintrust/trace/attachment.rb
|
|
206
206
|
- lib/braintrust/trace/contrib/anthropic.rb
|
|
207
|
+
- lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb
|
|
208
|
+
- lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb
|
|
207
209
|
- lib/braintrust/trace/contrib/openai.rb
|
|
208
210
|
- lib/braintrust/trace/span_filter.rb
|
|
209
211
|
- lib/braintrust/trace/span_processor.rb
|
|
212
|
+
- lib/braintrust/trace/tokens.rb
|
|
210
213
|
- lib/braintrust/version.rb
|
|
211
214
|
homepage: https://github.com/braintrustdata/braintrust-sdk-ruby
|
|
212
215
|
licenses:
|