braintrust 0.0.12 → 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. checksums.yaml +4 -4
  2. data/README.md +213 -180
  3. data/exe/braintrust +143 -0
  4. data/lib/braintrust/contrib/anthropic/deprecated.rb +24 -0
  5. data/lib/braintrust/contrib/anthropic/instrumentation/common.rb +53 -0
  6. data/lib/braintrust/contrib/anthropic/instrumentation/messages.rb +232 -0
  7. data/lib/braintrust/contrib/anthropic/integration.rb +53 -0
  8. data/lib/braintrust/contrib/anthropic/patcher.rb +62 -0
  9. data/lib/braintrust/contrib/context.rb +56 -0
  10. data/lib/braintrust/contrib/integration.rb +160 -0
  11. data/lib/braintrust/contrib/openai/deprecated.rb +22 -0
  12. data/lib/braintrust/contrib/openai/instrumentation/chat.rb +298 -0
  13. data/lib/braintrust/contrib/openai/instrumentation/common.rb +134 -0
  14. data/lib/braintrust/contrib/openai/instrumentation/responses.rb +187 -0
  15. data/lib/braintrust/contrib/openai/integration.rb +58 -0
  16. data/lib/braintrust/contrib/openai/patcher.rb +130 -0
  17. data/lib/braintrust/contrib/patcher.rb +76 -0
  18. data/lib/braintrust/contrib/rails/railtie.rb +16 -0
  19. data/lib/braintrust/contrib/registry.rb +107 -0
  20. data/lib/braintrust/contrib/ruby_llm/deprecated.rb +45 -0
  21. data/lib/braintrust/contrib/ruby_llm/instrumentation/chat.rb +464 -0
  22. data/lib/braintrust/contrib/ruby_llm/instrumentation/common.rb +58 -0
  23. data/lib/braintrust/contrib/ruby_llm/integration.rb +54 -0
  24. data/lib/braintrust/contrib/ruby_llm/patcher.rb +44 -0
  25. data/lib/braintrust/contrib/ruby_openai/deprecated.rb +24 -0
  26. data/lib/braintrust/contrib/ruby_openai/instrumentation/chat.rb +149 -0
  27. data/lib/braintrust/contrib/ruby_openai/instrumentation/common.rb +138 -0
  28. data/lib/braintrust/contrib/ruby_openai/instrumentation/responses.rb +146 -0
  29. data/lib/braintrust/contrib/ruby_openai/integration.rb +58 -0
  30. data/lib/braintrust/contrib/ruby_openai/patcher.rb +85 -0
  31. data/lib/braintrust/contrib/setup.rb +168 -0
  32. data/lib/braintrust/contrib/support/openai.rb +72 -0
  33. data/lib/braintrust/contrib/support/otel.rb +23 -0
  34. data/lib/braintrust/contrib.rb +205 -0
  35. data/lib/braintrust/internal/env.rb +33 -0
  36. data/lib/braintrust/internal/time.rb +44 -0
  37. data/lib/braintrust/setup.rb +50 -0
  38. data/lib/braintrust/state.rb +5 -0
  39. data/lib/braintrust/trace.rb +0 -51
  40. data/lib/braintrust/version.rb +1 -1
  41. data/lib/braintrust.rb +10 -1
  42. metadata +38 -7
  43. data/lib/braintrust/trace/contrib/anthropic.rb +0 -316
  44. data/lib/braintrust/trace/contrib/github.com/alexrudall/ruby-openai/ruby-openai.rb +0 -377
  45. data/lib/braintrust/trace/contrib/github.com/crmne/ruby_llm.rb +0 -631
  46. data/lib/braintrust/trace/contrib/openai.rb +0 -611
  47. data/lib/braintrust/trace/tokens.rb +0 -109
data/lib/braintrust/trace/tokens.rb (removed)
@@ -1,109 +0,0 @@
-# frozen_string_literal: true
-
-module Braintrust
-  module Trace
-    # Parse OpenAI usage tokens into normalized Braintrust metrics.
-    # Handles standard fields and *_tokens_details nested objects.
-    # @param usage [Hash, Object] usage object from OpenAI response
-    # @return [Hash<String, Integer>] normalized metrics
-    def self.parse_openai_usage_tokens(usage)
-      metrics = {}
-      return metrics unless usage
-
-      usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
-      return metrics unless usage_hash.is_a?(Hash)
-
-      # Field mappings: OpenAI → Braintrust
-      # Supports both Chat Completions API (prompt_tokens, completion_tokens)
-      # and Responses API (input_tokens, output_tokens)
-      field_map = {
-        "prompt_tokens" => "prompt_tokens",
-        "completion_tokens" => "completion_tokens",
-        "total_tokens" => "tokens",
-        # Responses API uses different field names
-        "input_tokens" => "prompt_tokens",
-        "output_tokens" => "completion_tokens"
-      }
-
-      # Prefix mappings for *_tokens_details
-      prefix_map = {
-        "prompt" => "prompt",
-        "completion" => "completion",
-        # Responses API uses input/output prefixes
-        "input" => "prompt",
-        "output" => "completion"
-      }
-
-      usage_hash.each do |key, value|
-        key_str = key.to_s
-
-        if value.is_a?(Numeric)
-          target = field_map[key_str]
-          metrics[target] = value.to_i if target
-        elsif key_str.end_with?("_tokens_details")
-          # Convert to hash if it's an object (OpenAI SDK returns objects)
-          details_hash = value.respond_to?(:to_h) ? value.to_h : value
-          next unless details_hash.is_a?(Hash)
-
-          raw_prefix = key_str.sub(/_tokens_details$/, "")
-          prefix = prefix_map[raw_prefix] || raw_prefix
-          details_hash.each do |detail_key, detail_value|
-            next unless detail_value.is_a?(Numeric)
-            metrics["#{prefix}_#{detail_key}"] = detail_value.to_i
-          end
-        end
-      end
-
-      # Calculate total if missing
-      if !metrics.key?("tokens") && metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
-        metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
-      end
-
-      metrics
-    end
-
-    # Parse Anthropic usage tokens into normalized Braintrust metrics.
-    # Accumulates cache tokens into prompt_tokens and calculates total.
-    # @param usage [Hash, Object] usage object from Anthropic response
-    # @return [Hash<String, Integer>] normalized metrics
-    def self.parse_anthropic_usage_tokens(usage)
-      metrics = {}
-      return metrics unless usage
-
-      usage_hash = usage.respond_to?(:to_h) ? usage.to_h : usage
-      return metrics unless usage_hash.is_a?(Hash)
-
-      # Field mappings: Anthropic → Braintrust
-      # Also handles RubyLLM's simplified cache field names
-      field_map = {
-        "input_tokens" => "prompt_tokens",
-        "output_tokens" => "completion_tokens",
-        "cache_read_input_tokens" => "prompt_cached_tokens",
-        "cache_creation_input_tokens" => "prompt_cache_creation_tokens",
-        # RubyLLM uses simplified names
-        "cached_tokens" => "prompt_cached_tokens",
-        "cache_creation_tokens" => "prompt_cache_creation_tokens"
-      }
-
-      usage_hash.each do |key, value|
-        next unless value.is_a?(Numeric)
-        key_str = key.to_s
-        target = field_map[key_str]
-        metrics[target] = value.to_i if target
-      end
-
-      # Accumulate cache tokens into prompt_tokens (matching TS/Python SDKs)
-      prompt_tokens = (metrics["prompt_tokens"] || 0) +
-                      (metrics["prompt_cached_tokens"] || 0) +
-                      (metrics["prompt_cache_creation_tokens"] || 0)
-      metrics["prompt_tokens"] = prompt_tokens if prompt_tokens > 0
-
-      # Calculate total
-      if metrics.key?("prompt_tokens") && metrics.key?("completion_tokens")
-        metrics["tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
-      end
-
-      metrics
-    end
-  end
-end
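
For reference, a minimal sketch of what the removed helpers returned on 0.0.12; the sample usage hashes and expected results below are illustrative, derived from the deleted code above, and assume the gem's top-level require loads Braintrust::Trace on that version. These module methods no longer exist in 0.1.0.

  # Illustrative only: Braintrust::Trace token helpers from 0.0.12, removed in 0.1.0.
  require "braintrust"

  chat_usage = {
    "prompt_tokens" => 10,
    "completion_tokens" => 5,
    "prompt_tokens_details" => { "cached_tokens" => 2 }
  }
  Braintrust::Trace.parse_openai_usage_tokens(chat_usage)
  # => { "prompt_tokens" => 10, "completion_tokens" => 5,
  #      "prompt_cached_tokens" => 2, "tokens" => 15 }

  anthropic_usage = {
    "input_tokens" => 100,
    "output_tokens" => 20,
    "cache_read_input_tokens" => 30
  }
  Braintrust::Trace.parse_anthropic_usage_tokens(anthropic_usage)
  # Cache-read tokens are folded into prompt_tokens before totaling:
  # => { "prompt_tokens" => 130, "completion_tokens" => 20,
  #      "prompt_cached_tokens" => 30, "tokens" => 150 }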