bloop-sdk 0.2.0 → 0.3.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
-   metadata.gz: 1764f66fe2c239ea3e9d45a71ac8f8102a9156d15c3059c166bac8a21e94c1cf
-   data.tar.gz: 69f5b714a508254bf6e63523edbdfb639f0bf771bb983b3ab11015f9f96729b8
+   metadata.gz: b3b81ab4b7b39d4506aea43446dcbe3edd0dc5a0e6a48cfcfc43f7099d431de8
+   data.tar.gz: 97a70ca0526d687aee37a1521adf3c9c4b219e861d1bd2d77dd3b120c8ef332a
  SHA512:
-   metadata.gz: 7d94fbd7aa25b6d80b40d6b9caa611af131c82343482d7d2cab19293537a29482863b6bdb2a0c9c6ed4930570d7e599b5e1fe7ac381a544b783af1753b353647
-   data.tar.gz: e3937e140ffd35c5d7522728a29c7850663c65dcdac91084dcc54f074a4883ed73aa3533567d72df2fb6553a6d97ee1b1388f04be5cb3d8a8a99e6a6b1ed5889
+   metadata.gz: 7c30b8bd8af65b7e6863a6b00e45e101930a44deaf7b5de5828dd7ae963f150ed6886bab600e3eb229be08633a053004621b52df45db1974701dc099d6e4ddf2
+   data.tar.gz: 1d1dabab850bb3302a23ffd05eb6c519a6d400fc00c9b4b15c0eb635a933bb38a5fae1c7e2b5354f9391bdfe8b3880e83acc6c05778f29bcc13a2f7a1cd46c0b
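
The SHA256/SHA512 entries let consumers verify a downloaded artifact before installing. A minimal verification sketch in Ruby, assuming metadata.gz has been extracted from the fetched .gem archive into the working directory:

    require "digest"

    # Compare the file's SHA256 digest against the published 0.3.0 checksum.
    expected = "b3b81ab4b7b39d4506aea43446dcbe3edd0dc5a0e6a48cfcfc43f7099d431de8"
    actual = Digest::SHA256.file("metadata.gz").hexdigest
    abort "checksum mismatch for metadata.gz" unless actual == expected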
data/lib/bloop/client.rb ADDED
@@ -0,0 +1,362 @@
+ # frozen_string_literal: true
+
+ # Bloop error reporting and LLM tracing client for Ruby.
+ #
+ # Zero external runtime dependencies -- uses only Ruby stdlib.
+
+ require "json"
+ require "net/http"
+ require "openssl"
+ require "securerandom"
+ require "uri"
+
+ module Bloop
+   # Captures and sends error events and LLM traces to a bloop server.
+   #
+   # Usage:
+   # client = Bloop::Client.new(endpoint: "https://bloop.example.com", project_key: "your-key")
+   # client.capture(error_type: "TypeError", message: "something went wrong")
+   # client.close
+   #
+   class Client
+     attr_reader :endpoint, :project_key, :environment, :release
+
+     def initialize(
+       endpoint:,
+       project_key:,
+       flush_interval: 5.0,
+       max_buffer_size: 100,
+       environment: "production",
+       release: ""
+     )
+       @endpoint = endpoint.chomp("/")
+       @project_key = project_key
+       @flush_interval = flush_interval
+       @max_buffer_size = max_buffer_size
+       @environment = environment
+       @release = release
+
+       @buffer = []
+       @trace_buffer = []
+       @lock = Mutex.new
+       @closed = false
+
+       _schedule_flush
+     end
+
+     # -- Error Tracking --
+
+     def capture(
+       error_type:,
+       message:,
+       source: "ruby",
+       stack: "",
+       route_or_procedure: "",
+       screen: "",
+       metadata: nil
+     )
+       event = {
+         "timestamp" => Time.now.to_i,
+         "source" => source,
+         "environment" => @environment,
+         "release" => @release,
+         "error_type" => error_type,
+         "message" => message,
+         "stack" => stack,
+         "route_or_procedure" => route_or_procedure,
+         "screen" => screen,
+         "metadata" => metadata || {},
+       }
+
+       @lock.synchronize do
+         @buffer << event
+         _flush_locked if @buffer.size >= @max_buffer_size
+       end
+     end
+
+     # -- LLM Tracing --
+
+     # Create a new LLM trace.
+     #
+     # trace = client.trace(name: "chat")
+     # span = trace.span(model: "gpt-4o")
+     # span.set_tokens(100, 50)
+     # span.end
+     # trace.end
+     #
+     def trace(name: "", trace_id: nil, session_id: nil, user_id: nil, prompt_name: nil, prompt_version: nil)
+       Trace.new(
+         client: self,
+         name: name,
+         trace_id: trace_id || SecureRandom.uuid.delete("-"),
+         session_id: session_id,
+         user_id: user_id,
+         prompt_name: prompt_name,
+         prompt_version: prompt_version,
+       )
+     end
+
+     # Buffer a completed trace for batch sending.
+     def _send_trace(trace_data)
+       @lock.synchronize do
+         @trace_buffer << trace_data
+         _flush_traces_locked if @trace_buffer.size >= 10
+       end
+     end
+
+     # -- Auto-Instrumentation --
+
+     # Wrap an OpenAI-compatible client for automatic LLM tracing.
+     #
+     # Works with the ruby-openai gem. Returns a wrapped client that
+     # auto-traces all chat() calls.
+     #
+     # require "openai"
+     # openai = client.wrap_openai(OpenAI::Client.new(access_token: "..."))
+     # response = openai.chat(parameters: { model: "gpt-4o", messages: [...] })
+     #
+     def wrap_openai(openai_client)
+       Integrations::OpenAI.wrap(openai_client, self)
+     end
+
+     # Wrap an Anthropic client for automatic LLM tracing.
+     #
+     # Works with the anthropic gem. Returns a wrapped client that
+     # auto-traces all messages() calls.
+     #
+     # require "anthropic"
+     # anthropic = client.wrap_anthropic(Anthropic::Client.new(api_key: "..."))
+     # response = anthropic.messages(parameters: { model: "claude-3-5-sonnet", messages: [...] })
+     #
+     def wrap_anthropic(anthropic_client)
+       Integrations::Anthropic.wrap(anthropic_client, self)
+     end
+
+     # -- Flush & Transport --
+
+     # Send all buffered events and traces immediately.
+     def flush
+       @lock.synchronize do
+         _flush_locked
+         _flush_traces_locked
+       end
+     end
+
+     # Flush remaining events and stop the background timer.
+     def close
+       @closed = true
+       @timer&.kill
+       flush
+     end
+
+     private
+
+     def _flush_locked
+       return if @buffer.empty?
+
+       events = @buffer.dup
+       @buffer.clear
+       _send_events(events)
+     end
+
+     def _flush_traces_locked
+       return if @trace_buffer.empty?
+
+       traces = @trace_buffer.dup
+       @trace_buffer.clear
+       Thread.new { _send_traces_batch(traces) }
+     end
+
+     def _send_events(events)
+       if events.size == 1
+         _post("/v1/ingest", events[0])
+       else
+         _post("/v1/ingest/batch", { "events" => events })
+       end
+     end
+
+     def _send_traces_batch(traces)
+       _post("/v1/traces/batch", { "traces" => traces })
+     rescue StandardError
+       # Fire-and-forget for trace telemetry
+     end
+
+     def _post(path, payload)
+       body = JSON.generate(payload)
+       sig = _sign(body)
+       uri = URI("#{@endpoint}#{path}")
+
+       http = Net::HTTP.new(uri.host, uri.port)
+       http.use_ssl = (uri.scheme == "https")
+       http.open_timeout = 5
+       http.read_timeout = 5
+
+       request = Net::HTTP::Post.new(uri.path)
+       request["Content-Type"] = "application/json"
+       request["X-Signature"] = sig
+       request.body = body
+
+       http.request(request)
+     rescue StandardError
+       # Silently drop on failure (never crash the app)
+     end
+
+     def _sign(body)
+       OpenSSL::HMAC.hexdigest("SHA256", @project_key, body)
+     end
+
+     def _schedule_flush
+       return if @closed
+
+       @timer = Thread.new do
+         loop do
+           sleep @flush_interval
+           break if @closed
+
+           flush
+         end
+       end
+       @timer.abort_on_exception = false
+     end
+   end
+
+   # Represents an LLM trace with spans.
+   class Trace
+     attr_accessor :id, :name, :status, :session_id, :user_id,
+                   :input, :output, :metadata, :prompt_name, :prompt_version
+
+     def initialize(client:, name:, trace_id:, session_id: nil, user_id: nil, prompt_name: nil, prompt_version: nil)
+       @client = client
+       @id = trace_id
+       @name = name
+       @session_id = session_id
+       @user_id = user_id
+       @prompt_name = prompt_name
+       @prompt_version = prompt_version
+       @status = "completed"
+       @input = nil
+       @output = nil
+       @metadata = nil
+       @spans = []
+       @started_at = (Time.now.to_f * 1000).to_i
+       @ended_at = nil
+     end
+
+     # Create a new span within this trace.
+     def span(span_type: "generation", name: "", model: nil, provider: nil, parent_span_id: nil)
+       Span.new(
+         trace: self,
+         span_type: span_type,
+         name: name,
+         model: model,
+         provider: provider,
+         parent_span_id: parent_span_id,
+       )
+     end
+
+     # Add serialized span data to this trace (internal).
+     def _add_span(span_data)
+       @spans << span_data
+     end
+
+     # End the trace and send it to bloop.
+     def end
+       @ended_at = (Time.now.to_f * 1000).to_i
+
+       trace_data = {
+         "id" => @id,
+         "name" => @name,
+         "status" => @status,
+         "started_at" => @started_at,
+         "ended_at" => @ended_at,
+         "spans" => @spans,
+       }
+       trace_data["session_id"] = @session_id if @session_id
+       trace_data["user_id"] = @user_id if @user_id
+       trace_data["input"] = @input unless @input.nil?
+       trace_data["output"] = @output unless @output.nil?
+       trace_data["metadata"] = @metadata unless @metadata.nil?
+       trace_data["prompt_name"] = @prompt_name unless @prompt_name.nil?
+       trace_data["prompt_version"] = @prompt_version unless @prompt_version.nil?
+
+       @client._send_trace(trace_data)
+     end
+   end
+
+   # Represents a single LLM call span.
+   class Span
+     attr_accessor :id, :span_type, :name, :model, :provider, :parent_span_id,
+                   :input_tokens, :output_tokens, :cost, :latency_ms,
+                   :time_to_first_token_ms, :status, :error_message,
+                   :input, :output, :metadata
+
+     def initialize(trace:, span_type: "generation", name: "", model: nil, provider: nil, parent_span_id: nil)
+       @trace = trace
+       @id = SecureRandom.uuid.delete("-")
+       @span_type = span_type
+       @name = name
+       @model = model
+       @provider = provider
+       @parent_span_id = parent_span_id
+       @input_tokens = 0
+       @output_tokens = 0
+       @cost = 0.0
+       @latency_ms = 0
+       @time_to_first_token_ms = nil
+       @status = "ok"
+       @error_message = nil
+       @input = nil
+       @output = nil
+       @metadata = nil
+       @started_at = (Time.now.to_f * 1000).to_i
+     end
+
+     def set_tokens(input_tokens = 0, output_tokens = 0)
+       @input_tokens = input_tokens
+       @output_tokens = output_tokens
+     end
+
+     def set_cost(cost)
+       @cost = cost
+     end
+
+     def set_latency(latency_ms, time_to_first_token_ms = nil)
+       @latency_ms = latency_ms
+       @time_to_first_token_ms = time_to_first_token_ms
+     end
+
+     def set_error(message)
+       @status = "error"
+       @error_message = message
+     end
+
+     # End the span and add it to the parent trace.
+     def end
+       ended_at = (Time.now.to_f * 1000).to_i
+       @latency_ms = ended_at - @started_at if @latency_ms == 0
+
+       span_data = {
+         "id" => @id,
+         "span_type" => @span_type,
+         "name" => @name,
+         "input_tokens" => @input_tokens,
+         "output_tokens" => @output_tokens,
+         "cost" => @cost,
+         "latency_ms" => @latency_ms,
+         "status" => @status,
+         "started_at" => @started_at,
+         "ended_at" => ended_at,
+       }
+       span_data["model"] = @model if @model
+       span_data["provider"] = @provider if @provider
+       span_data["parent_span_id"] = @parent_span_id if @parent_span_id
+       span_data["time_to_first_token_ms"] = @time_to_first_token_ms unless @time_to_first_token_ms.nil?
+       span_data["error_message"] = @error_message if @error_message
+       span_data["input"] = @input unless @input.nil?
+       span_data["output"] = @output unless @output.nil?
+       span_data["metadata"] = @metadata unless @metadata.nil?
+
+       @trace._add_span(span_data)
+     end
+   end
+ end
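
Taken together, the new client.rb surface looks like this in practice; a usage sketch assembled from the doc comments above (the endpoint and key are placeholders):

    require "bloop"

    client = Bloop::Client.new(
      endpoint: "https://bloop.example.com",  # placeholder
      project_key: "your-key",                # placeholder
      environment: "staging",
    )

    # Error events buffer in memory and flush every 5s or at 100 events.
    client.capture(error_type: "TypeError", message: "something went wrong")

    # LLM tracing: spans serialize into the trace; trace.end enqueues it.
    trace = client.trace(name: "chat")
    span = trace.span(model: "gpt-4o", provider: "openai")
    span.set_tokens(100, 50)
    span.end
    trace.end

    client.close  # stops the timer and flushes whatever remains

Note that unlike 0.2.0, this version installs no at_exit hook, so buffered events are dropped unless close (or flush) is called before the process exits.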
data/lib/bloop/integrations/anthropic.rb ADDED
@@ -0,0 +1,185 @@
+ # frozen_string_literal: true
+
+ # Anthropic auto-instrumentation for bloop LLM tracing.
+ #
+ # Wraps the Anthropic Ruby client's client.messages(parameters: { ... }) method
+ # to automatically capture: model, tokens, latency, TTFT (streaming), errors.
+ # Cost is always 0 -- calculated server-side from pricing table.
+
+ require "uri"
+
+ module Bloop
+   module Integrations
+     module Anthropic
+       # Detect provider from the client's base URL string.
+       def self.detect_provider(base_url)
+         url = base_url.to_s
+         return "anthropic" if url.include?("anthropic.com")
+
+         host = URI.parse(url).host || "anthropic"
+         host.split(".").first
+       rescue StandardError
+         "anthropic"
+       end
+
+       # Wrap an Anthropic client for automatic LLM tracing.
+       #
+       # The anthropic gem exposes:
+       # client.messages(parameters: { model: "claude-3-5-sonnet", messages: [...] })
+       # client.messages(parameters: { model: "...", messages: [...], stream: proc { |event| } })
+       #
+       # Returns the wrapped client (patched in-place).
+       def self.wrap(anthropic_client, bloop_client)
+         base_url = if anthropic_client.respond_to?(:base_url)
+           anthropic_client.base_url
+         else
+           "https://api.anthropic.com"
+         end
+         provider = detect_provider(base_url)
+
+         original_messages = anthropic_client.method(:messages)
+
+         anthropic_client.define_singleton_method(:messages) do |parameters: {}|
+           model = parameters[:model] || parameters["model"] || "unknown"
+           stream_proc = parameters[:stream] || parameters["stream"]
+           start_ms = (Time.now.to_f * 1000).to_i
+
+           trace = bloop_client.trace(name: "#{provider}/#{model}")
+           span = trace.span(
+             span_type: "generation",
+             name: "messages.create",
+             model: model,
+             provider: provider,
+           )
+
+           if stream_proc
+             # Streaming mode: wrap the user's stream proc
+             first_token_seen = false
+             input_tokens = 0
+             output_tokens = 0
+             actual_model = model
+
+             wrapped_stream = proc do |event|
+               event_type = event.is_a?(Hash) ? (event["type"] || event[:type]) : (event.respond_to?(:type) ? event.type : nil)
+
+               # Track TTFT on first content delta
+               if !first_token_seen && event_type == "content_block_delta"
+                 first_token_seen = true
+                 ttft = (Time.now.to_f * 1000).to_i - start_ms
+                 span.time_to_first_token_ms = ttft
+               end
+
+               # Track usage from message_start
+               if event_type == "message_start"
+                 msg = event.is_a?(Hash) ? (event["message"] || event[:message]) : (event.respond_to?(:message) ? event.message : nil)
+                 if msg
+                   msg_model = msg.is_a?(Hash) ? (msg["model"] || msg[:model]) : (msg.respond_to?(:model) ? msg.model : nil)
+                   actual_model = msg_model if msg_model
+                   usage = msg.is_a?(Hash) ? (msg["usage"] || msg[:usage]) : (msg.respond_to?(:usage) ? msg.usage : nil)
+                   if usage
+                     input_tokens = if usage.is_a?(Hash)
+                       usage["input_tokens"] || usage[:input_tokens] || 0
+                     else
+                       usage.respond_to?(:input_tokens) ? (usage.input_tokens || 0) : 0
+                     end
+                   end
+                 end
+               end
+
+               # Track output tokens from message_delta
+               if event_type == "message_delta"
+                 usage = event.is_a?(Hash) ? (event["usage"] || event[:usage]) : (event.respond_to?(:usage) ? event.usage : nil)
+                 if usage
+                   output_tokens = if usage.is_a?(Hash)
+                     usage["output_tokens"] || usage[:output_tokens] || 0
+                   else
+                     usage.respond_to?(:output_tokens) ? (usage.output_tokens || 0) : 0
+                   end
+                 end
+               end
+
+               # Forward to user's proc
+               stream_proc.call(event)
+             end
+
+             modified_params = parameters.dup
+             modified_params[:stream] = wrapped_stream
+             modified_params.delete("stream")
+
+             begin
+               result = original_messages.call(parameters: modified_params)
+               end_ms = (Time.now.to_f * 1000).to_i
+               span.set_tokens(input_tokens, output_tokens)
+               span.set_latency(end_ms - start_ms)
+               span.cost = 0.0
+               span.model = actual_model
+               span.end
+               trace.end
+               result
+             rescue StandardError => e
+               end_ms = (Time.now.to_f * 1000).to_i
+               span.set_tokens(input_tokens, output_tokens)
+               span.set_latency(end_ms - start_ms)
+               span.set_error(e.message)
+               span.end
+               trace.status = "error"
+               trace.end
+               raise
+             end
+           else
+             # Non-streaming mode
+             begin
+               response = original_messages.call(parameters: parameters)
+               end_ms = (Time.now.to_f * 1000).to_i
+
+               # Extract usage
+               usage = if response.respond_to?(:usage)
+                 response.usage
+               elsif response.is_a?(Hash)
+                 response["usage"] || response[:usage]
+               end
+
+               if usage
+                 if usage.is_a?(Hash)
+                   span.set_tokens(
+                     usage["input_tokens"] || usage[:input_tokens] || 0,
+                     usage["output_tokens"] || usage[:output_tokens] || 0,
+                   )
+                 else
+                   span.set_tokens(
+                     usage.respond_to?(:input_tokens) ? (usage.input_tokens || 0) : 0,
+                     usage.respond_to?(:output_tokens) ? (usage.output_tokens || 0) : 0,
+                   )
+                 end
+               end
+
+               resp_model = if response.respond_to?(:model)
+                 response.model
+               elsif response.is_a?(Hash)
+                 response["model"] || response[:model]
+               end
+
+               span.set_latency(end_ms - start_ms)
+               span.cost = 0.0
+               span.model = resp_model || model
+               span.end
+               trace.end
+               response
+
+             rescue StandardError => e
+               end_ms = (Time.now.to_f * 1000).to_i
+               span.set_latency(end_ms - start_ms)
+               span.set_error(e.message)
+               span.end
+               trace.status = "error"
+               trace.end
+               raise
+             end
+           end
+         end
+
+         anthropic_client
+       end
+     end
+   end
+ end
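
A streaming usage sketch for this wrapper, following the parameter shape documented in the comments above (the model name and the handle method are illustrative):

    anthropic = client.wrap_anthropic(Anthropic::Client.new(api_key: "..."))

    # The wrapper records TTFT on the first content_block_delta, reads
    # input_tokens from message_start and output_tokens from message_delta,
    # then forwards every event to the original proc unchanged.
    anthropic.messages(
      parameters: {
        model: "claude-3-5-sonnet",
        max_tokens: 1024,
        messages: [{ role: "user", content: "Hello" }],
        stream: proc { |event| handle(event) },  # your original handler
      }
    )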
data/lib/bloop/integrations/openai.rb ADDED
@@ -0,0 +1,174 @@
+ # frozen_string_literal: true
+
+ # OpenAI auto-instrumentation for bloop LLM tracing.
+ #
+ # Wraps the ruby-openai gem's client.chat(parameters: { ... }) method
+ # to automatically capture: model, tokens, latency, TTFT (streaming), errors.
+ # Cost is always 0 -- calculated server-side from pricing table.
+
+ require "uri"
+
+ module Bloop
+   module Integrations
+     module OpenAI
+       # Provider auto-detection from base URL
+       PROVIDER_MAP = {
+         "api.openai.com" => "openai",
+         "api.minimax.io" => "minimax",
+         "api.moonshot.ai" => "kimi",
+         "generativelanguage.googleapis.com" => "google",
+       }.freeze
+
+       # Detect provider from the client's base URL string.
+       def self.detect_provider(uri_base)
+         base_url = uri_base.to_s
+         PROVIDER_MAP.each do |domain, provider|
+           return provider if base_url.include?(domain)
+         end
+         # Fallback: extract hostname
+         host = URI.parse(base_url).host || "openai"
+         host.split(".").first
+       rescue StandardError
+         "openai"
+       end
+
+       # Wrap an OpenAI-compatible client for automatic LLM tracing.
+       #
+       # The ruby-openai gem exposes:
+       # client.chat(parameters: { model: "gpt-4o", messages: [...] })
+       # client.chat(parameters: { model: "gpt-4o", messages: [...], stream: proc { |chunk| } })
+       #
+       # Returns the wrapped client (patched in-place).
+       def self.wrap(openai_client, bloop_client)
+         provider = detect_provider(openai_client.respond_to?(:uri_base) ? openai_client.uri_base : "https://api.openai.com/")
+
+         original_chat = openai_client.method(:chat)
+
+         openai_client.define_singleton_method(:chat) do |parameters: {}|
+           model = parameters[:model] || parameters["model"] || "unknown"
+           stream_proc = parameters[:stream] || parameters["stream"]
+           start_ms = (Time.now.to_f * 1000).to_i
+
+           trace = bloop_client.trace(name: "#{provider}/#{model}")
+           span = trace.span(
+             span_type: "generation",
+             name: "chat.completions.create",
+             model: model,
+             provider: provider,
+           )
+
+           if stream_proc
+             # Streaming mode: wrap the user's stream proc
+             first_token_seen = false
+             input_tokens = 0
+             output_tokens = 0
+             actual_model = model
+
+             wrapped_stream = proc do |chunk|
+               unless first_token_seen
+                 first_token_seen = true
+                 ttft = (Time.now.to_f * 1000).to_i - start_ms
+                 span.time_to_first_token_ms = ttft
+               end
+
+               # Track model from chunk
+               chunk_model = chunk.is_a?(Hash) ? (chunk["model"] || chunk[:model]) : (chunk.respond_to?(:model) ? chunk.model : nil)
+               actual_model = chunk_model if chunk_model
+
+               # Track usage from final chunk
+               usage = chunk.is_a?(Hash) ? (chunk["usage"] || chunk[:usage]) : (chunk.respond_to?(:usage) ? chunk.usage : nil)
+               if usage
+                 if usage.is_a?(Hash)
+                   input_tokens = usage["prompt_tokens"] || usage[:prompt_tokens] || 0
+                   output_tokens = usage["completion_tokens"] || usage[:completion_tokens] || 0
+                 else
+                   input_tokens = usage.respond_to?(:prompt_tokens) ? (usage.prompt_tokens || 0) : 0
+                   output_tokens = usage.respond_to?(:completion_tokens) ? (usage.completion_tokens || 0) : 0
+                 end
+               end
+
+               # Forward to user's proc
+               stream_proc.call(chunk)
+             end
+
+             modified_params = parameters.dup
+             modified_params[:stream] = wrapped_stream
+             modified_params.delete("stream")
+
+             begin
+               result = original_chat.call(parameters: modified_params)
+               end_ms = (Time.now.to_f * 1000).to_i
+               span.set_tokens(input_tokens, output_tokens)
+               span.set_latency(end_ms - start_ms)
+               span.cost = 0.0
+               span.model = actual_model
+               span.end
+               trace.end
+               result
+             rescue StandardError => e
+               end_ms = (Time.now.to_f * 1000).to_i
+               span.set_tokens(input_tokens, output_tokens)
+               span.set_latency(end_ms - start_ms)
+               span.set_error(e.message)
+               span.end
+               trace.status = "error"
+               trace.end
+               raise
+             end
+           else
+             # Non-streaming mode
+             begin
+               response = original_chat.call(parameters: parameters)
+               end_ms = (Time.now.to_f * 1000).to_i
+
+               # Extract usage
+               usage = if response.respond_to?(:usage)
+                 response.usage
+               elsif response.is_a?(Hash)
+                 response["usage"] || response[:usage]
+               end
+
+               if usage
+                 if usage.is_a?(Hash)
+                   span.set_tokens(
+                     usage["prompt_tokens"] || usage[:prompt_tokens] || 0,
+                     usage["completion_tokens"] || usage[:completion_tokens] || 0,
+                   )
+                 else
+                   span.set_tokens(
+                     usage.respond_to?(:prompt_tokens) ? (usage.prompt_tokens || 0) : 0,
+                     usage.respond_to?(:completion_tokens) ? (usage.completion_tokens || 0) : 0,
+                   )
+                 end
+               end
+
+               resp_model = if response.respond_to?(:model)
+                 response.model
+               elsif response.is_a?(Hash)
+                 response["model"] || response[:model]
+               end
+
+               span.set_latency(end_ms - start_ms)
+               span.cost = 0.0
+               span.model = resp_model || model
+               span.end
+               trace.end
+               response
+
+             rescue StandardError => e
+               end_ms = (Time.now.to_f * 1000).to_i
+               span.set_latency(end_ms - start_ms)
+               span.set_error(e.message)
+               span.end
+               trace.status = "error"
+               trace.end
+               raise
+             end
+           end
+         end
+
+         openai_client
+       end
+     end
+   end
+ end
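
Because detect_provider keys off uri_base, the same wrapper covers any OpenAI-compatible endpoint. A sketch pointing ruby-openai at one of the PROVIDER_MAP domains (the model name is illustrative):

    openai = client.wrap_openai(
      OpenAI::Client.new(access_token: "...", uri_base: "https://api.moonshot.ai/v1")
    )

    # Spans from this client are tagged provider: "kimi" via PROVIDER_MAP;
    # hosts not in the map fall back to the first label of the hostname.
    response = openai.chat(
      parameters: { model: "moonshot-v1-8k", messages: [{ role: "user", content: "Hi" }] }
    )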
data/lib/bloop/integrations.rb ADDED
@@ -0,0 +1,4 @@
+ # frozen_string_literal: true
+
+ require_relative "integrations/openai"
+ require_relative "integrations/anthropic"
data/lib/bloop.rb CHANGED
@@ -1,266 +1,8 @@
  # frozen_string_literal: true

- require "openssl"
- require "net/http"
- require "json"
- require "uri"
- require "securerandom"
- require_relative "bloop/tracing"
+ require_relative "bloop/client"
+ require_relative "bloop/integrations"

  module Bloop
    VERSION = "0.2.0"
-
-   class Client
-     attr_reader :endpoint, :project_key
-
-     # @param endpoint [String] Bloop server URL
-     # @param project_key [String] Project API key for HMAC signing
-     # @param environment [String] Environment tag (default: "production")
-     # @param release [String] Release version tag
-     # @param flush_interval [Numeric] Seconds between auto-flushes (default: 5)
-     # @param max_buffer_size [Integer] Flush when buffer reaches this size (default: 100)
-     def initialize(endpoint:, project_key:, environment: "production", release: "", flush_interval: 5, max_buffer_size: 100)
-       @endpoint = endpoint.chomp("/")
-       @project_key = project_key
-       @environment = environment
-       @release = release
-       @flush_interval = flush_interval
-       @max_buffer_size = max_buffer_size
-
-       @buffer = []
-       @trace_buffer = []
-       @mutex = Mutex.new
-       @closed = false
-
-       start_flush_thread
-       install_at_exit
-     end
-
-     # Capture an error event.
-     #
-     # @param error_type [String] The error class name
-     # @param message [String] Human-readable error message
-     # @param source [String] Source platform (default: "ruby")
-     # @param stack [String] Stack trace
-     # @param route_or_procedure [String] Route or method
-     # @param screen [String] Screen name (mobile)
-     # @param metadata [Hash] Arbitrary metadata
-     def capture(error_type:, message:, source: "ruby", stack: "", route_or_procedure: "", screen: "", metadata: nil, **kwargs)
-       return if @closed
-
-       event = {
-         timestamp: (Time.now.to_f * 1000).to_i,
-         source: source,
-         environment: @environment,
-         error_type: error_type,
-         message: message,
-       }
-       event[:release] = @release unless @release.empty?
-       event[:stack] = stack unless stack.empty?
-       event[:route_or_procedure] = route_or_procedure unless route_or_procedure.empty?
-       event[:screen] = screen unless screen.empty?
-       event[:metadata] = metadata if metadata
-
-       kwargs.each { |k, v| event[k] = v unless event.key?(k) }
-
-       @mutex.synchronize do
-         @buffer << event
-         flush_locked if @buffer.size >= @max_buffer_size
-       end
-     end
-
-     # Capture a Ruby exception.
-     #
-     # @param exception [Exception] The exception to capture
-     def capture_exception(exception, **kwargs)
-       capture(
-         error_type: exception.class.name,
-         message: exception.message,
-         stack: (exception.backtrace || []).join("\n"),
-         **kwargs
-       )
-     end
-
-     # Wrap a block and capture any raised exception, then re-raise.
-     #
-     # @param kwargs [Hash] Extra context passed to capture_exception (e.g. route_or_procedure:, metadata:)
-     # @yield The block to execute
-     # @return The block's return value
-     def with_error_capture(**kwargs, &block)
-       block.call
-     rescue Exception => e
-       capture_exception(e, **kwargs)
-       raise
-     end
-
-     # Flush buffered events immediately.
-     def flush
-       @mutex.synchronize { flush_locked }
-     end
-
-     # Flush and stop the background thread.
-     def close
-       @closed = true
-       flush
-       @flush_thread&.kill
-     end
-
-     # Start a new LLM trace for observability.
-     #
-     # @param name [String] Trace name (e.g. "chat-completion")
-     # @param session_id [String] Optional session identifier
-     # @param user_id [String] Optional user identifier
-     # @param input [Object] Optional input data
-     # @param metadata [Hash] Optional metadata
-     # @param prompt_name [String] Optional prompt template name
-     # @param prompt_version [String] Optional prompt version
-     # @return [Bloop::Trace]
-     def start_trace(name:, session_id: nil, user_id: nil, input: nil, metadata: nil,
-                     prompt_name: nil, prompt_version: nil)
-       Bloop::Trace.new(client: self, name: name, session_id: session_id,
-                        user_id: user_id, input: input, metadata: metadata,
-                        prompt_name: prompt_name, prompt_version: prompt_version)
-     end
-
-     # Wrap a block in a trace. Auto-finishes on success or error.
-     #
-     # @param name [String] Trace name
-     # @param kwargs [Hash] Extra args passed to start_trace
-     # @yield [Bloop::Trace] The trace object
-     # @return [Bloop::Trace]
-     def with_trace(name, **kwargs)
-       trace = start_trace(name: name, **kwargs)
-       yield trace
-       trace.finish(status: :completed) if trace.status == "running"
-       trace
-     rescue Exception => e
-       trace.finish(status: :error, output: e.message) if trace.status == "running"
-       raise
-     end
-
-     private
-
-     def flush_locked
-       unless @buffer.empty?
-         events = @buffer.dup
-         @buffer.clear
-         Thread.new { send_events(events) }
-       end
-
-       flush_traces_locked
-     end
-
-     def send_events(events)
-       if events.size == 1
-         path = "/v1/ingest"
-         body = JSON.generate(events.first)
-       else
-         path = "/v1/ingest/batch"
-         body = JSON.generate({ events: events })
-       end
-
-       signature = OpenSSL::HMAC.hexdigest("SHA256", @project_key, body)
-
-       uri = URI("#{@endpoint}#{path}")
-       http = Net::HTTP.new(uri.host, uri.port)
-       http.use_ssl = (uri.scheme == "https")
-       http.open_timeout = 5
-       http.read_timeout = 10
-
-       req = Net::HTTP::Post.new(uri.path)
-       req["Content-Type"] = "application/json"
-       req["X-Signature"] = signature
-       req["X-Project-Key"] = @project_key
-       req.body = body
-
-       http.request(req)
-     rescue StandardError
-       # Fire and forget — don't crash the host app
-     end
-
-     def enqueue_trace(trace)
-       @mutex.synchronize do
-         @trace_buffer << trace.to_h
-         flush_traces_locked if @trace_buffer.size >= @max_buffer_size
-       end
-     end
-
-     def flush_traces_locked
-       return if @trace_buffer.empty?
-
-       traces = @trace_buffer.dup
-       @trace_buffer.clear
-       Thread.new { send_traces(traces) }
-     end
-
-     def send_traces(traces)
-       traces.each_slice(50) do |batch|
-         body = JSON.generate({ traces: batch })
-         signature = OpenSSL::HMAC.hexdigest("SHA256", @project_key, body)
-         uri = URI("#{@endpoint}/v1/traces/batch")
-         http = Net::HTTP.new(uri.host, uri.port)
-         http.use_ssl = (uri.scheme == "https")
-         http.open_timeout = 5
-         http.read_timeout = 10
-         req = Net::HTTP::Post.new(uri.path)
-         req["Content-Type"] = "application/json"
-         req["X-Signature"] = signature
-         req["X-Project-Key"] = @project_key
-         req.body = body
-         http.request(req)
-       end
-     rescue StandardError
-       # Fire and forget
-     end
-
-     def start_flush_thread
-       @flush_thread = Thread.new do
-         loop do
-           sleep @flush_interval
-           flush unless @closed
-         rescue StandardError
-           # Ignore flush errors
-         end
-       end
-       @flush_thread.abort_on_exception = false
-     end
-
-     def install_at_exit
-       client = self
-       at_exit { client.close }
-     end
-   end
-
-   # Rack middleware that captures unhandled exceptions and reports them to bloop.
-   #
-   # Works with Rails, Sinatra, Grape, and any Rack-compatible framework.
-   #
-   # @example Rails
-   #   # config/application.rb
-   #   config.middleware.use Bloop::RackMiddleware, client: Bloop::Client.new(...)
-   #
-   # @example Sinatra
-   #   use Bloop::RackMiddleware, client: Bloop::Client.new(...)
-   class RackMiddleware
-     # @param app [#call] The Rack application
-     # @param client [Bloop::Client] A configured bloop client instance
-     def initialize(app, client:)
-       @app = app
-       @client = client
-     end
-
-     def call(env)
-       @app.call(env)
-     rescue Exception => e
-       @client.capture_exception(e,
-         route_or_procedure: env["PATH_INFO"],
-         metadata: {
-           method: env["REQUEST_METHOD"],
-           query: env["QUERY_STRING"],
-           remote_ip: env["REMOTE_ADDR"],
-         }
-       )
-       raise
-     end
-   end
  end
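
Alongside the restructuring, this hunk removes capture_exception, with_error_capture, the at_exit hook, and RackMiddleware, and the transport drops the X-Project-Key header (requests now carry only the X-Signature HMAC). A minimal migration sketch for exception reporting against the 0.3.0 capture signature (risky_operation is a stand-in for application code):

    begin
      risky_operation
    rescue StandardError => e
      # capture_exception is gone in 0.3.0; build the event by hand.
      client.capture(
        error_type: e.class.name,
        message: e.message,
        stack: (e.backtrace || []).join("\n"),
      )
      raise
    end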
metadata CHANGED
@@ -1,30 +1,31 @@
  --- !ruby/object:Gem::Specification
  name: bloop-sdk
  version: !ruby/object:Gem::Version
-   version: 0.2.0
+   version: 0.3.0
  platform: ruby
  authors:
- - bloop
- autorequire:
+ - Bloop
  bindir: bin
  cert_chain: []
- date: 2026-02-14 00:00:00.000000000 Z
+ date: 1980-01-02 00:00:00.000000000 Z
  dependencies: []
- description: Capture and send error events to a bloop server. Zero external dependencies.
- email:
+ description: Self-hosted error observability and LLM tracing. Captures errors, deduplicates
+   via fingerprinting, and traces AI/LLM calls (token usage, costs, latency). Zero
+   external runtime dependencies.
  executables: []
  extensions: []
  extra_rdoc_files: []
  files:
  - lib/bloop.rb
- - lib/bloop/tracing.rb
- homepage: https://github.com/your-org/bloop
+ - lib/bloop/client.rb
+ - lib/bloop/integrations.rb
+ - lib/bloop/integrations/anthropic.rb
+ - lib/bloop/integrations/openai.rb
+ homepage: https://github.com/jaikoo/bloop-ruby
  licenses:
  - MIT
  metadata:
-   homepage_uri: https://github.com/your-org/bloop
-   source_code_uri: https://github.com/your-org/bloop/tree/main/sdks/ruby
- post_install_message:
+   source_code_uri: https://github.com/jaikoo/bloop-ruby
  rdoc_options: []
  require_paths:
  - lib
@@ -39,8 +40,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
      - !ruby/object:Gem::Version
        version: '0'
  requirements: []
- rubygems_version: 3.5.22
- signing_key:
+ rubygems_version: 3.7.1
  specification_version: 4
- summary: Ruby SDK for bloop error observability
+ summary: Bloop error reporting and LLM tracing SDK for Ruby
  test_files: []
data/lib/bloop/tracing.rb DELETED
@@ -1,127 +0,0 @@
- # frozen_string_literal: true
-
- module Bloop
-   class Span
-     attr_reader :id, :parent_span_id, :span_type, :name, :model, :provider,
-                 :started_at, :input, :metadata
-     attr_accessor :input_tokens, :output_tokens, :cost, :latency_ms,
-                   :time_to_first_token_ms, :status, :error_message, :output
-
-     def initialize(span_type:, name: "", model: "", provider: "", input: nil,
-                    metadata: nil, parent_span_id: nil)
-       @id = SecureRandom.uuid
-       @parent_span_id = parent_span_id
-       @span_type = span_type.to_s
-       @name = name
-       @model = model
-       @provider = provider
-       @input = input
-       @metadata = metadata
-       @started_at = (Time.now.to_f * 1000).to_i
-     end
-
-     def finish(status: :ok, input_tokens: nil, output_tokens: nil, cost: nil,
-                error_message: nil, output: nil, time_to_first_token_ms: nil)
-       @latency_ms = (Time.now.to_f * 1000).to_i - @started_at
-       @status = status.to_s
-       @input_tokens = input_tokens if input_tokens
-       @output_tokens = output_tokens if output_tokens
-       @cost = cost if cost
-       @error_message = error_message if error_message
-       @output = output if output
-       @time_to_first_token_ms = time_to_first_token_ms if time_to_first_token_ms
-       self
-     end
-
-     def set_usage(input_tokens: nil, output_tokens: nil, cost: nil)
-       @input_tokens = input_tokens if input_tokens
-       @output_tokens = output_tokens if output_tokens
-       @cost = cost if cost
-     end
-
-     def to_h
-       h = {
-         id: @id, span_type: @span_type, name: @name,
-         started_at: @started_at, status: @status || "ok",
-       }
-       h[:parent_span_id] = @parent_span_id if @parent_span_id
-       h[:model] = @model unless @model.empty?
-       h[:provider] = @provider unless @provider.empty?
-       h[:input_tokens] = @input_tokens if @input_tokens
-       h[:output_tokens] = @output_tokens if @output_tokens
-       h[:cost] = @cost if @cost
-       h[:latency_ms] = @latency_ms if @latency_ms
-       h[:time_to_first_token_ms] = @time_to_first_token_ms if @time_to_first_token_ms
-       h[:error_message] = @error_message if @error_message
-       h[:input] = @input if @input
-       h[:output] = @output if @output
-       h[:metadata] = @metadata if @metadata
-       h
-     end
-   end
-
-   class Trace
-     attr_reader :id, :name, :session_id, :user_id, :started_at, :input, :metadata,
-                 :prompt_name, :prompt_version, :spans
-     attr_accessor :status, :output, :ended_at
-
-     def initialize(client:, name:, session_id: nil, user_id: nil, input: nil,
-                    metadata: nil, prompt_name: nil, prompt_version: nil)
-       @id = SecureRandom.uuid
-       @client = client
-       @name = name
-       @session_id = session_id
-       @user_id = user_id
-       @status = "running"
-       @input = input
-       @metadata = metadata
-       @prompt_name = prompt_name
-       @prompt_version = prompt_version
-       @started_at = (Time.now.to_f * 1000).to_i
-       @spans = []
-     end
-
-     def start_span(span_type: :custom, name: "", model: "", provider: "",
-                    input: nil, metadata: nil, parent_span_id: nil)
-       span = Span.new(span_type: span_type, name: name, model: model,
-                       provider: provider, input: input, metadata: metadata,
-                       parent_span_id: parent_span_id)
-       @spans << span
-       span
-     end
-
-     def with_generation(model: "", provider: "", name: "", input: nil, metadata: nil)
-       span = start_span(span_type: :generation, name: name, model: model,
-                         provider: provider, input: input, metadata: metadata)
-       yield span
-       span.finish(status: :ok) unless span.status
-       span
-     rescue Exception => e
-       span.finish(status: :error, error_message: e.message) unless span.status
-       raise
-     end
-
-     def finish(status: :completed, output: nil)
-       @ended_at = (Time.now.to_f * 1000).to_i
-       @status = status.to_s
-       @output = output if output
-       @client.send(:enqueue_trace, self)
-     end
-
-     def to_h
-       h = {
-         id: @id, name: @name, status: @status, started_at: @started_at,
-         spans: @spans.map(&:to_h),
-       }
-       h[:session_id] = @session_id if @session_id
-       h[:user_id] = @user_id if @user_id
-       h[:input] = @input if @input
-       h[:output] = @output if @output
-       h[:metadata] = @metadata if @metadata
-       h[:prompt_name] = @prompt_name if @prompt_name
-       h[:prompt_version] = @prompt_version if @prompt_version
-       h[:ended_at] = @ended_at if @ended_at
-       h
-     end
-   end
- end
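
The deleted Trace/Span API maps onto the 0.3.0 one roughly as follows; a sketch of the same generation recorded both ways (token counts are illustrative):

    # 0.2.0 (removed): block-style generation, finished implicitly
    trace = client.start_trace(name: "chat-completion")
    trace.with_generation(model: "gpt-4o", provider: "openai") do |span|
      span.set_usage(input_tokens: 100, output_tokens: 50)
    end
    trace.finish(status: :completed)

    # 0.3.0: explicit span.end / trace.end; trace.end enqueues the trace
    trace = client.trace(name: "chat-completion")
    span = trace.span(model: "gpt-4o", provider: "openai")
    span.set_tokens(100, 50)
    span.end
    trace.end

One behavioral difference worth noting: 0.2.0 generated hyphenated UUIDs for trace and span ids, while 0.3.0 strips the hyphens (SecureRandom.uuid.delete("-")).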