opentelemetry-instrumentation-ruby_llm 0.3.0 → 0.5.0

This diff compares the content of two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz: dfb17d4d74d31d2a53792683e0d5d070cd25e9160ee3c5018c0a1dbfb5bbeb4b
-  data.tar.gz: 91c3b7ac277d43ddf80a6fc49e2228d8005bf40f9321cd10cd5ac2539c96dfc7
+  metadata.gz: 07574aa081ca21455a0e4384ee35109310b70f878be921a6567b3126e9809602
+  data.tar.gz: 5a42c5a5f695c9b72cdf6a29caf5cd8d8ad8cf06895aebc0e170b9b4a02892c3
 SHA512:
-  metadata.gz: df479838c253aa695c1b36b393b4f7eac31dd041a581df3d04d9f9e9ca1253d026b8582b32d2cdf398e812d53459ef0c012406654342d37a9ae45c45dc68c5e7
-  data.tar.gz: 9c18a05253193ce23af8bc0f342cd726778c6daccff7d1010602a63b36f20d07eb13852ea6f01d6e40bf2439236fbf4b495e32e144e701d521aaeec18972f6a4
+  metadata.gz: 27d39b8ca21c044fc793c4e4d4b093b3b71d8010aecc52f518da5687943c7819f4eba144e1b4d10de889ab668344b0c952cbca5ac6f67e69e0110701b7f771ad
+  data.tar.gz: db4b50b51fb6f93a60cdee7738d9dbc181e3d18aec04fc0e6dda1a684b8e0c887be67b6bb1636c3f67580aa4decc3077b926128b40f45c216d8c4bd52ea99c7f
@@ -16,6 +16,7 @@ jobs:
           - '3.4'
           - '3.3'
           - '3.2'
+          - '3.1'
 
     steps:
       - uses: actions/checkout@v4
data/README.md CHANGED
@@ -59,6 +59,32 @@ When enabled, the following attributes are added to chat spans:
 > [!WARNING]
 > Captured content may include sensitive or personally identifiable information (PII). Use with caution in production environments.
 
+### Custom attributes
+
+Use `with_otel_attributes` to add arbitrary attributes to the span for each request. This is useful for per-request metadata such as Langfuse prompt linking or trace-level tags:
+
+```ruby
+chat = RubyLLM.chat
+chat.with_otel_attributes(
+  "langfuse.observation.prompt.name" => "supplement-assistant",
+  "langfuse.observation.prompt.version" => 1,
+  "langfuse.trace.tags" => ["vitamins"],
+  "langfuse.trace.metadata" => { category: "health" }.to_json
+)
+chat.ask("What are the side effects of Vitamin D3?")
+```
+
+Values can also be callables (Procs or lambdas) that are evaluated after each completion, giving them access to response data:
+
+```ruby
+chat.with_otel_attributes(
+  "langfuse.observation.prompt.name" => "supplement-assistant",
+  "langfuse.observation.output" => -> { chat.messages.last&.content.to_s }
+)
+```
+
+Attributes persist across calls on the same chat instance, and the method returns `self` for chaining.
+
 ## What's traced?
 
 | Feature | Status |
@@ -68,8 +94,8 @@ When enabled, the following attributes are added to chat spans:
 | Error handling | Supported |
 | Opt-in input/output content capture | Supported |
 | Conversation tracking (`gen_ai.conversation.id`) | Planned |
-| System instructions capture | Planned |
-| Custom attributes on traces and spans | Planned |
+| System instructions capture | Supported (via `capture_content`) |
+| Custom attributes on traces and spans | Supported (via `with_otel_attributes`) |
 | Embeddings | Planned |
 | Streaming | Planned |
 
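Because `with_otel_attributes` returns `self`, it composes with the other chainable chat methods shown above. A minimal sketch of the composed style (assuming the chainable `RubyLLM.chat` builder API used in the examples):

```ruby
# Hedged sketch: each builder call returns the chat, so calls can chain.
response = RubyLLM.chat
  .with_instructions("You are a terse assistant.")
  .with_otel_attributes("langfuse.trace.tags" => ["demo"])
  .ask("Summarize OpenTelemetry in one sentence.")

puts response.content
```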
example/trace_demonstration_with_langfuse.rb CHANGED
@@ -36,6 +36,10 @@ end
 
 chat = RubyLLM.chat
 chat.with_instructions("You are a helpful assistant that provides concise answers.")
+chat.with_otel_attributes(
+  "langfuse.observation.prompt.name" => "helpful-assistant",
+  "langfuse.observation.prompt.version" => 1
+)
 response = chat.ask("What is the meaning of life?")
 puts "\nResponse: #{response.content}"
 
example/trace_demonstration_with_langfuse_and_tools.rb CHANGED
@@ -46,6 +46,10 @@ end
 chat = RubyLLM.chat
 chat.with_instructions("You are a helpful assistant that provides concise answers.")
 chat.with_tool(Calculator)
+chat.with_otel_attributes(
+  "langfuse.observation.prompt.name" => "helpful-assistant",
+  "langfuse.observation.prompt.version" => 1
+)
 response = chat.ask("Use the calculator tool to compute 123 * 456")
 puts "\nResponse: #{response.content}"
 response = chat.ask("Use the tool again to compute 789 + 1011")
example/trace_demonstration_with_langfuse_ingredient_search.rb ADDED
@@ -0,0 +1,110 @@
+# frozen_string_literal: true
+
+require "bundler/inline"
+
+gemfile(true) do
+  source "https://rubygems.org"
+  gem "ruby_llm"
+  gem "opentelemetry-api"
+  gem "opentelemetry-sdk"
+  gem "opentelemetry-exporter-otlp"
+  gem "opentelemetry-instrumentation-ruby_llm", path: "../"
+  gem "base64"
+  gem "dotenv"
+end
+
+require "base64"
+require "dotenv/load"
+
+credentials = Base64.strict_encode64("#{ENV['LANGFUSE_PUBLIC_KEY']}:#{ENV['LANGFUSE_SECRET_KEY']}")
+
+OpenTelemetry::SDK.configure do |c|
+  c.service_name = "ruby_llm-demo"
+  c.add_span_processor(
+    OpenTelemetry::SDK::Trace::Export::BatchSpanProcessor.new(
+      OpenTelemetry::Exporter::OTLP::Exporter.new(
+        endpoint: "https://us.cloud.langfuse.com/api/public/otel/v1/traces",
+        headers: { "Authorization" => "Basic #{credentials}" }
+      )
+    )
+  )
+  c.use "OpenTelemetry::Instrumentation::RubyLLM", capture_content: true
+end
+
+RubyLLM.configure do |c|
+  c.openai_api_key = ENV["OPENAI_API_KEY"]
+  c.default_model = "gpt-5-nano"
+end
+
+INGREDIENT_DATABASE = {
+  "vitamin d3" => {
+    name: "Vitamin D3 (Cholecalciferol)",
+    common_doses: "1,000-5,000 IU daily",
+    side_effects: ["Nausea", "Vomiting", "Constipation", "Loss of appetite", "Excessive thirst", "Frequent urination", "Kidney stones (at very high doses)"],
+    interactions: ["Corticosteroids", "Orlistat", "Statins", "Thiazide diuretics"],
+    notes: "Fat-soluble vitamin. Toxicity risk at sustained doses above 10,000 IU/day."
+  },
+  "magnesium glycinate" => {
+    name: "Magnesium Glycinate",
+    common_doses: "200-400 mg daily",
+    side_effects: ["Diarrhea", "Nausea", "Abdominal cramping"],
+    interactions: ["Antibiotics (tetracyclines, quinolones)", "Bisphosphonates", "Diuretics"],
+    notes: "Better absorbed and gentler on the stomach than magnesium oxide."
+  },
+  "zinc" => {
+    name: "Zinc",
+    common_doses: "15-30 mg daily",
+    side_effects: ["Nausea", "Metallic taste", "Headache", "Copper deficiency (long-term use)"],
+    interactions: ["Antibiotics", "Penicillamine", "Copper supplements"],
+    notes: "Best taken with food to reduce nausea. Long-term use above 40 mg/day may deplete copper."
+  }
+}
+
+class SearchForIngredientDetails < RubyLLM::Tool
+  description "Searches a database for detailed information about a supplement ingredient, including side effects, interactions, and dosage"
+  param :ingredient_name, type: "string", desc: "The name of the ingredient to search for (e.g., 'vitamin d3', 'magnesium glycinate')"
+
+  def execute(ingredient_name:)
+    key = ingredient_name.downcase.strip
+    match = INGREDIENT_DATABASE.find { |k, _| key.include?(k) || k.include?(key) }
+
+    if match
+      _, details = match
+      details.map { |k, v| "#{k}: #{Array(v).join(', ')}" }.join("\n")
+    else
+      "No information found for '#{ingredient_name}'. Available ingredients: #{INGREDIENT_DATABASE.keys.join(', ')}"
+    end
+  end
+end
+
+chat = RubyLLM.chat
+chat.with_instructions("You are a knowledgeable health supplement assistant. Use the search tool to look up ingredient details before answering questions.")
+chat.with_tool(SearchForIngredientDetails)
+
+questions = [
+  { text: "What are the side effects of Vitamin D3?", ingredient: "vitamin d3" },
+  { text: "What are the common interactions with magnesium glycinate?", ingredient: "magnesium glycinate" },
+  { text: "What is the recommended dosage for zinc?", ingredient: "zinc" },
+  { text: "Are there any interactions I should be aware of with zinc?", ingredient: "zinc" }
+]
+
+questions.each do |q|
+  puts "\n---\n\n"
+  puts "Question: #{q[:text]}\n\n"
+
+  chat.with_otel_attributes(
+    "langfuse.observation.prompt.name" => "supplement-assistant",
+    "langfuse.observation.prompt.version" => 1,
+    "langfuse.observation.input" => q[:text],
+    "langfuse.observation.output" => -> { chat.messages.last&.content.to_s },
+    "langfuse.observation.metadata" => { ingredient: q[:ingredient] }.to_json,
+    "langfuse.trace.metadata" => { ingredient: q[:ingredient] }.to_json,
+    "langfuse.trace.tags" => [q[:ingredient]]
+  )
+
+  response = chat.ask(q[:text])
+  puts "\nResponse: #{response.content}"
+end
+
+# This line is only necessary in short-lived scripts. In a long-running application, spans will be flushed automatically.
+OpenTelemetry.tracer_provider.force_flush
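A hedged alternative for one-off scripts is to shut the tracer provider down at process exit, which also flushes pending spans; `shutdown` is the standard OpenTelemetry Ruby SDK call:

```ruby
# Flushes pending spans and releases the exporter when the script exits.
at_exit { OpenTelemetry.tracer_provider.shutdown }
```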
lib/opentelemetry/instrumentation/ruby_llm/instrumentation.rb CHANGED
@@ -15,7 +15,9 @@ module OpenTelemetry
 
       install do |_config|
         require_relative "patches/chat"
+        require_relative "patches/embedding"
         ::RubyLLM::Chat.prepend(Patches::Chat)
+        ::RubyLLM::Embedding.singleton_class.prepend(Patches::Embedding)
       end
     end
   end
 end
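The install hook relies on `Module#prepend`: the patch module is inserted ahead of the patched class in the method lookup chain, so the wrapper runs first and reaches the original implementation through `super`. A minimal sketch of the pattern, with toy names rather than the gem's API:

```ruby
class Chat
  def complete = "model response"
end

module TracingPatch
  def complete
    # Runs before Chat#complete; super dispatches to the original method.
    puts "span opened"
    result = super
    puts "span closed"
    result
  end
end

Chat.prepend(TracingPatch)
puts Chat.new.complete # => "model response", with the wrapper around it

# Module-level methods such as RubyLLM::Embedding.embed live on the
# singleton class, which is why the install hook prepends there instead.
```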
lib/opentelemetry/instrumentation/ruby_llm/patches/chat.rb CHANGED
@@ -5,6 +5,11 @@ module OpenTelemetry
     module RubyLLM
       module Patches
         module Chat
+          def with_otel_attributes(attributes)
+            @otel_attributes = attributes
+            self
+          end
+
           def complete(&)
             provider = @model&.provider || "unknown"
             model_id = @model&.id || "unknown"
@@ -45,11 +50,10 @@ module OpenTelemetry
                 end
               end
 
+              @otel_attributes&.each { |key, value| span.set_attribute(key, value.respond_to?(:call) ? value.call : value) }
+
               result
             end
-          rescue StandardError => e
-            OpenTelemetry.handle_error(exception: e)
-            super
           end
 
           def execute_tool(tool_call)
@@ -66,9 +70,6 @@ module OpenTelemetry
               span.set_attribute("gen_ai.tool.call.result", result_str[0..500])
               result
             end
-          rescue StandardError => e
-            OpenTelemetry.handle_error(exception: e)
-            super
           end
 
           private
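The `value.respond_to?(:call) ? value.call : value` dispatch above accepts any callable, not only the lambdas shown in the README; Procs and `Method` objects respond to `#call` as well. A small standalone illustration of the same dispatch, with made-up attribute values:

```ruby
attrs = {
  "custom.static" => "fixed",
  "custom.lambda" => -> { Time.now.to_i },
  "custom.proc"   => proc { 2 + 2 },
  "custom.method" => "hello".method(:upcase) # Method objects respond to #call too
}

attrs.each do |key, value|
  resolved = value.respond_to?(:call) ? value.call : value
  puts "#{key} => #{resolved}"
end
```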
lib/opentelemetry/instrumentation/ruby_llm/patches/embedding.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module OpenTelemetry
+  module Instrumentation
+    module RubyLLM
+      module Patches
+        module Embedding
+          def embed(text, model: nil, provider: nil, assume_model_exists: false, context: nil, dimensions: nil)
+            config = context&.config || ::RubyLLM.config
+            resolved_model = model || config.default_embedding_model
+            model_obj, _provider_instance = ::RubyLLM::Models.resolve(
+              resolved_model, provider: provider, assume_exists: assume_model_exists, config: config
+            )
+            model_id = model_obj.id
+            provider_name = model_obj.provider || "unknown"
+
+            attributes = {
+              "gen_ai.operation.name" => "embeddings",
+              "gen_ai.provider.name" => provider_name,
+              "gen_ai.request.model" => model_id
+            }
+
+            tracer.in_span("embeddings #{model_id}", attributes: attributes, kind: OpenTelemetry::Trace::SpanKind::CLIENT) do |span|
+              begin
+                result = super
+              rescue => e
+                span.record_exception(e)
+                span.status = OpenTelemetry::Trace::Status.error(e.message)
+                span.set_attribute("error.type", e.class.name)
+                raise
+              end
+
+              span.set_attribute("gen_ai.response.model", result.model) if result.model
+              span.set_attribute("gen_ai.usage.input_tokens", result.input_tokens) if result.input_tokens&.positive?
+
+              if result.vectors.is_a?(Array)
+                first = result.vectors.first
+                vector = first.is_a?(Array) ? first : result.vectors
+                span.set_attribute("gen_ai.embeddings.dimension.count", vector.length) if vector.is_a?(Array)
+              end
+
+              result
+            end
+          end
+
+          private
+
+          def tracer
+            RubyLLM::Instrumentation.instance.tracer
+          end
+        end
+      end
+    end
+  end
+end
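With this patch installed, a single embeddings call should yield one CLIENT span; the sketch below mirrors the expectations in the tests later in this diff:

```ruby
RubyLLM.embed("Hello, world!", model: "text-embedding-3-small")
# One CLIENT span named "embeddings text-embedding-3-small" with:
#   gen_ai.operation.name             => "embeddings"
#   gen_ai.provider.name              => "openai"
#   gen_ai.request.model              => "text-embedding-3-small"
#   gen_ai.response.model             => "text-embedding-3-small"
#   gen_ai.usage.input_tokens         => 8  (per the test stub's usage block)
#   gen_ai.embeddings.dimension.count => 3  (length of the returned vector)
```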
lib/opentelemetry/instrumentation/ruby_llm/version.rb CHANGED
@@ -3,7 +3,7 @@
 module OpenTelemetry
   module Instrumentation
     module RubyLLM
-      VERSION = "0.3.0"
+      VERSION = "0.5.0"
     end
   end
 end
opentelemetry-instrumentation-ruby_llm.gemspec CHANGED
@@ -11,7 +11,7 @@ Gem::Specification.new do |spec|
   spec.description = "Adds OpenTelemetry tracing to RubyLLM chat operations"
   spec.homepage = "https://github.com/thoughtbot/opentelemetry-instrumentation-ruby_llm"
 
-  spec.required_ruby_version = ">= 3.2.0"
+  spec.required_ruby_version = ">= 3.1.3"
 
   spec.metadata["homepage_uri"] = spec.homepage
 
test/instrumentation_test.rb CHANGED
@@ -67,31 +67,6 @@ class InstrumentationTest < Minitest::Test
     assert_equal OpenTelemetry::Trace::Status::ERROR, span.status.code
   end
 
-  def test_complete_still_works_when_instrumentation_fails
-    stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .to_return(
-        status: 200,
-        headers: { "Content-Type" => "application/json" },
-        body: {
-          id: "chatcmpl-123",
-          object: "chat.completion",
-          model: "gpt-4o-mini",
-          choices: [{
-            index: 0,
-            message: { role: "assistant", content: "Hello!" },
-            finish_reason: "stop"
-          }],
-          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
-        }.to_json
-      )
-
-    chat = RubyLLM.chat(model: "gpt-4o-mini")
-    chat.define_singleton_method(:tracer) { raise StandardError, "instrumentation bug" }
-
-    response = chat.ask("Hi")
-    assert_equal "Hello!", response.content
-  end
-
   def test_instruments_complete_called_directly
     stub_request(:post, "https://api.openai.com/v1/chat/completions")
       .to_return(
@@ -200,68 +175,6 @@ class InstrumentationTest < Minitest::Test
     assert_equal "function", tool_span.attributes["gen_ai.tool.type"]
   end
 
-  def test_execute_tool_still_works_when_instrumentation_fails
-    calculator = Class.new(RubyLLM::Tool) do
-      def self.name = "calculator"
-      description "Performs math"
-      param :expression, type: "string", desc: "Math expression"
-
-      def execute(expression:)
-        eval(expression).to_s
-      end
-    end
-
-    stub_request(:post, "https://api.openai.com/v1/chat/completions")
-      .to_return(
-        {
-          status: 200,
-          headers: { "Content-Type" => "application/json" },
-          body: {
-            id: "chatcmpl-123",
-            object: "chat.completion",
-            model: "gpt-4o-mini",
-            choices: [{
-              index: 0,
-              message: {
-                role: "assistant",
-                content: nil,
-                tool_calls: [{
-                  id: "call_abc123",
-                  type: "function",
-                  function: { name: "calculator", arguments: '{"expression":"2+2"}' }
-                }]
-              },
-              finish_reason: "tool_calls"
-            }],
-            usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
-          }.to_json
-        },
-        {
-          status: 200,
-          headers: { "Content-Type" => "application/json" },
-          body: {
-            id: "chatcmpl-456",
-            object: "chat.completion",
-            model: "gpt-4o-mini",
-            choices: [{
-              index: 0,
-              message: { role: "assistant", content: "The answer is 4" },
-              finish_reason: "stop"
-            }],
-            usage: { prompt_tokens: 20, completion_tokens: 5, total_tokens: 25 }
-          }.to_json
-        }
-      )
-
-    chat = RubyLLM.chat(model: "gpt-4o-mini")
-    chat.with_tool(calculator)
-
-    chat.define_singleton_method(:tracer) { raise StandardError, "instrumentation bug" }
-
-    response = chat.ask("What is 2+2?")
-    assert_equal "The answer is 4", response.content
-  end
-
   def test_does_not_capture_content_by_default
     stub_request(:post, "https://api.openai.com/v1/chat/completions")
       .to_return(
@@ -332,6 +245,161 @@ class InstrumentationTest < Minitest::Test
     OpenTelemetry::Instrumentation::RubyLLM::Instrumentation.instance.config[:capture_content] = false
   end
 
+  def test_creates_span_for_embedding
+    stub_request(:post, "https://api.openai.com/v1/embeddings")
+      .to_return(
+        status: 200,
+        headers: { "Content-Type" => "application/json" },
+        body: {
+          object: "list",
+          model: "text-embedding-3-small",
+          data: [
+            { object: "embedding", index: 0, embedding: [0.1, 0.2, 0.3] }
+          ],
+          usage: { prompt_tokens: 8, total_tokens: 8 }
+        }.to_json
+      )
+
+    RubyLLM.embed("Hello, world!", model: "text-embedding-3-small")
+
+    spans = EXPORTER.finished_spans
+    assert_equal 1, spans.length
+
+    span = spans.first
+    assert_equal OpenTelemetry::Trace::SpanKind::CLIENT, span.kind
+    assert_equal "embeddings text-embedding-3-small", span.name
+    assert_equal "embeddings", span.attributes["gen_ai.operation.name"]
+    assert_equal "openai", span.attributes["gen_ai.provider.name"]
+    assert_equal "text-embedding-3-small", span.attributes["gen_ai.request.model"]
+    assert_equal "text-embedding-3-small", span.attributes["gen_ai.response.model"]
+    assert_equal 8, span.attributes["gen_ai.usage.input_tokens"]
+    assert_equal 3, span.attributes["gen_ai.embeddings.dimension.count"]
+  end
+
+  def test_records_error_on_embedding_api_failure
+    stub_request(:post, "https://api.openai.com/v1/embeddings")
+      .to_return(status: 500, body: "Internal Server Error")
+
+    assert_raises do
+      RubyLLM.embed("Hello", model: "text-embedding-3-small")
+    end
+
+    spans = EXPORTER.finished_spans
+    span = spans.last
+
+    assert_equal "embeddings text-embedding-3-small", span.name
+    assert span.attributes["error.type"]
+    assert_equal OpenTelemetry::Trace::Status::ERROR, span.status.code
+  end
+
+  def test_with_otel_attributes_sets_span_attributes
+    stub_request(:post, "https://api.openai.com/v1/chat/completions")
+      .to_return(
+        status: 200,
+        headers: { "Content-Type" => "application/json" },
+        body: {
+          id: "chatcmpl-123",
+          object: "chat.completion",
+          model: "gpt-4o-mini",
+          choices: [{
+            index: 0,
+            message: { role: "assistant", content: "Hello!" },
+            finish_reason: "stop"
+          }],
+          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
+        }.to_json
+      )
+
+    chat = RubyLLM.chat(model: "gpt-4o-mini")
+    chat.with_otel_attributes(
+      "langfuse.trace.tags" => ["vitamin_d3"],
+      "custom.category" => "supplements"
+    )
+    chat.ask("Hi")
+
+    span = EXPORTER.finished_spans.first
+    assert_equal ["vitamin_d3"], span.attributes["langfuse.trace.tags"]
+    assert_equal "supplements", span.attributes["custom.category"]
+  end
+
+  def test_with_otel_attributes_returns_self_for_chaining
+    stub_request(:post, "https://api.openai.com/v1/chat/completions")
+      .to_return(
+        status: 200,
+        headers: { "Content-Type" => "application/json" },
+        body: {
+          id: "chatcmpl-123",
+          object: "chat.completion",
+          model: "gpt-4o-mini",
+          choices: [{
+            index: 0,
+            message: { role: "assistant", content: "Hello!" },
+            finish_reason: "stop"
+          }],
+          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
+        }.to_json
+      )
+
+    chat = RubyLLM.chat(model: "gpt-4o-mini")
+    result = chat.with_otel_attributes("custom.category" => "test")
+
+    assert_same chat, result
+  end
+
+  def test_with_otel_attributes_evaluates_callables
+    stub_request(:post, "https://api.openai.com/v1/chat/completions")
+      .to_return(
+        status: 200,
+        headers: { "Content-Type" => "application/json" },
+        body: {
+          id: "chatcmpl-123",
+          object: "chat.completion",
+          model: "gpt-4o-mini",
+          choices: [{
+            index: 0,
+            message: { role: "assistant", content: "Hello!" },
+            finish_reason: "stop"
+          }],
+          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
+        }.to_json
+      )
+
+    chat = RubyLLM.chat(model: "gpt-4o-mini")
+    chat.with_otel_attributes(
+      "custom.last_role" => -> { chat.messages.last&.role.to_s },
+      "custom.static" => "fixed"
+    )
+    chat.ask("Hi")
+
+    span = EXPORTER.finished_spans.first
+    assert_equal "assistant", span.attributes["custom.last_role"]
+    assert_equal "fixed", span.attributes["custom.static"]
+  end
+
+  def test_works_without_otel_attributes
+    stub_request(:post, "https://api.openai.com/v1/chat/completions")
+      .to_return(
+        status: 200,
+        headers: { "Content-Type" => "application/json" },
+        body: {
+          id: "chatcmpl-123",
+          object: "chat.completion",
+          model: "gpt-4o-mini",
+          choices: [{
+            index: 0,
+            message: { role: "assistant", content: "Hello!" },
+            finish_reason: "stop"
+          }],
+          usage: { prompt_tokens: 10, completion_tokens: 5, total_tokens: 15 }
+        }.to_json
+      )
+
+    chat = RubyLLM.chat(model: "gpt-4o-mini")
+    response = chat.ask("Hi")
+
+    assert_equal "Hello!", response.content
+  end
+
 
   def test_captures_content_when_enabled_via_env_var
     ENV["OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"] = "true"
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: opentelemetry-instrumentation-ruby_llm
 version: !ruby/object:Gem::Version
-  version: 0.3.0
+  version: 0.5.0
 platform: ruby
 authors:
 - Clarissa Borges
@@ -58,10 +58,12 @@ files:
 - example/trace_demonstration.rb
 - example/trace_demonstration_with_langfuse.rb
 - example/trace_demonstration_with_langfuse_and_tools.rb
+- example/trace_demonstration_with_langfuse_ingredient_search.rb
 - example/trace_demonstration_with_tools.rb
 - lib/opentelemetry-instrumentation-ruby_llm.rb
 - lib/opentelemetry/instrumentation/ruby_llm/instrumentation.rb
 - lib/opentelemetry/instrumentation/ruby_llm/patches/chat.rb
+- lib/opentelemetry/instrumentation/ruby_llm/patches/embedding.rb
 - lib/opentelemetry/instrumentation/ruby_llm/version.rb
 - opentelemetry-instrumentation-ruby_llm.gemspec
 - test/instrumentation_test.rb
@@ -78,7 +80,7 @@ required_ruby_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="
     - !ruby/object:Gem::Version
-      version: 3.2.0
+      version: 3.1.3
 required_rubygems_version: !ruby/object:Gem::Requirement
   requirements:
   - - ">="