llm.rb 4.7.0 → 4.8.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. checksums.yaml +4 -4
  2. data/README.md +32 -31
  3. data/lib/llm/eventstream/parser.rb +0 -5
  4. data/lib/llm/model.rb +115 -0
  5. data/lib/llm/provider.rb +36 -23
  6. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  7. data/lib/llm/providers/anthropic/models.rb +1 -1
  8. data/lib/llm/providers/anthropic/request_adapter.rb +20 -3
  9. data/lib/llm/providers/anthropic/response_adapter/models.rb +13 -0
  10. data/lib/llm/providers/anthropic/response_adapter.rb +2 -0
  11. data/lib/llm/providers/anthropic.rb +2 -1
  12. data/lib/llm/providers/gemini/error_handler.rb +18 -3
  13. data/lib/llm/providers/gemini/response_adapter/models.rb +4 -6
  14. data/lib/llm/providers/ollama/error_handler.rb +1 -1
  15. data/lib/llm/providers/ollama/models.rb +1 -1
  16. data/lib/llm/providers/ollama/response_adapter/models.rb +13 -0
  17. data/lib/llm/providers/ollama/response_adapter.rb +2 -0
  18. data/lib/llm/providers/openai/error_handler.rb +18 -3
  19. data/lib/llm/providers/openai/images.rb +17 -11
  20. data/lib/llm/providers/openai/models.rb +1 -1
  21. data/lib/llm/providers/openai/response_adapter/models.rb +13 -0
  22. data/lib/llm/providers/openai/response_adapter.rb +2 -0
  23. data/lib/llm/providers/openai/responses.rb +7 -0
  24. data/lib/llm/providers/openai.rb +9 -2
  25. data/lib/llm/providers/xai/images.rb +7 -6
  26. data/lib/llm/schema/enum.rb +16 -0
  27. data/lib/llm/schema.rb +1 -0
  28. data/lib/llm/tool/param.rb +1 -1
  29. data/lib/llm/tool.rb +1 -1
  30. data/lib/llm/tracer/langsmith.rb +144 -0
  31. data/lib/llm/tracer/logger.rb +8 -0
  32. data/lib/llm/tracer/null.rb +8 -0
  33. data/lib/llm/tracer/telemetry.rb +91 -71
  34. data/lib/llm/tracer.rb +108 -4
  35. data/lib/llm/version.rb +1 -1
  36. data/lib/llm.rb +1 -0
  37. metadata +7 -1
@@ -7,8 +7,7 @@ module LLM
7
7
  # The {LLM::Tracer::Telemetry LLM::Tracer::Telemetry} tracer provides
8
8
  # telemetry support through the [opentelemetry-ruby](https://github.com/open-telemetry/opentelemetry-ruby)
9
9
  # RubyGem. The gem should be installed separately since this feature is opt-in
10
- # and disabled by default. This feature exists to support integration with tools
11
- # like [LangSmith](https://www.langsmith.com).
10
+ # and disabled by default.
12
11
  #
13
12
  # @see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai Telemetry specs (index)
14
13
  # @see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/openai.md Telemetry specs (OpenAI)
@@ -48,7 +47,6 @@ module LLM
48
47
  def initialize(provider, options = {})
49
48
  super
50
49
  @exporter = options.delete(:exporter)
51
- setup_langsmith!(options.delete(:langsmith))
52
50
  setup!
53
51
  end
54
52
 
@@ -59,7 +57,7 @@ module LLM
59
57
  #
60
58
  # @param (see LLM::Tracer#start_trace)
61
59
  # @return [self]
62
- def start_trace(trace_group_id: nil, name: "llm", attributes: {})
60
+ def start_trace(trace_group_id: nil, name: "llm", attributes: {}, metadata: nil)
63
61
  return self if trace_group_id.to_s.empty?
64
62
 
65
63
  span_context = span_context_from_trace_group_id(trace_group_id.to_s)
@@ -74,25 +72,25 @@ module LLM
74
72
  attributes: attrs,
75
73
  with_parent: parent_ctx
76
74
  )
77
- thread[thread_root_span_key] = root_span
78
- thread[thread_root_context_key] = ::OpenTelemetry::Trace.context_with_span(root_span)
75
+ @root_span = root_span
76
+ @root_context = ::OpenTelemetry::Trace.context_with_span(root_span)
79
77
  self
80
78
  end
81
79
 
82
80
  ##
83
81
  # @return [self]
84
82
  def stop_trace
85
- thread[thread_root_span_key]&.finish
86
- thread[thread_root_span_key] = nil
87
- thread[thread_root_context_key] = nil
83
+ @root_span&.finish
84
+ @root_span = nil
85
+ @root_context = nil
88
86
  self
89
87
  end
90
88
 
91
89
  ##
92
90
  # @param (see LLM::Tracer#on_request_start)
93
- def on_request_start(operation:, model: nil)
91
+ def on_request_start(operation:, model: nil, inputs: nil)
94
92
  case operation
95
- when "chat" then start_chat(operation:, model:)
93
+ when "chat" then start_chat(operation:, model:, inputs:)
96
94
  when "retrieval" then start_retrieval(operation:)
97
95
  else nil
98
96
  end
@@ -100,10 +98,10 @@ module LLM
100
98
 
101
99
  ##
102
100
  # @param (see LLM::Tracer#on_request_finish)
103
- def on_request_finish(operation:, res:, model: nil, span: nil)
101
+ def on_request_finish(operation:, res:, model: nil, span: nil, outputs: nil, metadata: nil)
104
102
  return nil unless span
105
103
  case operation
106
- when "chat" then finish_chat(operation:, model:, res:, span:)
104
+ when "chat" then finish_chat(operation:, model:, res:, span:, outputs:, metadata:)
107
105
  when "retrieval" then finish_retrieval(operation:, res:, span:)
108
106
  else nil
109
107
  end
@@ -133,7 +131,7 @@ module LLM
133
131
  "gen_ai.provider.name" => provider_name,
134
132
  "server.address" => provider_host,
135
133
  "server.port" => provider_port
136
- }.merge!(langsmith_attributes(span_kind: "tool")).compact
134
+ }.merge!(trace_attributes(span_kind: "tool")).compact
137
135
  span_name = ["execute_tool", name].compact.join(" ")
138
136
  span = create_span(span_name.empty? ? "gen_ai.tool" : span_name, attributes:)
139
137
  span.add_event("gen_ai.tool.start")
@@ -197,30 +195,12 @@ module LLM
197
195
  ##
198
196
  # @api private
199
197
  def create_span(name, kind: :client, attributes: {})
200
- root_context = thread[thread_root_context_key]
198
+ root_context = @root_context
201
199
  opts = {kind:, attributes:}
202
200
  opts[:with_parent] = root_context if root_context
203
201
  @tracer.start_span(name, **opts)
204
202
  end
205
203
 
206
- ##
207
- # @api private
208
- def thread_root_span_key
209
- @thread_root_span_key ||= :"llm.telemetry.root_span.#{object_id}"
210
- end
211
-
212
- ##
213
- # @api private
214
- def thread_root_context_key
215
- @thread_root_context_key ||= :"llm.telemetry.root_context.#{object_id}"
216
- end
217
-
218
- ##
219
- # @api private
220
- def thread
221
- Thread.current
222
- end
223
-
224
204
  ##
225
205
  # Converts a string trace_group_id to an OpenTelemetry SpanContext so all
226
206
  # spans created with this context share the same trace_id.
@@ -282,16 +262,20 @@ module LLM
282
262
  ##
283
263
  # start_*
284
264
 
285
- def start_chat(operation:, model:)
265
+ def start_chat(operation:, model:, inputs: nil)
266
+ request_metadata = consume_request_metadata
267
+ input_value = request_metadata[:user_input]
286
268
  attributes = {
287
269
  "gen_ai.operation.name" => operation,
288
270
  "gen_ai.request.model" => model,
289
271
  "gen_ai.provider.name" => provider_name,
290
272
  "server.address" => provider_host,
291
- "server.port" => provider_port
292
- }.merge!(langsmith_attributes(span_kind: "llm")).compact
273
+ "server.port" => provider_port,
274
+ "input.value" => serialize_request_value(input_value)
275
+ }.merge!(trace_attributes(span_kind: "llm")).compact
293
276
  span_name = [operation, model].compact.join(" ")
294
277
  span = create_span(span_name.empty? ? "gen_ai.request" : span_name, attributes:)
278
+ set_span_attributes(span, consume_extra_inputs.merge(inputs || {}))
295
279
  span.add_event("gen_ai.request.start")
296
280
  span
297
281
  end
@@ -302,7 +286,7 @@ module LLM
302
286
  "gen_ai.provider.name" => provider_name,
303
287
  "server.address" => provider_host,
304
288
  "server.port" => provider_port
305
- }.merge!(langsmith_attributes(span_kind: "retriever")).compact
289
+ }.merge!(trace_attributes(span_kind: "retriever")).compact
306
290
  span = create_span(operation, attributes:)
307
291
  span.add_event("gen_ai.request.start")
308
292
  span
@@ -311,16 +295,26 @@ module LLM
311
295
  ##
312
296
  # finish_*
313
297
 
314
- def finish_chat(operation:, model:, res:, span:)
298
+ def finish_chat(operation:, model:, res:, span:, outputs: nil, metadata: nil)
299
+ output_value = if res.respond_to?(:output_text)
300
+ res.output_text
301
+ else
302
+ (res.respond_to?(:content) ? res.content : nil)
303
+ end
315
304
  attributes = {
316
305
  "gen_ai.operation.name" => operation,
317
306
  "gen_ai.request.model" => model,
318
307
  "gen_ai.response.id" => res.id,
319
308
  "gen_ai.response.model" => model,
320
309
  "gen_ai.usage.input_tokens" => res.usage.input_tokens,
321
- "gen_ai.usage.output_tokens" => res.usage.output_tokens
310
+ "gen_ai.usage.output_tokens" => res.usage.output_tokens,
311
+ "output.value" => serialize_request_value(output_value)
322
312
  }.merge!(finish_attributes(operation, res)).compact
323
313
  attributes.each { span.set_attribute(_1, _2) }
314
+ set_span_attributes(span, consume_extra_outputs.merge(outputs || {}))
315
+ finish_metadata = consume_finish_metadata_proc(res)
316
+ metadata = (metadata || {}).merge(finish_metadata || {})
317
+ set_span_attributes(span, metadata.transform_keys { "langsmith.metadata.#{_1}" })
324
318
  span.add_event("gen_ai.request.finish")
325
319
  span.tap(&:finish)
326
320
  end
@@ -329,57 +323,83 @@ module LLM
329
323
  attributes = {
330
324
  "gen_ai.operation.name" => operation
331
325
  }.merge!(finish_attributes(operation, res)).compact
326
+ chunks_json = retrieval_chunks_json(res)
327
+ attributes["langsmith.metadata.chunks"] = chunks_json if chunks_json
332
328
  attributes.each { span.set_attribute(_1, _2) }
333
329
  span.add_event("gen_ai.request.finish")
334
330
  span.tap(&:finish)
335
331
  end
336
332
 
337
- def setup_langsmith!(options)
338
- options ||= {}
339
- @langsmith_metadata = options[:metadata] || {}
340
- @langsmith_session_id = normalize_langsmith_session_id(options[:session_id], metadata: @langsmith_metadata)
341
- @langsmith_tags = options[:tags] || []
333
+ ##
334
+ # @api private
335
+ # Consume and clear the thread-local finish-metadata proc, invoking it with res.
336
+ # Returns a Hash of metadata ({} when no proc is set or the call raises).
337
+ def consume_finish_metadata_proc(res)
338
+ key = LLM::Tracer::FINISH_METADATA_PROC_KEY
339
+ proc = Thread.current[key]
340
+ Thread.current[key] = nil
341
+ return {} unless proc.respond_to?(:call)
342
+
343
+ proc.call(res) || {}
344
+ rescue
345
+ {}
342
346
  end
343
347
 
344
- def langsmith_attributes(span_kind:)
345
- attributes = {}
346
- unless @langsmith_session_id.to_s.empty?
347
- attributes["langsmith.trace.session_id"] = @langsmith_session_id
348
- end
349
- @langsmith_metadata.each do |key, value|
350
- next if value.nil?
348
+ def retrieval_chunks_json(res)
349
+ return nil unless res.respond_to?(:data)
351
350
 
352
- attributes["langsmith.metadata.#{key}"] = serialize_langsmith_value(value)
353
- end
354
- unless @langsmith_tags.empty?
355
- attributes["langsmith.span.tags"] = @langsmith_tags.map(&:to_s).join(",")
351
+ data = res.data
352
+ return nil unless data.is_a?(Array)
353
+
354
+ payload = data.map { |c| c.respond_to?(:to_h) ? c.to_h : c }
355
+ LLM.json.dump(payload)
356
+ rescue
357
+ nil
358
+ end
359
+
360
+ ##
361
+ # @api private
362
+ # Hook for tracer-specific span attributes.
363
+ # Subclasses can override this to inject provider-agnostic tags.
364
+ def trace_attributes(span_kind:)
365
+ {}
366
+ end
367
+
368
+ ##
369
+ # @api private
370
+ # Sets attribute key-value pairs on the span, serializing non-primitive values to JSON.
371
+ def set_span_attributes(span, attrs)
372
+ return if attrs.nil? || attrs.empty?
373
+
374
+ attrs.each do |key, value|
375
+ span.set_attribute(key.to_s, serialize_span_value(value))
356
376
  end
357
- attributes["langsmith.span.kind"] = span_kind
358
- attributes
359
377
  end
360
378
 
361
- def serialize_langsmith_value(value)
379
+ ##
380
+ # @api private
381
+ # OpenTelemetry attributes accept String, Numeric, Boolean, or Array of those.
382
+ # Complex values (hashes, arrays of objects) are serialized to JSON strings.
383
+ def serialize_span_value(value)
362
384
  case value
363
385
  when String, Numeric, TrueClass, FalseClass
364
386
  value
387
+ when Array
388
+ value.all? { |v| v.is_a?(String) || v.is_a?(Numeric) || v == true || v == false } ? value : LLM.json.dump(value)
365
389
  else
366
390
  LLM.json.dump(value)
367
391
  end
368
392
  end
369
393
 
370
- def normalize_langsmith_session_id(session_id, metadata:)
371
- raw = session_id&.to_s
372
- return nil if raw.to_s.empty?
373
- return raw if uuid?(raw)
374
-
375
- # Keep arbitrary thread identifiers in metadata instead of forcing
376
- # them into langsmith.trace.session_id, which expects a known UUID.
377
- metadata[:session_id] ||= raw
378
- nil
379
- end
380
-
381
- def uuid?(value)
382
- value.match?(/\A[0-9a-f]{8}-[0-9a-f]{4}-[1-5][0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}\z/i)
394
+ def serialize_request_value(value)
395
+ case value
396
+ when nil
397
+ nil
398
+ when String
399
+ value
400
+ else
401
+ LLM.json.dump(value)
402
+ end
383
403
  end
384
404
  end
385
405
  end
data/lib/llm/tracer.rb CHANGED
@@ -11,6 +11,7 @@ module LLM
11
11
  class Tracer
12
12
  require_relative "tracer/logger"
13
13
  require_relative "tracer/telemetry"
14
+ require_relative "tracer/langsmith"
14
15
  require_relative "tracer/null"
15
16
 
16
17
  ##
@@ -27,19 +28,22 @@ module LLM
27
28
  # Called before an LLM provider request is executed.
28
29
  # @param [String] operation
29
30
  # @param [String] model
31
+ # @param [Hash, nil] inputs Optional span attributes (e.g. gen_ai.input.messages) from llm.rb or caller.
30
32
  # @return [void]
31
- def on_request_start(operation:, model: nil)
33
+ def on_request_start(operation:, model: nil, inputs: nil)
32
34
  raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
33
35
  end
34
36
 
35
37
  ##
36
38
  # Called after an LLM provider request succeeds.
37
39
  # @param [String] operation
38
- # @param [String] model
39
40
  # @param [LLM::Response] res
40
41
  # @param [Object, nil] span
42
+ # @param [String] model
43
+ # @param [Hash, nil] outputs Optional span attributes (e.g. gen_ai.output.messages) from llm.rb or caller.
44
+ # @param [Hash, nil] metadata Optional metadata (emitted as langsmith.metadata.*) from llm.rb or caller.
41
45
  # @return [void]
42
- def on_request_finish(operation:, res:, model: nil, span: nil)
46
+ def on_request_finish(operation:, res:, model: nil, span: nil, outputs: nil, metadata: nil)
43
47
  raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
44
48
  end
45
49
 
@@ -101,8 +105,11 @@ module LLM
101
105
  # Name for the root span (e.g. "chatbot.turn").
102
106
  # @param [Hash] attributes
103
107
  # OpenTelemetry attributes to set on the root span.
108
+ # @param [Hash, nil] metadata
109
+ # Optional. Trace-level metadata merged into the trace (e.g. langsmith.metadata.*).
110
+ # Only used by tracers that support it (e.g. {LLM::Tracer::Langsmith}).
104
111
  # @return [self]
105
- def start_trace(trace_group_id: nil, name: "llm", attributes: {})
112
+ def start_trace(trace_group_id: nil, name: "llm", attributes: {}, metadata: nil)
106
113
  self
107
114
  end
108
115
 
@@ -136,8 +143,105 @@ module LLM
136
143
  nil
137
144
  end
138
145
 
146
+ ##
147
+ # Merges extra attributes for the current trace/span. Used by applications
148
+ # (e.g. chatbot) to add metadata, span inputs, or span outputs to the next
149
+ # span or to the trace. No-op by default; {LLM::Tracer::Langsmith} merges
150
+ # into thread-local storage and emits them as langsmith/GenAI attributes.
151
+ #
152
+ # @param [Hash, nil] metadata
153
+ # Key-value pairs merged into trace/span metadata (e.g. langsmith.metadata.*).
154
+ # @param [Hash, nil] inputs
155
+ # Key-value pairs set on the next span at start (e.g. gen_ai.input.messages).
156
+ # Consumed when the span is created.
157
+ # @param [Hash, nil] outputs
158
+ # Key-value pairs set on the current span at finish (e.g. gen_ai.output.messages).
159
+ # Must be set before the request finishes (e.g. in a block passed to the provider).
160
+ # @return [self]
161
+ def merge_extra(metadata: nil, inputs: nil, outputs: nil)
162
+ self
163
+ end
164
+
165
+ ##
166
+ # Optional: set a proc to supply metadata when the next chat span finishes.
167
+ # The proc is called with the response (res) and should return a Hash of
168
+ # metadata (e.g. { intent: "...", confidence: 1.0 }) to merge onto the span
169
+ # as langsmith.metadata.*. Cleared after use. Used by apps to attach
170
+ # routing/intent that is only known after the response.
171
+ #
172
+ # @param [Proc, nil] proc (res) -> Hash or nil
173
+ # @return [self]
174
+ def set_finish_metadata_proc(proc)
175
+ thread[FINISH_METADATA_PROC_KEY] = proc
176
+ self
177
+ end
178
+
179
+ FINISH_METADATA_PROC_KEY = :"llm.tracer.finish_metadata_proc"
180
+
181
+ ##
182
+ # Returns the current extra bag (metadata, inputs, outputs) for the current
183
+ # thread/trace. Used by subclasses; default returns empty hashes.
184
+ #
185
+ # @return [Hash] { metadata: {}, inputs: {}, outputs: {} }
186
+ def current_extra
187
+ {}
188
+ end
189
+
190
+ ##
191
+ # Returns and clears extra inputs for the next span. Called by the telemetry
192
+ # tracer when starting a span. Subclasses (e.g. Langsmith) override to
193
+ # return thread-local inputs; default returns {}.
194
+ #
195
+ # @return [Hash] Attribute key => value to set on the span at start
196
+ def consume_extra_inputs
197
+ {}
198
+ end
199
+
200
+ ##
201
+ # Returns and clears extra outputs for the current span. Called by the
202
+ # telemetry tracer when finishing a span. Subclasses override to return
203
+ # thread-local outputs; default returns {}.
204
+ #
205
+ # @return [Hash] Attribute key => value to set on the span at finish
206
+ def consume_extra_outputs
207
+ {}
208
+ end
209
+
210
+ ##
211
+ # Store per-request metadata (e.g. user_input) to be consumed by tracers
212
+ # when starting the next span. Used for plain-text input.value / output.value.
213
+ #
214
+ # @param [Hash] metadata e.g. { user_input: "the user question" }
215
+ # @return [nil]
216
+ def set_request_metadata(metadata)
217
+ return nil unless metadata && !metadata.empty?
218
+ key = thread_request_metadata_key
219
+ current = thread[key] || {}
220
+ thread[key] = current.merge(metadata.compact)
221
+ nil
222
+ end
223
+
224
+ ##
225
+ # Consume and clear per-request metadata. Called by the telemetry tracer at span start.
226
+ #
227
+ # @return [Hash]
228
+ def consume_request_metadata
229
+ key = thread_request_metadata_key
230
+ data = thread[key] || {}
231
+ thread[key] = nil
232
+ data
233
+ end
234
+
139
235
  private
140
236
 
237
+ def thread_request_metadata_key
238
+ @thread_request_metadata_key ||= :"llm.tracer.request_metadata.#{object_id}"
239
+ end
240
+
241
+ def thread
242
+ Thread.current
243
+ end
244
+
141
245
  ##
142
246
  # @return [String]
143
247
  def provider_name
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
1
1
  # frozen_string_literal: true
2
2
 
3
3
  module LLM
4
- VERSION = "4.7.0"
4
+ VERSION = "4.8.0"
5
5
  end
data/lib/llm.rb CHANGED
@@ -10,6 +10,7 @@ module LLM
10
10
  require_relative "llm/prompt"
11
11
  require_relative "llm/schema"
12
12
  require_relative "llm/object"
13
+ require_relative "llm/model"
13
14
  require_relative "llm/version"
14
15
  require_relative "llm/utils"
15
16
  require_relative "llm/message"
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: llm.rb
3
3
  version: !ruby/object:Gem::Version
4
- version: 4.7.0
4
+ version: 4.8.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Antar Azri
@@ -223,6 +223,7 @@ files:
223
223
  - lib/llm/json_adapter.rb
224
224
  - lib/llm/message.rb
225
225
  - lib/llm/mime.rb
226
+ - lib/llm/model.rb
226
227
  - lib/llm/multipart.rb
227
228
  - lib/llm/multipart/enumerator_io.rb
228
229
  - lib/llm/object.rb
@@ -240,6 +241,7 @@ files:
240
241
  - lib/llm/providers/anthropic/response_adapter/completion.rb
241
242
  - lib/llm/providers/anthropic/response_adapter/enumerable.rb
242
243
  - lib/llm/providers/anthropic/response_adapter/file.rb
244
+ - lib/llm/providers/anthropic/response_adapter/models.rb
243
245
  - lib/llm/providers/anthropic/response_adapter/web_search.rb
244
246
  - lib/llm/providers/anthropic/stream_parser.rb
245
247
  - lib/llm/providers/deepseek.rb
@@ -271,6 +273,7 @@ files:
271
273
  - lib/llm/providers/ollama/response_adapter.rb
272
274
  - lib/llm/providers/ollama/response_adapter/completion.rb
273
275
  - lib/llm/providers/ollama/response_adapter/embedding.rb
276
+ - lib/llm/providers/ollama/response_adapter/models.rb
274
277
  - lib/llm/providers/ollama/stream_parser.rb
275
278
  - lib/llm/providers/openai.rb
276
279
  - lib/llm/providers/openai/audio.rb
@@ -290,6 +293,7 @@ files:
290
293
  - lib/llm/providers/openai/response_adapter/enumerable.rb
291
294
  - lib/llm/providers/openai/response_adapter/file.rb
292
295
  - lib/llm/providers/openai/response_adapter/image.rb
296
+ - lib/llm/providers/openai/response_adapter/models.rb
293
297
  - lib/llm/providers/openai/response_adapter/moderations.rb
294
298
  - lib/llm/providers/openai/response_adapter/responds.rb
295
299
  - lib/llm/providers/openai/response_adapter/web_search.rb
@@ -304,6 +308,7 @@ files:
304
308
  - lib/llm/schema.rb
305
309
  - lib/llm/schema/array.rb
306
310
  - lib/llm/schema/boolean.rb
311
+ - lib/llm/schema/enum.rb
307
312
  - lib/llm/schema/integer.rb
308
313
  - lib/llm/schema/leaf.rb
309
314
  - lib/llm/schema/null.rb
@@ -317,6 +322,7 @@ files:
317
322
  - lib/llm/tool.rb
318
323
  - lib/llm/tool/param.rb
319
324
  - lib/llm/tracer.rb
325
+ - lib/llm/tracer/langsmith.rb
320
326
  - lib/llm/tracer/logger.rb
321
327
  - lib/llm/tracer/null.rb
322
328
  - lib/llm/tracer/telemetry.rb