llm.rb 4.6.0 → 4.8.0
This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +40 -37
- data/lib/llm/contract/completion.rb +14 -0
- data/lib/llm/eventstream/parser.rb +0 -5
- data/lib/llm/model.rb +115 -0
- data/lib/llm/provider.rb +50 -23
- data/lib/llm/providers/anthropic/error_handler.rb +1 -1
- data/lib/llm/providers/anthropic/models.rb +1 -1
- data/lib/llm/providers/anthropic/request_adapter.rb +20 -3
- data/lib/llm/providers/anthropic/response_adapter/completion.rb +12 -0
- data/lib/llm/providers/anthropic/response_adapter/models.rb +13 -0
- data/lib/llm/providers/anthropic/response_adapter.rb +2 -0
- data/lib/llm/providers/anthropic.rb +2 -1
- data/lib/llm/providers/gemini/error_handler.rb +18 -3
- data/lib/llm/providers/gemini/response_adapter/completion.rb +12 -0
- data/lib/llm/providers/gemini/response_adapter/models.rb +4 -6
- data/lib/llm/providers/ollama/error_handler.rb +1 -1
- data/lib/llm/providers/ollama/models.rb +1 -1
- data/lib/llm/providers/ollama/response_adapter/completion.rb +12 -0
- data/lib/llm/providers/ollama/response_adapter/models.rb +13 -0
- data/lib/llm/providers/ollama/response_adapter.rb +2 -0
- data/lib/llm/providers/openai/error_handler.rb +18 -3
- data/lib/llm/providers/openai/images.rb +17 -11
- data/lib/llm/providers/openai/models.rb +1 -1
- data/lib/llm/providers/openai/response_adapter/completion.rb +12 -0
- data/lib/llm/providers/openai/response_adapter/models.rb +13 -0
- data/lib/llm/providers/openai/response_adapter.rb +2 -0
- data/lib/llm/providers/openai/responses.rb +7 -0
- data/lib/llm/providers/openai.rb +9 -2
- data/lib/llm/providers/xai/images.rb +7 -6
- data/lib/llm/schema/enum.rb +16 -0
- data/lib/llm/schema.rb +1 -0
- data/lib/llm/tool/param.rb +1 -1
- data/lib/llm/tool.rb +1 -1
- data/lib/llm/tracer/langsmith.rb +144 -0
- data/lib/llm/tracer/logger.rb +8 -0
- data/lib/llm/tracer/null.rb +8 -0
- data/lib/llm/tracer/telemetry.rb +107 -38
- data/lib/llm/tracer.rb +108 -4
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +1 -0
- metadata +7 -1
data/lib/llm/tracer/telemetry.rb
CHANGED
@@ -7,8 +7,7 @@ module LLM
   # The {LLM::Tracer::Telemetry LLM::Tracer::Telemetry} tracer provides
   # telemetry support through the [opentelemetry-ruby](https://github.com/open-telemetry/opentelemetry-ruby)
   # RubyGem. The gem should be installed separately since this feature is opt-in
-  # and disabled by default.
-  # like [LangSmith](https://www.langsmith.com).
+  # and disabled by default.
   #
   # @see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai Telemetry specs (index)
   # @see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/openai.md Telemetry specs (OpenAI)
@@ -58,7 +57,7 @@ module LLM
     #
     # @param (see LLM::Tracer#start_trace)
     # @return [self]
-    def start_trace(trace_group_id: nil, name: "llm", attributes: {})
+    def start_trace(trace_group_id: nil, name: "llm", attributes: {}, metadata: nil)
       return self if trace_group_id.to_s.empty?

       span_context = span_context_from_trace_group_id(trace_group_id.to_s)
@@ -73,25 +72,25 @@ module LLM
         attributes: attrs,
         with_parent: parent_ctx
       )
-
-
+      @root_span = root_span
+      @root_context = ::OpenTelemetry::Trace.context_with_span(root_span)
       self
     end

     ##
     # @return [self]
     def stop_trace
-
-
-
+      @root_span&.finish
+      @root_span = nil
+      @root_context = nil
       self
     end

     ##
     # @param (see LLM::Tracer#on_request_start)
-    def on_request_start(operation:, model: nil)
+    def on_request_start(operation:, model: nil, inputs: nil)
       case operation
-      when "chat" then start_chat(operation:, model:)
+      when "chat" then start_chat(operation:, model:, inputs:)
       when "retrieval" then start_retrieval(operation:)
       else nil
       end
@@ -99,10 +98,10 @@ module LLM

     ##
     # @param (see LLM::Tracer#on_request_finish)
-    def on_request_finish(operation:, res:, model: nil, span: nil)
+    def on_request_finish(operation:, res:, model: nil, span: nil, outputs: nil, metadata: nil)
       return nil unless span
       case operation
-      when "chat" then finish_chat(operation:, model:, res:, span:)
+      when "chat" then finish_chat(operation:, model:, res:, span:, outputs:, metadata:)
       when "retrieval" then finish_retrieval(operation:, res:, span:)
       else nil
       end
@@ -132,7 +131,7 @@ module LLM
         "gen_ai.provider.name" => provider_name,
         "server.address" => provider_host,
         "server.port" => provider_port
-      }.compact
+      }.merge!(trace_attributes(span_kind: "tool")).compact
       span_name = ["execute_tool", name].compact.join(" ")
       span = create_span(span_name.empty? ? "gen_ai.tool" : span_name, attributes:)
       span.add_event("gen_ai.tool.start")
@@ -196,30 +195,12 @@ module LLM
     ##
     # @api private
     def create_span(name, kind: :client, attributes: {})
-      root_context =
+      root_context = @root_context
       opts = {kind:, attributes:}
       opts[:with_parent] = root_context if root_context
       @tracer.start_span(name, **opts)
     end

-    ##
-    # @api private
-    def thread_root_span_key
-      @thread_root_span_key ||= :"llm.telemetry.root_span.#{object_id}"
-    end
-
-    ##
-    # @api private
-    def thread_root_context_key
-      @thread_root_context_key ||= :"llm.telemetry.root_context.#{object_id}"
-    end
-
-    ##
-    # @api private
-    def thread
-      Thread.current
-    end
-
     ##
     # Converts a string trace_group_id to an OpenTelemetry SpanContext so all
     # spans created with this context share the same trace_id.
@@ -281,16 +262,20 @@ module LLM
     ##
     # start_*

-    def start_chat(operation:, model:)
+    def start_chat(operation:, model:, inputs: nil)
+      request_metadata = consume_request_metadata
+      input_value = request_metadata[:user_input]
       attributes = {
         "gen_ai.operation.name" => operation,
         "gen_ai.request.model" => model,
         "gen_ai.provider.name" => provider_name,
         "server.address" => provider_host,
-        "server.port" => provider_port
-      }.compact
+        "server.port" => provider_port,
+        "input.value" => serialize_request_value(input_value)
+      }.merge!(trace_attributes(span_kind: "llm")).compact
       span_name = [operation, model].compact.join(" ")
       span = create_span(span_name.empty? ? "gen_ai.request" : span_name, attributes:)
+      set_span_attributes(span, consume_extra_inputs.merge(inputs || {}))
       span.add_event("gen_ai.request.start")
       span
     end
@@ -301,7 +286,7 @@ module LLM
         "gen_ai.provider.name" => provider_name,
         "server.address" => provider_host,
         "server.port" => provider_port
-      }.compact
+      }.merge!(trace_attributes(span_kind: "retriever")).compact
       span = create_span(operation, attributes:)
       span.add_event("gen_ai.request.start")
       span
@@ -310,16 +295,26 @@ module LLM
     ##
     # finish_*

-    def finish_chat(operation:, model:, res:, span:)
+    def finish_chat(operation:, model:, res:, span:, outputs: nil, metadata: nil)
+      output_value = if res.respond_to?(:output_text)
+        res.output_text
+      else
+        (res.respond_to?(:content) ? res.content : nil)
+      end
       attributes = {
         "gen_ai.operation.name" => operation,
         "gen_ai.request.model" => model,
         "gen_ai.response.id" => res.id,
         "gen_ai.response.model" => model,
         "gen_ai.usage.input_tokens" => res.usage.input_tokens,
-        "gen_ai.usage.output_tokens" => res.usage.output_tokens
+        "gen_ai.usage.output_tokens" => res.usage.output_tokens,
+        "output.value" => serialize_request_value(output_value)
       }.merge!(finish_attributes(operation, res)).compact
       attributes.each { span.set_attribute(_1, _2) }
+      set_span_attributes(span, consume_extra_outputs.merge(outputs || {}))
+      finish_metadata = consume_finish_metadata_proc(res)
+      metadata = (metadata || {}).merge(finish_metadata || {})
+      set_span_attributes(span, metadata.transform_keys { "langsmith.metadata.#{_1}" })
       span.add_event("gen_ai.request.finish")
       span.tap(&:finish)
     end
@@ -328,9 +323,83 @@ module LLM
       attributes = {
         "gen_ai.operation.name" => operation
       }.merge!(finish_attributes(operation, res)).compact
+      chunks_json = retrieval_chunks_json(res)
+      attributes["langsmith.metadata.chunks"] = chunks_json if chunks_json
       attributes.each { span.set_attribute(_1, _2) }
       span.add_event("gen_ai.request.finish")
       span.tap(&:finish)
     end
+
+    ##
+    # @api private
+    # Serialize retrieval response chunks for span attributes (e.g. langsmith.metadata.chunks).
+    # Returns a JSON string or nil when res has no data.
+    def consume_finish_metadata_proc(res)
+      key = LLM::Tracer::FINISH_METADATA_PROC_KEY
+      proc = Thread.current[key]
+      Thread.current[key] = nil
+      return {} unless proc.respond_to?(:call)
+
+      proc.call(res) || {}
+    rescue
+      {}
+    end
+
+    def retrieval_chunks_json(res)
+      return nil unless res.respond_to?(:data)
+
+      data = res.data
+      return nil unless data.is_a?(Array)
+
+      payload = data.map { |c| c.respond_to?(:to_h) ? c.to_h : c }
+      LLM.json.dump(payload)
+    rescue
+      nil
+    end
+
+    ##
+    # @api private
+    # Hook for tracer-specific span attributes.
+    # Subclasses can override this to inject provider-agnostic tags.
+    def trace_attributes(span_kind:)
+      {}
+    end
+
+    ##
+    # @api private
+    # Sets attribute key-value pairs on the span, serializing non-primitive values to JSON.
+    def set_span_attributes(span, attrs)
+      return if attrs.nil? || attrs.empty?
+
+      attrs.each do |key, value|
+        span.set_attribute(key.to_s, serialize_span_value(value))
+      end
+    end
+
+    ##
+    # @api private
+    # OpenTelemetry attributes accept String, Numeric, Boolean, or Array of those.
+    # Complex values (hashes, arrays of objects) are serialized to JSON strings.
+    def serialize_span_value(value)
+      case value
+      when String, Numeric, TrueClass, FalseClass
+        value
+      when Array
+        value.all? { |v| v.is_a?(String) || v.is_a?(Numeric) || v == true || v == false } ? value : LLM.json.dump(value)
+      else
+        LLM.json.dump(value)
+      end
+    end
+
+    def serialize_request_value(value)
+      case value
+      when nil
+        nil
+      when String
+        value
+      else
+        LLM.json.dump(value)
+      end
+    end
   end
 end
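The `trace_attributes(span_kind:)` hook added above deliberately returns an empty hash: it exists so a subclass can tag every chat, tool, and retrieval span in one place, since each span builder merges its result via `merge!(trace_attributes(span_kind: ...))`. A minimal sketch of overriding it; the `MyTracer` class name and the attribute key are illustrative assumptions, not part of the gem:

require "llm"

# Hypothetical subclass: tags every span the telemetry tracer creates.
# span_kind is "llm", "tool", or "retriever" depending on the call site.
class MyTracer < LLM::Tracer::Telemetry
  def trace_attributes(span_kind:)
    {"example.span.kind" => span_kind}
  end
end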
data/lib/llm/tracer.rb
CHANGED
@@ -11,6 +11,7 @@ module LLM
   class Tracer
     require_relative "tracer/logger"
     require_relative "tracer/telemetry"
+    require_relative "tracer/langsmith"
     require_relative "tracer/null"

     ##
@@ -27,19 +28,22 @@ module LLM
     # Called before an LLM provider request is executed.
     # @param [String] operation
     # @param [String] model
+    # @param [Hash, nil] inputs Optional span attributes (e.g. gen_ai.input.messages) from llm.rb or caller.
     # @return [void]
-    def on_request_start(operation:, model: nil)
+    def on_request_start(operation:, model: nil, inputs: nil)
       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
     end

     ##
     # Called after an LLM provider request succeeds.
     # @param [String] operation
-    # @param [String] model
     # @param [LLM::Response] res
     # @param [Object, nil] span
+    # @param [String] model
+    # @param [Hash, nil] outputs Optional span attributes (e.g. gen_ai.output.messages) from llm.rb or caller.
+    # @param [Hash, nil] metadata Optional metadata (emitted as langsmith.metadata.*) from llm.rb or caller.
     # @return [void]
-    def on_request_finish(operation:, res:, model: nil, span: nil)
+    def on_request_finish(operation:, res:, model: nil, span: nil, outputs: nil, metadata: nil)
       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
     end
@@ -101,8 +105,11 @@ module LLM
     # Name for the root span (e.g. "chatbot.turn").
     # @param [Hash] attributes
     # OpenTelemetry attributes to set on the root span.
+    # @param [Hash, nil] metadata
+    # Optional. Trace-level metadata merged into the trace (e.g. langsmith.metadata.*).
+    # Only used by tracers that support it (e.g. {LLM::Tracer::Langsmith}).
     # @return [self]
-    def start_trace(trace_group_id: nil, name: "llm", attributes: {})
+    def start_trace(trace_group_id: nil, name: "llm", attributes: {}, metadata: nil)
       self
     end
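With the new `metadata:` parameter, a caller can attach trace-level metadata when opening the root span. A usage sketch, assuming `tracer` is an already-configured tracer instance and all values shown are illustrative:

# Root span for one conversational turn; metadata is only consumed by
# tracers that support it (e.g. LLM::Tracer::Langsmith).
tracer.start_trace(
  trace_group_id: "conversation-42",       # spans sharing this id join one trace
  name: "chatbot.turn",
  attributes: {"app.env" => "production"}, # plain OpenTelemetry attributes
  metadata: {tenant: "acme"}               # emitted as langsmith.metadata.*
)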
@@ -136,8 +143,105 @@ module LLM
       nil
     end

+    ##
+    # Merges extra attributes for the current trace/span. Used by applications
+    # (e.g. chatbot) to add metadata, span inputs, or span outputs to the next
+    # span or to the trace. No-op by default; {LLM::Tracer::Langsmith} merges
+    # into thread-local storage and emits them as langsmith/GenAI attributes.
+    #
+    # @param [Hash, nil] metadata
+    #  Key-value pairs merged into trace/span metadata (e.g. langsmith.metadata.*).
+    # @param [Hash, nil] inputs
+    #  Key-value pairs set on the next span at start (e.g. gen_ai.input.messages).
+    #  Consumed when the span is created.
+    # @param [Hash, nil] outputs
+    #  Key-value pairs set on the current span at finish (e.g. gen_ai.output.messages).
+    #  Must be set before the request finishes (e.g. in a block passed to the provider).
+    # @return [self]
+    def merge_extra(metadata: nil, inputs: nil, outputs: nil)
+      self
+    end
+
+    ##
+    # Optional: set a proc to supply metadata when the next chat span finishes.
+    # The proc is called with the response (res) and should return a Hash of
+    # metadata (e.g. { intent: "...", confidence: 1.0 }) to merge onto the span
+    # as langsmith.metadata.*. Cleared after use. Used by apps to attach
+    # routing/intent that is only known after the response.
+    #
+    # @param [Proc, nil] proc (res) -> Hash or nil
+    # @return [self]
+    def set_finish_metadata_proc(proc)
+      thread[FINISH_METADATA_PROC_KEY] = proc
+      self
+    end
+
+    FINISH_METADATA_PROC_KEY = :"llm.tracer.finish_metadata_proc"
+
+    ##
+    # Returns the current extra bag (metadata, inputs, outputs) for the current
+    # thread/trace. Used by subclasses; default returns empty hashes.
+    #
+    # @return [Hash] { metadata: {}, inputs: {}, outputs: {} }
+    def current_extra
+      {}
+    end
+
+    ##
+    # Returns and clears extra inputs for the next span. Called by the telemetry
+    # tracer when starting a span. Subclasses (e.g. Langsmith) override to
+    # return thread-local inputs; default returns {}.
+    #
+    # @return [Hash] Attribute key => value to set on the span at start
+    def consume_extra_inputs
+      {}
+    end
+
+    ##
+    # Returns and clears extra outputs for the current span. Called by the
+    # telemetry tracer when finishing a span. Subclasses override to return
+    # thread-local outputs; default returns {}.
+    #
+    # @return [Hash] Attribute key => value to set on the span at finish
+    def consume_extra_outputs
+      {}
+    end
+
+    ##
+    # Store per-request metadata (e.g. user_input) to be consumed by tracers
+    # when starting the next span. Used for plain-text input.value / output.value.
+    #
+    # @param [Hash] metadata e.g. { user_input: "the user question" }
+    # @return [nil]
+    def set_request_metadata(metadata)
+      return nil unless metadata && !metadata.empty?
+      key = thread_request_metadata_key
+      current = thread[key] || {}
+      thread[key] = current.merge(metadata.compact)
+      nil
+    end
+
+    ##
+    # Consume and clear per-request metadata. Called by the telemetry tracer at span start.
+    #
+    # @return [Hash]
+    def consume_request_metadata
+      key = thread_request_metadata_key
+      data = thread[key] || {}
+      thread[key] = nil
+      data
+    end
+
     private

+    def thread_request_metadata_key
+      @thread_request_metadata_key ||= :"llm.tracer.request_metadata.#{object_id}"
+    end
+
+    def thread
+      Thread.current
+    end
+
     ##
     # @return [String]
     def provider_name
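Taken together, the new base-class methods give an application a thread-local channel for enriching the next span without threading arguments through every call. A usage sketch, assuming `tracer` is a configured tracer instance and the keys and values are illustrative:

# Plain-text input consumed at span start and recorded as input.value.
tracer.set_request_metadata(user_input: "What is the capital of France?")

# Extra span inputs and trace metadata; a no-op on the base class, honored
# by tracers that support it (e.g. LLM::Tracer::Langsmith).
tracer.merge_extra(
  metadata: {session_id: "abc123"},
  inputs: {"gen_ai.input.messages" => LLM.json.dump([{role: "user", content: "Hi"}])}
)

# Metadata known only after the response; merged onto the chat span as
# langsmith.metadata.* and cleared after one use.
tracer.set_finish_metadata_proc(->(res) { {response_id: res.id} })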
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 4.6.0
+  version: 4.8.0
 platform: ruby
 authors:
 - Antar Azri
@@ -223,6 +223,7 @@ files:
 - lib/llm/json_adapter.rb
 - lib/llm/message.rb
 - lib/llm/mime.rb
+- lib/llm/model.rb
 - lib/llm/multipart.rb
 - lib/llm/multipart/enumerator_io.rb
 - lib/llm/object.rb
@@ -240,6 +241,7 @@ files:
 - lib/llm/providers/anthropic/response_adapter/completion.rb
 - lib/llm/providers/anthropic/response_adapter/enumerable.rb
 - lib/llm/providers/anthropic/response_adapter/file.rb
+- lib/llm/providers/anthropic/response_adapter/models.rb
 - lib/llm/providers/anthropic/response_adapter/web_search.rb
 - lib/llm/providers/anthropic/stream_parser.rb
 - lib/llm/providers/deepseek.rb
@@ -271,6 +273,7 @@ files:
 - lib/llm/providers/ollama/response_adapter.rb
 - lib/llm/providers/ollama/response_adapter/completion.rb
 - lib/llm/providers/ollama/response_adapter/embedding.rb
+- lib/llm/providers/ollama/response_adapter/models.rb
 - lib/llm/providers/ollama/stream_parser.rb
 - lib/llm/providers/openai.rb
 - lib/llm/providers/openai/audio.rb
@@ -290,6 +293,7 @@ files:
 - lib/llm/providers/openai/response_adapter/enumerable.rb
 - lib/llm/providers/openai/response_adapter/file.rb
 - lib/llm/providers/openai/response_adapter/image.rb
+- lib/llm/providers/openai/response_adapter/models.rb
 - lib/llm/providers/openai/response_adapter/moderations.rb
 - lib/llm/providers/openai/response_adapter/responds.rb
 - lib/llm/providers/openai/response_adapter/web_search.rb
@@ -304,6 +308,7 @@ files:
 - lib/llm/schema.rb
 - lib/llm/schema/array.rb
 - lib/llm/schema/boolean.rb
+- lib/llm/schema/enum.rb
 - lib/llm/schema/integer.rb
 - lib/llm/schema/leaf.rb
 - lib/llm/schema/null.rb
@@ -317,6 +322,7 @@ files:
 - lib/llm/tool.rb
 - lib/llm/tool/param.rb
 - lib/llm/tracer.rb
+- lib/llm/tracer/langsmith.rb
 - lib/llm/tracer/logger.rb
 - lib/llm/tracer/null.rb
 - lib/llm/tracer/telemetry.rb