llm.rb 4.0.0 → 4.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. checksums.yaml +4 -4
  2. data/LICENSE +2 -2
  3. data/README.md +226 -192
  4. data/lib/llm/agent.rb +226 -0
  5. data/lib/llm/bot.rb +57 -28
  6. data/lib/llm/error.rb +4 -0
  7. data/lib/llm/function/tracing.rb +19 -0
  8. data/lib/llm/function.rb +16 -3
  9. data/lib/llm/json_adapter.rb +1 -1
  10. data/lib/llm/message.rb +7 -0
  11. data/lib/llm/prompt.rb +85 -0
  12. data/lib/llm/provider.rb +74 -10
  13. data/lib/llm/providers/anthropic/error_handler.rb +27 -5
  14. data/lib/llm/providers/anthropic/files.rb +22 -16
  15. data/lib/llm/providers/anthropic/models.rb +4 -3
  16. data/lib/llm/providers/anthropic.rb +6 -5
  17. data/lib/llm/providers/deepseek.rb +3 -3
  18. data/lib/llm/providers/gemini/error_handler.rb +34 -12
  19. data/lib/llm/providers/gemini/files.rb +18 -13
  20. data/lib/llm/providers/gemini/images.rb +4 -3
  21. data/lib/llm/providers/gemini/models.rb +4 -3
  22. data/lib/llm/providers/gemini.rb +36 -13
  23. data/lib/llm/providers/llamacpp.rb +3 -3
  24. data/lib/llm/providers/ollama/error_handler.rb +28 -6
  25. data/lib/llm/providers/ollama/models.rb +4 -3
  26. data/lib/llm/providers/ollama.rb +9 -7
  27. data/lib/llm/providers/openai/audio.rb +10 -7
  28. data/lib/llm/providers/openai/error_handler.rb +41 -14
  29. data/lib/llm/providers/openai/files.rb +19 -14
  30. data/lib/llm/providers/openai/images.rb +10 -7
  31. data/lib/llm/providers/openai/models.rb +4 -3
  32. data/lib/llm/providers/openai/moderations.rb +4 -3
  33. data/lib/llm/providers/openai/responses.rb +10 -7
  34. data/lib/llm/providers/openai/vector_stores.rb +34 -23
  35. data/lib/llm/providers/openai.rb +9 -7
  36. data/lib/llm/providers/xai.rb +3 -3
  37. data/lib/llm/providers/zai.rb +2 -2
  38. data/lib/llm/schema/object.rb +2 -2
  39. data/lib/llm/schema.rb +16 -2
  40. data/lib/llm/server_tool.rb +3 -3
  41. data/lib/llm/session.rb +3 -0
  42. data/lib/llm/tracer/logger.rb +192 -0
  43. data/lib/llm/tracer/null.rb +49 -0
  44. data/lib/llm/tracer/telemetry.rb +255 -0
  45. data/lib/llm/tracer.rb +134 -0
  46. data/lib/llm/version.rb +1 -1
  47. data/lib/llm.rb +5 -3
  48. data/llm.gemspec +4 -1
  49. metadata +39 -3
  50. data/lib/llm/builder.rb +0 -61
data/lib/llm/schema.rb CHANGED
@@ -9,14 +9,26 @@
  # @see https://json-schema.org/ JSON Schema Specification
  # @see https://tour.json-schema.org/ JSON Schema Tour
  #
- # @example
+ # @example JavaScript-style
  #   schema = LLM::Schema.new
  #   schema.object({
  #     name: schema.string.enum("John", "Jane").required,
  #     age: schema.integer.required,
- #     hobbies: schema.array(schema.string, schema.null).required,
+ #     hobbies: schema.array(schema.string).required,
  #     address: schema.object({street: schema.string}).required,
  #   })
+ #
+ # @example Ruby-style
+ #   class Address < LLM::Schema
+ #     property :street, String, "Street address", required: true
+ #   end
+ #
+ #   class Person < LLM::Schema
+ #     property :name, String, "Person's name", required: true
+ #     property :age, Integer, "Person's age", required: true
+ #     property :hobbies, Array[String], "Person's hobbies", required: true
+ #     property :address, Address, "Person's address", required: true
+ #   end
  class LLM::Schema
    require_relative "schema/version"
    require_relative "schema/leaf"
@@ -50,6 +62,8 @@ class LLM::Schema
      lock do
        if LLM::Schema::Leaf === type
          prop = type
+      elsif Class === type && type.respond_to?(:object)
+        prop = type.object
        else
          target = type.name.split("::").last.downcase
          prop = schema.public_send(target)
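A short usage sketch of the Ruby-style schema DSL added above. The Person class mirrors the @example; wiring it into a chat through LLM::Bot's schema: option is an assumption about llm.rb's existing structured-output API rather than something shown in this diff.

    #!/usr/bin/env ruby
    require "llm"

    class Person < LLM::Schema
      property :name, String, "Person's name", required: true
      property :age, Integer, "Person's age", required: true
    end

    llm = LLM.openai(key: ENV["KEY"])
    # Assumption: the schema: option accepts a Ruby-style schema class
    # the same way it accepts a hand-built LLM::Schema instance.
    bot = LLM::Bot.new(llm, schema: Person)
    bot.chat("Alice is 29 years old", role: :user)
    print bot.messages.find(&:assistant?).content, "\n"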
data/lib/llm/server_tool.rb CHANGED
@@ -9,9 +9,9 @@
  # @example
  #   #!/usr/bin/env ruby
  #   llm = LLM.gemini ENV["KEY"]
- #   bot = LLM::Bot.new(llm, tools: [LLM::ServerTool.new(:google_search)])
- #   bot.chat("Summarize today's news", role: :user)
- #   print bot.messages.find(&:assistant?).content, "\n"
+ #   ses = LLM::Session.new(llm, tools: [LLM::ServerTool.new(:google_search)])
+ #   ses.talk("Summarize today's news", role: :user)
+ #   print ses.messages.find(&:assistant?).content, "\n"
  class LLM::ServerTool < Struct.new(:name, :options, :provider)
    ##
    # @return [String]
data/lib/llm/session.rb ADDED
@@ -0,0 +1,3 @@
+ # frozen_string_literal: true
+
+ require_relative "bot"
data/lib/llm/tracer/logger.rb ADDED
@@ -0,0 +1,192 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Tracer::Logger LLM::Tracer::Logger} class provides a
+   # tracer that provides logging facilities through Ruby's
+   # standard library.
+   #
+   # @example
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   # Log to a file
+   #   llm.tracer = LLM::Tracer::Logger.new(llm, path: "/tmp/log.txt")
+   #   # Log to $stdout (default)
+   #   llm.tracer = LLM::Tracer::Logger.new(llm, io: $stdout)
+   class Tracer::Logger < Tracer
+     ##
+     # @param (see LLM::Tracer#initialize)
+     def initialize(provider, options = {})
+       super
+       setup!(**options)
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_start)
+     # @return [void]
+     def on_request_start(operation:, model: nil)
+       case operation
+       when "chat" then start_chat(operation:, model:)
+       when "retrieval" then start_retrieval(operation:)
+       else nil
+       end
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_finish)
+     # @return [void]
+     def on_request_finish(operation:, res:, model: nil, **)
+       case operation
+       when "chat" then finish_chat(operation:, res:, model:)
+       when "retrieval" then finish_retrieval(operation:, res:)
+       else nil
+       end
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_error)
+     # @return [void]
+     def on_request_error(ex:, **)
+       @logger.error(
+         tracer: "llm.rb (logger)",
+         event: "request.error",
+         provider: provider_name,
+         error_class: ex.class.to_s,
+         error_message: ex.message
+       )
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_start)
+     # @return [void]
+     def on_tool_start(id:, name:, arguments:, model:, **)
+       @logger.info(
+         tracer: "llm.rb (logger)",
+         event: "tool.start",
+         provider: provider_name,
+         operation: "execute_tool",
+         tool_id: id,
+         tool_name: name,
+         tool_arguments: arguments,
+         model:
+       )
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_finish)
+     # @return [void]
+     def on_tool_finish(result:, **)
+       @logger.info(
+         tracer: "llm.rb (logger)",
+         event: "tool.finish",
+         provider: provider_name,
+         operation: "execute_tool",
+         tool_id: result.id,
+         tool_name: result.name,
+         tool_result: result.value
+       )
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_error)
+     # @return [void]
+     def on_tool_error(ex:, **)
+       @logger.error(
+         tracer: "llm.rb (logger)",
+         event: "tool.error",
+         provider: provider_name,
+         operation: "execute_tool",
+         error_class: ex.class.to_s,
+         error_message: ex.message
+       )
+     end
+
+     private
+
+     ##
+     # @api private
+     def setup!(path: nil, io: $stdout)
+       require "logger" unless defined?(::Logger)
+       @logger = ::Logger.new(path || io)
+     end
+
+     ##
+     # @param [String] operation
+     # @param [LLM::Response] res
+     # @api private
+     def finish_attributes(operation, res)
+       case @provider.class.to_s
+       when "LLM::OpenAI" then openai_attributes(operation, res)
+       else {}
+       end
+     end
+
+     ##
+     # @param [String] operation
+     # @param [LLM::Response] res
+     # @api private
+     def openai_attributes(operation, res)
+       case operation
+       when "chat"
+         {
+           openai_service_tier: res.service_tier,
+           openai_system_fingerprint: res.system_fingerprint
+         }.compact
+       when "retrieval"
+         {
+           openai_vector_store_search_result_count: res.size,
+           openai_vector_store_search_has_more: res.has_more
+         }.compact
+       else {}
+       end
+     end
+
+     ##
+     # start_*
+
+     def start_chat(operation:, model:)
+       @logger.info(
+         tracer: "llm.rb (logger)",
+         event: "request.start",
+         provider: provider_name,
+         operation:,
+         model:
+       )
+     end
+
+     def start_retrieval(operation:)
+       @logger.info(
+         tracer: "llm.rb (logger)",
+         event: "request.start",
+         provider: provider_name,
+         operation:
+       )
+     end
+
+     ##
+     # finish_*
+
+     def finish_chat(operation:, model:, res:)
+       @logger.info(
+         tracer: "llm.rb (logger)",
+         event: "request.finish",
+         provider: provider_name,
+         operation:,
+         model:,
+         response_id: res.id,
+         input_tokens: res.usage.input_tokens,
+         output_tokens: res.usage.output_tokens,
+         **finish_attributes(operation, res)
+       )
+     end
+
+     def finish_retrieval(operation:, res:)
+       @logger.info(
+         tracer: "llm.rb (logger)",
+         event: "request.finish",
+         provider: provider_name,
+         operation:,
+         **finish_attributes(operation, res)
+       )
+     end
+   end
+ end
data/lib/llm/tracer/null.rb ADDED
@@ -0,0 +1,49 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # A no-op tracer that ignores all tracing callbacks.
+   class Tracer::Null < Tracer
+     ##
+     # @param (see LLM::Tracer#on_request_start)
+     # @return [nil]
+     def on_request_start(**)
+       nil
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_finish)
+     # @return [nil]
+     def on_request_finish(**)
+       nil
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_error)
+     # @return [nil]
+     def on_request_error(**)
+       nil
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_start)
+     # @return [nil]
+     def on_tool_start(**)
+       nil
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_finish)
+     # @return [nil]
+     def on_tool_finish(**)
+       nil
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_error)
+     # @return [nil]
+     def on_tool_error(**)
+       nil
+     end
+   end
+ end
data/lib/llm/tracer/telemetry.rb ADDED
@@ -0,0 +1,255 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Tracer::Telemetry LLM::Tracer::Telemetry} tracer provides
+   # telemetry support through the [opentelemetry-ruby](https://github.com/open-telemetry/opentelemetry-ruby)
+   # RubyGem. The gem should be installed separately since this feature is opt-in
+   # and disabled by default. This feature exists to support integration with tools
+   # like [LangSmith](https://www.langsmith.com).
+   #
+   # @see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai Telemetry specs (index)
+   # @see https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/openai.md Telemetry specs (OpenAI)
+   #
+   # @example InMemory export
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #   require "pp"
+   #
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   llm.tracer = LLM::Tracer::Telemetry.new(llm)
+   #
+   #   ses = LLM::Session.new(llm)
+   #   ses.talk "hello"
+   #   ses.talk "how are you?"
+   #   ses.tracer.spans.each { |span| pp span }
+   #
+   # @example OTLP export
+   #   #!/usr/bin/env ruby
+   #   require "llm"
+   #   require "opentelemetry-exporter-otlp"
+   #
+   #   endpoint = "https://api.smith.langchain.com/otel/v1/traces"
+   #   exporter = OpenTelemetry::Exporter::OTLP::Exporter.new(endpoint:)
+   #
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   llm.tracer = LLM::Tracer::Telemetry.new(llm, exporter:)
+   #
+   #   ses = LLM::Session.new(llm)
+   #   ses.talk "hello"
+   #   ses.talk "how are you?"
+   class Tracer::Telemetry < Tracer
+     ##
+     # param [LLM::Provider] provider
+     #  An LLM provider
+     # @return [LLM::Tracer::Telemetry]
+     def initialize(provider, options = {})
+       super
+       @exporter = options.delete(:exporter)
+       setup!
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_start)
+     def on_request_start(operation:, model: nil)
+       case operation
+       when "chat" then start_chat(operation:, model:)
+       when "retrieval" then start_retrieval(operation:)
+       else nil
+       end
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_finish)
+     def on_request_finish(operation:, res:, model: nil, span: nil)
+       return nil unless span
+       case operation
+       when "chat" then finish_chat(operation:, model:, res:, span:)
+       when "retrieval" then finish_retrieval(operation:, res:, span:)
+       else nil
+       end
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_request_error)
+     def on_request_error(ex:, span:)
+       return nil unless span
+       attributes = {"error.type" => ex.class.to_s}.compact
+       attributes.each { span.set_attribute(_1, _2) }
+       span.add_event("gen_ai.request.finish")
+       span.status = ::OpenTelemetry::Trace::Status.error(ex.message)
+       span.tap(&:finish)
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_start)
+     # @return (see LLM::Tracer#on_tool_start)
+     def on_tool_start(id:, name:, arguments:, model:)
+       attributes = {
+         "gen_ai.operation.name" => "execute_tool",
+         "gen_ai.request.model" => model,
+         "gen_ai.tool.call.id" => id,
+         "gen_ai.tool.name" => name,
+         "gen_ai.tool.call.arguments" => LLM.json.dump(arguments),
+         "gen_ai.provider.name" => provider_name,
+         "server.address" => provider_host,
+         "server.port" => provider_port
+       }.compact
+       span_name = ["execute_tool", name].compact.join(" ")
+       span = @tracer.start_span(span_name.empty? ? "gen_ai.tool" : span_name, kind: :client, attributes:)
+       span.add_event("gen_ai.tool.start")
+       span
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_finish)
+     # @return (see LLM::Tracer#on_tool_finish)
+     def on_tool_finish(result:, span:)
+       return nil unless span
+       attributes = {
+         "gen_ai.tool.call.id" => result.id,
+         "gen_ai.tool.name" => result.name,
+         "gen_ai.tool.call.result" => LLM.json.dump(result.value)
+       }.compact
+       attributes.each { span.set_attribute(_1, _2) }
+       span.add_event("gen_ai.tool.finish")
+       span.tap(&:finish)
+     end
+
+     ##
+     # @param (see LLM::Tracer#on_tool_error)
+     # @return (see LLM::Tracer#on_tool_error)
+     def on_tool_error(ex:, span:)
+       return nil unless span
+       attributes = {"error.type" => ex.class.to_s}.compact
+       attributes.each { span.set_attribute(_1, _2) }
+       span.add_event("gen_ai.tool.finish")
+       span.status = ::OpenTelemetry::Trace::Status.error(ex.message)
+       span.tap(&:finish)
+     end
+
+     ##
+     # @note
+     #  This method returns an empty array for exporters that
+     #  do not implement 'finished_spans' such as the OTLP
+     #  exporter
+     # @return [Array<OpenTelemetry::SDK::Trace::SpanData>]
+     def spans
+       return [] unless @exporter.respond_to?(:finished_spans)
+       flush!
+       @exporter.finished_spans
+     end
+
+     ##
+     # Flushes queued telemetry to the configured exporter.
+     # @note
+     #  Exports are batched in the background by default.
+     #  Long-lived processes usually do not need to call this method.
+     #  Short-lived scripts should call {#flush!} before exit to reduce
+     #  the risk of losing spans that are still buffered.
+     # @return (see LLM::Tracer#flush!)
+     def flush!
+       @tracer_provider.force_flush
+       nil
+     end
+
+     private
+
+     ##
+     # @api private
+     def setup!
+       require "opentelemetry/sdk" unless defined?(OpenTelemetry)
+       @exporter ||= OpenTelemetry::SDK::Trace::Export::InMemorySpanExporter.new
+       processor = OpenTelemetry::SDK::Trace::Export::BatchSpanProcessor.new(@exporter)
+       @tracer_provider = OpenTelemetry::SDK::Trace::TracerProvider.new
+       @tracer_provider.add_span_processor(processor)
+       @tracer = @tracer_provider.tracer("llm.rb", LLM::VERSION)
+     end
+
+     ##
+     # @param [String] operation
+     # @param [LLM::Response] res
+     # @api private
+     def finish_attributes(operation, res)
+       case @provider.class.to_s
+       when "LLM::OpenAI" then openai_attributes(operation, res)
+       else {}
+       end
+     end
+
+     ##
+     # @param [String] operation
+     # @param [LLM::Response] res
+     # @api private
+     def openai_attributes(operation, res)
+       case operation
+       when "chat"
+         {
+           "openai.response.service_tier" => res.service_tier,
+           "openai.response.system_fingerprint" => res.system_fingerprint
+         }
+       when "retrieval"
+         {
+           "openai.vector_store.search.result_count" => res.size,
+           "openai.vector_store.search.has_more" => res.has_more
+         }
+       else {}
+       end
+     end
+
+     ##
+     # start_*
+
+     def start_chat(operation:, model:)
+       attributes = {
+         "gen_ai.operation.name" => operation,
+         "gen_ai.request.model" => model,
+         "gen_ai.provider.name" => provider_name,
+         "server.address" => provider_host,
+         "server.port" => provider_port
+       }.compact
+       span_name = [operation, model].compact.join(" ")
+       span = @tracer.start_span(span_name.empty? ? "gen_ai.request" : span_name, kind: :client, attributes:)
+       span.add_event("gen_ai.request.start")
+       span
+     end
+
+     def start_retrieval(operation:)
+       attributes = {
+         "gen_ai.operation.name" => operation,
+         "gen_ai.provider.name" => provider_name,
+         "server.address" => provider_host,
+         "server.port" => provider_port
+       }.compact
+       span = @tracer.start_span(operation, kind: :client, attributes:)
+       span.add_event("gen_ai.request.start")
+       span
+     end
+
+     ##
+     # finish_*
+
+     def finish_chat(operation:, model:, res:, span:)
+       attributes = {
+         "gen_ai.operation.name" => operation,
+         "gen_ai.request.model" => model,
+         "gen_ai.response.id" => res.id,
+         "gen_ai.response.model" => model,
+         "gen_ai.usage.input_tokens" => res.usage.input_tokens,
+         "gen_ai.usage.output_tokens" => res.usage.output_tokens
+       }.merge!(finish_attributes(operation, res)).compact
+       attributes.each { span.set_attribute(_1, _2) }
+       span.add_event("gen_ai.request.finish")
+       span.tap(&:finish)
+     end
+
+     def finish_retrieval(operation:, res:, span:)
+       attributes = {
+         "gen_ai.operation.name" => operation
+       }.merge!(finish_attributes(operation, res)).compact
+       attributes.each { span.set_attribute(_1, _2) }
+       span.add_event("gen_ai.request.finish")
+       span.tap(&:finish)
+     end
+   end
+ end
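A small sketch of the flush! guidance above for short-lived scripts. Everything except the at_exit hook appears in this file's @example blocks; spans are exported in batches by a background processor, so flushing before exit reduces the chance of losing buffered spans.

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.openai(key: ENV["KEY"])
    tracer = LLM::Tracer::Telemetry.new(llm)
    llm.tracer = tracer
    # Flush buffered spans before the process exits
    at_exit { tracer.flush! }

    ses = LLM::Session.new(llm)
    ses.talk "hello"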
data/lib/llm/tracer.rb ADDED
@@ -0,0 +1,134 @@
+ # frozen_string_literal: true
+
+ module LLM
+   ##
+   # The {LLM::Tracer LLM::Tracer} is the superclass of all
+   # LLM tracers. It can be helpful for implementing instrumentation
+   # and hooking into the lifecycle of an LLM request. See
+   # {LLM::Tracer::Telemetry LLM::Tracer::Telemetry}, and
+   # {LLM::Tracer::Logger LLM::Tracer::Logger} for example
+   # tracer implementations.
+   class Tracer
+     require_relative "tracer/logger"
+     require_relative "tracer/telemetry"
+     require_relative "tracer/null"
+
+     ##
+     # @param [LLM::Provider] provider
+     #  A provider
+     # @param [Hash] options
+     #  A hash of options
+     def initialize(provider, options = {})
+       @provider = provider
+       @options = {}
+     end
+
+     ##
+     # Called before an LLM provider request is executed.
+     # @param [String] operation
+     # @param [String] model
+     # @return [void]
+     def on_request_start(operation:, model: nil)
+       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
+     end
+
+     ##
+     # Called after an LLM provider request succeeds.
+     # @param [String] operation
+     # @param [String] model
+     # @param [LLM::Response] res
+     # @param [Object, nil] span
+     # @return [void]
+     def on_request_finish(operation:, res:, model: nil, span: nil)
+       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
+     end
+
+     ##
+     # Called when an LLM provider request fails.
+     # @param [LLM::Error] ex
+     # @param [Object, nil] span
+     # @return [void]
+     def on_request_error(ex:, span:)
+       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
+     end
+
+     ##
+     # Called before a local tool/function executes.
+     # @param [String] id
+     #  The tool call ID assigned by the model/provider
+     # @param [String] name
+     #  The tool (function) name.
+     # @param [Hash] arguments
+     #  The parsed tool arguments.
+     # @param [String] model
+     #  The model name
+     # @return [void]
+     def on_tool_start(id:, name:, arguments:, model:)
+       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
+     end
+
+     ##
+     # Called after a local tool/function succeeds.
+     # @param [LLM::Function::Return] result
+     #  The tool return object.
+     # @param [Object, nil] span
+     #  The span/context object returned by {#on_tool_start}.
+     # @return [void]
+     def on_tool_finish(result:, span:)
+       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
+     end
+
+     ##
+     # Called when a local tool/function raises.
+     # @param [Exception] ex
+     #  The raised error.
+     # @param [Object, nil] span
+     #  The span/context object returned by {#on_tool_start}.
+     # @return [void]
+     def on_tool_error(ex:, span:)
+       raise NotImplementedError, "#{self.class} does not implement '#{__method__}'"
+     end
+
+     ##
+     # @return [String]
+     def inspect
+       "#<#{self.class.name}:0x#{object_id.to_s(16)} @provider=#{@provider.class} @tracer=#{@tracer.inspect}>"
+     end
+
+     ##
+     # @return [Array]
+     def spans
+       []
+     end
+
+     ##
+     # Flush the tracer
+     # @note
+     #  This method is only implemented by the {LLM::Tracer::Telemetry} tracer.
+     #  It is a noop for other tracers.
+     # @return [nil]
+     def flush!
+       nil
+     end
+
+     private
+
+     ##
+     # @return [String]
+     def provider_name
+       @provider.class.name.split("::").last.downcase
+     end
+
+     ##
+     # @return [String]
+     def provider_host
+       @provider.instance_variable_get(:@host)
+     end
+
+     ##
+     # @return [String]
+     def provider_port
+       @provider.instance_variable_get(:@port)
+     end
+   end
+ end
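The callback interface above is enough to build a custom tracer. Below is a hedged sketch: the callback signatures come from this file, while CountingTracer itself is illustrative and assumes a provider accepts any LLM::Tracer subclass through the same tracer= writer shown in the Logger example.

    require "llm"

    # Counts requests and tool calls; ignores spans, results, and errors.
    class CountingTracer < LLM::Tracer
      attr_reader :requests, :tool_calls

      def initialize(provider, options = {})
        super
        @requests = 0
        @tool_calls = 0
      end

      def on_request_start(operation:, model: nil)
        @requests += 1
        nil
      end

      def on_request_finish(operation:, res:, model: nil, span: nil)
        nil
      end

      def on_request_error(ex:, span: nil)
        nil
      end

      def on_tool_start(id:, name:, arguments:, model:)
        @tool_calls += 1
        nil
      end

      def on_tool_finish(result:, span: nil)
        nil
      end

      def on_tool_error(ex:, span: nil)
        nil
      end
    end

    llm = LLM.openai(key: ENV["KEY"])
    llm.tracer = CountingTracer.new(llm)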
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
-   VERSION = "4.0.0"
+   VERSION = "4.2.0"
  end