llm.rb 4.8.0 → 4.10.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. checksums.yaml +4 -4
  2. data/README.md +356 -583
  3. data/data/anthropic.json +770 -0
  4. data/data/deepseek.json +75 -0
  5. data/data/google.json +1050 -0
  6. data/data/openai.json +1421 -0
  7. data/data/xai.json +792 -0
  8. data/data/zai.json +330 -0
  9. data/lib/llm/agent.rb +42 -41
  10. data/lib/llm/bot.rb +1 -263
  11. data/lib/llm/buffer.rb +7 -0
  12. data/lib/llm/{session → context}/deserializer.rb +4 -3
  13. data/lib/llm/context.rb +292 -0
  14. data/lib/llm/cost.rb +26 -0
  15. data/lib/llm/error.rb +8 -0
  16. data/lib/llm/function/array.rb +61 -0
  17. data/lib/llm/function/fiber_group.rb +91 -0
  18. data/lib/llm/function/task_group.rb +89 -0
  19. data/lib/llm/function/thread_group.rb +94 -0
  20. data/lib/llm/function.rb +75 -10
  21. data/lib/llm/mcp/command.rb +108 -0
  22. data/lib/llm/mcp/error.rb +31 -0
  23. data/lib/llm/mcp/pipe.rb +82 -0
  24. data/lib/llm/mcp/rpc.rb +118 -0
  25. data/lib/llm/mcp/transport/http/event_handler.rb +66 -0
  26. data/lib/llm/mcp/transport/http.rb +122 -0
  27. data/lib/llm/mcp/transport/stdio.rb +85 -0
  28. data/lib/llm/mcp.rb +116 -0
  29. data/lib/llm/message.rb +13 -11
  30. data/lib/llm/model.rb +2 -2
  31. data/lib/llm/prompt.rb +17 -7
  32. data/lib/llm/provider.rb +32 -17
  33. data/lib/llm/providers/anthropic/files.rb +3 -3
  34. data/lib/llm/providers/anthropic.rb +19 -4
  35. data/lib/llm/providers/deepseek.rb +10 -3
  36. data/lib/llm/providers/{gemini → google}/audio.rb +6 -6
  37. data/lib/llm/providers/{gemini → google}/error_handler.rb +2 -2
  38. data/lib/llm/providers/{gemini → google}/files.rb +11 -11
  39. data/lib/llm/providers/{gemini → google}/images.rb +7 -7
  40. data/lib/llm/providers/{gemini → google}/models.rb +5 -5
  41. data/lib/llm/providers/{gemini → google}/request_adapter/completion.rb +7 -3
  42. data/lib/llm/providers/{gemini → google}/request_adapter.rb +1 -1
  43. data/lib/llm/providers/{gemini → google}/response_adapter/completion.rb +7 -7
  44. data/lib/llm/providers/{gemini → google}/response_adapter/embedding.rb +1 -1
  45. data/lib/llm/providers/{gemini → google}/response_adapter/file.rb +1 -1
  46. data/lib/llm/providers/{gemini → google}/response_adapter/files.rb +1 -1
  47. data/lib/llm/providers/{gemini → google}/response_adapter/image.rb +1 -1
  48. data/lib/llm/providers/{gemini → google}/response_adapter/models.rb +1 -1
  49. data/lib/llm/providers/{gemini → google}/response_adapter/web_search.rb +2 -2
  50. data/lib/llm/providers/{gemini → google}/response_adapter.rb +8 -8
  51. data/lib/llm/providers/{gemini → google}/stream_parser.rb +3 -3
  52. data/lib/llm/providers/{gemini.rb → google.rb} +41 -26
  53. data/lib/llm/providers/llamacpp.rb +10 -3
  54. data/lib/llm/providers/ollama.rb +19 -4
  55. data/lib/llm/providers/openai/files.rb +3 -3
  56. data/lib/llm/providers/openai/response_adapter/completion.rb +9 -1
  57. data/lib/llm/providers/openai/response_adapter/responds.rb +9 -1
  58. data/lib/llm/providers/openai/responses.rb +9 -1
  59. data/lib/llm/providers/openai/stream_parser.rb +2 -0
  60. data/lib/llm/providers/openai.rb +19 -4
  61. data/lib/llm/providers/xai.rb +10 -3
  62. data/lib/llm/providers/zai.rb +9 -2
  63. data/lib/llm/registry.rb +81 -0
  64. data/lib/llm/schema/all_of.rb +31 -0
  65. data/lib/llm/schema/any_of.rb +31 -0
  66. data/lib/llm/schema/one_of.rb +31 -0
  67. data/lib/llm/schema/parser.rb +145 -0
  68. data/lib/llm/schema.rb +49 -8
  69. data/lib/llm/server_tool.rb +5 -5
  70. data/lib/llm/session.rb +10 -1
  71. data/lib/llm/tool.rb +88 -6
  72. data/lib/llm/tracer/logger.rb +1 -1
  73. data/lib/llm/tracer/telemetry.rb +7 -7
  74. data/lib/llm/tracer.rb +3 -3
  75. data/lib/llm/usage.rb +5 -0
  76. data/lib/llm/version.rb +1 -1
  77. data/lib/llm.rb +39 -6
  78. data/llm.gemspec +45 -8
  79. metadata +86 -28
data/lib/llm/bot.rb CHANGED
@@ -1,265 +1,3 @@
 # frozen_string_literal: true
 
-module LLM
-  ##
-  # {LLM::Session LLM::Session} provides an object that can maintain a
-  # conversation. A conversation can use the chat completions API
-  # that all LLM providers support or the responses API that currently
-  # only OpenAI supports.
-  #
-  # @example
-  #   #!/usr/bin/env ruby
-  #   require "llm"
-  #
-  #   llm = LLM.openai(key: ENV["KEY"])
-  #   ses = LLM::Session.new(llm)
-  #
-  #   prompt = LLM::Prompt.new(llm) do
-  #     system "Be concise and show your reasoning briefly."
-  #     user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
-  #     user "Now double the speed for the same time."
-  #   end
-  #
-  #   ses.talk(prompt)
-  #   ses.messages.each { |m| puts "[#{m.role}] #{m.content}" }
-  class Session
-    require_relative "session/deserializer"
-    include Deserializer
-
-    ##
-    # Returns an Enumerable for the messages in a conversation
-    # @return [LLM::Buffer<LLM::Message>]
-    attr_reader :messages
-
-    ##
-    # @param [LLM::Provider] provider
-    #  A provider
-    # @param [Hash] params
-    #  The parameters to maintain throughout the conversation.
-    #  Any parameter the provider supports can be included and
-    #  not only those listed here.
-    # @option params [String] :model Defaults to the provider's default model
-    # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
-    def initialize(provider, params = {})
-      @provider = provider
-      @params = {model: provider.default_model, schema: nil}.compact.merge!(params)
-      @messages = LLM::Buffer.new(provider)
-    end
-
-    ##
-    # Maintain a conversation via the chat completions API.
-    # This method immediately sends a request to the LLM and returns the response.
-    #
-    # @param prompt (see LLM::Provider#complete)
-    # @param params The params, including optional :role (defaults to :user), :stream, :tools, :schema etc.
-    # @return [LLM::Response] Returns the LLM's response for this turn.
-    # @example
-    #   llm = LLM.openai(key: ENV["KEY"])
-    #   ses = LLM::Session.new(llm)
-    #   res = ses.talk("Hello, what is your name?")
-    #   puts res.messages[0].content
-    def talk(prompt, params = {})
-      prompt, params, messages = fetch(prompt, params)
-      params = params.merge(messages: [*@messages.to_a, *messages])
-      params = @params.merge(params)
-      res = @provider.complete(prompt, params)
-      role = params[:role] || @provider.user_role
-      role = @provider.tool_role if params[:role].nil? && [*prompt].grep(LLM::Function::Return).any?
-      @messages.concat [LLM::Message.new(role, prompt)]
-      @messages.concat messages
-      @messages.concat [res.choices[-1]]
-      res
-    end
-    alias_method :chat, :talk
-
-    ##
-    # Maintain a conversation via the responses API.
-    # This method immediately sends a request to the LLM and returns the response.
-    #
-    # @note Not all LLM providers support this API
-    # @param prompt (see LLM::Provider#complete)
-    # @param params The params, including optional :role (defaults to :user), :stream, :tools, :schema etc.
-    # @return [LLM::Response] Returns the LLM's response for this turn.
-    # @example
-    #   llm = LLM.openai(key: ENV["KEY"])
-    #   ses = LLM::Session.new(llm)
-    #   res = ses.respond("What is the capital of France?")
-    #   puts res.output_text
-    def respond(prompt, params = {})
-      prompt, params, messages = fetch(prompt, params)
-      res_id = @messages.find(&:assistant?)&.response&.response_id
-      params = params.merge(previous_response_id: res_id, input: messages).compact
-      params = @params.merge(params)
-      res = @provider.responses.create(prompt, params)
-      role = params[:role] || @provider.user_role
-      @messages.concat [LLM::Message.new(role, prompt)]
-      @messages.concat messages
-      @messages.concat [res.choices[-1]]
-      res
-    end
-
-    ##
-    # @return [String]
-    def inspect
-      "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
-      "@provider=#{@provider.class}, @params=#{@params.inspect}, " \
-      "@messages=#{@messages.inspect}>"
-    end
-
-    ##
-    # Returns an array of functions that can be called
-    # @return [Array<LLM::Function>]
-    def functions
-      @messages
-        .select(&:assistant?)
-        .flat_map do |msg|
-          fns = msg.functions.select(&:pending?)
-          fns.each do |fn|
-            fn.tracer = tracer
-            fn.model = msg.model
-          end
-        end
-    end
-
-    ##
-    # Returns token usage for the conversation
-    # @note
-    #  This method returns token usage for the latest
-    #  assistant message, and it returns an empty object
-    #  if there are no assistant messages
-    # @return [LLM::Object]
-    def usage
-      @messages.find(&:assistant?)&.usage || LLM::Object.from({})
-    end
-
-    ##
-    # Build a role-aware prompt for a single request.
-    #
-    # Prefer this method over {#build_prompt}. The older
-    # method name is kept for backward compatibility.
-    # @example
-    #   prompt = ses.prompt do
-    #     system "Your task is to assist the user"
-    #     user "Hello, can you assist me?"
-    #   end
-    #   ses.talk(prompt)
-    # @param [Proc] b
-    #  A block that composes messages. If it takes one argument,
-    #  it receives the prompt object. Otherwise it runs in prompt context.
-    # @return [LLM::Prompt]
-    def prompt(&b)
-      LLM::Prompt.new(@provider, &b)
-    end
-    alias_method :build_prompt, :prompt
-
-    ##
-    # Recognize an object as a URL to an image
-    # @param [String] url
-    #  The URL
-    # @return [LLM::Object]
-    #  Returns a tagged object
-    def image_url(url)
-      LLM::Object.from(value: url, kind: :image_url)
-    end
-
-    ##
-    # Recognize an object as a local file
-    # @param [String] path
-    #  The path
-    # @return [LLM::Object]
-    #  Returns a tagged object
-    def local_file(path)
-      LLM::Object.from(value: LLM.File(path), kind: :local_file)
-    end
-
-    ##
-    # Recognize an object as a remote file
-    # @param [LLM::Response] res
-    #  The response
-    # @return [LLM::Object]
-    #  Returns a tagged object
-    def remote_file(res)
-      LLM::Object.from(value: res, kind: :remote_file)
-    end
-
-    ##
-    # @return [LLM::Tracer]
-    #  Returns an LLM tracer
-    def tracer
-      @provider.tracer
-    end
-
-    ##
-    # Returns the model a Session is actively using
-    # @return [String]
-    def model
-      messages.find(&:assistant?)&.model || @params[:model]
-    end
-
-    ##
-    # @return [Hash]
-    def to_h
-      {model:, messages:}
-    end
-
-    ##
-    # @return [String]
-    def to_json(...)
-      {schema_version: 1}.merge!(to_h).to_json(...)
-    end
-
-    ##
-    # Save a session
-    # @example
-    #   llm = LLM.openai(key: ENV["KEY"])
-    #   ses = LLM::Session.new(llm)
-    #   ses.talk "Hello"
-    #   ses.save(path: "session.json")
-    # @raise [SystemCallError]
-    #  Might raise a number of SystemCallError subclasses
-    # @return [void]
-    def serialize(path:)
-      ::File.binwrite path, LLM.json.dump(self)
-    end
-    alias_method :save, :serialize
-
-    ##
-    # Restore a session
-    # @param [String, nil] path
-    #  The path to a JSON file
-    # @param [String, nil] string
-    #  A raw JSON string
-    # @raise [SystemCallError]
-    #  Might raise a number of SystemCallError subclasses
-    # @return [LLM::Session]
-    def deserialize(path: nil, string: nil)
-      payload = if path.nil? and string.nil?
-        raise ArgumentError, "a path or string is required"
-      elsif path
-        ::File.binread(path)
-      else
-        string
-      end
-      ses = LLM.json.load(payload)
-      @messages.concat [*ses["messages"]].map { deserialize_message(_1) }
-      self
-    end
-    alias_method :restore, :deserialize
-
-    private
-
-    def fetch(prompt, params)
-      return [prompt, params, []] unless LLM::Prompt === prompt
-      messages = prompt.to_a
-      prompt = messages.shift
-      params.merge!(role: prompt.role)
-      [prompt.content, params, messages]
-    end
-  end
-
-  # Backward-compatible alias
-  Bot = Session
-
-  # Scheduled for removal in v5.0
-  deprecate_constant :Bot
-end
+require_relative "context"
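The new bot.rb is a one-line shim: LLM::Bot remains defined as a deprecated alias of LLM::Context, so both constants resolve to the same class until the alias is removed. A minimal migration sketch, assuming an API key in ENV["KEY"] (the variable name is illustrative):

    require "llm"

    llm = LLM.openai(key: ENV["KEY"])
    ctx = LLM::Context.new(llm)  # the new name
    bot = LLM::Bot.new(llm)      # still works, but warns via deprecate_constant
    ctx.talk("Hello!")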
data/lib/llm/buffer.rb CHANGED
@@ -83,6 +83,13 @@ module LLM
       "message_count=#{@messages.size}>"
     end
 
+    ##
+    # @return [Integer]
+    #  Returns the number of messages in the buffer
+    def size
+      @messages.size
+    end
+
     ##
     # Returns true when the buffer is empty
     # @return [Boolean]
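Buffer#size complements the existing empty? predicate, so callers no longer need to materialize the buffer with to_a just to count messages. A usage sketch, given a context that has completed one turn:

    ctx = LLM::Context.new(llm)
    ctx.messages.size  #=> 0
    ctx.talk("Hello")
    ctx.messages.size  #=> 2, the user message plus the assistant reply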
data/lib/llm/{session → context}/deserializer.rb RENAMED
@@ -1,6 +1,6 @@
 # frozen_string_literal: true
 
-class LLM::Session
+class LLM::Context
   ##
   # @api private
   module Deserializer
@@ -11,7 +11,8 @@ class LLM::Session
       tool_calls = deserialize_tool_calls(payload["tools"])
       returns = deserialize_returns(payload["content"]) if returns.nil?
       original_tool_calls = payload["original_tool_calls"]
-      extra = {tool_calls:, original_tool_calls:}.compact
+      usage = payload["usage"]
+      extra = {tool_calls:, original_tool_calls:, tools: @params[:tools], usage:}.compact
       content = returns.nil? ? payload["content"] : returns
       LLM::Message.new(payload["role"], content, extra)
     end
@@ -27,7 +28,7 @@ class LLM::Session
       returns = [*items].filter_map do |item|
         next unless Hash === item
         id, name, value = item.values_at("id", "name", "value")
-        next if id.nil? || name.nil? || value.nil?
+        next if name.nil? || value.nil?
         LLM::Function::Return.new(id, name, value)
       end
       returns.empty? ? nil : returns
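With usage and tools now flowing through deserialization (and tool returns no longer requiring an id), a saved context should restore with its token accounting intact. A round-trip sketch; the file name and the tools argument are illustrative:

    ctx = LLM::Context.new(llm, tools: tools)
    ctx.talk("Hello")
    ctx.save(path: "context.json")

    restored = LLM::Context.new(llm, tools: tools)
    restored.restore(path: "context.json")
    restored.usage  # token usage survives the round-trip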
data/lib/llm/context.rb ADDED
@@ -0,0 +1,292 @@
+# frozen_string_literal: true
+
+module LLM
+  ##
+  # {LLM::Context LLM::Context} represents a stateful interaction with
+  # an LLM, including conversation history, tools, execution state,
+  # and cost tracking. It evolves over time as the system runs.
+  #
+  # Context is the stateful environment in which an LLM operates.
+  # This is not just prompt context; it is an active, evolving
+  # execution boundary for LLM workflows.
+  #
+  # A context can use the chat completions API that all providers
+  # support or the responses API that currently only OpenAI supports.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(key: ENV["KEY"])
+  #   ctx = LLM::Context.new(llm)
+  #
+  #   prompt = LLM::Prompt.new(llm) do
+  #     system "Be concise and show your reasoning briefly."
+  #     user "If a train goes 60 mph for 1.5 hours, how far does it travel?"
+  #     user "Now double the speed for the same time."
+  #   end
+  #
+  #   ctx.talk(prompt)
+  #   ctx.messages.each { |m| puts "[#{m.role}] #{m.content}" }
+  class Context
+    require_relative "context/deserializer"
+    include Deserializer
+
+    ##
+    # Returns the accumulated message history for this context
+    # @return [LLM::Buffer<LLM::Message>]
+    attr_reader :messages
+
+    ##
+    # Returns a provider
+    # @return [LLM::Provider]
+    attr_reader :llm
+
+    ##
+    # @param [LLM::Provider] llm
+    #  A provider
+    # @param [Hash] params
+    #  The parameters to maintain throughout the conversation.
+    #  Any parameter the provider supports can be included and
+    #  not only those listed here.
+    # @option params [String] :model Defaults to the provider's default model
+    # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
+    def initialize(llm, params = {})
+      @llm = llm
+      @params = {model: llm.default_model, schema: nil}.compact.merge!(params)
+      @messages = LLM::Buffer.new(llm)
+    end
+
+    ##
+    # Interact with the context via the chat completions API.
+    # This method immediately sends a request to the LLM and returns the response.
+    #
+    # @param prompt (see LLM::Provider#complete)
+    # @param params The params, including optional :role (defaults to :user), :stream, :tools, :schema etc.
+    # @return [LLM::Response] Returns the LLM's response for this turn.
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   ctx = LLM::Context.new(llm)
+    #   res = ctx.talk("Hello, what is your name?")
+    #   puts res.messages[0].content
+    def talk(prompt, params = {})
+      params = params.merge(messages: @messages.to_a)
+      params = @params.merge(params)
+      res = @llm.complete(prompt, params)
+      role = params[:role] || @llm.user_role
+      role = @llm.tool_role if params[:role].nil? && [*prompt].grep(LLM::Function::Return).any?
+      @messages.concat LLM::Prompt === prompt ? prompt.to_a : [LLM::Message.new(role, prompt)]
+      @messages.concat [res.choices[-1]]
+      res
+    end
+    alias_method :chat, :talk
+
+    ##
+    # Interact with the context via the responses API.
+    # This method immediately sends a request to the LLM and returns the response.
+    #
+    # @note Not all LLM providers support this API
+    # @param prompt (see LLM::Provider#complete)
+    # @param params The params, including optional :role (defaults to :user), :stream, :tools, :schema etc.
+    # @return [LLM::Response] Returns the LLM's response for this turn.
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   ctx = LLM::Context.new(llm)
+    #   res = ctx.respond("What is the capital of France?")
+    #   puts res.output_text
+    def respond(prompt, params = {})
+      res_id = @messages.find(&:assistant?)&.response&.response_id
+      params = params.merge(previous_response_id: res_id, input: @messages.to_a).compact
+      params = @params.merge(params)
+      res = @llm.responses.create(prompt, params)
+      role = params[:role] || @llm.user_role
+      @messages.concat LLM::Prompt === prompt ? prompt.to_a : [LLM::Message.new(role, prompt)]
+      @messages.concat [res.choices[-1]]
+      res
+    end
+
+    ##
+    # @return [String]
+    def inspect
+      "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
+      "@llm=#{@llm.class}, @params=#{@params.inspect}, " \
+      "@messages=#{@messages.inspect}>"
+    end
+
+    ##
+    # Returns an array of functions that can be called
+    # @return [Array<LLM::Function>]
+    def functions
+      @messages
+        .select(&:assistant?)
+        .flat_map do |msg|
+          fns = msg.functions.select(&:pending?)
+          fns.each do |fn|
+            fn.tracer = tracer
+            fn.model = msg.model
+          end
+        end.extend(LLM::Function::Array)
+    end
+
+    ##
+    # Returns token usage accumulated in this context
+    # @note
+    #  This method returns token usage for the latest
+    #  assistant message, and it returns nil when there
+    #  are no assistant messages.
+    # @return [LLM::Object, nil]
+    def usage
+      @messages.find(&:assistant?)&.usage
+    end
+
+    ##
+    # Build a role-aware prompt for a single request.
+    #
+    # Prefer this method over {#build_prompt}. The older
+    # method name is kept for backward compatibility.
+    # @example
+    #   prompt = ctx.prompt do
+    #     system "Your task is to assist the user"
+    #     user "Hello, can you assist me?"
+    #   end
+    #   ctx.talk(prompt)
+    # @param [Proc] b
+    #  A block that composes messages. If it takes one argument,
+    #  it receives the prompt object. Otherwise it runs in prompt context.
+    # @return [LLM::Prompt]
+    def prompt(&b)
+      LLM::Prompt.new(@llm, &b)
+    end
+    alias_method :build_prompt, :prompt
+
+    ##
+    # Recognize an object as a URL to an image
+    # @param [String] url
+    #  The URL
+    # @return [LLM::Object]
+    #  Returns a tagged object
+    def image_url(url)
+      LLM::Object.from(value: url, kind: :image_url)
+    end
+
+    ##
+    # Recognize an object as a local file
+    # @param [String] path
+    #  The path
+    # @return [LLM::Object]
+    #  Returns a tagged object
+    def local_file(path)
+      LLM::Object.from(value: LLM.File(path), kind: :local_file)
+    end
+
+    ##
+    # Recognize an object as a remote file
+    # @param [LLM::Response] res
+    #  The response
+    # @return [LLM::Object]
+    #  Returns a tagged object
+    def remote_file(res)
+      LLM::Object.from(value: res, kind: :remote_file)
+    end
+
+    ##
+    # @return [LLM::Tracer]
+    #  Returns an LLM tracer
+    def tracer
+      @llm.tracer
+    end
+
+    ##
+    # Returns the model a Context is actively using
+    # @return [String]
+    def model
+      messages.find(&:assistant?)&.model || @params[:model]
+    end
+
+    ##
+    # @return [Hash]
+    def to_h
+      {model:, messages:}
+    end
+
+    ##
+    # @return [String]
+    def to_json(...)
+      {schema_version: 1}.merge!(to_h).to_json(...)
+    end
+
+    ##
+    # Save the current context state
+    # @example
+    #   llm = LLM.openai(key: ENV["KEY"])
+    #   ctx = LLM::Context.new(llm)
+    #   ctx.talk "Hello"
+    #   ctx.save(path: "context.json")
+    # @raise [SystemCallError]
+    #  Might raise a number of SystemCallError subclasses
+    # @return [void]
+    def serialize(path:)
+      ::File.binwrite path, LLM.json.dump(self)
+    end
+    alias_method :save, :serialize
+
+    ##
+    # Restore a saved context state
+    # @param [String, nil] path
+    #  The path to a JSON file
+    # @param [String, nil] string
+    #  A raw JSON string
+    # @raise [SystemCallError]
+    #  Might raise a number of SystemCallError subclasses
+    # @return [LLM::Context]
+    def deserialize(path: nil, string: nil)
+      payload = if path.nil? and string.nil?
+        raise ArgumentError, "a path or string is required"
+      elsif path
+        ::File.binread(path)
+      else
+        string
+      end
+      ctx = LLM.json.load(payload)
+      @messages.concat [*ctx["messages"]].map { deserialize_message(_1) }
+      self
+    end
+    alias_method :restore, :deserialize
+
+    ##
+    # @return [LLM::Cost]
+    #  Returns an _approximate_ cost for a given context
+    #  based on both the provider and the model
+    def cost
+      return LLM::Cost.new(0, 0) unless usage
+      cost = LLM.registry_for(llm).cost(model:)
+      LLM::Cost.new(
+        (cost.input.to_f / 1_000_000.0) * usage.input_tokens,
+        (cost.output.to_f / 1_000_000.0) * usage.output_tokens
+      )
+    end
+
+    ##
+    # Returns the model's context window.
+    # The context window is the maximum amount of input and output
+    # tokens a model can consider in a single request.
+    # @note
+    #  This method returns 0 when the provider or
+    #  model can't be found within {LLM::Registry}.
+    # @return [Integer]
+    def context_window
+      LLM
+        .registry_for(llm)
+        .limit(model:)
+        .context
+    rescue LLM::NoSuchModelError, LLM::NoSuchRegistryError
+      0
+    end
+  end
+
+  # Backward-compatible alias
+  Bot = Context
+
+  # Scheduled for removal in v6.0
+  deprecate_constant :Bot
+end
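Both cost and context_window consult the new {LLM::Registry} data bundled under data/data/, keyed by provider and model. A usage sketch; the printed values are illustrative:

    ctx = LLM::Context.new(llm)
    ctx.talk("Summarize RFC 2119 in one sentence.")
    puts ctx.cost.total      # approximate dollars spent so far
    puts ctx.context_window  # e.g. 128000, or 0 when the model is unknown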
data/lib/llm/cost.rb ADDED
@@ -0,0 +1,26 @@
+# frozen_string_literal: true
+
+##
+# The {LLM::Cost LLM::Cost} class represents an approximate
+# cost breakdown for a provider request. It stores the input
+# and output costs separately and can return the total.
+#
+# @attr [Float] input_costs
+#  Returns the input cost
+# @attr [Float] output_costs
+#  Returns the output cost
+class LLM::Cost < Struct.new(:input_costs, :output_costs)
+  ##
+  # @return [Float]
+  #  Returns the total cost
+  def total
+    input_costs + output_costs
+  end
+
+  ##
+  # @return [String]
+  #  Returns the total cost in a human friendly format
+  def to_s
+    format("%.12f", total).sub(/\.?0+$/, "")
+  end
+end
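The arithmetic in Context#cost scales each registry price (quoted per million tokens) by the tokens actually used, then wraps the pair in LLM::Cost. A worked sketch with illustrative prices of $3.00/1M input and $15.00/1M output tokens, and 1,200 input / 300 output tokens used:

    input  = (3.0 / 1_000_000.0) * 1_200   #=> 0.0036
    output = (15.0 / 1_000_000.0) * 300    #=> 0.0045
    cost   = LLM::Cost.new(input, output)
    cost.total  #=> roughly 0.0081 (Float arithmetic)
    cost.to_s   #=> "0.0081", trailing zeros trimmed from the %.12f rendering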
data/lib/llm/error.rb CHANGED
@@ -54,4 +54,12 @@ module LLM
   ##
   # When stuck in a tool call loop
   ToolLoopError = Class.new(Error)
+
+  ##
+  # When {LLM::Registry} can't map a model
+  NoSuchModelError = Class.new(Error)
+
+  ##
+  # When {LLM::Registry} can't map a registry
+  NoSuchRegistryError = Class.new(Error)
 end
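These are the errors Context#context_window rescues when it falls back to 0. Callers doing their own registry lookups can distinguish the two cases; a sketch with an illustrative model name:

    begin
      LLM.registry_for(llm).limit(model: "no-such-model")
    rescue LLM::NoSuchModelError
      warn "model not present in the registry data"
    rescue LLM::NoSuchRegistryError
      warn "no registry data for this provider"
    end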
data/lib/llm/function/array.rb ADDED
@@ -0,0 +1,61 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  ##
+  # The {LLM::Function::Array} module extends the array
+  # returned by {LLM::Context#functions} with methods
+  # that can call all pending functions sequentially or
+  # concurrently. The return values can be reported back
+  # to the LLM on the next turn.
+  module Array
+    ##
+    # Calls all functions in a collection sequentially.
+    # @return [Array<LLM::Function::Return>]
+    #  Returns values to be reported back to the LLM.
+    def call
+      map(&:call)
+    end
+
+    ##
+    # Calls all functions in a collection concurrently.
+    # This method returns an {LLM::Function::ThreadGroup},
+    # {LLM::Function::TaskGroup}, or {LLM::Function::FiberGroup}
+    # that can be waited on to access the return values.
+    #
+    # @param [Symbol] strategy
+    #  Controls concurrency strategy:
+    #  - `:thread`: Use threads
+    #  - `:task`: Use async tasks (requires async gem)
+    #  - `:fiber`: Use raw fibers
+    #
+    # @return [LLM::Function::ThreadGroup, LLM::Function::TaskGroup, LLM::Function::FiberGroup]
+    def spawn(strategy)
+      case strategy
+      when :task
+        TaskGroup.new(map { |fn| fn.spawn(:task) })
+      when :thread
+        ThreadGroup.new(map { |fn| fn.spawn(:thread) })
+      when :fiber
+        FiberGroup.new(map { |fn| fn.spawn(:fiber) })
+      else
+        raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
+      end
+    end
+
+    ##
+    # Calls all functions in a collection concurrently
+    # and waits for the return values.
+    #
+    # @param [Symbol] strategy
+    #  Controls concurrency strategy:
+    #  - `:thread`: Use threads
+    #  - `:task`: Use async tasks (requires async gem)
+    #  - `:fiber`: Use raw fibers
+    #
+    # @return [Array<LLM::Function::Return>]
+    #  Returns values to be reported back to the LLM.
+    def wait(strategy)
+      spawn(strategy).wait
+    end
+  end
+end
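Because Context#functions now extends its result with this module, a tool-call turn can be drained and reported back in a small loop. A sketch, assuming ctx was constructed with tools:

    ctx.talk("What's the weather in Paris?")  # the model may emit tool calls
    until ctx.functions.empty?
      returns = ctx.functions.wait(:thread)   # run pending calls on threads
      ctx.talk(returns)                       # report results on the next turn
    end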