llm.rb 4.13.0 → 4.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/CHANGELOG.md +107 -0
- data/README.md +82 -32
- data/lib/llm/context.rb +25 -10
- data/lib/llm/error.rb +4 -0
- data/lib/llm/eventhandler.rb +16 -12
- data/lib/llm/eventstream/event.rb +15 -5
- data/lib/llm/eventstream/parser.rb +64 -17
- data/lib/llm/mcp/command.rb +1 -1
- data/lib/llm/mcp/mailbox.rb +23 -0
- data/lib/llm/mcp/pipe.rb +1 -1
- data/lib/llm/mcp/router.rb +44 -0
- data/lib/llm/mcp/rpc.rb +29 -18
- data/lib/llm/mcp/transport/http/event_handler.rb +11 -9
- data/lib/llm/mcp/transport/http.rb +2 -2
- data/lib/llm/mcp/transport/stdio.rb +1 -1
- data/lib/llm/mcp.rb +5 -2
- data/lib/llm/provider/transport/http/execution.rb +115 -0
- data/lib/llm/provider/transport/http/interruptible.rb +109 -0
- data/lib/llm/provider/transport/http/stream_decoder.rb +92 -0
- data/lib/llm/provider/transport/http.rb +144 -0
- data/lib/llm/provider.rb +17 -103
- data/lib/llm/providers/anthropic/stream_parser.rb +6 -3
- data/lib/llm/providers/google/stream_parser.rb +6 -3
- data/lib/llm/providers/ollama/stream_parser.rb +3 -2
- data/lib/llm/providers/openai/responses/stream_parser.rb +216 -91
- data/lib/llm/providers/openai/stream_parser.rb +111 -57
- data/lib/llm/response.rb +12 -4
- data/lib/llm/sequel/plugin.rb +252 -0
- data/lib/llm/stream/queue.rb +2 -2
- data/lib/llm/stream.rb +2 -2
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +8 -0
- data/lib/sequel/plugins/llm.rb +8 -0
- metadata +9 -2
- data/lib/llm/client.rb +0 -36
data/lib/llm/providers/openai/stream_parser.rb
CHANGED

@@ -4,6 +4,8 @@ class LLM::OpenAI
   ##
   # @private
   class StreamParser
+    EMPTY_HASH = {}.freeze
+
     ##
     # Returns the fully constructed response body
     # @return [Hash]
@@ -14,7 +16,11 @@ class LLM::OpenAI
     def initialize(stream)
       @body = {}
       @stream = stream
-      @emits = {tools: []}
+      @emits = {tools: {}}
+      @can_emit_content = stream.respond_to?(:on_content)
+      @can_emit_reasoning_content = stream.respond_to?(:on_reasoning_content)
+      @can_emit_tool_call = stream.respond_to?(:on_tool_call)
+      @can_push_content = stream.respond_to?(:<<)
     end

     ##
@@ -45,45 +51,68 @@ class LLM::OpenAI
     end

     def merge_choices!(choices)
+      body_choices = @body["choices"]
       choices.each do |choice|
         index = choice["index"]
-        … (19 removed lines were not captured in this diff view)
+        delta = choice["delta"] || EMPTY_HASH
+        target_message = if body_choice = body_choices[index]
+          body_choice["message"]
+        else
+          body_choices[index] = {"message" => {"role" => "assistant"}}
+          body_choices[index]["message"]
+        end
+        merge_delta!(target_message, delta)
+      end
+    end
+
+    def merge_delta!(target_message, delta)
+      if delta.length == 1
+        merge_single_delta!(target_message, delta)
+      elsif content = delta["content"]
+        if target_content = target_message["content"]
+          target_content << content
+        else
+          target_message["content"] = content
+        end
+        emit_content(content)
+      elsif reasoning = delta["reasoning_content"]
+        if target_reasoning = target_message["reasoning_content"]
+          target_reasoning << reasoning
+        else
+          target_message["reasoning_content"] = reasoning
+        end
+        emit_reasoning_content(reasoning)
+      elsif tool_calls = delta["tool_calls"]
+        merge_tools!(target_message, tool_calls)
+      end
+      return if delta.length <= 1
+      delta.each do |key, value|
+        next if value.nil? || key == "content" || key == "reasoning_content" || key == "tool_calls"
+        target_message[key] = value
+      end
+    end
+
+    def merge_single_delta!(target_message, delta)
+      if content = delta["content"]
+        if target_content = target_message["content"]
+          target_content << content
+        else
+          target_message["content"] = content
+        end
+        emit_content(content)
+        return
+      end
+      if reasoning = delta["reasoning_content"]
+        if target_reasoning = target_message["reasoning_content"]
+          target_reasoning << reasoning
         else
-
-          @body["choices"][index] = {"message" => message_hash}
-          (choice["delta"] || {}).each do |key, value|
-            next if value.nil?
-            if key == "content"
-              emit_content(value)
-              message_hash[key] = value
-            elsif key == "reasoning_content"
-              emit_reasoning_content(value)
-              message_hash[key] = value
-            elsif key == "tool_calls"
-              merge_tools!(message_hash, value)
-            else
-              message_hash[key] = value
-            end
-          end
+          target_message["reasoning_content"] = reasoning
         end
+        emit_reasoning_content(reasoning)
+        return
+      end
+      if tool_calls = delta["tool_calls"]
+        merge_tools!(target_message, tool_calls)
       end
     end
@@ -93,12 +122,11 @@ class LLM::OpenAI
       tindex = toola["index"]
       tindex = index unless Integer === tindex && tindex >= 0
       toolb = target["tool_calls"][tindex]
-      … (one removed line not captured in this diff view)
+      functiona = toola["function"]
+      functionb = toolb && toolb["function"]
+      if functiona && functionb
         # Append to existing function arguments
-        … (one removed line not captured in this diff view)
-        toolb["function"][func_key] ||= +""
-        toolb["function"][func_key] << func_value
-        end
+        merge_function!(functionb, functiona)
       else
         target["tool_calls"][tindex] = toola
       end
@@ -106,40 +134,61 @@ class LLM::OpenAI
       end
     end

+    def merge_function!(target, source)
+      if arguments = source["arguments"]
+        if target_arguments = target["arguments"]
+          target_arguments << arguments
+        else
+          target["arguments"] = arguments
+        end
+      end
+      if name = source["name"]
+        if target_name = target["name"]
+          target_name << name
+        else
+          target["name"] = name
+        end
+      end
+      return if source.length <= 2
+      source.each do |func_key, func_value|
+        next if func_key == "arguments" || func_key == "name"
+        target[func_key] ||= +""
+        target[func_key] << func_value
+      end
+    end
+
     def emit_content(value)
-      if @…
+      if @can_emit_content
        @stream.on_content(value)
-      elsif @…
+      elsif @can_push_content
         @stream << value
       end
     end

     def emit_reasoning_content(value)
-      if @…
+      if @can_emit_reasoning_content
         @stream.on_reasoning_content(value)
       end
     end

     def emit_tool(tool, tindex)
-      return unless @…
-      return …
-      return if @emits[:tools].include?(tindex)
-      function, error = resolve_tool(tool)
-      @emits[:tools] << tindex
-      @stream.on_tool_call(function, error)
-    end
-
-    def complete_tool?(tool)
+      return unless @can_emit_tool_call
+      return if @emits[:tools][tindex]
       function = tool["function"]
-      function && tool["id"] && function["name"]
+      return unless function && tool["id"] && function["name"]
+      return unless arguments_complete?(function["arguments"])
+      arguments = parse_arguments(function["arguments"])
+      return unless arguments
+      function, error = resolve_tool(tool, function, arguments)
+      @emits[:tools][tindex] = true
+      @stream.on_tool_call(function, error)
     end

-    def resolve_tool(tool)
-      function = tool["function"]
+    def resolve_tool(tool, function, arguments)
       registered = LLM::Function.find_by_name(function["name"])
       fn = (registered || LLM::Function.new(function["name"])).dup.tap do |fn|
         fn.id = tool["id"]
-        fn.arguments = …
+        fn.arguments = arguments
       end
       [fn, (registered ? nil : @stream.tool_not_found(fn))]
     end
@@ -151,5 +200,10 @@ class LLM::OpenAI
     rescue *LLM.json.parser_error
       nil
     end
+
+    def arguments_complete?(arguments)
+      value = arguments.to_s.rstrip
+      !value.empty? && value.end_with?("}")
+    end
   end
 end
data/lib/llm/response.rb
CHANGED
@@ -2,10 +2,18 @@

 module LLM
   ##
-  # {LLM::Response LLM::Response} …
-  # …
-  # …
-  # …
+  # {LLM::Response LLM::Response} is the normalized base shape for
+  # provider and endpoint responses in llm.rb.
+  #
+  # Provider calls return an instance of this class, then extend it
+  # with provider-, endpoint-, or context-specific modules so response
+  # handling can share one common surface without flattening away
+  # specialized behavior.
+  #
+  # The normalized response still keeps the original
+  # {Net::HTTPResponse Net::HTTPResponse} available through {#res}
+  # when callers need direct access to raw HTTP details such as
+  # headers, status codes, or unadapted bodies.
   class Response
     require "json"

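Because every provider call funnels through this one response shape, dropping down to the raw HTTP layer is uniform across providers. A hedged sketch (`LLM.openai` and `#complete` follow the gem's public constructor and completion API; only `#res` and the `Net::HTTPResponse` methods are taken from the documentation above):

    require "llm"

    llm = LLM.openai(key: ENV["OPENAI_API_KEY"])
    response = llm.complete("Hello, world")  # => LLM::Response
    http = response.res                      # => Net::HTTPResponse
    puts http.code                           # raw status code, e.g. "200"
    http.each_header { |name, value| puts "#{name}: #{value}" }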
data/lib/llm/sequel/plugin.rb
ADDED

@@ -0,0 +1,252 @@
+# frozen_string_literal: true
+
+module LLM::Sequel
+  ##
+  # Sequel plugin for persisting {LLM::Context LLM::Context} state.
+  #
+  # This plugin maps model columns onto provider selection, model
+  # selection, usage accounting, and serialized context data while
+  # leaving application-specific concerns such as credentials,
+  # associations, and UI shaping to the host app.
+  #
+  # Context state can be stored as a JSON string (`format: :string`, the
+  # default) or as a structured object (`format: :json` / `:jsonb`) for
+  # databases such as PostgreSQL that can persist JSON natively.
+  # `:json` and `:jsonb` expect a real JSON column type with Sequel handling
+  # JSON typecasting for the model.
+  module Plugin
+    EMPTY_HASH = {}.freeze
+    DEFAULT_USAGE_COLUMNS = {
+      input_tokens: :input_tokens,
+      output_tokens: :output_tokens,
+      total_tokens: :total_tokens
+    }.freeze
+    DEFAULTS = {
+      provider_column: :provider,
+      model_column: :model,
+      data_column: :data,
+      format: :string,
+      usage_columns: DEFAULT_USAGE_COLUMNS,
+      provider: EMPTY_HASH,
+      context: EMPTY_HASH
+    }.freeze
+
+    ##
+    # Called by Sequel when the plugin is first applied to a model class.
+    #
+    # This hook installs the plugin's class- and instance-level behavior on
+    # the target model. It runs before {configure}, so it should only attach
+    # methods and not depend on per-model plugin options.
+    #
+    # @param [Class] model
+    # @return [void]
+    def self.apply(model, **)
+      model.extend ClassMethods
+      model.include InstanceMethods
+    end
+
+    ##
+    # Called by Sequel after {apply} with the options passed to
+    # `plugin :llm, ...`.
+    #
+    # This hook merges plugin defaults with the model's explicit settings and
+    # stores the resolved configuration on the model class for later use by
+    # instance methods such as {InstanceMethods#llm} and {InstanceMethods#ctx}.
+    #
+    # @param [Class] model
+    # @param [Hash] options
+    # @option options [Symbol] :format
+    #  Storage format for the serialized context. Use `:string` for text
+    #  columns, or `:json` / `:jsonb` for structured JSON columns with Sequel
+    #  JSON typecasting enabled.
+    # @return [void]
+    def self.configure(model, options = EMPTY_HASH)
+      options = DEFAULTS.merge(options)
+      usage_columns = DEFAULT_USAGE_COLUMNS.merge(options[:usage_columns] || EMPTY_HASH)
+      model.instance_variable_set(
+        :@llm_plugin_options,
+        options.merge(usage_columns: usage_columns.freeze).freeze
+      )
+    end
+  end
+
+  module Plugin::ClassMethods
+    ##
+    # @return [Hash]
+    def llm_plugin_options
+      @llm_plugin_options || DEFAULTS
+    end
+  end
+
+  module Plugin::InstanceMethods
+    ##
+    # Continues the stored context with new input and flushes it.
+    # @see LLM::Context#talk
+    # @return [LLM::Response]
+    def talk(...)
+      ctx.talk(...).tap { flush }
+    end
+
+    ##
+    # Continues the stored context through the Responses API and flushes it.
+    # @see LLM::Context#respond
+    # @return [LLM::Response]
+    def respond(...)
+      ctx.respond(...).tap { flush }
+    end
+
+    ##
+    # Waits for queued tool work to finish.
+    # @see LLM::Context#wait
+    # @return [Array<LLM::Function::Return>]
+    def wait(...)
+      ctx.wait(...)
+    end
+
+    ##
+    # Calls into the stored context.
+    # @see LLM::Context#call
+    # @return [Object]
+    def call(...)
+      ctx.call(...)
+    end
+
+    ##
+    # @see LLM::Context#messages
+    # @return [Array<LLM::Message>]
+    def messages
+      ctx.messages
+    end
+
+    ##
+    # @note The bang is used because Sequel reserves `model` for the
+    #  underlying model class on instances.
+    # @see LLM::Context#model
+    # @return [String]
+    def model!
+      ctx.model
+    end
+
+    ##
+    # @see LLM::Context#functions
+    # @return [Array<LLM::Function>]
+    def functions
+      ctx.functions
+    end
+
+    ##
+    # @see LLM::Context#cost
+    # @return [LLM::Cost]
+    def cost
+      ctx.cost
+    end
+
+    ##
+    # @see LLM::Context#context_window
+    # @return [Integer]
+    def context_window
+      ctx.context_window
+    rescue LLM::NoSuchModelError, LLM::NoSuchRegistryError
+      0
+    end
+
+    ##
+    # Returns usage from the mapped usage columns.
+    # @return [LLM::Object]
+    def usage
+      LLM::Object.from(
+        input_tokens: self[columns[:input_tokens]] || 0,
+        output_tokens: self[columns[:output_tokens]] || 0,
+        total_tokens: self[columns[:total_tokens]] || 0
+      )
+    end
+
+    private
+
+    ##
+    # Returns the resolved provider instance for this record.
+    # @return [LLM::Provider]
+    def llm
+      options = self.class.llm_plugin_options
+      provider = self[columns[:provider_column]]
+      kwargs = resolve_options(options[:provider])
+      @llm ||= LLM.method(provider).call(**kwargs)
+    end
+
+    ##
+    # @return [LLM::Context]
+    def ctx
+      @ctx ||= begin
+        options = self.class.llm_plugin_options
+        params = resolve_options(options[:context]).dup
+        params[:model] ||= self[columns[:model_column]]
+        ctx = LLM::Context.new(llm, params.compact)
+        data = self[columns[:data_column]]
+        if data.nil? || data == ""
+          ctx
+        else
+          string = case options[:format]
+                   when :string then data
+                   when :json, :jsonb then LLM.json.dump(data)
+                   else raise ArgumentError, "Unknown format: #{options[:format].inspect}"
+                   end
+          ctx.restore(string:)
+        end
+      end
+    end
+
+    ##
+    # @return [void]
+    def flush
+      options = self.class.llm_plugin_options
+      update({
+        columns[:data_column] => serialize_context(options[:format]),
+        columns[:input_tokens] => ctx.usage.input_tokens,
+        columns[:output_tokens] => ctx.usage.output_tokens,
+        columns[:total_tokens] => ctx.usage.total_tokens
+      })
+    end
+
+    ##
+    # @return [Hash]
+    def resolve_option(option)
+      case option
+      when Proc then instance_exec(&option)
+      when Hash then option.dup
+      else option
+      end
+    end
+
+    ##
+    # @return [Hash]
+    def resolve_options(option)
+      case option
+      when Proc, Hash then resolve_option(option)
+      else EMPTY_HASH.dup
+      end
+    end
+
+    def serialize_context(format)
+      case format
+      when :string then ctx.to_json
+      when :json, :jsonb then ctx.to_h
+      else raise ArgumentError, "Unknown format: #{format.inspect}"
+      end
+    end
+
+    def columns
+      @columns ||= begin
+        options = self.class.llm_plugin_options
+        usage_columns = options[:usage_columns]
+        {
+          provider_column: options[:provider_column],
+          model_column: options[:model_column],
+          data_column: options[:data_column],
+          input_tokens: usage_columns[:input_tokens],
+          output_tokens: usage_columns[:output_tokens],
+          total_tokens: usage_columns[:total_tokens]
+        }.freeze
+      end
+    end
+  end
+end
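Wired into a model, the plugin turns each row into a durable conversation. A minimal sketch, assuming a table that matches the default column mapping and an OpenAI key passed through the `:provider` option (the `key:` keyword mirrors the provider constructors; `talk` performs a real API call):

    require "sequel"
    require "llm"

    DB = Sequel.sqlite  # requires the sqlite3 gem
    DB.create_table(:conversations) do
      primary_key :id
      String  :provider
      String  :model
      String  :data, text: true
      Integer :input_tokens
      Integer :output_tokens
      Integer :total_tokens
    end

    class Conversation < Sequel::Model(:conversations)
      plugin :llm, format: :string,
                   provider: -> { {key: ENV["OPENAI_API_KEY"]} }
    end

    convo = Conversation.create(provider: "openai", model: "gpt-4o-mini")
    convo.talk("Hello!")          # continues the context, then flushes to the row
    puts convo.usage.total_tokens # usage read back from the mapped columns

Note that the Proc form of `:provider` is evaluated per record via `instance_exec`, so credentials can depend on row data.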
data/lib/llm/stream/queue.rb
CHANGED
@@ -54,9 +54,9 @@ class LLM::Stream
     private

     def fire_hooks(tasks, results)
-      results.each_with_index do |…|
+      results.each_with_index do |result, idx|
         tool = tasks[idx]&.function
-        @stream.on_tool_return(tool, …
+        @stream.on_tool_return(tool, result) if tool
       end
       results
     end
data/lib/llm/stream.rb
CHANGED
@@ -86,10 +86,10 @@ module LLM
     # `tool.spawn(:fiber)`, or `tool.spawn(:task)`.
     # @param [LLM::Function] tool
     #  The tool that returned.
-    # @param [LLM::Function::Return] …
+    # @param [LLM::Function::Return] result
     #  The completed tool return.
     # @return [nil]
-    def on_tool_return(tool, …
+    def on_tool_return(tool, result)
       nil
     end

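These no-op hooks define the surface that the stream parsers probe with `respond_to?` before emitting. A sketch of a custom handler, assuming `LLM::Stream` is meant to be subclassed (which the no-op bodies suggest) and that `LLM::Function` exposes a `name` reader:

    class LoggingStream < LLM::Stream
      # Receives each text fragment as it arrives (see emit_content above).
      def on_content(chunk)
        print chunk
      end

      # Receives each completed tool call exactly once (see emit_tool above).
      def on_tool_call(function, error)
        warn "tool call: #{function.name}#{" (unregistered)" if error}"
      end

      # Receives the result of each finished tool (see fire_hooks above).
      def on_tool_return(tool, result)
        warn "tool returned: #{tool.name}"
      end
    end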
data/lib/llm/version.rb
CHANGED
data/lib/llm.rb
CHANGED
@@ -40,6 +40,14 @@ module LLM
   # Model registry
   @registry = {}

+  ##
+  # Shared HTTP clients used by providers.
+  @clients = {}
+
+  ##
+  # @api private
+  def self.clients = @clients
+
   ##
   # @param [Symbol, LLM::Provider] llm
   #  The name of a provider, or an instance of LLM::Provider
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: llm.rb
 version: !ruby/object:Gem::Version
-  version: 4.13.0
+  version: 4.15.0
 platform: ruby
 authors:
 - Antar Azri
@@ -231,7 +231,6 @@ files:
 - lib/llm/agent.rb
 - lib/llm/bot.rb
 - lib/llm/buffer.rb
-- lib/llm/client.rb
 - lib/llm/context.rb
 - lib/llm/context/deserializer.rb
 - lib/llm/contract.rb
@@ -255,7 +254,9 @@ files:
 - lib/llm/mcp.rb
 - lib/llm/mcp/command.rb
 - lib/llm/mcp/error.rb
+- lib/llm/mcp/mailbox.rb
 - lib/llm/mcp/pipe.rb
+- lib/llm/mcp/router.rb
 - lib/llm/mcp/rpc.rb
 - lib/llm/mcp/transport/http.rb
 - lib/llm/mcp/transport/http/event_handler.rb
@@ -270,6 +271,10 @@ files:
 - lib/llm/object/kernel.rb
 - lib/llm/prompt.rb
 - lib/llm/provider.rb
+- lib/llm/provider/transport/http.rb
+- lib/llm/provider/transport/http/execution.rb
+- lib/llm/provider/transport/http/interruptible.rb
+- lib/llm/provider/transport/http/stream_decoder.rb
 - lib/llm/providers/anthropic.rb
 - lib/llm/providers/anthropic/error_handler.rb
 - lib/llm/providers/anthropic/files.rb
@@ -362,6 +367,7 @@ files:
 - lib/llm/schema/parser.rb
 - lib/llm/schema/string.rb
 - lib/llm/schema/version.rb
+- lib/llm/sequel/plugin.rb
 - lib/llm/server_tool.rb
 - lib/llm/session.rb
 - lib/llm/stream.rb
@@ -376,6 +382,7 @@ files:
 - lib/llm/usage.rb
 - lib/llm/utils.rb
 - lib/llm/version.rb
+- lib/sequel/plugins/llm.rb
 - llm.gemspec
 homepage: https://github.com/llmrb/llm.rb
 licenses:
data/lib/llm/client.rb
DELETED
@@ -1,36 +0,0 @@
-# frozen_string_literal: true
-
-module LLM
-  ##
-  # @api private
-  module Client
-    private
-
-    ##
-    # @api private
-    def persistent_client
-      LLM.lock(:clients) do
-        if clients[client_id]
-          clients[client_id]
-        else
-          require "net/http/persistent" unless defined?(Net::HTTP::Persistent)
-          client = Net::HTTP::Persistent.new(name: self.class.name)
-          client.read_timeout = timeout
-          clients[client_id] = client
-        end
-      end
-    end
-
-    ##
-    # @api private
-    def transient_client
-      client = Net::HTTP.new(host, port)
-      client.read_timeout = timeout
-      client.use_ssl = ssl
-      client
-    end
-
-    def client_id = "#{host}:#{port}:#{timeout}:#{ssl}"
-    def clients = self.class.clients
-  end
-end
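The caching idiom from this deleted module presumably lives on behind the new `lib/llm/provider/transport/http` classes, with the client table moved to module level as `LLM.clients` (see the `lib/llm.rb` hunk above). Reduced to its essentials, the pattern is one memoized persistent client per connection tuple; a standalone sketch using the same `net-http-persistent` gem the removed code required:

    require "net/http/persistent"

    CLIENTS = {}
    LOCK = Mutex.new

    # One reusable client per host/port/timeout/ssl combination,
    # mirroring the client_id key used by the removed module.
    def persistent_client(host:, port:, timeout:, ssl:)
      key = "#{host}:#{port}:#{timeout}:#{ssl}"
      LOCK.synchronize do
        CLIENTS[key] ||= Net::HTTP::Persistent.new(name: "llm").tap do |client|
          client.read_timeout = timeout
        end
      end
    end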