llm.rb 4.21.0 → 4.23.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,6 +13,7 @@ module LLM::ActiveRecord
  EMPTY_HASH = LLM::ActiveRecord::ActsAsLLM::EMPTY_HASH
  DEFAULT_USAGE_COLUMNS = LLM::ActiveRecord::ActsAsLLM::DEFAULT_USAGE_COLUMNS
  DEFAULTS = LLM::ActiveRecord::ActsAsLLM::DEFAULTS
+ Utils = LLM::ActiveRecord::ActsAsLLM::Utils
 
  module ClassMethods
  def model(model = nil)
@@ -52,7 +53,7 @@ module LLM::ActiveRecord
  # @param [Class] model
  # @return [void]
  def self.extended(model)
- options = model.llm_agent_options
+ options = model.llm_plugin_options
  model.validates options[:provider_column], options[:model_column], presence: true
  model.include LLM::ActiveRecord::ActsAsLLM::InstanceMethods unless model.ancestors.include?(LLM::ActiveRecord::ActsAsLLM::InstanceMethods)
  model.include InstanceMethods unless model.ancestors.include?(InstanceMethods)
@@ -79,8 +80,8 @@ module LLM::ActiveRecord
  def acts_as_agent(options = EMPTY_HASH, &block)
  options = DEFAULTS.merge(options)
  usage_columns = DEFAULT_USAGE_COLUMNS.merge(options[:usage_columns] || EMPTY_HASH)
- class_attribute :llm_agent_options, instance_accessor: false, default: DEFAULTS unless respond_to?(:llm_agent_options)
- self.llm_agent_options = options.merge(usage_columns: usage_columns.freeze).freeze
+ class_attribute :llm_plugin_options, instance_accessor: false, default: DEFAULTS unless respond_to?(:llm_plugin_options)
+ self.llm_plugin_options = options.merge(usage_columns: usage_columns.freeze).freeze
  extend Hooks
  class_exec(&block) if block
  end
@@ -90,12 +91,13 @@ module LLM::ActiveRecord
  # Returns the resolved provider instance for this record.
  # @return [LLM::Provider]
  def llm
- options = self.class.llm_agent_options
+ options = self.class.llm_plugin_options
+ columns = Utils.columns(options)
  provider = self[columns[:provider_column]]
- kwargs = resolve_options(options[:provider])
+ kwargs = Utils.resolve_options(self, options[:provider], ActsAsAgent::EMPTY_HASH)
  return @llm if @llm
  @llm = LLM.method(provider).call(**kwargs)
- @llm.tracer = resolve_option(options[:tracer]) if options[:tracer]
+ @llm.tracer = Utils.resolve_option(self, options[:tracer]) if options[:tracer]
  @llm
  end
 
@@ -105,8 +107,9 @@ module LLM::ActiveRecord
  # @return [LLM::Agent]
  def ctx
  @ctx ||= begin
- options = self.class.llm_agent_options
- params = resolve_options(options[:context]).dup
+ options = self.class.llm_plugin_options
+ columns = Utils.columns(options)
+ params = Utils.resolve_options(self, options[:context], ActsAsAgent::EMPTY_HASH).dup
  params[:model] ||= self[columns[:model_column]]
  ctx = self.class.agent.new(llm, params.compact)
  data = self[columns[:data_column]]
@@ -121,62 +124,6 @@ module LLM::ActiveRecord
  end
  end
  end
-
- ##
- # @return [void]
- def flush
- attrs = {
- columns[:data_column] => serialize_context(self.class.llm_agent_options[:format]),
- columns[:input_tokens] => ctx.usage.input_tokens,
- columns[:output_tokens] => ctx.usage.output_tokens,
- columns[:total_tokens] => ctx.usage.total_tokens
- }
- assign_attributes(attrs)
- save!
- end
-
- ##
- # @return [Hash]
- def resolve_option(option)
- case option
- when Proc then instance_exec(&option)
- when Symbol then send(option)
- when Hash then option.dup
- else option
- end
- end
-
- ##
- # @return [Hash]
- def resolve_options(option)
- case option
- when Proc, Symbol, Hash then resolve_option(option)
- else ActsAsAgent::EMPTY_HASH.dup
- end
- end
-
- def serialize_context(format)
- case format
- when :string then ctx.to_json
- when :json, :jsonb then ctx.to_h
- else raise ArgumentError, "Unknown format: #{format.inspect}"
- end
- end
-
- def columns
- @columns ||= begin
- options = self.class.llm_agent_options
- usage_columns = options[:usage_columns]
- {
- provider_column: options[:provider_column],
- model_column: options[:model_column],
- data_column: options[:data_column],
- input_tokens: usage_columns[:input_tokens],
- output_tokens: usage_columns[:output_tokens],
- total_tokens: usage_columns[:total_tokens]
- }.freeze
- end
- end
  end
  end
  end
@@ -33,6 +33,77 @@ module LLM::ActiveRecord
  context: EMPTY_HASH
  }.freeze
 
+ ##
+ # Shared helper methods for the ORM wrapper.
+ #
+ # These utilities keep persistence plumbing out of the wrapped model's
+ # method namespace so the injected surface stays focused on the runtime
+ # API itself.
+ # @api private
+ module Utils
+ ##
+ # Resolves a single configured option against a model instance.
+ # @return [Object]
+ def self.resolve_option(obj, option)
+ case option
+ when Proc then obj.instance_exec(&option)
+ when Symbol then obj.send(option)
+ when Hash then option.dup
+ else option
+ end
+ end
+
+ ##
+ # Resolves hash-like wrapper options against a model instance.
+ # @return [Hash]
+ def self.resolve_options(obj, option, empty_hash)
+ case option
+ when Proc, Symbol, Hash then resolve_option(obj, option)
+ else empty_hash.dup
+ end
+ end
+
+ ##
+ # Serializes the runtime into the configured storage format.
+ # @return [String, Hash]
+ def self.serialize_context(ctx, format)
+ case format
+ when :string then ctx.to_json
+ when :json, :jsonb then ctx.to_h
+ else raise ArgumentError, "Unknown format: #{format.inspect}"
+ end
+ end
+
+ ##
+ # Maps wrapper options onto the record's storage columns.
+ # @return [Hash]
+ def self.columns(options)
+ usage_columns = options[:usage_columns]
+ {
+ provider_column: options[:provider_column],
+ model_column: options[:model_column],
+ data_column: options[:data_column],
+ input_tokens: usage_columns[:input_tokens],
+ output_tokens: usage_columns[:output_tokens],
+ total_tokens: usage_columns[:total_tokens]
+ }.freeze
+ end
+
+ ##
+ # Persists the runtime state and usage columns back onto the record.
+ # @return [void]
+ def self.save(obj, ctx, options)
+ columns = self.columns(options)
+ obj.assign_attributes(
+ columns[:data_column] => serialize_context(ctx, options[:format]),
+ columns[:input_tokens] => ctx.usage.input_tokens,
+ columns[:output_tokens] => ctx.usage.output_tokens,
+ columns[:total_tokens] => ctx.usage.total_tokens
+ )
+ obj.save!
+ end
+ end
+
  module Hooks
  ##
  # Called when hooks are extended onto an ActiveRecord model.
@@ -72,7 +143,8 @@ module LLM::ActiveRecord
  # @see LLM::Context#talk
  # @return [LLM::Response]
  def talk(...)
- ctx.talk(...).tap { flush }
+ options = self.class.llm_plugin_options
+ ctx.talk(...).tap { Utils.save(self, ctx, options) }
  end
 
  ##
@@ -80,7 +152,8 @@ module LLM::ActiveRecord
  # @see LLM::Context#respond
  # @return [LLM::Response]
  def respond(...)
- ctx.respond(...).tap { flush }
+ options = self.class.llm_plugin_options
+ ctx.respond(...).tap { Utils.save(self, ctx, options) }
  end
 
  ##
@@ -155,6 +228,7 @@ module LLM::ActiveRecord
  # Returns usage from the mapped usage columns.
  # @return [LLM::Object]
  def usage
+ columns = Utils.columns(self.class.llm_plugin_options)
  LLM::Object.from(
  input_tokens: self[columns[:input_tokens]] || 0,
  output_tokens: self[columns[:output_tokens]] || 0,
@@ -211,11 +285,12 @@ module LLM::ActiveRecord
  # @return [LLM::Provider]
  def llm
  options = self.class.llm_plugin_options
+ columns = Utils.columns(options)
  provider = self[columns[:provider_column]]
- kwargs = resolve_options(options[:provider])
+ kwargs = Utils.resolve_options(self, options[:provider], ActsAsLLM::EMPTY_HASH)
  return @llm if @llm
  @llm = LLM.method(provider).call(**kwargs)
- @llm.tracer = resolve_option(options[:tracer]) if options[:tracer]
+ @llm.tracer = Utils.resolve_option(self, options[:tracer]) if options[:tracer]
  @llm
  end
 
@@ -226,7 +301,8 @@ module LLM::ActiveRecord
  def ctx
  @ctx ||= begin
  options = self.class.llm_plugin_options
- params = resolve_options(options[:context]).dup
+ columns = Utils.columns(options)
+ params = Utils.resolve_options(self, options[:context], ActsAsLLM::EMPTY_HASH).dup
  params[:model] ||= self[columns[:model_column]]
  ctx = LLM::Context.new(llm, params.compact)
  data = self[columns[:data_column]]
@@ -241,62 +317,6 @@ module LLM::ActiveRecord
  end
  end
  end
-
- ##
- # @return [void]
- def flush
- attrs = {
- columns[:data_column] => serialize_context(self.class.llm_plugin_options[:format]),
- columns[:input_tokens] => ctx.usage.input_tokens,
- columns[:output_tokens] => ctx.usage.output_tokens,
- columns[:total_tokens] => ctx.usage.total_tokens
- }
- assign_attributes(attrs)
- save!
- end
-
- ##
- # @return [Hash]
- def resolve_option(option)
- case option
- when Proc then instance_exec(&option)
- when Symbol then send(option)
- when Hash then option.dup
- else option
- end
- end
-
- ##
- # @return [Hash]
- def resolve_options(option)
- case option
- when Proc, Symbol, Hash then resolve_option(option)
- else ActsAsLLM::EMPTY_HASH.dup
- end
- end
-
- def serialize_context(format)
- case format
- when :string then ctx.to_json
- when :json, :jsonb then ctx.to_h
- else raise ArgumentError, "Unknown format: #{format.inspect}"
- end
- end
-
- def columns
- @columns ||= begin
- options = self.class.llm_plugin_options
- usage_columns = options[:usage_columns]
- {
- provider_column: options[:provider_column],
- model_column: options[:model_column],
- data_column: options[:data_column],
- input_tokens: usage_columns[:input_tokens],
- output_tokens: usage_columns[:output_tokens],
- total_tokens: usage_columns[:total_tokens]
- }.freeze
- end
- end
  end
  end
  end
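For orientation, a minimal sketch of how the renamed llm_plugin_options attribute and the new Utils helpers behave on a wrapped model. The Chat class, the column values, the provider keyword arguments, and the environment variable are illustrative assumptions, not part of this diff:

  class Chat < ActiveRecord::Base
    # provider:, tracer:, and context: may each be a Hash, Symbol, or Proc;
    # Procs are instance_exec'd against the record and Symbols are sent to it
    # via Utils.resolve_option / Utils.resolve_options at request time.
    acts_as_agent provider: -> { {key: ENV["OPENAI_API_KEY"]} },
                  context: {temperature: 0.2}
  end

  chat = Chat.create!(provider: "openai", model: "gpt-4o-mini")  # assumes the default column names
  chat.talk("Hello")       # Utils.save persists the serialized context plus the usage columns
  chat.usage.total_tokens  # read back from the mapped usage columns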
data/lib/llm/agent.rb CHANGED
@@ -14,7 +14,7 @@ module LLM
  # `respond`, instead of leaving tool loops to the caller.
  #
  # **Notes:**
- # * Instructions are injected only on the first request.
+ # * Instructions are injected once unless a system message is already present.
  # * An agent automatically executes tool loops (unlike {LLM::Context LLM::Context}).
  # * Tool loop execution can be configured with `concurrency :call`,
  # `:thread`, `:task`, `:fiber`, `:ractor`, or a list of queued task
@@ -349,16 +349,28 @@ module LLM
  instr = self.class.instructions
  return new_prompt unless instr
  if LLM::Prompt === new_prompt
- new_prompt.system(instr) if @ctx.messages.empty?
+ new_prompt.system(instr) if inject_instructions?(new_prompt)
  new_prompt
  else
  prompt do
- _1.system(instr) if @ctx.messages.empty?
+ _1.system(instr) if inject_instructions?
  _1.user(new_prompt)
  end
  end
  end
 
+ ##
+ # Returns true when agent instructions should be injected for the turn.
+ # Instructions are injected once unless a system message is already
+ # present in the existing context or the prompt being sent.
+ # @param [LLM::Prompt, nil] prompt
+ # @return [Boolean]
+ def inject_instructions?(prompt = nil)
+ return false if @ctx.messages.any?(&:system?)
+ return true if prompt.nil?
+ !prompt.to_a.any?(&:system?)
+ end
+
  ##
  # @return [Array<LLM::Function::Return>]
  def call_functions
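A rough sketch of the new injection rule in practice; the Assistant subclass and the way it exposes class-level instructions are assumptions for illustration:

  # Hypothetical agent subclass; all that matters here is that
  # self.class.instructions returns a string, as the code above expects.
  class Assistant < LLM::Agent
    def self.instructions = "You are terse."
  end

  bot = Assistant.new(llm)   # llm is an LLM::Provider instance
  bot.talk("hi")             # no system message yet, so instructions are injected
  bot.talk("hi again")       # a system message now exists, so injection is skipped
  # Under 4.21.0 the only check was @ctx.messages.empty?, so instructions could be
  # injected alongside a system message the caller had already placed in the prompt;
  # inject_instructions? now defers to any system message in the context or prompt.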
data/lib/llm/buffer.rb CHANGED
@@ -23,6 +23,16 @@ module LLM
  @messages.concat(ary)
  end
 
+ ##
+ # Replace the tracked messages
+ # @param [Array<LLM::Message>] messages
+ # The replacement messages
+ # @return [LLM::Buffer]
+ def replace(messages)
+ @messages.replace(messages)
+ self
+ end
+
  ##
  # @yield [LLM::Message]
  # Yields each message in the conversation thread
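A small illustrative use of the new method; buffer stands in for any LLM::Buffer instance and the message variables are placeholders:

  buffer.replace([summary_message, *recent_messages])  # swap the tracked history in place
  # Returns the buffer itself; LLM::Compactor, added in the new-file hunk below,
  # relies on this to rewrite a context's messages after summarization.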
@@ -0,0 +1,128 @@
+ # frozen_string_literal: true
+
+ ##
+ # {LLM::Compactor LLM::Compactor} summarizes older context messages into a
+ # smaller replacement message when a context grows too large.
+ #
+ # This work is directly inspired by the compaction approach developed by
+ # General Intelligence Systems in
+ # [Brute](https://github.com/general-intelligence-systems/brute).
+ #
+ # The compactor can also use a different model from the main context by
+ # setting `model:` in the compactor config. By default, `token_threshold` is
+ # 10% less than the current context window, or `100_000` when the context
+ # window is unknown. Set `message_threshold:` or `token_threshold:` to `nil`
+ # to disable that constraint.
+ class LLM::Compactor
+ DEFAULT_TOKEN_THRESHOLD = 100_000
+ DEFAULTS = {
+ message_threshold: 200,
+ retention_window: 8,
+ model: nil
+ }.freeze
+
+ ##
+ # @return [Hash]
+ attr_reader :config
+
+ ##
+ # @param [LLM::Context] ctx
+ # @param [Hash] config
+ # @option config [Integer] :token_threshold
+ # Defaults to 10% less than the current context window, or `100_000` when
+ # the context window is unknown. Set to `nil` to disable token-based
+ # compaction.
+ # @option config [Integer] :message_threshold
+ # Set to `nil` to disable message-count-based compaction.
+ # @option config [Integer] :retention_window
+ # @option config [String, nil] :model
+ # The model to use for the summarization request. Defaults to the current
+ # context model.
+ def initialize(ctx, **config)
+ @ctx = ctx
+ @config = DEFAULTS.merge(token_threshold: default_token_threshold).merge(config)
+ end
+
+ ##
+ # Returns true when the context should be compacted
+ # @param [Object] prompt
+ # The next prompt or turn input
+ # @return [Boolean]
+ def compact?(prompt = nil)
+ return false if ctx.functions.any? || [*prompt].grep(LLM::Function::Return).any?
+ messages = ctx.messages.reject(&:system?)
+ return true if config[:message_threshold] && messages.size > config[:message_threshold]
+ usage = ctx.usage
+ return true if config[:token_threshold] && usage && usage.total_tokens > config[:token_threshold]
+ false
+ end
+
+ ##
+ # Summarize older messages and replace them with a compact summary.
+ # @param [Object] prompt
+ # The next prompt or turn input
+ # @return [LLM::Message, nil]
+ def compact!(prompt = nil)
+ return nil if ctx.functions.any? || [*prompt].grep(LLM::Function::Return).any?
+ messages = ctx.messages.reject(&:system?)
+ retention_window = [config[:retention_window], messages.size].min
+ return nil unless messages.size > retention_window
+ stream = ctx.params[:stream]
+ stream.on_compaction(ctx, self) if LLM::Stream === stream
+ recent = retained_messages
+ older = messages[0...(messages.size - recent.size)]
+ summary = LLM::Message.new(ctx.llm.user_role, "[Previous conversation summary]\n\n#{summarize(older)}")
+ ctx.messages.replace([*ctx.messages.take_while(&:system?), summary, *recent])
+ stream.on_compaction_finish(ctx, self) if LLM::Stream === stream
+ summary
+ end
+
+ private
+
+ attr_reader :ctx
+
+ def default_token_threshold
+ window = ctx.context_window
+ return DEFAULT_TOKEN_THRESHOLD if window.zero?
+ window - (window / 10)
+ end
+
+ def retained_messages
+ messages = ctx.messages.reject(&:system?)
+ retention_window = [config[:retention_window], messages.size].min
+ start = [messages.size - retention_window, 0].max
+ start -= 1 while start > 0 && messages[start].tool_return?
+ messages[start..] || []
+ end
+
+ def summarize(messages)
+ model = config[:model] || ctx.params[:model] || ctx.llm.default_model
+ ctx.llm.complete(summary_prompt(messages), model:).content
+ end
+
+ def summary_prompt(messages)
+ <<~PROMPT
+ Summarize this conversation history for context continuity.
+ The summary will replace these messages in the context window.
+
+ Focus on:
+ - What the user asked for
+ - Important facts and decisions
+ - Tool calls and outcomes that still matter
+ - What should happen next
+
+ Conversation:
+ #{serialize(messages)}
+ PROMPT
+ end
+
+ def serialize(messages)
+ messages.map do |message|
+ content = case message.content
+ when Array then message.content.map(&:inspect).join(", ")
+ else message.content.to_s
+ end
+ "#{message.role}: #{content.empty? ? "(empty)" : content}"
+ end.join("\n---\n")
+ end
+ end
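A hedged sketch of configuring compaction on a context; the provider constructor, its key argument, and the model names are placeholders rather than part of this diff:

  llm = LLM.openai(key: ENV["OPENAI_API_KEY"])
  ctx = LLM::Context.new(llm, compactor: {
    message_threshold: 50,    # compact after more than 50 non-system messages
    token_threshold: 60_000,  # or once reported usage exceeds 60k tokens
    retention_window: 6,      # keep the 6 most recent messages verbatim
    model: "gpt-4o-mini"      # summarization may use a cheaper model than the context
  })
  ctx.talk("...")             # talk/respond call compactor.compact! whenever compactor.compact? is true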
data/lib/llm/context.rb CHANGED
@@ -34,6 +34,7 @@ module LLM
  # ctx.talk(prompt)
  # ctx.messages.each { |m| puts "[#{m.role}] #{m.content}" }
  class Context
+ require_relative "compactor"
  require_relative "context/serializer"
  require_relative "context/deserializer"
  include Serializer
@@ -54,6 +55,13 @@ module LLM
  # @return [Symbol]
  attr_reader :mode
 
+ ##
+ # Returns the default params for this context
+ # @return [Hash]
+ def params
+ @params.dup
+ end
+
  ##
  # @param [LLM::Provider] llm
  # A provider
@@ -68,12 +76,24 @@ module LLM
  def initialize(llm, params = {})
  @llm = llm
  @mode = params.delete(:mode) || :completions
+ @compactor = params.delete(:compactor)
  tools = [*params.delete(:tools), *load_skills(params.delete(:skills))]
  @params = {model: llm.default_model, schema: nil}.compact.merge!(params)
  @params[:tools] = tools unless tools.empty?
  @messages = LLM::Buffer.new(llm)
  end
 
+ ##
+ # Returns a context compactor
+ # This feature is inspired by the compaction approach developed by
+ # General Intelligence Systems in
+ # [Brute](https://github.com/general-intelligence-systems/brute).
+ # @return [LLM::Compactor]
+ def compactor
+ @compactor = LLM::Compactor.new(self, **(@compactor || {})) unless LLM::Compactor === @compactor
+ @compactor
+ end
+
  ##
  # Interact with the context via the chat completions API.
  # This method immediately sends a request to the LLM and returns the response.
@@ -89,6 +109,7 @@ module LLM
  def talk(prompt, params = {})
  return respond(prompt, params) if mode == :responses
  @owner = Fiber.current
+ compactor.compact!(prompt) if compactor.compact?(prompt)
  params = params.merge(messages: @messages.to_a)
  params = @params.merge(params)
  bind!(params[:stream], params[:model])
@@ -116,6 +137,7 @@ module LLM
  # puts res.output_text
  def respond(prompt, params = {})
  @owner = Fiber.current
+ compactor.compact!(prompt) if compactor.compact?(prompt)
  params = @params.merge(params)
  bind!(params[:stream], params[:model])
  res_id = params[:store] == false ? nil : @messages.find(&:assistant?)&.response&.response_id
@@ -217,7 +239,14 @@ module LLM
  # messages.
  # @return [LLM::Object, nil]
  def usage
- @messages.find(&:assistant?)&.usage
+ usage = @messages.find(&:assistant?)&.usage
+ return unless usage
+ LLM::Object.from(
+ input_tokens: usage.input_tokens || 0,
+ output_tokens: usage.output_tokens || 0,
+ reasoning_tokens: usage.reasoning_tokens || 0,
+ total_tokens: usage.total_tokens || 0
+ )
  end
 
  ##
@@ -350,7 +379,7 @@ module LLM
  end
 
  def load_skills(skills)
- [*skills].map { LLM::Skill.load(_1).to_tool(llm) }
+ [*skills].map { LLM::Skill.load(_1).to_tool(self) }
  end
  end
 
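A short sketch of what the normalized usage object and the new params reader now guarantee; the prompt and token counts are invented for illustration:

  ctx.talk("Summarize the README")
  ctx.usage.input_tokens      # => e.g. 812; fields a provider omits default to 0
  ctx.usage.reasoning_tokens  # => 0 when the provider reports no reasoning tokens
  ctx.params                  # => a defensive copy of the context's default params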
data/lib/llm/function.rb CHANGED
@@ -266,9 +266,10 @@ class LLM::Function
  parameters: (@params || {type: "object", properties: {}}).to_h.merge(additionalProperties: false), strict: false
  }.compact
  else
+ params = @params || {type: "object", properties: {}}
  {
  type: "function", name: @name,
- function: {name: @name, description: @description, parameters: @params}
+ function: {name: @name, description: @description, parameters: params}
  }.compact
  end
  end
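For the non-strict branch above, a sketch of the hash a function defined without parameters now serializes to; previously the nested parameters key was nil when @params was unset. The name and description are illustrative only:

  {
    type: "function", name: "ping",
    function: {name: "ping", description: "Health check",
               parameters: {type: "object", properties: {}}}
  }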
@@ -10,9 +10,11 @@ module LLM::Sequel
  # instructions, and concurrency are configured on the model class and
  # forwarded to an internal agent subclass.
  module Agent
+ require_relative "plugin"
  EMPTY_HASH = LLM::Sequel::Plugin::EMPTY_HASH
  DEFAULT_USAGE_COLUMNS = LLM::Sequel::Plugin::DEFAULT_USAGE_COLUMNS
  DEFAULTS = LLM::Sequel::Plugin::DEFAULTS
+ Utils = LLM::Sequel::Plugin::Utils
 
  def self.apply(model, **)
  model.extend ClassMethods
@@ -71,7 +73,8 @@ module LLM::Sequel
  def ctx
  @ctx ||= begin
  options = self.class.llm_plugin_options
- params = resolve_options(options[:context]).dup
+ columns = Agent::Utils.columns(options)
+ params = Agent::Utils.resolve_options(self, options[:context], Agent::EMPTY_HASH).dup
  params[:model] ||= self[columns[:model_column]]
  ctx = self.class.agent.new(llm, params.compact)
  data = self[columns[:data_column]]
@@ -86,22 +89,6 @@ module LLM::Sequel
  end
  end
  end
-
- def resolve_option(option)
- case option
- when Proc then instance_exec(&option)
- when Symbol then send(option)
- when Hash then option.dup
- else option
- end
- end
-
- def resolve_options(option)
- case option
- when Proc, Symbol, Hash then resolve_option(option)
- else Agent::EMPTY_HASH.dup
- end
- end
  end
  end
  end