ruby_llm-agents 0.5.0 → 1.0.0.beta.1
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- checksums.yaml +4 -4
- data/README.md +189 -31
- data/app/controllers/ruby_llm/agents/agents_controller.rb +136 -16
- data/app/controllers/ruby_llm/agents/dashboard_controller.rb +29 -9
- data/app/controllers/ruby_llm/agents/workflows_controller.rb +355 -0
- data/app/helpers/ruby_llm/agents/application_helper.rb +25 -0
- data/app/models/ruby_llm/agents/execution.rb +3 -0
- data/app/models/ruby_llm/agents/tenant_budget.rb +58 -15
- data/app/services/ruby_llm/agents/agent_registry.rb +51 -12
- data/app/views/layouts/ruby_llm/agents/application.html.erb +2 -29
- data/app/views/ruby_llm/agents/agents/_agent.html.erb +13 -1
- data/app/views/ruby_llm/agents/agents/_config_agent.html.erb +235 -0
- data/app/views/ruby_llm/agents/agents/_config_embedder.html.erb +70 -0
- data/app/views/ruby_llm/agents/agents/_config_image_generator.html.erb +152 -0
- data/app/views/ruby_llm/agents/agents/_config_moderator.html.erb +63 -0
- data/app/views/ruby_llm/agents/agents/_config_speaker.html.erb +108 -0
- data/app/views/ruby_llm/agents/agents/_config_transcriber.html.erb +91 -0
- data/app/views/ruby_llm/agents/agents/_workflow.html.erb +1 -1
- data/app/views/ruby_llm/agents/agents/index.html.erb +74 -9
- data/app/views/ruby_llm/agents/agents/show.html.erb +18 -378
- data/app/views/ruby_llm/agents/dashboard/_agent_comparison.html.erb +269 -15
- data/app/views/ruby_llm/agents/executions/show.html.erb +16 -0
- data/app/views/ruby_llm/agents/shared/_agent_type_badge.html.erb +93 -0
- data/app/views/ruby_llm/agents/workflows/_step_performance.html.erb +236 -0
- data/app/views/ruby_llm/agents/workflows/_structure_parallel.html.erb +76 -0
- data/app/views/ruby_llm/agents/workflows/_structure_pipeline.html.erb +74 -0
- data/app/views/ruby_llm/agents/workflows/_structure_router.html.erb +108 -0
- data/app/views/ruby_llm/agents/workflows/show.html.erb +442 -0
- data/config/routes.rb +1 -0
- data/lib/generators/ruby_llm_agents/agent_generator.rb +56 -7
- data/lib/generators/ruby_llm_agents/background_remover_generator.rb +110 -0
- data/lib/generators/ruby_llm_agents/embedder_generator.rb +107 -0
- data/lib/generators/ruby_llm_agents/image_analyzer_generator.rb +115 -0
- data/lib/generators/ruby_llm_agents/image_editor_generator.rb +108 -0
- data/lib/generators/ruby_llm_agents/image_generator_generator.rb +116 -0
- data/lib/generators/ruby_llm_agents/image_pipeline_generator.rb +178 -0
- data/lib/generators/ruby_llm_agents/image_transformer_generator.rb +109 -0
- data/lib/generators/ruby_llm_agents/image_upscaler_generator.rb +103 -0
- data/lib/generators/ruby_llm_agents/image_variator_generator.rb +102 -0
- data/lib/generators/ruby_llm_agents/install_generator.rb +76 -4
- data/lib/generators/ruby_llm_agents/restructure_generator.rb +292 -0
- data/lib/generators/ruby_llm_agents/speaker_generator.rb +121 -0
- data/lib/generators/ruby_llm_agents/templates/add_execution_type_migration.rb.tt +8 -0
- data/lib/generators/ruby_llm_agents/templates/agent.rb.tt +99 -84
- data/lib/generators/ruby_llm_agents/templates/application_agent.rb.tt +42 -40
- data/lib/generators/ruby_llm_agents/templates/application_background_remover.rb.tt +26 -0
- data/lib/generators/ruby_llm_agents/templates/application_embedder.rb.tt +50 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_analyzer.rb.tt +26 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_editor.rb.tt +20 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_generator.rb.tt +38 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_pipeline.rb.tt +139 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_transformer.rb.tt +21 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_upscaler.rb.tt +20 -0
- data/lib/generators/ruby_llm_agents/templates/application_image_variator.rb.tt +20 -0
- data/lib/generators/ruby_llm_agents/templates/application_speaker.rb.tt +49 -0
- data/lib/generators/ruby_llm_agents/templates/application_transcriber.rb.tt +53 -0
- data/lib/generators/ruby_llm_agents/templates/background_remover.rb.tt +44 -0
- data/lib/generators/ruby_llm_agents/templates/embedder.rb.tt +41 -0
- data/lib/generators/ruby_llm_agents/templates/image_analyzer.rb.tt +45 -0
- data/lib/generators/ruby_llm_agents/templates/image_editor.rb.tt +35 -0
- data/lib/generators/ruby_llm_agents/templates/image_generator.rb.tt +47 -0
- data/lib/generators/ruby_llm_agents/templates/image_pipeline.rb.tt +50 -0
- data/lib/generators/ruby_llm_agents/templates/image_transformer.rb.tt +44 -0
- data/lib/generators/ruby_llm_agents/templates/image_upscaler.rb.tt +38 -0
- data/lib/generators/ruby_llm_agents/templates/image_variator.rb.tt +33 -0
- data/lib/generators/ruby_llm_agents/templates/skills/AGENTS.md.tt +228 -0
- data/lib/generators/ruby_llm_agents/templates/skills/BACKGROUND_REMOVERS.md.tt +131 -0
- data/lib/generators/ruby_llm_agents/templates/skills/EMBEDDERS.md.tt +255 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_ANALYZERS.md.tt +120 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_EDITORS.md.tt +102 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_GENERATORS.md.tt +282 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_PIPELINES.md.tt +228 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_TRANSFORMERS.md.tt +120 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_UPSCALERS.md.tt +110 -0
- data/lib/generators/ruby_llm_agents/templates/skills/IMAGE_VARIATORS.md.tt +120 -0
- data/lib/generators/ruby_llm_agents/templates/skills/SPEAKERS.md.tt +212 -0
- data/lib/generators/ruby_llm_agents/templates/skills/TOOLS.md.tt +227 -0
- data/lib/generators/ruby_llm_agents/templates/skills/TRANSCRIBERS.md.tt +251 -0
- data/lib/generators/ruby_llm_agents/templates/skills/WORKFLOWS.md.tt +300 -0
- data/lib/generators/ruby_llm_agents/templates/speaker.rb.tt +56 -0
- data/lib/generators/ruby_llm_agents/templates/transcriber.rb.tt +51 -0
- data/lib/generators/ruby_llm_agents/transcriber_generator.rb +107 -0
- data/lib/generators/ruby_llm_agents/upgrade_generator.rb +152 -1
- data/lib/ruby_llm/agents/audio/speaker.rb +553 -0
- data/lib/ruby_llm/agents/audio/transcriber.rb +669 -0
- data/lib/ruby_llm/agents/base_agent.rb +675 -0
- data/lib/ruby_llm/agents/core/base/moderation_dsl.rb +181 -0
- data/lib/ruby_llm/agents/core/base/moderation_execution.rb +274 -0
- data/lib/ruby_llm/agents/core/base.rb +135 -0
- data/lib/ruby_llm/agents/core/configuration.rb +981 -0
- data/lib/ruby_llm/agents/core/errors.rb +150 -0
- data/lib/ruby_llm/agents/{instrumentation.rb → core/instrumentation.rb} +22 -1
- data/lib/ruby_llm/agents/core/llm_tenant.rb +358 -0
- data/lib/ruby_llm/agents/{version.rb → core/version.rb} +1 -1
- data/lib/ruby_llm/agents/dsl/base.rb +110 -0
- data/lib/ruby_llm/agents/dsl/caching.rb +142 -0
- data/lib/ruby_llm/agents/dsl/reliability.rb +307 -0
- data/lib/ruby_llm/agents/dsl.rb +41 -0
- data/lib/ruby_llm/agents/image/analyzer/dsl.rb +130 -0
- data/lib/ruby_llm/agents/image/analyzer/execution.rb +402 -0
- data/lib/ruby_llm/agents/image/analyzer.rb +90 -0
- data/lib/ruby_llm/agents/image/background_remover/dsl.rb +154 -0
- data/lib/ruby_llm/agents/image/background_remover/execution.rb +240 -0
- data/lib/ruby_llm/agents/image/background_remover.rb +89 -0
- data/lib/ruby_llm/agents/image/concerns/image_operation_dsl.rb +91 -0
- data/lib/ruby_llm/agents/image/concerns/image_operation_execution.rb +165 -0
- data/lib/ruby_llm/agents/image/editor/dsl.rb +56 -0
- data/lib/ruby_llm/agents/image/editor/execution.rb +207 -0
- data/lib/ruby_llm/agents/image/editor.rb +92 -0
- data/lib/ruby_llm/agents/image/generator/active_storage_support.rb +127 -0
- data/lib/ruby_llm/agents/image/generator/content_policy.rb +95 -0
- data/lib/ruby_llm/agents/image/generator/pricing.rb +353 -0
- data/lib/ruby_llm/agents/image/generator/templates.rb +124 -0
- data/lib/ruby_llm/agents/image/generator.rb +455 -0
- data/lib/ruby_llm/agents/image/pipeline/dsl.rb +213 -0
- data/lib/ruby_llm/agents/image/pipeline/execution.rb +382 -0
- data/lib/ruby_llm/agents/image/pipeline.rb +97 -0
- data/lib/ruby_llm/agents/image/transformer/dsl.rb +148 -0
- data/lib/ruby_llm/agents/image/transformer/execution.rb +223 -0
- data/lib/ruby_llm/agents/image/transformer.rb +95 -0
- data/lib/ruby_llm/agents/image/upscaler/dsl.rb +83 -0
- data/lib/ruby_llm/agents/image/upscaler/execution.rb +219 -0
- data/lib/ruby_llm/agents/image/upscaler.rb +81 -0
- data/lib/ruby_llm/agents/image/variator/dsl.rb +62 -0
- data/lib/ruby_llm/agents/image/variator/execution.rb +189 -0
- data/lib/ruby_llm/agents/image/variator.rb +80 -0
- data/lib/ruby_llm/agents/{alert_manager.rb → infrastructure/alert_manager.rb} +17 -22
- data/lib/ruby_llm/agents/infrastructure/budget/budget_query.rb +145 -0
- data/lib/ruby_llm/agents/infrastructure/budget/config_resolver.rb +149 -0
- data/lib/ruby_llm/agents/infrastructure/budget/forecaster.rb +68 -0
- data/lib/ruby_llm/agents/infrastructure/budget/spend_recorder.rb +279 -0
- data/lib/ruby_llm/agents/infrastructure/budget_tracker.rb +275 -0
- data/lib/ruby_llm/agents/{execution_logger_job.rb → infrastructure/execution_logger_job.rb} +17 -1
- data/lib/ruby_llm/agents/{reliability → infrastructure/reliability}/executor.rb +2 -1
- data/lib/ruby_llm/agents/{reliability → infrastructure/reliability}/retry_strategy.rb +9 -3
- data/lib/ruby_llm/agents/{reliability.rb → infrastructure/reliability.rb} +11 -21
- data/lib/ruby_llm/agents/pipeline/builder.rb +215 -0
- data/lib/ruby_llm/agents/pipeline/context.rb +255 -0
- data/lib/ruby_llm/agents/pipeline/executor.rb +86 -0
- data/lib/ruby_llm/agents/pipeline/middleware/base.rb +124 -0
- data/lib/ruby_llm/agents/pipeline/middleware/budget.rb +95 -0
- data/lib/ruby_llm/agents/pipeline/middleware/cache.rb +171 -0
- data/lib/ruby_llm/agents/pipeline/middleware/instrumentation.rb +415 -0
- data/lib/ruby_llm/agents/pipeline/middleware/reliability.rb +276 -0
- data/lib/ruby_llm/agents/pipeline/middleware/tenant.rb +196 -0
- data/lib/ruby_llm/agents/pipeline.rb +68 -0
- data/lib/ruby_llm/agents/{engine.rb → rails/engine.rb} +79 -11
- data/lib/ruby_llm/agents/results/background_removal_result.rb +286 -0
- data/lib/ruby_llm/agents/{result.rb → results/base.rb} +73 -1
- data/lib/ruby_llm/agents/results/embedding_result.rb +243 -0
- data/lib/ruby_llm/agents/results/image_analysis_result.rb +314 -0
- data/lib/ruby_llm/agents/results/image_edit_result.rb +250 -0
- data/lib/ruby_llm/agents/results/image_generation_result.rb +346 -0
- data/lib/ruby_llm/agents/results/image_pipeline_result.rb +399 -0
- data/lib/ruby_llm/agents/results/image_transform_result.rb +251 -0
- data/lib/ruby_llm/agents/results/image_upscale_result.rb +255 -0
- data/lib/ruby_llm/agents/results/image_variation_result.rb +237 -0
- data/lib/ruby_llm/agents/results/moderation_result.rb +158 -0
- data/lib/ruby_llm/agents/results/speech_result.rb +338 -0
- data/lib/ruby_llm/agents/results/transcription_result.rb +408 -0
- data/lib/ruby_llm/agents/text/embedder.rb +444 -0
- data/lib/ruby_llm/agents/text/moderator.rb +237 -0
- data/lib/ruby_llm/agents/workflow/async.rb +220 -0
- data/lib/ruby_llm/agents/workflow/async_executor.rb +156 -0
- data/lib/ruby_llm/agents/{workflow.rb → workflow/orchestrator.rb} +6 -5
- data/lib/ruby_llm/agents/workflow/parallel.rb +34 -17
- data/lib/ruby_llm/agents/workflow/thread_pool.rb +185 -0
- data/lib/ruby_llm/agents.rb +86 -20
- metadata +172 -34
- data/lib/ruby_llm/agents/base/caching.rb +0 -40
- data/lib/ruby_llm/agents/base/cost_calculation.rb +0 -105
- data/lib/ruby_llm/agents/base/dsl.rb +0 -324
- data/lib/ruby_llm/agents/base/execution.rb +0 -366
- data/lib/ruby_llm/agents/base/reliability_dsl.rb +0 -82
- data/lib/ruby_llm/agents/base/reliability_execution.rb +0 -136
- data/lib/ruby_llm/agents/base/response_building.rb +0 -86
- data/lib/ruby_llm/agents/base/tool_tracking.rb +0 -57
- data/lib/ruby_llm/agents/base.rb +0 -210
- data/lib/ruby_llm/agents/budget_tracker.rb +0 -733
- data/lib/ruby_llm/agents/configuration.rb +0 -394
- /data/lib/ruby_llm/agents/{deprecations.rb → core/deprecations.rb} +0 -0
- /data/lib/ruby_llm/agents/{inflections.rb → core/inflections.rb} +0 -0
- /data/lib/ruby_llm/agents/{resolved_config.rb → core/resolved_config.rb} +0 -0
- /data/lib/ruby_llm/agents/{attempt_tracker.rb → infrastructure/attempt_tracker.rb} +0 -0
- /data/lib/ruby_llm/agents/{cache_helper.rb → infrastructure/cache_helper.rb} +0 -0
- /data/lib/ruby_llm/agents/{circuit_breaker.rb → infrastructure/circuit_breaker.rb} +0 -0
- /data/lib/ruby_llm/agents/{redactor.rb → infrastructure/redactor.rb} +0 -0
- /data/lib/ruby_llm/agents/{reliability → infrastructure/reliability}/breaker_manager.rb +0 -0
- /data/lib/ruby_llm/agents/{reliability → infrastructure/reliability}/execution_constraints.rb +0 -0
- /data/lib/ruby_llm/agents/{reliability → infrastructure/reliability}/fallback_routing.rb +0 -0
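The largest single addition in this release is data/lib/ruby_llm/agents/base_agent.rb (675 lines), which replaces the removed base.rb and its base/* concern modules with one class built on the new pipeline/* middleware stack; its full diff follows. For orientation before reading the hunk, here is a minimal sketch of an agent subclass that mirrors the @example documentation inside the file itself — the SearchAgent name, model, and prompts are illustrative, not part of the gem:

```ruby
# Illustrative only: mirrors the @example block documented in base_agent.rb below.
class SearchAgent < RubyLLM::Agents::BaseAgent
  model "gpt-4o"
  timeout 30
  cache_for 1.hour

  reliability do
    retries max: 3, backoff: :exponential
    fallback_models "gpt-4o-mini"
  end

  param :query, required: true
  param :limit, default: 10

  def system_prompt
    "You are a search assistant..."
  end

  def user_prompt
    "Search for: #{query}"
  end
end

SearchAgent.call(query: "red dress")
```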
data/lib/ruby_llm/agents/base_agent.rb
@@ -0,0 +1,675 @@
+# frozen_string_literal: true
+
+require_relative "dsl"
+require_relative "pipeline"
+require_relative "infrastructure/cache_helper"
+
+module RubyLLM
+  module Agents
+    # Base class for all agents using the middleware pipeline architecture.
+    #
+    # BaseAgent provides a unified foundation for building LLM-powered agents
+    # with configurable middleware for caching, reliability, instrumentation,
+    # budgeting, and multi-tenancy.
+    #
+    # @example Creating an agent
+    #   class SearchAgent < RubyLLM::Agents::BaseAgent
+    #     model "gpt-4o"
+    #     version "1.0"
+    #     description "Searches for relevant documents"
+    #     timeout 30
+    #
+    #     cache_for 1.hour
+    #
+    #     reliability do
+    #       retries max: 3, backoff: :exponential
+    #       fallback_models "gpt-4o-mini"
+    #     end
+    #
+    #     param :query, required: true
+    #     param :limit, default: 10
+    #
+    #     def system_prompt
+    #       "You are a search assistant..."
+    #     end
+    #
+    #     def user_prompt
+    #       "Search for: #{query}"
+    #     end
+    #   end
+    #
+    # @example Calling an agent
+    #   SearchAgent.call(query: "red dress")
+    #   SearchAgent.call(query: "red dress", dry_run: true)
+    #   SearchAgent.call(query: "red dress", skip_cache: true)
+    #
+    class BaseAgent
+      extend DSL::Base
+      extend DSL::Reliability
+      extend DSL::Caching
+      include CacheHelper
+
+      class << self
+        # Factory method to instantiate and execute an agent
+        #
+        # @param kwargs [Hash] Named parameters for the agent
+        # @option kwargs [Boolean] :dry_run Return prompt info without API call
+        # @option kwargs [Boolean] :skip_cache Bypass caching even if enabled
+        # @option kwargs [Hash, Object] :tenant Tenant context for multi-tenancy
+        # @option kwargs [String, Array<String>] :with Attachments (files, URLs)
+        # @yield [chunk] Yields chunks when streaming is enabled
+        # @return [Object] The processed response from the agent
+        def call(**kwargs, &block)
+          new(**kwargs).call(&block)
+        end
+
+        # Streams agent execution, yielding chunks as they arrive
+        #
+        # @param kwargs [Hash] Agent parameters
+        # @yield [chunk] Yields each chunk as it arrives
+        # @return [Result] The final result after streaming completes
+        # @raise [ArgumentError] If no block is provided
+        def stream(**kwargs, &block)
+          raise ArgumentError, "Block required for streaming" unless block_given?
+
+          instance = new(**kwargs)
+          instance.instance_variable_set(:@force_streaming, true)
+          instance.call(&block)
+        end
+
+        # Returns the agent type for this class
+        #
+        # Used by middleware to determine which tracking/budget config to use.
+        # Subclasses should override this method.
+        #
+        # @return [Symbol] The agent type (:conversation, :embedding, :image, etc.)
+        def agent_type
+          :conversation
+        end
+
+        # @!group Parameter DSL
+
+        # Defines a parameter for the agent
+        #
+        # Creates an accessor method for the parameter that retrieves values
+        # from the options hash, falling back to the default value.
+        #
+        # @param name [Symbol] The parameter name
+        # @param required [Boolean] Whether the parameter is required
+        # @param default [Object, nil] Default value if not provided
+        # @param type [Class, nil] Optional type for validation
+        # @return [void]
+        def param(name, required: false, default: nil, type: nil)
+          @params ||= {}
+          @params[name] = { required: required, default: default, type: type }
+          define_method(name) do
+            @options[name] || @options[name.to_s] || self.class.params.dig(name, :default)
+          end
+        end
+
+        # Returns all defined parameters including inherited ones
+        #
+        # @return [Hash{Symbol => Hash}] Parameter definitions
+        def params
+          parent = superclass.respond_to?(:params) ? superclass.params : {}
+          parent.merge(@params || {})
+        end
+
+        # @!endgroup
+
+        # @!group Streaming DSL
+
+        # Enables or returns streaming mode for this agent
+        #
+        # @param value [Boolean, nil] Whether to enable streaming
+        # @return [Boolean] The current streaming setting
+        def streaming(value = nil)
+          @streaming = value unless value.nil?
+          return @streaming unless @streaming.nil?
+
+          superclass.respond_to?(:streaming) ? superclass.streaming : default_streaming
+        end
+
+        # @!endgroup
+
+        # @!group Tools DSL
+
+        # Sets or returns the tools available to this agent
+        #
+        # @param tool_classes [Array<Class>] Tool classes to make available
+        # @return [Array<Class>] The current tools
+        def tools(tool_classes = nil)
+          @tools = Array(tool_classes) if tool_classes
+          @tools || (superclass.respond_to?(:tools) ? superclass.tools : [])
+        end
+
+        # @!endgroup
+
+        # @!group Temperature DSL
+
+        # Sets or returns the temperature for LLM responses
+        #
+        # @param value [Float, nil] Temperature value (0.0-2.0)
+        # @return [Float] The current temperature setting
+        def temperature(value = nil)
+          @temperature = value if value
+          @temperature || (superclass.respond_to?(:temperature) ? superclass.temperature : default_temperature)
+        end
+
+        # @!endgroup
+
+        # @!group Thinking DSL
+
+        # Configures extended thinking/reasoning for this agent
+        #
+        # @param effort [Symbol, nil] Thinking depth (:none, :low, :medium, :high)
+        # @param budget [Integer, nil] Token budget for thinking
+        # @return [Hash, nil] The current thinking configuration
+        def thinking(effort: nil, budget: nil)
+          if effort || budget
+            @thinking_config = {}
+            @thinking_config[:effort] = effort if effort
+            @thinking_config[:budget] = budget if budget
+          end
+          thinking_config
+        end
+
+        # Returns the thinking configuration
+        #
+        # Falls back to global configuration default if not set at class level.
+        #
+        # @return [Hash, nil] The thinking configuration
+        def thinking_config
+          return @thinking_config if @thinking_config
+          return superclass.thinking_config if superclass.respond_to?(:thinking_config) && superclass.thinking_config
+
+          # Fall back to global configuration default
+          RubyLLM::Agents.configuration.default_thinking
+        rescue StandardError
+          nil
+        end
+
+        # @!endgroup
+
+        private
+
+        def default_streaming
+          RubyLLM::Agents.configuration.default_streaming
+        rescue StandardError
+          false
+        end
+
+        def default_temperature
+          RubyLLM::Agents.configuration.default_temperature
+        rescue StandardError
+          0.7
+        end
+      end
+
+      # @!attribute [r] model
+      #   @return [String] The LLM model being used
+      # @!attribute [r] temperature
+      #   @return [Float] The temperature setting
+      # @!attribute [r] client
+      #   @return [RubyLLM::Chat] The configured RubyLLM client
+      attr_reader :model, :temperature, :client
+
+      # Creates a new agent instance
+      #
+      # @param model [String] Override the class-level model setting
+      # @param temperature [Float] Override the class-level temperature
+      # @param options [Hash] Agent parameters defined via the param DSL
+      def initialize(model: self.class.model, temperature: self.class.temperature, **options)
+        @model = model
+        @temperature = temperature
+        @options = options
+        validate_required_params!
+      end
+
+      # Executes the agent through the middleware pipeline
+      #
+      # @yield [chunk] Yields chunks when streaming is enabled
+      # @return [Object] The processed response
+      def call(&block)
+        return dry_run_response if @options[:dry_run]
+
+        context = build_context(&block)
+        result_context = Pipeline::Executor.execute(context)
+        result_context.output
+      end
+
+      # @!group Template Methods (override in subclasses)
+
+      # User prompt to send to the LLM
+      #
+      # @abstract Subclasses must implement this method
+      # @return [String] The user prompt
+      def user_prompt
+        raise NotImplementedError, "#{self.class} must implement #user_prompt"
+      end
+
+      # System prompt for LLM instructions
+      #
+      # @return [String, nil] System instructions, or nil for none
+      def system_prompt
+        nil
+      end
+
+      # Response schema for structured output
+      #
+      # @return [RubyLLM::Schema, nil] Schema definition, or nil for free-form
+      def schema
+        nil
+      end
+
+      # Conversation history for multi-turn conversations
+      #
+      # @return [Array<Hash>] Array of messages with :role and :content keys
+      def messages
+        []
+      end
+
+      # Post-processes the LLM response
+      #
+      # @param response [RubyLLM::Message] The raw response from the LLM
+      # @return [Object] The processed result
+      def process_response(response)
+        content = response.content
+        return content unless content.is_a?(Hash)
+
+        content.transform_keys(&:to_sym)
+      end
+
+      # @!endgroup
+
+      # Generates the cache key for this agent invocation
+      #
+      # @return [String] Cache key in format "ruby_llm_agent/ClassName/version/hash"
+      def agent_cache_key
+        ["ruby_llm_agent", self.class.name, self.class.version, cache_key_hash].join("/")
+      end
+
+      # Generates a hash of the cache key data
+      #
+      # @return [String] SHA256 hex digest of the cache key data
+      def cache_key_hash
+        Digest::SHA256.hexdigest(cache_key_data.to_json)
+      end
+
+      # Returns data to include in cache key generation
+      #
+      # @return [Hash] Data to hash for cache key
+      def cache_key_data
+        excludes = self.class.cache_key_excludes || %i[skip_cache dry_run with]
+        base_data = @options.except(*excludes)
+
+        # Include model and other relevant config
+        base_data.merge(
+          model: model,
+          system_prompt: system_prompt,
+          user_prompt: user_prompt
+        )
+      end
+
+      # Resolves thinking configuration
+      #
+      # Public for testing and introspection.
+      #
+      # @return [Hash, nil] Thinking configuration
+      def resolved_thinking
+        # Check for :none effort which means disabled
+        if @options.key?(:thinking)
+          thinking_option = @options[:thinking]
+          return nil if thinking_option == false
+          return nil if thinking_option.is_a?(Hash) && thinking_option[:effort] == :none
+          return thinking_option if thinking_option.is_a?(Hash)
+        end
+
+        self.class.thinking_config
+      end
+
+      protected
+
+      # Returns the options hash
+      #
+      # @return [Hash] The options passed to the agent
+      attr_reader :options
+
+      private
+
+      # Builds the pipeline context for execution
+      #
+      # @yield [chunk] Block for streaming
+      # @return [Pipeline::Context] The context object
+      def build_context(&block)
+        Pipeline::Context.new(
+          input: user_prompt,
+          agent_class: self.class,
+          agent_instance: self,
+          model: model,
+          tenant: resolve_tenant,
+          skip_cache: @options[:skip_cache],
+          stream_block: (block if streaming_enabled?),
+          options: execution_options
+        )
+      end
+
+      # Returns options for the LLM execution
+      #
+      # @return [Hash] Execution options
+      def execution_options
+        {
+          temperature: temperature,
+          system_prompt: system_prompt,
+          schema: schema,
+          messages: resolved_messages,
+          tools: resolved_tools,
+          thinking: resolved_thinking,
+          attachments: @options[:with],
+          timeout: self.class.timeout
+        }.compact
+      end
+
+      # Resolves the tenant from options
+      #
+      # @return [Hash, nil] Resolved tenant info
+      def resolve_tenant
+        tenant_value = @options[:tenant]
+        return nil unless tenant_value
+
+        if tenant_value.is_a?(Hash)
+          tenant_value
+        elsif tenant_value.respond_to?(:llm_tenant_id)
+          { id: tenant_value.llm_tenant_id, object: tenant_value }
+        else
+          raise ArgumentError, "tenant must be a Hash or respond to :llm_tenant_id"
+        end
+      end
+
+      # Resolves tools for this execution
+      #
+      # @return [Array<Class>] Tool classes to use
+      def resolved_tools
+        if self.class.instance_methods(false).include?(:tools)
+          tools
+        else
+          self.class.tools
+        end
+      end
+
+      # Resolves messages for this execution
+      #
+      # @return [Array<Hash>] Messages to apply
+      def resolved_messages
+        return @options[:messages] if @options[:messages]&.any?
+
+        messages
+      end
+
+      # Returns whether streaming is enabled
+      #
+      # @return [Boolean]
+      def streaming_enabled?
+        @force_streaming || self.class.streaming
+      end
+
+      # Returns prompt info without making an API call
+      #
+      # @return [Result] A Result with dry run configuration info
+      def dry_run_response
+        Result.new(
+          content: {
+            dry_run: true,
+            agent: self.class.name,
+            model: model,
+            temperature: temperature,
+            timeout: self.class.timeout,
+            system_prompt: system_prompt,
+            user_prompt: user_prompt,
+            attachments: @options[:with],
+            schema: schema&.class&.name,
+            streaming: self.class.streaming,
+            tools: resolved_tools.map { |t| t.respond_to?(:name) ? t.name : t.to_s },
+            cache_enabled: self.class.cache_enabled?,
+            reliability_config: self.class.reliability_config
+          },
+          model_id: model,
+          temperature: temperature,
+          streaming: self.class.streaming
+        )
+      end
+
+      # Validates that all required parameters are present
+      #
+      # @raise [ArgumentError] If required parameters are missing
+      def validate_required_params!
+        self.class.params.each do |name, config|
+          value = @options[name] || @options[name.to_s]
+          has_value = @options.key?(name) || @options.key?(name.to_s)
+
+          if config[:required] && !has_value
+            raise ArgumentError, "#{self.class} missing required param: #{name}"
+          end
+
+          if config[:type] && has_value && !value.nil? && !value.is_a?(config[:type])
+            raise ArgumentError,
+                  "#{self.class} expected #{config[:type]} for :#{name}, got #{value.class}"
+          end
+        end
+      end
+
+      # Execute the core LLM call
+      #
+      # This is called by the Pipeline::Executor after all middleware
+      # has been applied. Override this method in specialized agent types
+      # (embedder, image generator, etc.) to customize the execution.
+      #
+      # @param context [Pipeline::Context] The execution context
+      # @return [void] Sets context.output with the result
+      def execute(context)
+        client = build_client
+        response = execute_llm_call(client, context)
+        capture_response(response, context)
+        result = build_result(process_response(response), response, context)
+        context.output = result
+      end
+
+      # Builds and configures the RubyLLM client
+      #
+      # @return [RubyLLM::Chat] Configured chat client
+      def build_client
+        client = RubyLLM.chat
+          .with_model(model)
+          .with_temperature(temperature)
+
+        client = client.with_instructions(system_prompt) if system_prompt
+        client = client.with_schema(schema) if schema
+        client = client.with_tools(*resolved_tools) if resolved_tools.any?
+        client = apply_messages(client, resolved_messages) if resolved_messages.any?
+        client = client.with_thinking(**resolved_thinking) if resolved_thinking
+
+        client
+      end
+
+      # Executes the LLM call
+      #
+      # @param client [RubyLLM::Chat] The configured client
+      # @param context [Pipeline::Context] The execution context
+      # @return [RubyLLM::Message] The response
+      def execute_llm_call(client, context)
+        timeout = self.class.timeout
+        ask_opts = {}
+        ask_opts[:with] = @options[:with] if @options[:with]
+
+        Timeout.timeout(timeout) do
+          if streaming_enabled? && context.stream_block
+            execute_with_streaming(client, context, ask_opts)
+          else
+            client.ask(user_prompt, **ask_opts)
+          end
+        end
+      end
+
+      # Executes with streaming enabled
+      #
+      # @param client [RubyLLM::Chat] The client
+      # @param context [Pipeline::Context] The context
+      # @param ask_opts [Hash] Options for the ask call
+      # @return [RubyLLM::Message] The response
+      def execute_with_streaming(client, context, ask_opts)
+        first_chunk_at = nil
+        started_at = context.started_at || Time.current
+
+        response = client.ask(user_prompt, **ask_opts) do |chunk|
+          first_chunk_at ||= Time.current
+          context.stream_block.call(chunk)
+        end
+
+        if first_chunk_at
+          context.time_to_first_token_ms = ((first_chunk_at - started_at) * 1000).to_i
+        end
+
+        response
+      end
+
+      # Captures response metadata to the context
+      #
+      # @param response [RubyLLM::Message] The response
+      # @param context [Pipeline::Context] The context
+      def capture_response(response, context)
+        context.input_tokens = response.input_tokens
+        context.output_tokens = response.output_tokens
+        context.model_used = response.model_id || model
+        # finish_reason may not be available on all RubyLLM::Message versions
+        context.finish_reason = response.respond_to?(:finish_reason) ? response.finish_reason : nil
+
+        calculate_costs(response, context) if context.input_tokens
+      end
+
+      # Calculates costs for the response
+      #
+      # @param response [RubyLLM::Message] The response
+      # @param context [Pipeline::Context] The context
+      def calculate_costs(response, context)
+        model_info = find_model_info(response.model_id || model)
+        return unless model_info
+
+        input_tokens = context.input_tokens || 0
+        output_tokens = context.output_tokens || 0
+
+        input_price = extract_model_price(model_info, :input_price)
+        output_price = extract_model_price(model_info, :output_price)
+
+        context.input_cost = (input_tokens / 1_000_000.0) * input_price
+        context.output_cost = (output_tokens / 1_000_000.0) * output_price
+        context.total_cost = (context.input_cost + context.output_cost).round(6)
+      end
+
+      # Extracts price from model info (supports both hash and object access)
+      #
+      # @param model_info [Hash, Object] Model info
+      # @param key [Symbol] The price key
+      # @return [Float] The price, or 0 if not found
+      def extract_model_price(model_info, key)
+        if model_info.respond_to?(key)
+          model_info.send(key) || 0
+        elsif model_info.respond_to?(:[])
+          model_info[key] || 0
+        else
+          0
+        end
+      end
+
+      # Finds model pricing info
+      #
+      # @param model_id [String] The model ID
+      # @return [Hash, nil] Model info with pricing
+      def find_model_info(model_id)
+        return nil unless defined?(RubyLLM::Models)
+
+        RubyLLM::Models.find(model_id)
+      rescue StandardError
+        nil
+      end
+
+      # Builds a Result object from the response
+      #
+      # @param content [Object] The processed content
+      # @param response [RubyLLM::Message] The raw response
+      # @param context [Pipeline::Context] The context
+      # @return [Result] The result object
+      def build_result(content, response, context)
+        Result.new(
+          content: content,
+          input_tokens: context.input_tokens,
+          output_tokens: context.output_tokens,
+          input_cost: context.input_cost,
+          output_cost: context.output_cost,
+          total_cost: context.total_cost,
+          model_id: model,
+          chosen_model_id: context.model_used || model,
+          temperature: temperature,
+          started_at: context.started_at,
+          completed_at: context.completed_at,
+          duration_ms: context.duration_ms,
+          time_to_first_token_ms: context.time_to_first_token_ms,
+          finish_reason: context.finish_reason,
+          streaming: streaming_enabled?,
+          attempts_count: context.attempts_made || 1
+        )
+      end
+
+      # Extracts thinking data from a response for inclusion in Result
+      #
+      # @param response [Object] The response object
+      # @return [Hash] Hash with thinking_text, thinking_signature, thinking_tokens
+      def result_thinking_data(response)
+        return {} unless response.respond_to?(:thinking) && response.thinking
+
+        thinking = response.thinking
+
+        data = {}
+        data[:thinking_text] = extract_thinking_value(thinking, :text)
+        data[:thinking_signature] = extract_thinking_value(thinking, :signature)
+        data[:thinking_tokens] = extract_thinking_value(thinking, :tokens)
+
+        data.compact
+      end
+
+      # Safely extracts thinking data without raising errors
+      #
+      # @param response [Object] The response object
+      # @return [Hash] Hash with thinking data or empty hash
+      def safe_extract_thinking_data(response)
+        result_thinking_data(response)
+      rescue StandardError
+        {}
+      end
+
+      # Extracts a value from thinking object (supports both hash and object access)
+      #
+      # @param thinking [Hash, Object] The thinking object
+      # @param key [Symbol] The key to extract
+      # @return [Object, nil] The value or nil
+      def extract_thinking_value(thinking, key)
+        if thinking.respond_to?(key)
+          thinking.send(key)
+        elsif thinking.respond_to?(:[])
+          thinking[key]
+        end
+      end
+
+      # Applies conversation history to the client
+      #
+      # @param client [RubyLLM::Chat] The chat client
+      # @param msgs [Array<Hash>] Messages with :role and :content keys
+      # @return [RubyLLM::Chat] Client with messages applied
+      def apply_messages(client, msgs)
+        msgs.each do |message|
+          client.add_message(role: message[:role].to_sym, content: message[:content])
+        end
+        client
+      end
+    end
+  end
+end