ruby_llm-agents 3.8.0 → 3.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45)
  1. checksums.yaml +4 -4
  2. data/README.md +30 -10
  3. data/app/controllers/ruby_llm/agents/requests_controller.rb +117 -0
  4. data/app/views/layouts/ruby_llm/agents/application.html.erb +4 -2
  5. data/app/views/ruby_llm/agents/requests/index.html.erb +153 -0
  6. data/app/views/ruby_llm/agents/requests/show.html.erb +136 -0
  7. data/config/routes.rb +2 -0
  8. data/lib/generators/ruby_llm_agents/agent_generator.rb +2 -2
  9. data/lib/generators/ruby_llm_agents/demo_generator.rb +102 -0
  10. data/lib/generators/ruby_llm_agents/doctor_generator.rb +196 -0
  11. data/lib/generators/ruby_llm_agents/install_generator.rb +7 -19
  12. data/lib/generators/ruby_llm_agents/templates/agent.rb.tt +27 -80
  13. data/lib/generators/ruby_llm_agents/templates/application_agent.rb.tt +18 -51
  14. data/lib/generators/ruby_llm_agents/templates/initializer.rb.tt +19 -17
  15. data/lib/ruby_llm/agents/base_agent.rb +68 -7
  16. data/lib/ruby_llm/agents/core/base.rb +4 -0
  17. data/lib/ruby_llm/agents/core/configuration.rb +10 -0
  18. data/lib/ruby_llm/agents/core/version.rb +1 -1
  19. data/lib/ruby_llm/agents/pipeline/context.rb +26 -0
  20. data/lib/ruby_llm/agents/pipeline/middleware/base.rb +58 -4
  21. data/lib/ruby_llm/agents/pipeline/middleware/budget.rb +17 -15
  22. data/lib/ruby_llm/agents/pipeline/middleware/cache.rb +34 -22
  23. data/lib/ruby_llm/agents/pipeline/middleware/instrumentation.rb +105 -50
  24. data/lib/ruby_llm/agents/pipeline/middleware/reliability.rb +7 -5
  25. data/lib/ruby_llm/agents/pipeline/middleware/tenant.rb +6 -4
  26. data/lib/ruby_llm/agents/rails/engine.rb +11 -0
  27. data/lib/ruby_llm/agents/results/background_removal_result.rb +7 -1
  28. data/lib/ruby_llm/agents/results/base.rb +24 -2
  29. data/lib/ruby_llm/agents/results/embedding_result.rb +4 -0
  30. data/lib/ruby_llm/agents/results/image_analysis_result.rb +7 -1
  31. data/lib/ruby_llm/agents/results/image_edit_result.rb +7 -1
  32. data/lib/ruby_llm/agents/results/image_generation_result.rb +7 -1
  33. data/lib/ruby_llm/agents/results/image_pipeline_result.rb +7 -1
  34. data/lib/ruby_llm/agents/results/image_transform_result.rb +7 -1
  35. data/lib/ruby_llm/agents/results/image_upscale_result.rb +7 -1
  36. data/lib/ruby_llm/agents/results/image_variation_result.rb +7 -1
  37. data/lib/ruby_llm/agents/results/speech_result.rb +6 -0
  38. data/lib/ruby_llm/agents/results/trackable.rb +25 -0
  39. data/lib/ruby_llm/agents/results/transcription_result.rb +6 -0
  40. data/lib/ruby_llm/agents/text/embedder.rb +7 -4
  41. data/lib/ruby_llm/agents/track_report.rb +127 -0
  42. data/lib/ruby_llm/agents/tracker.rb +32 -0
  43. data/lib/ruby_llm/agents.rb +208 -0
  44. data/lib/tasks/ruby_llm_agents.rake +6 -0
  45. metadata +10 -2
@@ -0,0 +1,196 @@
1
+ # frozen_string_literal: true
2
+
3
+ require "rails/generators"
4
+
5
+ module RubyLlmAgents
6
+ # Doctor generator — validates that setup is complete and working.
7
+ #
8
+ # Usage:
9
+ # rails generate ruby_llm_agents:doctor
10
+ # rails ruby_llm_agents:doctor (rake task alias)
11
+ #
12
+ # Checks:
13
+ # 1. API keys — at least one provider key is configured
14
+ # 2. Migrations — required tables exist
15
+ # 3. Routes — engine is mounted
16
+ # 4. Background jobs — ActiveJob adapter is configured (not :async/:inline in prod)
17
+ # 5. Agents — at least one agent file exists
18
+ #
19
+ class DoctorGenerator < ::Rails::Generators::Base
20
+ desc "Validate your RubyLLM::Agents setup and print actionable fixes"
21
+
22
+ def run_checks
23
+ @pass = 0
24
+ @fail = 0
25
+ @warn = 0
26
+
27
+ say ""
28
+ say "RubyLLM::Agents Doctor", :bold
29
+ say "=" * 40
30
+
31
+ check_api_keys
32
+ check_migrations
33
+ check_routes
34
+ check_background_jobs
35
+ check_agents
36
+
37
+ say ""
38
+ say "=" * 40
39
+ summary = "#{@pass} passed, #{@fail} failed, #{@warn} warnings"
40
+ if @fail > 0
41
+ say "Result: #{summary}", :red
42
+ elsif @warn > 0
43
+ say "Result: #{summary}", :yellow
44
+ else
45
+ say "Result: #{summary} — you're all set!", :green
46
+ end
47
+ say ""
48
+ end
49
+
50
+ private
51
+
52
+ def check_api_keys
53
+ say ""
54
+ say "API Keys", :bold
55
+
56
+ config = RubyLLM::Agents.configuration
57
+ providers = {
58
+ "OpenAI" => -> { config.openai_api_key },
59
+ "Anthropic" => -> { config.anthropic_api_key },
60
+ "Gemini" => -> { config.gemini_api_key },
61
+ "DeepSeek" => -> { config.deepseek_api_key },
62
+ "OpenRouter" => -> { config.openrouter_api_key },
63
+ "Mistral" => -> { config.mistral_api_key }
64
+ }
65
+
66
+ configured = providers.select { |_, v| v.call.present? }.keys
67
+
68
+ if configured.any?
69
+ configured.each { |name| pass "#{name} API key configured" }
70
+ else
71
+ fail_check "No API keys configured"
72
+ fix "Add to config/initializers/ruby_llm_agents.rb:"
73
+ fix " config.openai_api_key = ENV[\"OPENAI_API_KEY\"]"
74
+ fix "Then set the environment variable in .env or credentials."
75
+ end
76
+ end
77
+
78
+ def check_migrations
79
+ say ""
80
+ say "Database", :bold
81
+
82
+ tables = {
83
+ "ruby_llm_agents_executions" => "rails generate ruby_llm_agents:install && rails db:migrate",
84
+ "ruby_llm_agents_execution_details" => "rails generate ruby_llm_agents:upgrade && rails db:migrate"
85
+ }
86
+
87
+ tables.each do |table, fix_cmd|
88
+ if table_exists?(table)
89
+ pass "Table #{table} exists"
90
+ else
91
+ fail_check "Table #{table} missing"
92
+ fix fix_cmd
93
+ end
94
+ end
95
+ end
96
+
97
+ def check_routes
98
+ say ""
99
+ say "Routes", :bold
100
+
101
+ routes_file = File.join(destination_root, "config/routes.rb")
102
+ if File.exist?(routes_file)
103
+ content = File.read(routes_file)
104
+ if content.include?("RubyLLM::Agents::Engine")
105
+ pass "Dashboard engine mounted"
106
+ else
107
+ warn_check "Dashboard engine not mounted in routes"
108
+ fix "Add to config/routes.rb:"
109
+ fix " mount RubyLLM::Agents::Engine => \"/agents\""
110
+ end
111
+ else
112
+ warn_check "Could not find config/routes.rb"
113
+ end
114
+ end
115
+
116
+ def check_background_jobs
117
+ say ""
118
+ say "Background Jobs", :bold
119
+
120
+ adapter = ActiveJob::Base.queue_adapter.class.name
121
+ async_logging = RubyLLM::Agents.configuration.async_logging
122
+
123
+ if !async_logging
124
+ pass "Async logging disabled (synchronous mode)"
125
+ elsif adapter.include?("Async") || adapter.include?("Inline")
126
+ if Rails.env.production?
127
+ warn_check "ActiveJob adapter is #{adapter} — execution logging may be lost in production"
128
+ fix "Configure a persistent adapter (Sidekiq, GoodJob, SolidQueue, etc.)"
129
+ fix "Or set config.async_logging = false for synchronous logging."
130
+ else
131
+ pass "ActiveJob adapter: #{adapter} (OK for development)"
132
+ end
133
+ else
134
+ pass "ActiveJob adapter: #{adapter}"
135
+ end
136
+ end
137
+
138
+ def check_agents
139
+ say ""
140
+ say "Agents", :bold
141
+
142
+ agents_dir = File.join(destination_root, "app/agents")
143
+ if Dir.exist?(agents_dir)
144
+ agent_files = Dir.glob(File.join(agents_dir, "**/*_agent.rb"))
145
+ .reject { |f| f.end_with?("application_agent.rb") }
146
+
147
+ if agent_files.any?
148
+ pass "Found #{agent_files.size} agent(s)"
149
+ else
150
+ warn_check "No agents found (only application_agent.rb)"
151
+ fix "rails generate ruby_llm_agents:agent HelloWorld query:required"
152
+ fix "Or: rails generate ruby_llm_agents:demo"
153
+ end
154
+ else
155
+ fail_check "app/agents/ directory missing"
156
+ fix "rails generate ruby_llm_agents:install"
157
+ end
158
+ end
159
+
160
+ # Helpers
161
+
162
+ def table_exists?(name)
163
+ ActiveRecord::Base.connection.table_exists?(name)
164
+ rescue => e
165
+ say " (Could not check database: #{e.message})", :yellow
166
+ false
167
+ end
168
+
169
+ def pass(msg)
170
+ @pass += 1
171
+ say " #{status_icon(:pass)} #{msg}", :green
172
+ end
173
+
174
+ def fail_check(msg)
175
+ @fail += 1
176
+ say " #{status_icon(:fail)} #{msg}", :red
177
+ end
178
+
179
+ def warn_check(msg)
180
+ @warn += 1
181
+ say " #{status_icon(:warn)} #{msg}", :yellow
182
+ end
183
+
184
+ def fix(msg)
185
+ say " Fix: #{msg}"
186
+ end
187
+
188
+ def status_icon(type)
189
+ case type
190
+ when :pass then "OK"
191
+ when :fail then "FAIL"
192
+ when :warn then "WARN"
193
+ end
194
+ end
195
+ end
196
+ end
@@ -96,28 +96,16 @@ module RubyLlmAgents
96
96
  say ""
97
97
  say "RubyLLM::Agents has been installed!", :green
98
98
  say ""
99
- say "Directory structure created:"
100
- say " app/"
101
- say " ├── agents/"
102
- say " │ ├── application_agent.rb"
103
- say " │ ├── concerns/"
104
- say " │ └── AGENTS.md"
105
- say " └── tools/"
106
- say " └── TOOLS.md"
107
- say ""
108
- say "Skill files (*.md) help AI coding assistants understand how to use this gem."
109
- say ""
110
99
  say "Next steps:"
111
- say " 1. Set your API keys in config/initializers/ruby_llm_agents.rb"
100
+ say " 1. Set your API key in config/initializers/ruby_llm_agents.rb"
112
101
  say " 2. Run migrations: rails db:migrate"
113
- say " 3. Generate an agent: rails generate ruby_llm_agents:agent MyAgent query:required"
114
- say " 4. Access the dashboard at: /agents"
102
+ say " 3. Verify setup: rails ruby_llm_agents:doctor"
103
+ say " 4. Try it out: rails generate ruby_llm_agents:demo"
104
+ say ""
105
+ say "Or generate a custom agent:"
106
+ say " rails generate ruby_llm_agents:agent MyAgent query:required"
115
107
  say ""
116
- say "Generator commands:"
117
- say " rails generate ruby_llm_agents:agent CustomerSupport query:required"
118
- say " rails generate ruby_llm_agents:image_generator Product"
119
- say " rails generate ruby_llm_agents:transcriber Meeting"
120
- say " rails generate ruby_llm_agents:embedder Semantic"
108
+ say "Dashboard: /agents"
121
109
  say ""
122
110
  end
123
111
 
@@ -8,108 +8,55 @@
8
8
  <%- else -%>
9
9
  class <%= class_name %>Agent < ApplicationAgent
10
10
  <%- end -%>
11
- # ============================================
12
- # Model Configuration
13
- # ============================================
14
-
11
+ <% if options[:model] != "default" -%>
15
12
  model "<%= options[:model] %>"
13
+ <% end -%>
14
+ <% if options[:temperature] != 0.0 -%>
16
15
  temperature <%= options[:temperature] %>
17
- # timeout 30 # Per-request timeout in seconds (default: 60)
18
-
19
- # ============================================
20
- # Caching
21
- # ============================================
22
-
23
- <% if options[:cache] -%>
24
- cache <%= options[:cache] %>
25
- <% else -%>
26
- # cache 1.hour # Enable response caching with TTL
27
16
  <% end -%>
28
17
 
29
18
  # ============================================
30
- # Reliability (Retries & Fallbacks)
19
+ # Prompts
31
20
  # ============================================
21
+ # Use {placeholder} syntax — placeholders become required params automatically.
32
22
 
33
- # Automatic retries with exponential backoff
34
- # - max: Number of retry attempts
35
- # - backoff: :constant or :exponential
36
- # - base: Base delay in seconds
37
- # - max_delay: Maximum delay between retries
38
- # - on: Additional error classes to retry on
39
- # retries max: 2, backoff: :exponential, base: 0.4, max_delay: 3.0
40
-
41
- # Fallback models (tried in order when primary model fails)
42
- # fallback_models ["gpt-4o-mini", "claude-3-haiku"]
43
-
44
- # Total timeout across all retry/fallback attempts
45
- # total_timeout 30
46
-
47
- # Circuit breaker (prevents repeated calls to failing models)
48
- # - errors: Number of errors to trigger open state
49
- # - within: Rolling window in seconds
50
- # - cooldown: Time to wait before allowing requests again
51
- # circuit_breaker errors: 5, within: 60, cooldown: 300
52
-
53
- # ============================================
54
- # Parameters
55
- # ============================================
23
+ system "You are a helpful assistant."
56
24
 
57
- <% parsed_params.each do |param| -%>
58
- param :<%= param.name %><%= ", required: true" if param.required? %><%= ", default: #{param.default.inspect}" if param.default && !param.required? %>
25
+ <% if parsed_params.any? -%>
26
+ prompt "<%= parsed_params.map { |p| "{#{p.name}}" }.join(" ") %>"
27
+ <% else -%>
28
+ prompt "Your prompt here"
59
29
  <% end -%>
60
30
 
61
- private
31
+ <% parsed_params.select { |p| !p.required? && p.default }.each do |param| -%>
32
+ param :<%= param.name %>, default: <%= param.default.inspect %>
33
+ <% end -%>
34
+ <% if options[:cache] -%>
62
35
 
63
36
  # ============================================
64
- # Prompts (required)
37
+ # Caching
65
38
  # ============================================
66
39
 
67
- def system_prompt
68
- <<~PROMPT
69
- You are a helpful assistant.
70
- # Define your system instructions here
71
- PROMPT
72
- end
73
-
74
- def user_prompt
75
- # Build the prompt from parameters
76
- <% if parsed_params.any? -%>
77
- <%= parsed_params.first.name %>
78
- <% else -%>
79
- "Your prompt here"
40
+ cache for: <%= options[:cache] %>
80
41
  <% end -%>
81
- end
82
42
 
83
43
  # ============================================
84
- # Optional Overrides
44
+ # Error Handling (uncomment to enable)
85
45
  # ============================================
86
46
 
87
- # Structured output schema (returns parsed hash instead of raw text)
88
- # def schema
89
- # @schema ||= RubyLLM::Schema.create do
90
- # string :result, description: "The result"
91
- # integer :confidence, description: "Confidence score 1-100"
92
- # array :tags, description: "Relevant tags" do
93
- # string
94
- # end
95
- # end
96
- # end
97
-
98
- # Custom response processing (default: symbolize hash keys)
99
- # def process_response(response)
100
- # content = response.content
101
- # # Transform or validate the response
102
- # content
47
+ # on_failure do
48
+ # retries times: 2, backoff: :exponential
49
+ # fallback to: ["gpt-4o-mini"]
50
+ # timeout 30
103
51
  # end
104
52
 
105
- # Custom metadata to include in execution logs
106
- # def metadata
107
- # { custom_field: "value", request_id: params[:request_id] }
108
- # end
53
+ # ============================================
54
+ # Structured Output (uncomment to enable)
55
+ # ============================================
109
56
 
110
- # Custom cache key data (default: all params except skip_cache, dry_run)
111
- # def cache_key_data
112
- # { query: params[:query], locale: I18n.locale }
57
+ # returns do
58
+ # string :result, description: "The result"
59
+ # integer :confidence, description: "Confidence score 1-100"
113
60
  # end
114
61
  <%- if class_name.include?("::") -%>
115
62
  <%- (class_name.split("::").length - 1).times do |i| -%>
@@ -2,15 +2,15 @@
2
2
 
3
3
  # ApplicationAgent - Base class for all agents in this application
4
4
  #
5
- # All agents inherit from this class. Configure shared settings here
6
- # that apply to all agents, or override them per-agent as needed.
5
+ # All agents inherit from this class. Configure shared settings here.
7
6
  #
8
- # Example:
7
+ # Quick reference:
9
8
  # class MyAgent < ApplicationAgent
10
- # param :query, required: true
9
+ # system "You are a helpful assistant."
10
+ # prompt "Answer this: {query}" # {query} becomes a required param
11
11
  #
12
- # def user_prompt
13
- # query
12
+ # returns do # Optional: structured output
13
+ # string :answer, description: "The answer"
14
14
  # end
15
15
  # end
16
16
  #
@@ -20,50 +20,17 @@
20
20
  # MyAgent.call(query: "hello", skip_cache: true) # Bypass cache
21
21
  #
22
22
  class ApplicationAgent < RubyLLM::Agents::Base
23
- # ============================================
24
- # Shared Model Configuration
25
- # ============================================
26
- # These settings are inherited by all agents
27
-
28
- # model "gemini-2.0-flash" # Default model for all agents
29
- # temperature 0.0 # Default temperature (0.0 = deterministic)
30
- # timeout 60 # Default timeout in seconds
31
-
32
- # ============================================
33
- # Shared Caching
34
- # ============================================
35
-
36
- # cache 1.hour # Enable caching for all agents (override per-agent if needed)
37
-
38
- # ============================================
39
- # Shared Reliability Settings
40
- # ============================================
41
- # Configure once here, all agents inherit these settings
42
-
43
- # Automatic retries for all agents
44
- # retries max: 2, backoff: :exponential, base: 0.4, max_delay: 3.0
45
-
46
- # Shared fallback models
47
- # fallback_models ["gpt-4o-mini", "claude-3-haiku"]
48
-
49
- # Total timeout across retries/fallbacks
50
- # total_timeout 30
51
-
52
- # Circuit breaker (per agent-model pair)
53
- # circuit_breaker errors: 5, within: 60, cooldown: 300
54
-
55
- # ============================================
56
- # Shared Helper Methods
57
- # ============================================
58
- # Define methods here that can be used by all agents
59
-
60
- # Example: Common system prompt prefix
61
- # def system_prompt_prefix
62
- # "You are an AI assistant for #{Rails.application.class.module_parent_name}."
63
- # end
64
-
65
- # Example: Common metadata
66
- # def metadata
67
- # { app_version: Rails.application.config.version }
23
+ # Shared settings inherited by all agents.
24
+ # Override per-agent as needed.
25
+
26
+ # model "gpt-4o" # Override the configured default model
27
+ # temperature 0.0 # 0.0 = deterministic, 2.0 = creative
28
+ # cache for: 1.hour # Enable caching for all agents
29
+
30
+ # Shared error handling (uncomment to apply to all agents)
31
+ # on_failure do
32
+ # retries times: 2, backoff: :exponential
33
+ # fallback to: ["gpt-4o-mini"]
34
+ # timeout 30
68
35
  # end
69
36
  end
@@ -6,29 +6,14 @@
6
6
 
7
7
  RubyLLM::Agents.configure do |config|
8
8
  # ============================================
9
- # LLM Provider API Keys
9
+ # Quick Start — set ONE API key to get going
10
10
  # ============================================
11
- # Configure at least one provider. Set these in your environment
12
- # or replace ENV[] calls with your keys directly.
11
+ # Uncomment one line below, then run: rails ruby_llm_agents:doctor
13
12
 
14
13
  # config.openai_api_key = ENV["OPENAI_API_KEY"]
15
14
  # config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
16
15
  # config.gemini_api_key = ENV["GOOGLE_API_KEY"]
17
16
 
18
- # Additional providers:
19
- # config.deepseek_api_key = ENV["DEEPSEEK_API_KEY"]
20
- # config.openrouter_api_key = ENV["OPENROUTER_API_KEY"]
21
- # config.mistral_api_key = ENV["MISTRAL_API_KEY"]
22
- # config.xai_api_key = ENV["XAI_API_KEY"]
23
-
24
- # Custom endpoints (e.g., Azure OpenAI, local Ollama):
25
- # config.openai_api_base = "https://your-resource.openai.azure.com"
26
- # config.ollama_api_base = "http://localhost:11434"
27
-
28
- # Connection settings:
29
- # config.request_timeout = 120
30
- # config.max_retries = 3
31
-
32
17
  # ============================================
33
18
  # Model Defaults
34
19
  # ============================================
@@ -46,6 +31,23 @@ RubyLLM::Agents.configure do |config|
46
31
  # When enabled, agents stream responses and track time-to-first-token
47
32
  # config.default_streaming = false
48
33
 
34
+ # ============================================
35
+ # Additional Providers (uncomment as needed)
36
+ # ============================================
37
+
38
+ # config.deepseek_api_key = ENV["DEEPSEEK_API_KEY"]
39
+ # config.openrouter_api_key = ENV["OPENROUTER_API_KEY"]
40
+ # config.mistral_api_key = ENV["MISTRAL_API_KEY"]
41
+ # config.xai_api_key = ENV["XAI_API_KEY"]
42
+
43
+ # Custom endpoints (e.g., Azure OpenAI, local Ollama):
44
+ # config.openai_api_base = "https://your-resource.openai.azure.com"
45
+ # config.ollama_api_base = "http://localhost:11434"
46
+
47
+ # Connection settings:
48
+ # config.request_timeout = 120
49
+ # config.max_retries = 3
50
+
49
51
  # ============================================
50
52
  # Caching
51
53
  # ============================================
@@ -332,6 +332,14 @@ module RubyLLM
332
332
  # @param temperature [Float] Override the class-level temperature
333
333
  # @param options [Hash] Agent parameters defined via the param DSL
334
334
  def initialize(model: self.class.model, temperature: self.class.temperature, **options)
335
+ # Merge tracker defaults (shared options like tenant) — explicit opts win
336
+ tracker = Thread.current[:ruby_llm_agents_tracker]
337
+ if tracker
338
+ options = tracker.defaults.merge(options)
339
+ @_track_request_id = tracker.request_id
340
+ @_track_tags = tracker.tags
341
+ end
342
+
335
343
  @ask_message = options.delete(:_ask_message)
336
344
  @parent_execution_id = options.delete(:_parent_execution_id)
337
345
  @root_execution_id = options.delete(:_root_execution_id)
@@ -506,6 +514,7 @@ module RubyLLM
506
514
  stream_block: (block if streaming_enabled?),
507
515
  parent_execution_id: @parent_execution_id,
508
516
  root_execution_id: @root_execution_id,
517
+ debug: @options[:debug],
509
518
  options: execution_options
510
519
  )
511
520
  end
@@ -721,6 +730,10 @@ module RubyLLM
721
730
  capture_response(response, context)
722
731
  result = build_result(process_response(response), response, context)
723
732
  context.output = result
733
+ rescue RubyLLM::UnauthorizedError, RubyLLM::ForbiddenError => e
734
+ raise_with_setup_hint(e, context)
735
+ rescue RubyLLM::ModelNotFoundError => e
736
+ raise_with_model_hint(e, context)
724
737
  ensure
725
738
  Thread.current[:ruby_llm_agents_caller_context] = previous_context
726
739
  end
@@ -733,12 +746,16 @@ module RubyLLM
733
746
  effective_model = context&.model || model
734
747
  chat_opts = {model: effective_model}
735
748
 
736
- # Pass scoped RubyLLM context for thread-safe per-tenant API keys
749
+ # Use scoped RubyLLM::Context for thread-safe per-tenant API keys.
750
+ # RubyLLM::Context#chat creates a Chat with the scoped config,
751
+ # so we call .chat on the context instead of RubyLLM.chat.
737
752
  llm_ctx = context&.llm
738
- chat_opts[:context] = llm_ctx if llm_ctx.is_a?(RubyLLM::Context)
739
-
740
- client = RubyLLM.chat(**chat_opts)
741
- .with_temperature(temperature)
753
+ client = if llm_ctx.is_a?(RubyLLM::Context)
754
+ llm_ctx.chat(**chat_opts)
755
+ else
756
+ RubyLLM.chat(**chat_opts)
757
+ end
758
+ client = client.with_temperature(temperature)
742
759
 
743
760
  client = client.with_instructions(system_prompt) if system_prompt
744
761
  client = client.with_schema(schema) if schema
@@ -889,8 +906,9 @@ module RubyLLM
889
906
  # @param context [Pipeline::Context] The context
890
907
  # @return [Result] The result object
891
908
  def build_result(content, response, context)
892
- Result.new(
909
+ result_opts = {
893
910
  content: content,
911
+ agent_class_name: self.class.name,
894
912
  input_tokens: context.input_tokens,
895
913
  output_tokens: context.output_tokens,
896
914
  input_cost: context.input_cost,
@@ -907,7 +925,12 @@ module RubyLLM
907
925
  streaming: streaming_enabled?,
908
926
  attempts_count: context.attempts_made || 1,
909
927
  execution_id: context.execution_id
910
- )
928
+ }
929
+
930
+ # Attach pipeline trace when debug mode is enabled
931
+ result_opts[:trace] = context.trace if context.trace_enabled? && context.trace.any?
932
+
933
+ Result.new(**result_opts)
911
934
  end
912
935
 
913
936
  # Extracts thinking data from a response for inclusion in Result
@@ -1077,6 +1100,44 @@ module RubyLLM
1077
1100
  tool_call[key] || tool_call[key.to_s]
1078
1101
  end
1079
1102
  end
1103
+
1104
+ # Re-raises auth errors with actionable setup guidance
1105
+ def raise_with_setup_hint(error, context)
1106
+ effective_model = context&.model || model
1107
+ provider = detect_provider(effective_model)
1108
+
1109
+ hint = "#{self.class.name} failed: #{error.message}\n\n" \
1110
+ "The API key for #{provider || "your provider"} is missing or invalid.\n" \
1111
+ "Fix: Set the key in config/initializers/ruby_llm_agents.rb\n" \
1112
+ " or run: rails ruby_llm_agents:doctor"
1113
+
1114
+ raise RubyLLM::Agents::ConfigurationError, hint
1115
+ end
1116
+
1117
+ # Re-raises model errors with actionable guidance
1118
+ def raise_with_model_hint(error, context)
1119
+ effective_model = context&.model || model
1120
+
1121
+ hint = "#{self.class.name} failed: #{error.message}\n\n" \
1122
+ "Model '#{effective_model}' was not found.\n" \
1123
+ "Fix: Check the model name or set a default in your initializer:\n" \
1124
+ " config.default_model = \"gpt-4o\""
1125
+
1126
+ raise RubyLLM::Agents::ConfigurationError, hint
1127
+ end
1128
+
1129
+ # Best-effort provider detection from model name
1130
+ def detect_provider(model_id)
1131
+ return nil unless model_id
1132
+
1133
+ case model_id.to_s
1134
+ when /gpt|o[1-9]|dall-e|whisper|tts/i then "OpenAI"
1135
+ when /claude/i then "Anthropic"
1136
+ when /gemini|gemma/i then "Google (Gemini)"
1137
+ when /deepseek/i then "DeepSeek"
1138
+ when /mistral|mixtral/i then "Mistral"
1139
+ end
1140
+ end
1080
1141
  end
1081
1142
  end
1082
1143
  end
@@ -87,6 +87,10 @@ module RubyLLM
87
87
  run_callbacks(:after, context, response)
88
88
 
89
89
  context.output = build_result(processed_content, response, context)
90
+ rescue RubyLLM::UnauthorizedError, RubyLLM::ForbiddenError => e
91
+ raise_with_setup_hint(e, context)
92
+ rescue RubyLLM::ModelNotFoundError => e
93
+ raise_with_model_hint(e, context)
90
94
  end
91
95
 
92
96
  # Returns the resolved tenant ID for tracking
@@ -819,6 +819,16 @@ module RubyLLM
819
819
  tenant_resolver&.call
820
820
  end
821
821
 
822
+ # Returns a concise string representation for debugging
823
+ #
824
+ # @return [String] Summary of key configuration values
825
+ def inspect
826
+ "#<#{self.class} model=#{default_model.inspect} temperature=#{default_temperature} " \
827
+ "timeout=#{default_timeout} streaming=#{default_streaming} " \
828
+ "multi_tenancy=#{multi_tenancy_enabled} async_logging=#{async_logging} " \
829
+ "track_executions=#{track_executions}>"
830
+ end
831
+
822
832
  # Returns whether the async gem is available
823
833
  #
824
834
  # @return [Boolean] true if async gem is loaded
@@ -4,6 +4,6 @@ module RubyLLM
4
4
  module Agents
5
5
  # Current version of the RubyLLM::Agents gem
6
6
  # @return [String] Semantic version string
7
- VERSION = "3.8.0"
7
+ VERSION = "3.9.0"
8
8
  end
9
9
  end