ruby_llm-agents 3.7.2 → 3.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (68)
  1. checksums.yaml +4 -4
  2. data/README.md +30 -10
  3. data/app/controllers/ruby_llm/agents/agents_controller.rb +14 -141
  4. data/app/controllers/ruby_llm/agents/dashboard_controller.rb +12 -166
  5. data/app/controllers/ruby_llm/agents/executions_controller.rb +1 -1
  6. data/app/controllers/ruby_llm/agents/requests_controller.rb +117 -0
  7. data/app/helpers/ruby_llm/agents/application_helper.rb +38 -0
  8. data/app/models/ruby_llm/agents/execution/analytics.rb +302 -103
  9. data/app/models/ruby_llm/agents/execution.rb +76 -54
  10. data/app/models/ruby_llm/agents/execution_detail.rb +2 -0
  11. data/app/models/ruby_llm/agents/tenant.rb +39 -0
  12. data/app/services/ruby_llm/agents/agent_registry.rb +98 -0
  13. data/app/views/layouts/ruby_llm/agents/application.html.erb +4 -2
  14. data/app/views/ruby_llm/agents/executions/_list.html.erb +3 -17
  15. data/app/views/ruby_llm/agents/requests/index.html.erb +153 -0
  16. data/app/views/ruby_llm/agents/requests/show.html.erb +136 -0
  17. data/config/routes.rb +2 -0
  18. data/lib/generators/ruby_llm_agents/agent_generator.rb +2 -2
  19. data/lib/generators/ruby_llm_agents/demo_generator.rb +102 -0
  20. data/lib/generators/ruby_llm_agents/doctor_generator.rb +196 -0
  21. data/lib/generators/ruby_llm_agents/install_generator.rb +7 -19
  22. data/lib/generators/ruby_llm_agents/templates/add_dashboard_performance_indexes_migration.rb.tt +23 -0
  23. data/lib/generators/ruby_llm_agents/templates/agent.rb.tt +27 -80
  24. data/lib/generators/ruby_llm_agents/templates/application_agent.rb.tt +18 -51
  25. data/lib/generators/ruby_llm_agents/templates/initializer.rb.tt +19 -17
  26. data/lib/generators/ruby_llm_agents/templates/migration.rb.tt +3 -0
  27. data/lib/generators/ruby_llm_agents/upgrade_generator.rb +25 -0
  28. data/lib/ruby_llm/agents/base_agent.rb +71 -4
  29. data/lib/ruby_llm/agents/core/base.rb +4 -0
  30. data/lib/ruby_llm/agents/core/configuration.rb +11 -0
  31. data/lib/ruby_llm/agents/core/instrumentation.rb +15 -19
  32. data/lib/ruby_llm/agents/core/version.rb +1 -1
  33. data/lib/ruby_llm/agents/infrastructure/alert_manager.rb +4 -4
  34. data/lib/ruby_llm/agents/infrastructure/budget_tracker.rb +19 -11
  35. data/lib/ruby_llm/agents/pipeline/builder.rb +8 -4
  36. data/lib/ruby_llm/agents/pipeline/context.rb +69 -1
  37. data/lib/ruby_llm/agents/pipeline/middleware/base.rb +58 -4
  38. data/lib/ruby_llm/agents/pipeline/middleware/budget.rb +21 -17
  39. data/lib/ruby_llm/agents/pipeline/middleware/cache.rb +40 -26
  40. data/lib/ruby_llm/agents/pipeline/middleware/instrumentation.rb +126 -120
  41. data/lib/ruby_llm/agents/pipeline/middleware/reliability.rb +13 -11
  42. data/lib/ruby_llm/agents/pipeline/middleware/tenant.rb +29 -31
  43. data/lib/ruby_llm/agents/providers/inception/capabilities.rb +107 -0
  44. data/lib/ruby_llm/agents/providers/inception/chat.rb +17 -0
  45. data/lib/ruby_llm/agents/providers/inception/configuration.rb +9 -0
  46. data/lib/ruby_llm/agents/providers/inception/models.rb +38 -0
  47. data/lib/ruby_llm/agents/providers/inception/registry.rb +45 -0
  48. data/lib/ruby_llm/agents/providers/inception.rb +50 -0
  49. data/lib/ruby_llm/agents/rails/engine.rb +11 -0
  50. data/lib/ruby_llm/agents/results/background_removal_result.rb +7 -1
  51. data/lib/ruby_llm/agents/results/base.rb +28 -4
  52. data/lib/ruby_llm/agents/results/embedding_result.rb +4 -0
  53. data/lib/ruby_llm/agents/results/image_analysis_result.rb +11 -3
  54. data/lib/ruby_llm/agents/results/image_edit_result.rb +7 -1
  55. data/lib/ruby_llm/agents/results/image_generation_result.rb +7 -1
  56. data/lib/ruby_llm/agents/results/image_pipeline_result.rb +7 -1
  57. data/lib/ruby_llm/agents/results/image_transform_result.rb +7 -1
  58. data/lib/ruby_llm/agents/results/image_upscale_result.rb +7 -1
  59. data/lib/ruby_llm/agents/results/image_variation_result.rb +7 -1
  60. data/lib/ruby_llm/agents/results/speech_result.rb +6 -0
  61. data/lib/ruby_llm/agents/results/trackable.rb +25 -0
  62. data/lib/ruby_llm/agents/results/transcription_result.rb +6 -0
  63. data/lib/ruby_llm/agents/text/embedder.rb +8 -1
  64. data/lib/ruby_llm/agents/track_report.rb +127 -0
  65. data/lib/ruby_llm/agents/tracker.rb +32 -0
  66. data/lib/ruby_llm/agents.rb +212 -0
  67. data/lib/tasks/ruby_llm_agents.rake +6 -0
  68. metadata +17 -2
@@ -2,15 +2,15 @@
2
2
 
3
3
  # ApplicationAgent - Base class for all agents in this application
4
4
  #
5
- # All agents inherit from this class. Configure shared settings here
6
- # that apply to all agents, or override them per-agent as needed.
5
+ # All agents inherit from this class. Configure shared settings here.
7
6
  #
8
- # Example:
7
+ # Quick reference:
9
8
  # class MyAgent < ApplicationAgent
10
- # param :query, required: true
9
+ # system "You are a helpful assistant."
10
+ # prompt "Answer this: {query}" # {query} becomes a required param
11
11
  #
12
- # def user_prompt
13
- # query
12
+ # returns do # Optional: structured output
13
+ # string :answer, description: "The answer"
14
14
  # end
15
15
  # end
16
16
  #
@@ -20,50 +20,17 @@
20
20
  # MyAgent.call(query: "hello", skip_cache: true) # Bypass cache
21
21
  #
22
22
  class ApplicationAgent < RubyLLM::Agents::Base
23
- # ============================================
24
- # Shared Model Configuration
25
- # ============================================
26
- # These settings are inherited by all agents
27
-
28
- # model "gemini-2.0-flash" # Default model for all agents
29
- # temperature 0.0 # Default temperature (0.0 = deterministic)
30
- # timeout 60 # Default timeout in seconds
31
-
32
- # ============================================
33
- # Shared Caching
34
- # ============================================
35
-
36
- # cache 1.hour # Enable caching for all agents (override per-agent if needed)
37
-
38
- # ============================================
39
- # Shared Reliability Settings
40
- # ============================================
41
- # Configure once here, all agents inherit these settings
42
-
43
- # Automatic retries for all agents
44
- # retries max: 2, backoff: :exponential, base: 0.4, max_delay: 3.0
45
-
46
- # Shared fallback models
47
- # fallback_models ["gpt-4o-mini", "claude-3-haiku"]
48
-
49
- # Total timeout across retries/fallbacks
50
- # total_timeout 30
51
-
52
- # Circuit breaker (per agent-model pair)
53
- # circuit_breaker errors: 5, within: 60, cooldown: 300
54
-
55
- # ============================================
56
- # Shared Helper Methods
57
- # ============================================
58
- # Define methods here that can be used by all agents
59
-
60
- # Example: Common system prompt prefix
61
- # def system_prompt_prefix
62
- # "You are an AI assistant for #{Rails.application.class.module_parent_name}."
63
- # end
64
-
65
- # Example: Common metadata
66
- # def metadata
67
- # { app_version: Rails.application.config.version }
23
+ # Shared settings inherited by all agents.
24
+ # Override per-agent as needed.
25
+
26
+ # model "gpt-4o" # Override the configured default model
27
+ # temperature 0.0 # 0.0 = deterministic, 2.0 = creative
28
+ # cache for: 1.hour # Enable caching for all agents
29
+
30
+ # Shared error handling (uncomment to apply to all agents)
31
+ # on_failure do
32
+ # retries times: 2, backoff: :exponential
33
+ # fallback to: ["gpt-4o-mini"]
34
+ # timeout 30
68
35
  # end
69
36
  end
@@ -6,29 +6,14 @@
6
6
 
7
7
  RubyLLM::Agents.configure do |config|
8
8
  # ============================================
9
- # LLM Provider API Keys
9
+ # Quick Start — set ONE API key to get going
10
10
  # ============================================
11
- # Configure at least one provider. Set these in your environment
12
- # or replace ENV[] calls with your keys directly.
11
+ # Uncomment one line below, then run: rails ruby_llm_agents:doctor
13
12
 
14
13
  # config.openai_api_key = ENV["OPENAI_API_KEY"]
15
14
  # config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
16
15
  # config.gemini_api_key = ENV["GOOGLE_API_KEY"]
17
16
 
18
- # Additional providers:
19
- # config.deepseek_api_key = ENV["DEEPSEEK_API_KEY"]
20
- # config.openrouter_api_key = ENV["OPENROUTER_API_KEY"]
21
- # config.mistral_api_key = ENV["MISTRAL_API_KEY"]
22
- # config.xai_api_key = ENV["XAI_API_KEY"]
23
-
24
- # Custom endpoints (e.g., Azure OpenAI, local Ollama):
25
- # config.openai_api_base = "https://your-resource.openai.azure.com"
26
- # config.ollama_api_base = "http://localhost:11434"
27
-
28
- # Connection settings:
29
- # config.request_timeout = 120
30
- # config.max_retries = 3
31
-
32
17
  # ============================================
33
18
  # Model Defaults
34
19
  # ============================================
@@ -46,6 +31,23 @@ RubyLLM::Agents.configure do |config|
46
31
  # When enabled, agents stream responses and track time-to-first-token
47
32
  # config.default_streaming = false
48
33
 
34
+ # ============================================
35
+ # Additional Providers (uncomment as needed)
36
+ # ============================================
37
+
38
+ # config.deepseek_api_key = ENV["DEEPSEEK_API_KEY"]
39
+ # config.openrouter_api_key = ENV["OPENROUTER_API_KEY"]
40
+ # config.mistral_api_key = ENV["MISTRAL_API_KEY"]
41
+ # config.xai_api_key = ENV["XAI_API_KEY"]
42
+
43
+ # Custom endpoints (e.g., Azure OpenAI, local Ollama):
44
+ # config.openai_api_base = "https://your-resource.openai.azure.com"
45
+ # config.ollama_api_base = "http://localhost:11434"
46
+
47
+ # Connection settings:
48
+ # config.request_timeout = 120
49
+ # config.max_retries = 3
50
+
49
51
  # ============================================
50
52
  # Caching
51
53
  # ============================================
@@ -77,6 +77,9 @@ class CreateRubyLLMAgentsExecutions < ActiveRecord::Migration<%= migration_versi
77
77
  add_index :ruby_llm_agents_executions, :request_id
78
78
  add_index :ruby_llm_agents_executions, :parent_execution_id
79
79
  add_index :ruby_llm_agents_executions, :root_execution_id
80
+ add_index :ruby_llm_agents_executions, [:status, :created_at]
81
+ add_index :ruby_llm_agents_executions, [:model_id, :status]
82
+ add_index :ruby_llm_agents_executions, [:cache_hit, :created_at]
80
83
 
81
84
  # Foreign keys for execution hierarchy
82
85
  add_foreign_key :ruby_llm_agents_executions, :ruby_llm_agents_executions,
@@ -98,6 +98,25 @@ module RubyLlmAgents
98
98
  )
99
99
  end
100
100
 
101
+ # Add dashboard performance indexes
102
+ def create_add_dashboard_performance_indexes_migration
103
+ unless table_exists?(:ruby_llm_agents_executions)
104
+ say_status :skip, "executions table does not exist yet", :yellow
105
+ return
106
+ end
107
+
108
+ if index_exists?(:ruby_llm_agents_executions, [:status, :created_at])
109
+ say_status :skip, "dashboard performance indexes already exist", :yellow
110
+ return
111
+ end
112
+
113
+ say_status :upgrade, "Adding dashboard performance indexes", :blue
114
+ migration_template(
115
+ "add_dashboard_performance_indexes_migration.rb.tt",
116
+ File.join(db_migrate_path, "add_dashboard_performance_indexes.rb")
117
+ )
118
+ end
119
+
101
120
  def suggest_config_consolidation
102
121
  ruby_llm_initializer = File.join(destination_root, "config/initializers/ruby_llm.rb")
103
122
  agents_initializer = File.join(destination_root, "config/initializers/ruby_llm_agents.rb")
@@ -192,5 +211,11 @@ module RubyLlmAgents
192
211
  rescue
193
212
  false
194
213
  end
214
+
215
+ def index_exists?(table, columns)
216
+ ActiveRecord::Base.connection.index_exists?(table, columns)
217
+ rescue
218
+ false
219
+ end
195
220
  end
196
221
  end
@@ -332,6 +332,14 @@ module RubyLLM
332
332
  # @param temperature [Float] Override the class-level temperature
333
333
  # @param options [Hash] Agent parameters defined via the param DSL
334
334
  def initialize(model: self.class.model, temperature: self.class.temperature, **options)
335
+ # Merge tracker defaults (shared options like tenant) — explicit opts win
336
+ tracker = Thread.current[:ruby_llm_agents_tracker]
337
+ if tracker
338
+ options = tracker.defaults.merge(options)
339
+ @_track_request_id = tracker.request_id
340
+ @_track_tags = tracker.tags
341
+ end
342
+
335
343
  @ask_message = options.delete(:_ask_message)
336
344
  @parent_execution_id = options.delete(:_parent_execution_id)
337
345
  @root_execution_id = options.delete(:_root_execution_id)
@@ -506,6 +514,7 @@ module RubyLLM
506
514
  stream_block: (block if streaming_enabled?),
507
515
  parent_execution_id: @parent_execution_id,
508
516
  root_execution_id: @root_execution_id,
517
+ debug: @options[:debug],
509
518
  options: execution_options
510
519
  )
511
520
  end
@@ -721,6 +730,10 @@ module RubyLLM
721
730
  capture_response(response, context)
722
731
  result = build_result(process_response(response), response, context)
723
732
  context.output = result
733
+ rescue RubyLLM::UnauthorizedError, RubyLLM::ForbiddenError => e
734
+ raise_with_setup_hint(e, context)
735
+ rescue RubyLLM::ModelNotFoundError => e
736
+ raise_with_model_hint(e, context)
724
737
  ensure
725
738
  Thread.current[:ruby_llm_agents_caller_context] = previous_context
726
739
  end
@@ -731,8 +744,18 @@ module RubyLLM
731
744
  # @return [RubyLLM::Chat] Configured chat client
732
745
  def build_client(context = nil)
733
746
  effective_model = context&.model || model
734
- client = RubyLLM.chat(model: effective_model)
735
- .with_temperature(temperature)
747
+ chat_opts = {model: effective_model}
748
+
749
+ # Use scoped RubyLLM::Context for thread-safe per-tenant API keys.
750
+ # RubyLLM::Context#chat creates a Chat with the scoped config,
751
+ # so we call .chat on the context instead of RubyLLM.chat.
752
+ llm_ctx = context&.llm
753
+ client = if llm_ctx.is_a?(RubyLLM::Context)
754
+ llm_ctx.chat(**chat_opts)
755
+ else
756
+ RubyLLM.chat(**chat_opts)
757
+ end
758
+ client = client.with_temperature(temperature)
736
759
 
737
760
  client = client.with_instructions(system_prompt) if system_prompt
738
761
  client = client.with_schema(schema) if schema
@@ -883,8 +906,9 @@ module RubyLLM
883
906
  # @param context [Pipeline::Context] The context
884
907
  # @return [Result] The result object
885
908
  def build_result(content, response, context)
886
- Result.new(
909
+ result_opts = {
887
910
  content: content,
911
+ agent_class_name: self.class.name,
888
912
  input_tokens: context.input_tokens,
889
913
  output_tokens: context.output_tokens,
890
914
  input_cost: context.input_cost,
@@ -901,7 +925,12 @@ module RubyLLM
901
925
  streaming: streaming_enabled?,
902
926
  attempts_count: context.attempts_made || 1,
903
927
  execution_id: context.execution_id
904
- )
928
+ }
929
+
930
+ # Attach pipeline trace when debug mode is enabled
931
+ result_opts[:trace] = context.trace if context.trace_enabled? && context.trace.any?
932
+
933
+ Result.new(**result_opts)
905
934
  end
906
935
 
907
936
  # Extracts thinking data from a response for inclusion in Result
@@ -1071,6 +1100,44 @@ module RubyLLM
1071
1100
  tool_call[key] || tool_call[key.to_s]
1072
1101
  end
1073
1102
  end
1103
+
1104
+ # Re-raises auth errors with actionable setup guidance
1105
+ def raise_with_setup_hint(error, context)
1106
+ effective_model = context&.model || model
1107
+ provider = detect_provider(effective_model)
1108
+
1109
+ hint = "#{self.class.name} failed: #{error.message}\n\n" \
1110
+ "The API key for #{provider || "your provider"} is missing or invalid.\n" \
1111
+ "Fix: Set the key in config/initializers/ruby_llm_agents.rb\n" \
1112
+ " or run: rails ruby_llm_agents:doctor"
1113
+
1114
+ raise RubyLLM::Agents::ConfigurationError, hint
1115
+ end
1116
+
1117
+ # Re-raises model errors with actionable guidance
1118
+ def raise_with_model_hint(error, context)
1119
+ effective_model = context&.model || model
1120
+
1121
+ hint = "#{self.class.name} failed: #{error.message}\n\n" \
1122
+ "Model '#{effective_model}' was not found.\n" \
1123
+ "Fix: Check the model name or set a default in your initializer:\n" \
1124
+ " config.default_model = \"gpt-4o\""
1125
+
1126
+ raise RubyLLM::Agents::ConfigurationError, hint
1127
+ end
1128
+
1129
+ # Best-effort provider detection from model name
1130
+ def detect_provider(model_id)
1131
+ return nil unless model_id
1132
+
1133
+ case model_id.to_s
1134
+ when /gpt|o[1-9]|dall-e|whisper|tts/i then "OpenAI"
1135
+ when /claude/i then "Anthropic"
1136
+ when /gemini|gemma/i then "Google (Gemini)"
1137
+ when /deepseek/i then "DeepSeek"
1138
+ when /mistral|mixtral/i then "Mistral"
1139
+ end
1140
+ end
1074
1141
  end
1075
1142
  end
1076
1143
  end
@@ -87,6 +87,10 @@ module RubyLLM
87
87
  run_callbacks(:after, context, response)
88
88
 
89
89
  context.output = build_result(processed_content, response, context)
90
+ rescue RubyLLM::UnauthorizedError, RubyLLM::ForbiddenError => e
91
+ raise_with_setup_hint(e, context)
92
+ rescue RubyLLM::ModelNotFoundError => e
93
+ raise_with_model_hint(e, context)
90
94
  end
91
95
 
92
96
  # Returns the resolved tenant ID for tracking
@@ -349,6 +349,7 @@ module RubyLLM
349
349
  perplexity_api_key
350
350
  xai_api_key
351
351
  gpustack_api_key
352
+ inception_api_key
352
353
  openai_api_base
353
354
  openai_organization_id
354
355
  openai_project_id
@@ -818,6 +819,16 @@ module RubyLLM
818
819
  tenant_resolver&.call
819
820
  end
820
821
 
822
+ # Returns a concise string representation for debugging
823
+ #
824
+ # @return [String] Summary of key configuration values
825
+ def inspect
826
+ "#<#{self.class} model=#{default_model.inspect} temperature=#{default_temperature} " \
827
+ "timeout=#{default_timeout} streaming=#{default_streaming} " \
828
+ "multi_tenancy=#{multi_tenancy_enabled} async_logging=#{async_logging} " \
829
+ "track_executions=#{track_executions}>"
830
+ end
831
+
821
832
  # Returns whether the async gem is available
822
833
  #
823
834
  # @return [Boolean] true if async gem is loaded
@@ -2,31 +2,27 @@
2
2
 
3
3
  module RubyLLM
4
4
  module Agents
5
- # Instrumentation concern for tracking agent executions
5
+ # @deprecated This module is deprecated and will be removed in a future version.
6
+ # All agents now use {Pipeline::Middleware::Instrumentation} automatically
7
+ # via the middleware pipeline. This module is no longer included in any
8
+ # production class. It remains only for backward compatibility with code
9
+ # that explicitly includes it.
6
10
  #
7
- # Provides comprehensive execution tracking including:
8
- # - Timing metrics (started_at, completed_at, duration_ms)
9
- # - Token usage tracking (input, output, cached)
10
- # - Cost calculation via RubyLLM pricing data
11
- # - Error and timeout handling with status tracking
12
- # - Safe parameter sanitization for logging
13
- #
14
- # Included automatically in {RubyLLM::Agents::Base}.
15
- #
16
- # @example Adding custom metadata to executions
17
- # class MyAgent < ApplicationAgent
18
- # def metadata
19
- # { user_id: Current.user&.id, request_id: request.uuid }
20
- # end
21
- # end
22
- #
23
- # @see RubyLLM::Agents::Execution
24
- # @see RubyLLM::Agents::ExecutionLoggerJob
11
+ # @see Pipeline::Middleware::Instrumentation
25
12
  # @api private
26
13
  module Instrumentation
27
14
  extend ActiveSupport::Concern
28
15
 
29
16
  included do
17
+ if defined?(RubyLLM::Agents::Deprecations)
18
+ RubyLLM::Agents::Deprecations.warn(
19
+ "RubyLLM::Agents::Instrumentation is deprecated. " \
20
+ "All agents now use Pipeline::Middleware::Instrumentation automatically. " \
21
+ "Remove `include RubyLLM::Agents::Instrumentation` from #{name || "your class"}.",
22
+ caller
23
+ )
24
+ end
25
+
30
26
  # @!attribute [rw] execution_id
31
27
  # The ID of the current execution record
32
28
  # @return [Integer, nil]
@@ -4,6 +4,6 @@ module RubyLLM
4
4
  module Agents
5
5
  # Current version of the RubyLLM::Agents gem
6
6
  # @return [String] Semantic version string
7
- VERSION = "3.7.2"
7
+ VERSION = "3.9.0"
8
8
  end
9
9
  end
@@ -83,8 +83,8 @@ module RubyLLM
83
83
  # @return [void]
84
84
  def emit_notification(event, payload)
85
85
  ActiveSupport::Notifications.instrument("ruby_llm_agents.alert.#{event}", payload)
86
- rescue
87
- # Ignore notification failures
86
+ rescue => e
87
+ Rails.logger.debug("[RubyLLM::Agents::AlertManager] Notification failed: #{e.message}") if defined?(Rails) && Rails.logger
88
88
  end
89
89
 
90
90
  # Stores the alert in cache for dashboard display
@@ -106,8 +106,8 @@ module RubyLLM
106
106
  alerts = alerts.first(50)
107
107
 
108
108
  cache.write(key, alerts, expires_in: 24.hours)
109
- rescue
110
- # Ignore cache failures
109
+ rescue => e
110
+ Rails.logger.debug("[RubyLLM::Agents::AlertManager] Cache store failed: #{e.message}") if defined?(Rails) && Rails.logger
111
111
  end
112
112
 
113
113
  # Formats a human-readable message for the event
@@ -44,7 +44,7 @@ module RubyLLM
44
44
  # @raise [Reliability::BudgetExceededError] If hard cap is exceeded
45
45
  # @return [void]
46
46
  def check_budget!(agent_type, tenant_id: nil, tenant_config: nil)
47
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
47
+ tenant_id = resolve_tid(tenant_id)
48
48
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id, runtime_config: tenant_config)
49
49
 
50
50
  return unless budget_config[:enabled]
@@ -61,7 +61,7 @@ module RubyLLM
61
61
  # @raise [Reliability::BudgetExceededError] If hard cap is exceeded
62
62
  # @return [void]
63
63
  def check_token_budget!(agent_type, tenant_id: nil, tenant_config: nil)
64
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
64
+ tenant_id = resolve_tid(tenant_id)
65
65
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id, runtime_config: tenant_config)
66
66
 
67
67
  return unless budget_config[:enabled]
@@ -80,7 +80,7 @@ module RubyLLM
80
80
  def record_spend!(agent_type, amount, tenant_id: nil, tenant_config: nil)
81
81
  return if amount.nil? || amount <= 0
82
82
 
83
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
83
+ tenant_id = resolve_tid(tenant_id)
84
84
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id, runtime_config: tenant_config)
85
85
 
86
86
  Budget::SpendRecorder.record_spend!(agent_type, amount, tenant_id: tenant_id, budget_config: budget_config)
@@ -96,7 +96,7 @@ module RubyLLM
96
96
  def record_tokens!(agent_type, tokens, tenant_id: nil, tenant_config: nil)
97
97
  return if tokens.nil? || tokens <= 0
98
98
 
99
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
99
+ tenant_id = resolve_tid(tenant_id)
100
100
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id, runtime_config: tenant_config)
101
101
 
102
102
  Budget::SpendRecorder.record_tokens!(agent_type, tokens, tenant_id: tenant_id, budget_config: budget_config)
@@ -110,7 +110,7 @@ module RubyLLM
110
110
  # @param tenant_id [String, nil] Optional tenant identifier (uses resolver if not provided)
111
111
  # @return [Float] Current spend in USD
112
112
  def current_spend(scope, period, agent_type: nil, tenant_id: nil)
113
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
113
+ tenant_id = resolve_tid(tenant_id)
114
114
  Budget::BudgetQuery.current_spend(scope, period, agent_type: agent_type, tenant_id: tenant_id)
115
115
  end
116
116
 
@@ -120,7 +120,7 @@ module RubyLLM
120
120
  # @param tenant_id [String, nil] Optional tenant identifier (uses resolver if not provided)
121
121
  # @return [Integer] Current token usage
122
122
  def current_tokens(period, tenant_id: nil)
123
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
123
+ tenant_id = resolve_tid(tenant_id)
124
124
  Budget::BudgetQuery.current_tokens(period, tenant_id: tenant_id)
125
125
  end
126
126
 
@@ -132,7 +132,7 @@ module RubyLLM
132
132
  # @param tenant_id [String, nil] Optional tenant identifier (uses resolver if not provided)
133
133
  # @return [Float, nil] Remaining budget in USD, or nil if no limit configured
134
134
  def remaining_budget(scope, period, agent_type: nil, tenant_id: nil)
135
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
135
+ tenant_id = resolve_tid(tenant_id)
136
136
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id)
137
137
 
138
138
  Budget::BudgetQuery.remaining_budget(scope, period, agent_type: agent_type, tenant_id: tenant_id, budget_config: budget_config)
@@ -144,7 +144,7 @@ module RubyLLM
144
144
  # @param tenant_id [String, nil] Optional tenant identifier (uses resolver if not provided)
145
145
  # @return [Integer, nil] Remaining token budget, or nil if no limit configured
146
146
  def remaining_token_budget(period, tenant_id: nil)
147
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
147
+ tenant_id = resolve_tid(tenant_id)
148
148
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id)
149
149
 
150
150
  Budget::BudgetQuery.remaining_token_budget(period, tenant_id: tenant_id, budget_config: budget_config)
@@ -156,7 +156,7 @@ module RubyLLM
156
156
  # @param tenant_id [String, nil] Optional tenant identifier (uses resolver if not provided)
157
157
  # @return [Hash] Budget status information
158
158
  def status(agent_type: nil, tenant_id: nil)
159
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
159
+ tenant_id = resolve_tid(tenant_id)
160
160
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id)
161
161
 
162
162
  Budget::BudgetQuery.status(agent_type: agent_type, tenant_id: tenant_id, budget_config: budget_config)
@@ -167,7 +167,7 @@ module RubyLLM
167
167
  # @param tenant_id [String, nil] Optional tenant identifier (uses resolver if not provided)
168
168
  # @return [Hash, nil] Forecast information
169
169
  def calculate_forecast(tenant_id: nil)
170
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
170
+ tenant_id = resolve_tid(tenant_id)
171
171
  budget_config = Budget::ConfigResolver.resolve_budget_config(tenant_id)
172
172
 
173
173
  Budget::Forecaster.calculate_forecast(tenant_id: tenant_id, budget_config: budget_config)
@@ -178,7 +178,7 @@ module RubyLLM
178
178
  # @param tenant_id [String, nil] Optional tenant identifier to reset only that tenant's counters
179
179
  # @return [void]
180
180
  def reset!(tenant_id: nil)
181
- tenant_id = Budget::ConfigResolver.resolve_tenant_id(tenant_id)
181
+ tenant_id = resolve_tid(tenant_id)
182
182
  tenant_part = Budget::SpendRecorder.tenant_key_part(tenant_id)
183
183
  today = Budget::SpendRecorder.date_key_part(:daily)
184
184
  month = Budget::SpendRecorder.date_key_part(:monthly)
@@ -192,6 +192,14 @@ module RubyLLM
192
192
 
193
193
  private
194
194
 
195
+ # Resolves tenant ID, falling back to the configured resolver
196
+ #
197
+ # @param tenant_id [String, nil] Explicit tenant ID or nil
198
+ # @return [String, nil] Resolved tenant ID
199
+ def resolve_tid(tenant_id)
200
+ Budget::ConfigResolver.resolve_tenant_id(tenant_id)
201
+ end
202
+
195
203
  # Checks budget limits and raises error if exceeded
196
204
  #
197
205
  # @param agent_type [String] The agent class name
@@ -170,7 +170,8 @@ module RubyLLM
170
170
  # @return [Array<Hash>] Middleware entries
171
171
  def global_middleware_entries
172
172
  RubyLLM::Agents.configuration.middleware_stack
173
- rescue
173
+ rescue => e
174
+ Rails.logger.debug("[RubyLLM::Agents::Pipeline] Failed to load global middleware: #{e.message}") if defined?(Rails) && Rails.logger
174
175
  []
175
176
  end
176
177
 
@@ -182,7 +183,8 @@ module RubyLLM
182
183
  return [] unless agent_class&.respond_to?(:agent_middleware)
183
184
 
184
185
  agent_class.agent_middleware
185
- rescue
186
+ rescue => e
187
+ Rails.logger.debug("[RubyLLM::Agents::Pipeline] Failed to load agent middleware: #{e.message}") if defined?(Rails) && Rails.logger
186
188
  []
187
189
  end
188
190
 
@@ -207,7 +209,8 @@ module RubyLLM
207
209
  # @return [Boolean]
208
210
  def budgets_enabled?
209
211
  RubyLLM::Agents.configuration.budgets_enabled?
210
- rescue
212
+ rescue => e
213
+ Rails.logger.debug("[RubyLLM::Agents::Pipeline] Failed to check budgets_enabled: #{e.message}") if defined?(Rails) && Rails.logger
211
214
  false
212
215
  end
213
216
 
@@ -248,7 +251,8 @@ module RubyLLM
248
251
 
249
252
  (retries.is_a?(Integer) && retries.positive?) ||
250
253
  (fallbacks.is_a?(Array) && fallbacks.any?)
251
- rescue
254
+ rescue => e
255
+ Rails.logger.debug("[RubyLLM::Agents::Pipeline] Failed to check reliability_enabled: #{e.message}") if defined?(Rails) && Rails.logger
252
256
  false
253
257
  end
254
258
  end