ruby-pi 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +31 -0
- data/LICENSE +21 -0
- data/README.md +415 -0
- data/lib/ruby_pi/agent/core.rb +175 -0
- data/lib/ruby_pi/agent/events.rb +120 -0
- data/lib/ruby_pi/agent/loop.rb +265 -0
- data/lib/ruby_pi/agent/result.rb +101 -0
- data/lib/ruby_pi/agent/state.rb +155 -0
- data/lib/ruby_pi/configuration.rb +80 -0
- data/lib/ruby_pi/context/compaction.rb +160 -0
- data/lib/ruby_pi/context/transform.rb +115 -0
- data/lib/ruby_pi/errors.rb +97 -0
- data/lib/ruby_pi/extensions/base.rb +96 -0
- data/lib/ruby_pi/llm/anthropic.rb +314 -0
- data/lib/ruby_pi/llm/base_provider.rb +220 -0
- data/lib/ruby_pi/llm/fallback.rb +96 -0
- data/lib/ruby_pi/llm/gemini.rb +260 -0
- data/lib/ruby_pi/llm/model.rb +82 -0
- data/lib/ruby_pi/llm/openai.rb +287 -0
- data/lib/ruby_pi/llm/response.rb +82 -0
- data/lib/ruby_pi/llm/stream_event.rb +91 -0
- data/lib/ruby_pi/llm/tool_call.rb +78 -0
- data/lib/ruby_pi/tools/definition.rb +149 -0
- data/lib/ruby_pi/tools/executor.rb +168 -0
- data/lib/ruby_pi/tools/registry.rb +120 -0
- data/lib/ruby_pi/tools/result.rb +83 -0
- data/lib/ruby_pi/tools/schema.rb +170 -0
- data/lib/ruby_pi/version.rb +11 -0
- data/lib/ruby_pi.rb +112 -0
- metadata +192 -0
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
# frozen_string_literal: true

# lib/ruby_pi/configuration.rb
#
# Global configuration for the RubyPi framework. Provides a centralized place
# to set API keys, retry behavior, timeouts, and default model preferences.
# Configure via RubyPi.configure { |c| c.gemini_api_key = "..." }.

module RubyPi
  # Holds all configurable settings for the RubyPi framework.
  #
  # @example Setting API keys and retry behavior
  #   RubyPi.configure do |config|
  #     config.gemini_api_key = ENV["GEMINI_API_KEY"]
  #     config.anthropic_api_key = ENV["ANTHROPIC_API_KEY"]
  #     config.openai_api_key = ENV["OPENAI_API_KEY"]
  #     config.max_retries = 5
  #     config.retry_base_delay = 2.0
  #   end
  class Configuration
    # Default value for every setting, keyed by accessor name. Both
    # #initialize and #reset! read from this table so the two can never
    # drift apart.
    DEFAULTS = {
      gemini_api_key: nil,
      anthropic_api_key: nil,
      openai_api_key: nil,
      max_retries: 3,
      retry_base_delay: 1.0,
      retry_max_delay: 30.0,
      request_timeout: 120,
      open_timeout: 10,
      default_gemini_model: "gemini-2.0-flash",
      default_anthropic_model: "claude-sonnet-4-20250514",
      default_openai_model: "gpt-4o",
      logger: nil
    }.freeze

    # Provider credentials.
    # @return [String, nil] API keys for Google Gemini, Anthropic Claude,
    #   and OpenAI respectively (default: nil)
    attr_accessor :gemini_api_key, :anthropic_api_key, :openai_api_key

    # Retry policy for transient errors: attempt count (Integer, default 3),
    # exponential-backoff base delay in seconds (Float, default 1.0), and the
    # cap in seconds applied to any single delay (Float, default 30.0).
    attr_accessor :max_retries, :retry_base_delay, :retry_max_delay

    # HTTP timeouts in seconds: full request (Integer, default 120) and
    # connection open (Integer, default 10).
    attr_accessor :request_timeout, :open_timeout

    # Default model name used when a provider is constructed without an
    # explicit model.
    attr_accessor :default_gemini_model, :default_anthropic_model, :default_openai_model

    # @return [Logger, nil] Logger instance for debug output (default: nil)
    attr_accessor :logger

    # Initializes a new Configuration with sensible defaults taken from
    # DEFAULTS.
    def initialize
      apply_defaults
    end

    # Resets all configuration options to their default values.
    #
    # @return [void]
    def reset!
      apply_defaults
      nil
    end

    private

    # Writes every entry of DEFAULTS into the matching instance variable.
    def apply_defaults
      DEFAULTS.each { |setting, value| instance_variable_set(:"@#{setting}", value) }
    end
  end
end
|
|
@@ -0,0 +1,160 @@
|
|
|
1
|
+
# frozen_string_literal: true

# lib/ruby_pi/context/compaction.rb
#
# RubyPi::Context::Compaction — Token estimation and context window management.
#
# When the conversation history grows too large for the model's context window,
# Compaction summarizes older messages while preserving the most recent ones.
# Token estimation uses a simple heuristic (~4 characters per token) to avoid
# external tokenizer dependencies. When compaction triggers, older messages are
# replaced with a single summary message generated by the LLM, and a :compaction
# event is emitted.

module RubyPi
  module Context
    # Manages context window size by summarizing older messages when the
    # estimated token count exceeds a configurable threshold. The most recent
    # N messages are always preserved to maintain conversational coherence.
    #
    # @example Configuring compaction
    #   compaction = RubyPi::Context::Compaction.new(
    #     max_tokens: 8000,
    #     summary_model: model,
    #     preserve_last_n: 4
    #   )
    #   compacted = compaction.compact(messages, system_prompt)
    class Compaction
      # Average characters per token — a rough heuristic that avoids the need
      # for provider-specific tokenizers. Errs on the conservative side.
      CHARS_PER_TOKEN = 4

      # Structural/role overhead charged per message, in characters
      # (roughly 10 tokens at CHARS_PER_TOKEN characters each).
      MESSAGE_OVERHEAD_CHARS = 40

      # System instructions sent to the summary model. Frozen at class level
      # so the string is allocated once.
      SUMMARIZER_INSTRUCTIONS = "You are a precise conversation summarizer. " \
        "Produce a concise summary that preserves all important facts, decisions, " \
        "tool results, and context. Do not add any information not present in " \
        "the conversation."

      # @return [Integer] the token threshold above which compaction triggers
      attr_reader :max_tokens

      # @return [RubyPi::LLM::BaseProvider] the model used to generate summaries
      attr_reader :summary_model

      # @return [Integer] number of recent messages always preserved
      attr_reader :preserve_last_n

      # @return [#emit, nil] optional event emitter for :compaction events
      attr_accessor :emitter

      # Creates a new Compaction instance.
      #
      # @param max_tokens [Integer] trigger compaction above this token
      #   estimate (default: 8000)
      # @param summary_model [RubyPi::LLM::BaseProvider] model for summarization
      # @param preserve_last_n [Integer] always keep the last N messages
      #   (default: 4)
      def initialize(max_tokens: 8000, summary_model:, preserve_last_n: 4)
        @max_tokens = max_tokens
        @summary_model = summary_model
        @preserve_last_n = preserve_last_n
        @emitter = nil
      end

      # Compacts the message history if the estimated token count exceeds
      # the threshold. Older messages are replaced by a single LLM-generated
      # summary; the most recent preserve_last_n messages are kept verbatim.
      #
      # @param messages [Array<Hash>] the current conversation history
      # @param system_prompt [String] the system prompt (included in estimate)
      # @return [Array<Hash>, nil] compacted messages, or nil when the
      #   history is under the threshold or nothing can be dropped
      def compact(messages, system_prompt)
        return nil unless estimate_tokens(system_prompt, messages) > @max_tokens

        keep = [@preserve_last_n, messages.size].min
        cutoff = messages.size - keep
        older = messages.take(cutoff)

        # Nothing left to summarize — compaction cannot shrink this history.
        return nil if older.empty?

        summary = summarize(older)

        # Notify observers (if any) that history was condensed.
        @emitter&.emit(:compaction, dropped_count: older.size, summary: summary)

        # New history: one summary message followed by the preserved tail.
        [
          { role: :system, content: "[Conversation Summary]\n#{summary}" },
          *messages.drop(cutoff)
        ]
      end

      # Estimates the total token count for a system prompt and message array
      # using the character-based heuristic, charging a fixed per-message
      # overhead for role/structure.
      #
      # @param system_prompt [String] the system prompt text
      # @param messages [Array<Hash>] conversation messages
      # @return [Integer] estimated token count
      def estimate_tokens(system_prompt, messages)
        chars = system_prompt.to_s.length +
                messages.sum { |msg| msg[:content].to_s.length + MESSAGE_OVERHEAD_CHARS }
        (chars.to_f / CHARS_PER_TOKEN).ceil
      end

      private

      # Asks the summary model for a condensed rendition of the given
      # messages. Returns an empty string when the model yields no content.
      #
      # @param messages [Array<Hash>] messages to summarize
      # @return [String] the generated summary text
      def summarize(messages)
        response = @summary_model.complete(
          messages: [
            { role: :system, content: SUMMARIZER_INSTRUCTIONS },
            { role: :user, content: build_summary_prompt(messages) }
          ],
          tools: [],
          stream: false
        )
        response.content || ""
      end

      # Renders the messages as a "Role: content" transcript and wraps it in
      # the summarization request.
      #
      # @param messages [Array<Hash>] messages to include in the transcript
      # @return [String] formatted prompt for summarization
      def build_summary_prompt(messages)
        transcript = messages
                     .map { |msg| "#{msg[:role].to_s.capitalize}: #{msg[:content]}" }
                     .join("\n\n")

        "Summarize the following conversation, preserving all key facts, " \
          "decisions, and tool call results:\n\n#{transcript}"
      end
    end
  end
end
|
|
@@ -0,0 +1,115 @@
|
|
|
1
|
+
# frozen_string_literal: true

# lib/ruby_pi/context/transform.rb
#
# RubyPi::Context::Transform — Composable helpers for mutating agent state
# before each LLM call.
#
# Transforms are callables (lambdas/procs) that receive the Agent::State and
# modify it in place — typically appending context to the system prompt. This
# module provides factory methods for common transform patterns (datetime
# injection, user preferences, workspace context) and a `compose` method for
# chaining multiple transforms into a single callable.

module RubyPi
  module Context
    # Factory methods for building transform_context callables. Each method
    # returns a Proc that accepts an Agent::State and mutates it. Use
    # `compose` to chain multiple transforms.
    #
    # @example Composing transforms
    #   transform = RubyPi::Context::Transform.compose(
    #     RubyPi::Context::Transform.inject_datetime,
    #     RubyPi::Context::Transform.inject_user_preferences { |state| state.user_data[:prefs] }
    #   )
    #   agent = RubyPi::Agent.new(transform_context: transform, ...)
    module Transform
      class << self
        # Chains multiple transform callables into a single callable that
        # executes them in order against the same State object.
        #
        # @param transforms [Array<Proc>] transform callables to chain
        # @return [Proc] a single callable that runs all transforms in sequence
        #
        # @example
        #   combined = Transform.compose(transform_a, transform_b, transform_c)
        #   combined.call(state)  # runs a, then b, then c
        def compose(*transforms)
          lambda do |state|
            transforms.each { |transform| transform.call(state) }
          end
        end

        # Returns a transform that appends the current UTC date and time to
        # the system prompt, giving the LLM temporal awareness.
        #
        # @return [Proc] transform callable
        #
        # @example
        #   transform = Transform.inject_datetime
        #   # Appends: "\n\nCurrent date and time: 2025-01-15 14:30:00 UTC"
        def inject_datetime
          lambda do |state|
            stamp = Time.now.utc.strftime("%Y-%m-%d %H:%M:%S UTC")
            state.system_prompt += "\n\nCurrent date and time: #{stamp}"
          end
        end

        # Returns a transform that appends user preferences to the system
        # prompt. The block receives the state and returns a String, Hash,
        # or nil; nil means nothing is appended.
        #
        # @yield [state] block that extracts preferences from the state
        # @yieldparam state [RubyPi::Agent::State] the current agent state
        # @yieldreturn [String, Hash, nil] preferences to inject
        # @return [Proc] transform callable
        #
        # @example
        #   transform = Transform.inject_user_preferences { |s| s.user_data[:prefs] }
        def inject_user_preferences(&block)
          section_injector("[User Preferences]", block)
        end

        # Returns a transform that appends workspace/project context to the
        # system prompt. The block receives the state and returns a String,
        # Hash, or nil; nil means nothing is appended.
        #
        # @yield [state] block that extracts workspace context from the state
        # @yieldparam state [RubyPi::Agent::State] the current agent state
        # @yieldreturn [String, Hash, nil] workspace context to inject
        # @return [Proc] transform callable
        #
        # @example
        #   transform = Transform.inject_workspace_context { |s| s.user_data[:workspace] }
        def inject_workspace_context(&block)
          section_injector("[Workspace Context]", block)
        end

        private

        # Builds a transform that appends a bracketed section to the system
        # prompt. The extractor is called with the state; a nil result means
        # no change, a Hash is rendered as key-value lines, anything else is
        # stringified.
        #
        # @param header [String] section header, e.g. "[User Preferences]"
        # @param extractor [Proc] callable extracting the value from the state
        # @return [Proc] transform callable
        def section_injector(header, extractor)
          lambda do |state|
            value = extractor.call(state)
            next if value.nil?

            body = value.is_a?(Hash) ? format_hash(value) : value.to_s
            state.system_prompt += "\n\n#{header}\n#{body}"
          end
        end

        # Formats a hash into a human-readable key-value string for
        # injection into the system prompt.
        #
        # @param hash [Hash] the data to format
        # @return [String] formatted key-value pairs, one per line
        def format_hash(hash)
          hash.each_pair.map { |key, value| "- #{key}: #{value}" }.join("\n")
        end
      end
    end
  end
end
|
|
@@ -0,0 +1,97 @@
|
|
|
1
|
+
# frozen_string_literal: true

# lib/ruby_pi/errors.rb
#
# Defines the error hierarchy for the RubyPi framework. All RubyPi exceptions
# inherit from RubyPi::Error, which itself inherits from StandardError.
# This allows callers to rescue specific error types or catch all RubyPi
# errors with a single rescue clause.

module RubyPi
  # Base error class for all RubyPi exceptions. Rescue this to catch any
  # error originating from the RubyPi framework.
  #
  # @example Catching all RubyPi errors
  #   begin
  #     provider.complete(messages: msgs)
  #   rescue RubyPi::Error => e
  #     logger.error("RubyPi error: #{e.message}")
  #   end
  class Error < StandardError; end

  # Raised when an API request fails due to a server-side or client-side
  # HTTP error (e.g., 400, 500). Includes the HTTP status code and the
  # response body for debugging.
  class ApiError < Error
    # @return [Integer, nil] the HTTP status code returned by the API
    attr_reader :status_code

    # @return [String, nil] the raw response body from the API
    attr_reader :response_body

    # @param message [String, nil] human-readable error description
    # @param status_code [Integer, nil] HTTP status code
    # @param response_body [String, nil] raw response body
    def initialize(message = nil, status_code: nil, response_body: nil)
      @status_code = status_code
      @response_body = response_body
      super(message || "API request failed with status #{status_code}")
    end
  end

  # Raised when authentication fails (HTTP 401 or 403). Typically indicates
  # an invalid, expired, or missing API key.
  class AuthenticationError < ApiError
    # @param message [String, nil] human-readable error description
    # @param status_code [Integer] the actual HTTP status (401 or 403;
    #   default: 401). Previously this was hard-coded to 401, which
    #   misreported 403 responses.
    # @param response_body [String, nil] raw response body
    def initialize(message = nil, status_code: 401, response_body: nil)
      super(message || "Authentication failed — check your API key",
            status_code: status_code, response_body: response_body)
    end
  end

  # Raised when the API returns a rate limit response (HTTP 429). The caller
  # should back off and retry after the indicated period.
  class RateLimitError < ApiError
    # @return [Float, nil] suggested retry delay in seconds, if provided by the API
    attr_reader :retry_after

    # @param message [String, nil] human-readable error description
    # @param retry_after [Float, nil] seconds to wait before retrying
    # @param response_body [String, nil] raw response body
    def initialize(message = nil, retry_after: nil, response_body: nil)
      @retry_after = retry_after
      super(message || "Rate limit exceeded", status_code: 429, response_body: response_body)
    end
  end

  # Raised when an HTTP request times out before receiving a response.
  # NOTE: intentionally shadows ::Timeout::Error within the RubyPi namespace;
  # use ::TimeoutError (or Timeout::Error) to refer to the Ruby built-in.
  class TimeoutError < Error
    # @param message [String, nil] human-readable error description
    def initialize(message = nil)
      super(message || "Request timed out")
    end
  end

  # Raised when a provider-specific error occurs that does not map to one
  # of the more specific error types. Includes the provider name for context.
  class ProviderError < Error
    # @return [Symbol, String] the name of the provider that raised the error
    attr_reader :provider

    # @param message [String, nil] human-readable error description
    # @param provider [Symbol, String] provider identifier (e.g., :gemini, :anthropic)
    def initialize(message = nil, provider: nil)
      @provider = provider
      super(message || "Provider error from #{provider}")
    end
  end

  # Raised when a subclass does not implement a required abstract method
  # from a base class.
  # NOTE: intentionally shadows ::NotImplementedError within the RubyPi
  # namespace; unlike the built-in (a ScriptError), this one is rescuable
  # via RubyPi::Error / StandardError.
  class NotImplementedError < Error
    # @param method_name [String, Symbol, nil] the name of the unimplemented method
    def initialize(method_name = nil)
      super(method_name ? "Subclass must implement ##{method_name}" : "Subclass must implement this method")
    end
  end
end
|
|
@@ -0,0 +1,96 @@
|
|
|
1
|
+
# frozen_string_literal: true

# lib/ruby_pi/extensions/base.rb
#
# RubyPi::Extensions::Base — Base class for agent extensions with a hook DSL.
#
# Extensions allow external modules to tap into the agent lifecycle without
# modifying core agent code. Subclasses use the `on_event` class method to
# declare handlers for specific events. When an extension is registered with
# an agent via `agent.use(MyExtension)`, all declared hooks are automatically
# subscribed to the agent's event emitter.
#
# Hooks are inherited by subclasses, so a base extension can define common
# behavior that specialized extensions build upon.

module RubyPi
  module Extensions
    # Abstract base class for agent extensions. Subclass this and use the
    # `on_event` DSL to register lifecycle hooks.
    #
    # @example Defining an extension
    #   class AuditExtension < RubyPi::Extensions::Base
    #     on_event :tool_execution_end do |data, agent|
    #       AuditLog.record(tool: data[:tool_name], success: data[:success])
    #     end
    #
    #     on_event :agent_end do |data, agent|
    #       AuditLog.finalize(success: data[:success])
    #     end
    #
    #     def self.name
    #       "audit"
    #     end
    #   end
    #
    #   agent.use(AuditExtension)
    class Base
      class << self
        # Registers a hook for the given event type. The block receives the
        # event data hash and the agent instance when the event fires.
        #
        # @param event [Symbol] the event type to hook into (must be in Agent::EVENTS)
        # @param block [Proc] the hook handler; receives (data, agent)
        # @return [void]
        # @raise [ArgumentError] if the event type is not recognized
        def on_event(event, &block)
          unless RubyPi::Agent::EVENTS.include?(event)
            raise ArgumentError,
                  "Unknown event type: #{event.inspect}. " \
                  "Must be one of: #{RubyPi::Agent::EVENTS.join(', ')}"
          end

          (own_hooks[event] ||= []) << block
        end

        # Returns all registered hooks for this extension class, including
        # hooks inherited from parent extension classes. For events declared
        # at both levels, inherited handlers run before this class's own.
        #
        # @return [Hash{Symbol => Array<Proc>}] hooks keyed by event type
        def hooks
          return own_hooks.dup unless superclass.respond_to?(:hooks)

          # Merge parent hooks with own hooks; on conflict, parent handlers
          # come first to preserve declaration order down the hierarchy.
          superclass.hooks.merge(own_hooks) { |_event, inherited, own| inherited + own }
        end

        # Returns the extension name. Override in subclasses to provide
        # a human-readable identifier; by default this is the class name.
        #
        # @return [String] the extension name
        def name
          super
        end

        private

        # Hooks declared directly on this class (a class-instance variable,
        # so each subclass keeps its own registry separate from inherited
        # ones).
        #
        # @return [Hash{Symbol => Array<Proc>}]
        def own_hooks
          @own_hooks ||= {}
        end
      end
    end
  end
end
|