ruby-pi 0.1.0
This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects changes between package versions as they appear in their public registries.
- checksums.yaml +7 -0
- data/CHANGELOG.md +31 -0
- data/LICENSE +21 -0
- data/README.md +415 -0
- data/lib/ruby_pi/agent/core.rb +175 -0
- data/lib/ruby_pi/agent/events.rb +120 -0
- data/lib/ruby_pi/agent/loop.rb +265 -0
- data/lib/ruby_pi/agent/result.rb +101 -0
- data/lib/ruby_pi/agent/state.rb +155 -0
- data/lib/ruby_pi/configuration.rb +80 -0
- data/lib/ruby_pi/context/compaction.rb +160 -0
- data/lib/ruby_pi/context/transform.rb +115 -0
- data/lib/ruby_pi/errors.rb +97 -0
- data/lib/ruby_pi/extensions/base.rb +96 -0
- data/lib/ruby_pi/llm/anthropic.rb +314 -0
- data/lib/ruby_pi/llm/base_provider.rb +220 -0
- data/lib/ruby_pi/llm/fallback.rb +96 -0
- data/lib/ruby_pi/llm/gemini.rb +260 -0
- data/lib/ruby_pi/llm/model.rb +82 -0
- data/lib/ruby_pi/llm/openai.rb +287 -0
- data/lib/ruby_pi/llm/response.rb +82 -0
- data/lib/ruby_pi/llm/stream_event.rb +91 -0
- data/lib/ruby_pi/llm/tool_call.rb +78 -0
- data/lib/ruby_pi/tools/definition.rb +149 -0
- data/lib/ruby_pi/tools/executor.rb +168 -0
- data/lib/ruby_pi/tools/registry.rb +120 -0
- data/lib/ruby_pi/tools/result.rb +83 -0
- data/lib/ruby_pi/tools/schema.rb +170 -0
- data/lib/ruby_pi/version.rb +11 -0
- data/lib/ruby_pi.rb +112 -0
- metadata +192 -0
@@ -0,0 +1,260 @@
# frozen_string_literal: true

# lib/ruby_pi/llm/gemini.rb
#
# LLM provider for Google Gemini. Implements the BaseProvider interface using
# the Gemini REST API for both synchronous and streaming completions, including
# tool/function calling support.

module RubyPi
  module LLM
    # Google Gemini provider implementation. Communicates with the Gemini
    # generativelanguage API to generate text completions, handle tool calls,
    # and stream responses.
    #
    # @example Basic usage
    #   provider = RubyPi::LLM::Gemini.new(
    #     model: "gemini-2.0-flash",
    #     api_key: ENV["GEMINI_API_KEY"]
    #   )
    #   response = provider.complete(messages: [{ role: "user", content: "Hello!" }])
    #   puts response.content
    class Gemini < BaseProvider
      # Base URL for the Gemini generativelanguage API.
      BASE_URL = "https://generativelanguage.googleapis.com"

      # API version prefix for endpoint paths.
      API_VERSION = "v1beta"

      # Creates a new Gemini provider instance.
      #
      # @param model [String] the Gemini model identifier (e.g., "gemini-2.0-flash")
      # @param api_key [String, nil] Gemini API key (falls back to global config)
      # @param options [Hash] additional options passed to BaseProvider
      def initialize(model: nil, api_key: nil, **options)
        super(**options)
        config = RubyPi.configuration
        @model = model || config.default_gemini_model
        @api_key = api_key || config.gemini_api_key
      end

      # Returns the Gemini model identifier.
      #
      # @return [String]
      def model_name
        @model
      end

      # Returns :gemini as the provider identifier.
      #
      # @return [Symbol]
      def provider_name
        :gemini
      end

      private

      # Performs the completion request against the Gemini API.
      #
      # @param messages [Array<Hash>] conversation messages
      # @param tools [Array<Hash>] tool definitions
      # @param stream [Boolean] whether to use streaming
      # @yield [event] streaming events if stream is true
      # @return [RubyPi::LLM::Response]
      def perform_complete(messages:, tools:, stream:, &block)
        body = build_request_body(messages, tools)

        if stream && block_given?
          perform_streaming_request(body, &block)
        else
          perform_standard_request(body)
        end
      end

      # Builds the Gemini API request body from messages and tools.
      #
      # @param messages [Array<Hash>] conversation messages
      # @param tools [Array<Hash>] tool definitions
      # @return [Hash] the request body
      def build_request_body(messages, tools)
        body = {
          contents: messages.map { |msg| format_message(msg) }
        }

        unless tools.empty?
          body[:tools] = [{
            functionDeclarations: tools.map { |t| format_tool(t) }
          }]
        end

        body
      end

      # Converts a normalized message hash to Gemini's content format.
      #
      # @param message [Hash] a message with :role and :content keys
      # @return [Hash] Gemini-formatted content object
      def format_message(message)
        role = message[:role]&.to_s || message["role"]&.to_s || "user"
        content = message[:content] || message["content"] || ""

        # Gemini uses "user" and "model" roles
        gemini_role = role == "assistant" ? "model" : role

        {
          role: gemini_role,
          parts: [{ text: content.to_s }]
        }
      end

      # Converts a tool definition to Gemini's function declaration format.
      # Accepts either a RubyPi::Tools::Definition or a plain Hash.
      #
      # @param tool [RubyPi::Tools::Definition, Hash] tool definition
      # @return [Hash] Gemini function declaration
      def format_tool(tool)
        return tool.to_gemini_format if tool.respond_to?(:to_gemini_format)

        declaration = {
          name: tool[:name] || tool["name"],
          description: tool[:description] || tool["description"] || ""
        }

        params = tool[:parameters] || tool["parameters"]
        declaration[:parameters] = params if params

        declaration
      end

      # Executes a standard (non-streaming) request to the Gemini API.
      #
      # @param body [Hash] the request body
      # @return [RubyPi::LLM::Response]
      def perform_standard_request(body)
        conn = build_connection(base_url: BASE_URL)
        url = "/#{API_VERSION}/models/#{@model}:generateContent?key=#{@api_key}"

        response = conn.post(url) do |req|
          req.headers["Content-Type"] = "application/json"
          req.body = JSON.generate(body)
        end

        handle_error_response(response) unless response.success?
        parse_response(JSON.parse(response.body))
      end

      # Executes a streaming request to the Gemini API, yielding events.
      #
      # @param body [Hash] the request body
      # @yield [event] StreamEvent objects
      # @return [RubyPi::LLM::Response] final aggregated response
      def perform_streaming_request(body, &block)
        conn = build_connection(base_url: BASE_URL)
        url = "/#{API_VERSION}/models/#{@model}:streamGenerateContent?key=#{@api_key}&alt=sse"

        accumulated_text = +""
        accumulated_tool_calls = []
        usage_data = {}

        response = conn.post(url) do |req|
          req.headers["Content-Type"] = "application/json"
          req.body = JSON.generate(body)
        end

        handle_error_response(response) unless response.success?

        # Parse SSE events from the response body
        parse_sse_events(response.body) do |data|
          candidates = data.dig("candidates") || []
          candidate = candidates.first
          next unless candidate

          parts = candidate.dig("content", "parts") || []
          parts.each do |part|
            if part.key?("text")
              text_chunk = part["text"]
              accumulated_text << text_chunk
              block.call(StreamEvent.new(type: :text_delta, data: text_chunk))
            elsif part.key?("functionCall")
              fc = part["functionCall"]
              tool_call = ToolCall.new(
                id: "gemini_#{accumulated_tool_calls.length}",
                name: fc["name"],
                arguments: fc["args"] || {}
              )
              accumulated_tool_calls << tool_call
              block.call(StreamEvent.new(type: :tool_call_delta, data: tool_call.to_h))
            end
          end

          # Capture usage metadata if present
          if data.key?("usageMetadata")
            meta = data["usageMetadata"]
            usage_data = {
              prompt_tokens: meta["promptTokenCount"],
              completion_tokens: meta["candidatesTokenCount"],
              total_tokens: meta["totalTokenCount"]
            }
          end
        end

        # Signal completion
        block.call(StreamEvent.new(type: :done))

        Response.new(
          content: accumulated_text.empty? ? nil : accumulated_text,
          tool_calls: accumulated_tool_calls,
          usage: usage_data,
          finish_reason: "stop"
        )
      end

      # Parses a Gemini API response hash into a normalized Response object.
      #
      # @param data [Hash] parsed JSON response from Gemini
      # @return [RubyPi::LLM::Response]
      def parse_response(data)
        candidates = data["candidates"] || []
        candidate = candidates.first || {}

        content = nil
        tool_calls = []

        parts = candidate.dig("content", "parts") || []
        parts.each do |part|
          if part.key?("text")
            content = (content || +"") << part["text"]
          elsif part.key?("functionCall")
            fc = part["functionCall"]
            tool_calls << ToolCall.new(
              id: "gemini_#{tool_calls.length}",
              name: fc["name"],
              arguments: fc["args"] || {}
            )
          end
        end

        # Extract usage metadata
        usage = {}
        if data.key?("usageMetadata")
          meta = data["usageMetadata"]
          usage = {
            prompt_tokens: meta["promptTokenCount"],
            completion_tokens: meta["candidatesTokenCount"],
            total_tokens: meta["totalTokenCount"]
          }
        end

        # Map Gemini finish reason to normalized string
        finish_reason = candidate["finishReason"]&.downcase

        Response.new(
          content: content,
          tool_calls: tool_calls,
          usage: usage,
          finish_reason: finish_reason
        )
      end
    end
  end
end
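For illustration, a minimal streaming sketch against the Gemini provider above. It assumes BaseProvider#complete (not part of this diff) forwards stream: and a block through to perform_complete as that private signature suggests, and that StreamEvent and Response expose readers matching their constructor keywords; the event types are the ones emitted by perform_streaming_request.

provider = RubyPi::LLM::Gemini.new(
  model: "gemini-2.0-flash",
  api_key: ENV["GEMINI_API_KEY"]
)

# Stream a completion, printing text chunks as they arrive.
final = provider.complete(
  messages: [{ role: "user", content: "Explain SSE in one sentence." }],
  stream: true
) do |event|
  case event.type
  when :text_delta      then print event.data                   # incremental text chunk
  when :tool_call_delta then warn "tool: #{event.data[:name]}"  # assumed reader shape
  when :done            then puts                               # stream finished
  end
end

final.content # => full aggregated text (assumed Response reader)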
@@ -0,0 +1,82 @@
# frozen_string_literal: true

# lib/ruby_pi/llm/model.rb
#
# Represents a model identifier combining a provider and model name. Used as
# a lightweight descriptor that can be passed around and later instantiated
# into a full provider instance via the factory method.

module RubyPi
  module LLM
    # A model descriptor that pairs a provider identifier with a specific
    # model name. Use the factory method RubyPi::LLM.model to create provider
    # instances directly, or instantiate a Model object for deferred construction.
    #
    # @example Creating a model descriptor
    #   model = RubyPi::LLM::Model.new(provider: :gemini, name: "gemini-2.0-flash")
    #   model.provider # => :gemini
    #   model.name     # => "gemini-2.0-flash"
    #   provider = model.build # => RubyPi::LLM::Gemini instance
    #
    # @example Using the factory shortcut
    #   provider = RubyPi::LLM.model(:openai, "gpt-4o")
    class Model
      # @return [Symbol] the provider identifier (:gemini, :anthropic, :openai)
      attr_reader :provider

      # @return [String] the model name within the provider
      attr_reader :name

      # Creates a new Model descriptor.
      #
      # @param provider [Symbol, String] provider identifier
      # @param name [String] model name
      def initialize(provider:, name:)
        @provider = provider.to_sym
        @name = name.to_s
      end

      # Builds a configured provider instance from this model descriptor.
      # Delegates to RubyPi::LLM.model for provider construction.
      #
      # @param options [Hash] additional options passed to the provider constructor
      # @return [RubyPi::LLM::BaseProvider] a configured provider instance
      def build(**options)
        RubyPi::LLM.model(@provider, @name, **options)
      end

      # Returns a hash representation of the model descriptor.
      #
      # @return [Hash]
      def to_h
        { provider: @provider, name: @name }
      end

      # Returns a human-readable string representation.
      #
      # @return [String]
      def to_s
        "#<RubyPi::LLM::Model provider=#{@provider.inspect} name=#{@name.inspect}>"
      end

      alias_method :inspect, :to_s

      # Equality comparison based on provider and name.
      #
      # @param other [RubyPi::LLM::Model] another model descriptor
      # @return [Boolean]
      def ==(other)
        other.is_a?(Model) && @provider == other.provider && @name == other.name
      end

      alias_method :eql?, :==

      # Hash code for use in hash keys and sets.
      #
      # @return [Integer]
      def hash
        [@provider, @name].hash
      end
    end
  end
end
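The ==/eql?/hash overrides above give Model full value-object semantics, so descriptors can serve as hash keys or set members and duplicates collapse regardless of whether the provider was given as a Symbol or a String. A short sketch using only the class itself:

require "set"

flash  = RubyPi::LLM::Model.new(provider: :gemini, name: "gemini-2.0-flash")
flash2 = RubyPi::LLM::Model.new(provider: "gemini", name: "gemini-2.0-flash")

flash == flash2         # => true, provider is coerced to a Symbol in initialize
Set[flash, flash2].size # => 1, thanks to the eql?/hash overrides

# Deferred construction: keep descriptors in config, build providers on demand.
registry = { chat: flash }
provider = registry[:chat].build # => a RubyPi::LLM::Gemini instance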
@@ -0,0 +1,287 @@
# frozen_string_literal: true

# lib/ruby_pi/llm/openai.rb
#
# LLM provider for OpenAI. Implements the BaseProvider interface using the
# OpenAI Chat Completions API for both synchronous and streaming completions,
# including function/tool calling support.

module RubyPi
  module LLM
    # OpenAI provider implementation. Communicates with the OpenAI Chat
    # Completions API to generate text completions, handle tool/function calls,
    # and stream responses via Server-Sent Events.
    #
    # @example Basic usage
    #   provider = RubyPi::LLM::OpenAI.new(
    #     model: "gpt-4o",
    #     api_key: ENV["OPENAI_API_KEY"]
    #   )
    #   response = provider.complete(messages: [{ role: "user", content: "Hello!" }])
    #   puts response.content
    class OpenAI < BaseProvider
      # Base URL for the OpenAI API.
      BASE_URL = "https://api.openai.com"

      # Creates a new OpenAI provider instance.
      #
      # @param model [String] the OpenAI model identifier (e.g., "gpt-4o")
      # @param api_key [String, nil] OpenAI API key (falls back to global config)
      # @param options [Hash] additional options passed to BaseProvider
      def initialize(model: nil, api_key: nil, **options)
        super(**options)
        config = RubyPi.configuration
        @model = model || config.default_openai_model
        @api_key = api_key || config.openai_api_key
      end

      # Returns the OpenAI model identifier.
      #
      # @return [String]
      def model_name
        @model
      end

      # Returns :openai as the provider identifier.
      #
      # @return [Symbol]
      def provider_name
        :openai
      end

      private

      # Performs the completion request against the OpenAI API.
      #
      # @param messages [Array<Hash>] conversation messages
      # @param tools [Array<Hash>] tool definitions
      # @param stream [Boolean] whether to use streaming
      # @yield [event] streaming events if stream is true
      # @return [RubyPi::LLM::Response]
      def perform_complete(messages:, tools:, stream:, &block)
        body = build_request_body(messages, tools, stream)

        if stream && block_given?
          perform_streaming_request(body, &block)
        else
          perform_standard_request(body)
        end
      end

      # Builds the OpenAI Chat Completions request body.
      #
      # @param messages [Array<Hash>] conversation messages
      # @param tools [Array<Hash>] tool definitions
      # @param stream [Boolean] whether streaming is enabled
      # @return [Hash] the request body
      def build_request_body(messages, tools, stream)
        body = {
          model: @model,
          messages: messages.map { |msg| format_message(msg) }
        }

        body[:stream] = true if stream

        unless tools.empty?
          body[:tools] = tools.map { |t| format_tool(t) }
        end

        body
      end

      # Converts a normalized message hash to OpenAI's message format.
      #
      # @param message [Hash] a message with :role and :content keys
      # @return [Hash] OpenAI-formatted message
      def format_message(message)
        {
          role: (message[:role] || message["role"]).to_s,
          content: (message[:content] || message["content"]).to_s
        }
      end

      # Converts a tool definition to OpenAI's function tool format.
      # Accepts either a RubyPi::Tools::Definition or a plain Hash.
      #
      # @param tool [RubyPi::Tools::Definition, Hash] tool definition
      # @return [Hash] OpenAI tool definition
      def format_tool(tool)
        return tool.to_openai_format if tool.respond_to?(:to_openai_format)

        {
          type: "function",
          function: {
            name: tool[:name] || tool["name"],
            description: tool[:description] || tool["description"] || "",
            parameters: tool[:parameters] || tool["parameters"] || { type: "object", properties: {} }
          }
        }
      end

      # Executes a standard (non-streaming) request to the OpenAI API.
      #
      # @param body [Hash] the request body
      # @return [RubyPi::LLM::Response]
      def perform_standard_request(body)
        conn = build_connection(
          base_url: BASE_URL,
          headers: default_headers
        )

        response = conn.post("/v1/chat/completions") do |req|
          req.headers["Content-Type"] = "application/json"
          req.body = JSON.generate(body)
        end

        handle_error_response(response) unless response.success?
        parse_response(JSON.parse(response.body))
      end

      # Executes a streaming request to the OpenAI API, yielding events.
      #
      # @param body [Hash] the request body
      # @yield [event] StreamEvent objects
      # @return [RubyPi::LLM::Response] final aggregated response
      def perform_streaming_request(body, &block)
        conn = build_connection(
          base_url: BASE_URL,
          headers: default_headers
        )

        accumulated_text = +""
        tool_call_accumulators = {}
        finish_reason = nil

        response = conn.post("/v1/chat/completions") do |req|
          req.headers["Content-Type"] = "application/json"
          req.body = JSON.generate(body)
        end

        handle_error_response(response) unless response.success?

        # Parse SSE events from the response body
        parse_sse_events(response.body) do |data|
          choices = data["choices"] || []
          choice = choices.first
          next unless choice

          delta = choice["delta"] || {}
          finish_reason = choice["finish_reason"] if choice["finish_reason"]

          # Handle text content deltas
          if delta.key?("content") && delta["content"]
            text = delta["content"]
            accumulated_text << text
            block.call(StreamEvent.new(type: :text_delta, data: text))
          end

          # Handle tool call deltas
          if delta.key?("tool_calls")
            delta["tool_calls"].each do |tc_delta|
              index = tc_delta["index"] || 0

              # Initialize accumulator for this tool call
              tool_call_accumulators[index] ||= { id: nil, name: +"", arguments: +"" }
              acc = tool_call_accumulators[index]

              acc[:id] = tc_delta["id"] if tc_delta["id"]

              if tc_delta.dig("function", "name")
                acc[:name] << tc_delta["function"]["name"]
              end

              if tc_delta.dig("function", "arguments")
                acc[:arguments] << tc_delta["function"]["arguments"]
              end

              block.call(StreamEvent.new(type: :tool_call_delta, data: {
                index: index,
                id: acc[:id],
                name: acc[:name],
                arguments_fragment: tc_delta.dig("function", "arguments") || ""
              }))
            end
          end
        end

        # Build final tool calls from accumulators
        tool_calls = tool_call_accumulators.sort_by { |k, _| k }.map do |_, acc|
          arguments = acc[:arguments].empty? ? {} : JSON.parse(acc[:arguments])
          ToolCall.new(id: acc[:id], name: acc[:name], arguments: arguments)
        end

        # Signal completion
        block.call(StreamEvent.new(type: :done))

        Response.new(
          content: accumulated_text.empty? ? nil : accumulated_text,
          tool_calls: tool_calls,
          usage: {},
          finish_reason: normalize_finish_reason(finish_reason)
        )
      end

      # Returns the default HTTP headers required by the OpenAI API.
      #
      # @return [Hash] headers hash
      def default_headers
        {
          "Authorization" => "Bearer #{@api_key}"
        }
      end

      # Parses an OpenAI Chat Completions response into a normalized Response.
      #
      # @param data [Hash] parsed JSON response from OpenAI
      # @return [RubyPi::LLM::Response]
      def parse_response(data)
        choice = (data["choices"] || []).first || {}
        message = choice["message"] || {}

        content = message["content"]
        tool_calls = []

        (message["tool_calls"] || []).each do |tc|
          func = tc["function"] || {}
          arguments = func["arguments"] ? JSON.parse(func["arguments"]) : {}
          tool_calls << ToolCall.new(
            id: tc["id"],
            name: func["name"],
            arguments: arguments
          )
        end

        # Extract usage
        usage = {}
        if data.key?("usage")
          usage_info = data["usage"]
          usage = {
            prompt_tokens: usage_info["prompt_tokens"],
            completion_tokens: usage_info["completion_tokens"],
            total_tokens: usage_info["total_tokens"]
          }
        end

        Response.new(
          content: content,
          tool_calls: tool_calls,
          usage: usage,
          finish_reason: normalize_finish_reason(choice["finish_reason"])
        )
      end

      # Normalizes OpenAI-specific finish reasons to common values.
      #
      # @param reason [String, nil] OpenAI finish reason
      # @return [String, nil] normalized finish reason
      def normalize_finish_reason(reason)
        case reason
        when "stop" then "stop"
        when "tool_calls" then "tool_calls"
        when "length" then "max_tokens"
        else reason
        end
      end
    end
  end
end
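A hedged tool-calling sketch against the OpenAI provider above. The get_weather tool is hypothetical; the sketch assumes BaseProvider#complete (not part of this diff) accepts tools: and that Response and ToolCall expose readers matching the values built in parse_response.

provider = RubyPi::LLM::OpenAI.new(model: "gpt-4o", api_key: ENV["OPENAI_API_KEY"])

# Hypothetical tool definition in the plain-Hash shape format_tool accepts.
weather_tool = {
  name: "get_weather",
  description: "Look up the current weather for a city",
  parameters: {
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"]
  }
}

response = provider.complete(
  messages: [{ role: "user", content: "What's the weather in Paris?" }],
  tools: [weather_tool]
)

if response.finish_reason == "tool_calls"
  response.tool_calls.each do |call|
    # call.arguments was JSON-parsed by parse_response, e.g. { "city" => "Paris" }
    puts "#{call.name}(#{call.arguments.inspect})" # assumed ToolCall readers
  end
end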