mcp_on_ruby 0.3.0 → 1.0.0
This diff shows the content of publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
- checksums.yaml +4 -4
- data/CHANGELOG.md +56 -28
- data/CODE_OF_CONDUCT.md +30 -58
- data/CONTRIBUTING.md +61 -67
- data/LICENSE.txt +2 -2
- data/README.md +159 -509
- data/bin/console +11 -0
- data/bin/setup +6 -0
- data/docs/advanced-usage.md +132 -0
- data/docs/api-reference.md +35 -0
- data/docs/testing.md +55 -0
- data/examples/claude/README.md +171 -0
- data/examples/claude/claude-bridge.js +122 -0
- data/lib/mcp_on_ruby/configuration.rb +74 -0
- data/lib/mcp_on_ruby/errors.rb +137 -0
- data/lib/mcp_on_ruby/generators/install_generator.rb +46 -0
- data/lib/mcp_on_ruby/generators/resource_generator.rb +63 -0
- data/lib/mcp_on_ruby/generators/templates/README +31 -0
- data/lib/mcp_on_ruby/generators/templates/application_resource.rb +20 -0
- data/lib/mcp_on_ruby/generators/templates/application_tool.rb +18 -0
- data/lib/mcp_on_ruby/generators/templates/initializer.rb +41 -0
- data/lib/mcp_on_ruby/generators/templates/resource.rb +50 -0
- data/lib/mcp_on_ruby/generators/templates/resource_spec.rb +67 -0
- data/lib/mcp_on_ruby/generators/templates/sample_resource.rb +57 -0
- data/lib/mcp_on_ruby/generators/templates/sample_tool.rb +59 -0
- data/lib/mcp_on_ruby/generators/templates/tool.rb +38 -0
- data/lib/mcp_on_ruby/generators/templates/tool_spec.rb +55 -0
- data/lib/mcp_on_ruby/generators/tool_generator.rb +51 -0
- data/lib/mcp_on_ruby/railtie.rb +108 -0
- data/lib/mcp_on_ruby/resource.rb +161 -0
- data/lib/mcp_on_ruby/server.rb +378 -0
- data/lib/mcp_on_ruby/tool.rb +134 -0
- data/lib/mcp_on_ruby/transport.rb +330 -0
- data/lib/mcp_on_ruby/version.rb +6 -0
- data/lib/mcp_on_ruby.rb +142 -0
- metadata +62 -173
- data/lib/ruby_mcp/client.rb +0 -43
- data/lib/ruby_mcp/configuration.rb +0 -90
- data/lib/ruby_mcp/errors.rb +0 -17
- data/lib/ruby_mcp/models/context.rb +0 -52
- data/lib/ruby_mcp/models/engine.rb +0 -31
- data/lib/ruby_mcp/models/message.rb +0 -60
- data/lib/ruby_mcp/providers/anthropic.rb +0 -269
- data/lib/ruby_mcp/providers/base.rb +0 -57
- data/lib/ruby_mcp/providers/openai.rb +0 -265
- data/lib/ruby_mcp/schemas.rb +0 -56
- data/lib/ruby_mcp/server/app.rb +0 -84
- data/lib/ruby_mcp/server/base_controller.rb +0 -49
- data/lib/ruby_mcp/server/content_controller.rb +0 -68
- data/lib/ruby_mcp/server/contexts_controller.rb +0 -67
- data/lib/ruby_mcp/server/controller.rb +0 -29
- data/lib/ruby_mcp/server/engines_controller.rb +0 -34
- data/lib/ruby_mcp/server/generate_controller.rb +0 -140
- data/lib/ruby_mcp/server/messages_controller.rb +0 -30
- data/lib/ruby_mcp/server/router.rb +0 -84
- data/lib/ruby_mcp/storage/active_record.rb +0 -414
- data/lib/ruby_mcp/storage/base.rb +0 -43
- data/lib/ruby_mcp/storage/error.rb +0 -8
- data/lib/ruby_mcp/storage/memory.rb +0 -69
- data/lib/ruby_mcp/storage/redis.rb +0 -197
- data/lib/ruby_mcp/storage_factory.rb +0 -43
- data/lib/ruby_mcp/validator.rb +0 -45
- data/lib/ruby_mcp/version.rb +0 -6
- data/lib/ruby_mcp.rb +0 -71
data/lib/ruby_mcp/providers/anthropic.rb
DELETED
@@ -1,269 +0,0 @@
```ruby
# frozen_string_literal: true

module RubyMCP
  module Providers
    class Anthropic < Base
      MODELS = [
        {
          id: 'claude-3-opus-20240229',
          capabilities: %w[text-generation streaming tool-calls]
        },
        {
          id: 'claude-3-sonnet-20240229',
          capabilities: %w[text-generation streaming tool-calls]
        },
        {
          id: 'claude-3-haiku-20240307',
          capabilities: %w[text-generation streaming tool-calls]
        },
        {
          id: 'claude-2.1',
          capabilities: %w[text-generation streaming]
        },
        {
          id: 'claude-2.0',
          capabilities: %w[text-generation streaming]
        },
        {
          id: 'claude-instant-1.2',
          capabilities: %w[text-generation streaming]
        }
      ].freeze

      def list_engines
        # Anthropic doesn't have an endpoint to list models, so we use a static list
        MODELS.map do |model_info|
          RubyMCP::Models::Engine.new(
            id: "anthropic/#{model_info[:id]}",
            provider: 'anthropic',
            model: model_info[:id],
            capabilities: model_info[:capabilities]
          )
        end
      end

      def generate(context, options = {})
        messages = format_messages(context)

        payload = {
          model: options[:model],
          messages: messages,
          max_tokens: options[:max_tokens] || 4096,
          temperature: options[:temperature],
          top_p: options[:top_p],
          stop_sequences: options[:stop]
        }.compact

        if options[:tools]
          payload[:tools] = options[:tools]
          payload[:tool_choice] = options[:tool_choice] || 'auto'
        end

        headers = {
          'Anthropic-Version' => '2023-06-01',
          'Content-Type' => 'application/json'
        }

        response = create_client.post('messages') do |req|
          req.headers.merge!(headers)
          req.body = payload.to_json
        end

        unless response.success?
          raise RubyMCP::Errors::ProviderError,
                "Anthropic generation failed: #{response.body['error']&.dig('message') || response.status}"
        end

        content = response.body['content']&.first&.dig('text')
        tool_calls = nil

        # Handle tool calls
        if response.body['tool_calls']
          tool_calls = response.body['tool_calls'].map do |tc|
            {
              id: tc['id'],
              type: 'function',
              function: {
                name: tc['name'],
                arguments: tc['input']
              }
            }
          end
        end

        result = {
          provider: 'anthropic',
          model: options[:model],
          created_at: Time.now.utc.iso8601
        }

        if tool_calls
          result[:tool_calls] = tool_calls
        else
          result[:content] = content
        end

        result
      end

      def generate_stream(context, options = {})
        messages = format_messages(context)

        payload = {
          model: options[:model],
          messages: messages,
          max_tokens: options[:max_tokens] || 4096,
          temperature: options[:temperature],
          top_p: options[:top_p],
          stop_sequences: options[:stop],
          stream: true
        }.compact

        if options[:tools]
          payload[:tools] = options[:tools]
          payload[:tool_choice] = options[:tool_choice] || 'auto'
        end

        headers = {
          'Anthropic-Version' => '2023-06-01',
          'Content-Type' => 'application/json'
        }

        conn = create_client

        # Update the client to handle streaming
        conn.options.timeout = 120 # Longer timeout for streaming

        generation_id = SecureRandom.uuid
        content_buffer = ''
        current_tool_calls = []

        # Initial event
        yield({
          id: generation_id,
          event: 'generation.start',
          created_at: Time.now.utc.iso8601
        })

        begin
          conn.post('messages') do |req|
            req.headers.merge!(headers)
            req.body = payload.to_json
            req.options.on_data = proc do |chunk, _size, _total|
              next if chunk.strip.empty?

              # Process each SSE event
              chunk.split('data: ').each do |data|
                next if data.strip.empty?

                begin
                  json = JSON.parse(data.strip)

                  case json['type']
                  when 'content_block_delta'
                    delta = json['delta']['text']
                    content_buffer += delta

                    # Send content update
                    yield({
                      id: generation_id,
                      event: 'generation.content',
                      created_at: Time.now.utc.iso8601,
                      content: delta
                    })
                  when 'tool_call'
                    tool_call = {
                      'id' => json['id'],
                      'type' => 'function',
                      'function' => {
                        'name' => json['name'],
                        'arguments' => json['input']
                      }
                    }

                    current_tool_calls << tool_call

                    # Send tool call update
                    yield({
                      id: generation_id,
                      event: 'generation.tool_call',
                      created_at: Time.now.utc.iso8601,
                      tool_calls: current_tool_calls
                    })
                  when 'message_stop'
                    # Handled by the final event after the streaming is done
                  end
                rescue JSON::ParserError => e
                  # Skip invalid JSON
                  RubyMCP.logger.warn "Invalid JSON in Anthropic stream: #{e.message}"
                end
              end
            end
          end
        rescue Faraday::Error => e
          raise RubyMCP::Errors::ProviderError, "Anthropic streaming failed: #{e.message}"
        end

        # Final event
        if current_tool_calls.any?
          # Final tool calls event
          yield({
            id: generation_id,
            event: 'generation.complete',
            created_at: Time.now.utc.iso8601,
            tool_calls: current_tool_calls
          })
        else
          # Final content event
          yield({
            id: generation_id,
            event: 'generation.complete',
            created_at: Time.now.utc.iso8601,
            content: content_buffer
          })
        end
      end

      def abort_generation(_generation_id)
        # Anthropic doesn't support aborting generations yet
        raise RubyMCP::Errors::ProviderError, "Anthropic doesn't support aborting generations"
      end

      protected

      def default_api_base
        'https://api.anthropic.com/v1'
      end

      private

      def format_messages(context)
        context.messages.map do |msg|
          # Convert to Anthropic's message format
          if msg.content_type == 'array'
            # Handle structured content
            content_parts = []

            msg.content.each do |part|
              if part.is_a?(String)
                content_parts << { 'type' => 'text', 'text' => part }
              elsif part.is_a?(Hash)
                if part[:type] == 'text'
                  content_parts << { 'type' => 'text', 'text' => part[:text] }
                elsif part[:type] == 'content_pointer'
                  # We don't have file IDs for Anthropic here
                  content_parts << { 'type' => 'text', 'text' => "[Content reference: #{part[:content_id]}]" }
                end
              end
            end

            { 'role' => msg.role, 'content' => content_parts }
          else
            # Simple text content
            { 'role' => msg.role, 'content' => msg.content }
          end
        end
      end
    end
  end
end
```
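For orientation, the deleted provider emitted a small event protocol: `generation.start`, then incremental `generation.content` or `generation.tool_call` events, and a final `generation.complete`. A minimal consumer sketch, assuming a 0.3.0 install and a `context` already built through the old `RubyMCP::Models::Context` API:

```ruby
# Minimal sketch of consuming the 0.3.0 streaming events shown above.
# Assumes the old gem is loaded and `context` holds the conversation.
provider = RubyMCP::Providers::Anthropic.new(api_key: ENV['ANTHROPIC_API_KEY'])

provider.generate_stream(context, model: 'claude-3-haiku-20240307') do |event|
  case event[:event]
  when 'generation.start'
    puts "generation #{event[:id]} started"
  when 'generation.content'
    print event[:content]     # incremental text delta
  when 'generation.tool_call'
    pp event[:tool_calls]     # snapshot of all tool calls so far
  when 'generation.complete'
    puts                      # final event carries content or tool_calls
  end
end
```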
data/lib/ruby_mcp/providers/base.rb
DELETED
@@ -1,57 +0,0 @@
```ruby
# frozen_string_literal: true

module RubyMCP
  module Providers
    class Base
      attr_reader :config

      def initialize(config = {})
        @config = config
      end

      def list_engines
        raise NotImplementedError, "#{self.class.name} must implement #list_engines"
      end

      def generate(context, options = {})
        raise NotImplementedError, "#{self.class.name} must implement #generate"
      end

      def generate_stream(context, options = {}, &block)
        raise NotImplementedError, "#{self.class.name} must implement #generate_stream"
      end

      def abort_generation(generation_id)
        raise NotImplementedError, "#{self.class.name} must implement #abort_generation"
      end

      protected

      def api_key
        @config[:api_key] || ENV["#{provider_name.upcase}_API_KEY"]
      end

      def api_base
        @config[:api_base] || default_api_base
      end

      def provider_name
        self.class.name.split('::').last.downcase
      end

      def default_api_base
        raise NotImplementedError, "#{self.class.name} must implement #default_api_base"
      end

      def create_client
        Faraday.new(url: api_base) do |conn|
          conn.request :json
          conn.response :json
          conn.adapter :net_http
          conn.headers['Authorization'] = "Bearer #{api_key}"
          conn.headers['Content-Type'] = 'application/json'
        end
      end
    end
  end
end
```
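This base class is a classic template method: subclasses supply the HTTP specifics while inheriting key lookup and the Faraday client. A hypothetical subclass sketch (no such provider ever shipped in the gem) showing the minimal contract:

```ruby
# frozen_string_literal: true

# Hypothetical third-party adapter against the removed 0.3.0 Base class.
module RubyMCP
  module Providers
    class Mistral < Base
      def list_engines
        [] # a real adapter would query the provider's /models endpoint
      end

      def generate(context, options = {})
        # create_client already carries the Bearer token and JSON middleware
        create_client.post('chat/completions', { model: options[:model] })
      end

      protected

      # The one hook create_client strictly needs; api_key then falls back
      # to ENV['MISTRAL_API_KEY'] via provider_name.
      def default_api_base
        'https://api.mistral.ai/v1'
      end
    end
  end
end
```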
data/lib/ruby_mcp/providers/openai.rb
DELETED
@@ -1,265 +0,0 @@
```ruby
# frozen_string_literal: true

module RubyMCP
  module Providers
    class Openai < Base
      def list_engines
        response = create_client.get('models')

        unless response.success?
          raise RubyMCP::Errors::ProviderError,
                "Failed to list OpenAI models: #{response.body['error']&.dig('message') || response.status}"
        end

        models = response.body['data']

        models.map do |model_data|
          next unless model_data['id'].start_with?('gpt')

          capabilities = ['text-generation']
          capabilities << 'streaming' if model_data['id'].start_with?('gpt-3.5', 'gpt-4')
          capabilities << 'tool-calls' if model_data['id'].start_with?('gpt-3.5', 'gpt-4')

          RubyMCP::Models::Engine.new(
            id: "openai/#{model_data['id']}",
            provider: 'openai',
            model: model_data['id'],
            capabilities: capabilities
          )
        end.compact
      end

      def generate(context, options = {})
        messages = format_messages(context)

        payload = {
          model: options[:model],
          messages: messages,
          max_tokens: options[:max_tokens],
          temperature: options[:temperature],
          top_p: options[:top_p],
          frequency_penalty: options[:frequency_penalty],
          presence_penalty: options[:presence_penalty],
          stop: options[:stop]
        }.compact

        if options[:tools]
          payload[:tools] = options[:tools]
          payload[:tool_choice] = options[:tool_choice] || 'auto'
        end

        response = create_client.post('chat/completions', payload)

        unless response.success?
          raise RubyMCP::Errors::ProviderError,
                "OpenAI generation failed: #{response.body['error']&.dig('message') || response.status}"
        end

        choice = response.body['choices']&.first
        content = choice&.dig('message', 'content')

        # Handle tool calls
        tool_calls = nil
        if choice&.dig('message', 'tool_calls')
          tool_calls = choice['message']['tool_calls'].map do |tc|
            {
              id: tc['id'],
              type: 'function',
              function: {
                name: tc['function']['name'],
                arguments: tc['function']['arguments']
              }
            }
          end
        end

        result = {
          provider: 'openai',
          model: options[:model],
          created_at: Time.now.utc.iso8601
        }

        if tool_calls
          result[:tool_calls] = tool_calls
        else
          result[:content] = content
        end

        result
      end

      def generate_stream(context, options = {})
        messages = format_messages(context)

        payload = {
          model: options[:model],
          messages: messages,
          max_tokens: options[:max_tokens],
          temperature: options[:temperature],
          top_p: options[:top_p],
          frequency_penalty: options[:frequency_penalty],
          presence_penalty: options[:presence_penalty],
          stop: options[:stop],
          stream: true
        }.compact

        if options[:tools]
          payload[:tools] = options[:tools]
          payload[:tool_choice] = options[:tool_choice] || 'auto'
        end

        conn = create_client

        # Update the client to handle streaming
        conn.options.timeout = 120 # Longer timeout for streaming

        generation_id = SecureRandom.uuid
        content_buffer = ''
        current_tool_calls = []

        # Initial event
        yield({
          id: generation_id,
          event: 'generation.start',
          created_at: Time.now.utc.iso8601
        })

        begin
          conn.post('chat/completions') do |req|
            req.body = payload.to_json
            req.options.on_data = proc do |chunk, _size, _total|
              next if chunk.strip.empty?

              # Process each SSE event
              chunk.split('data: ').each do |data|
                next if data.strip.empty?

                # Skip "[DONE]" marker
                next if data.strip == '[DONE]'

                begin
                  json = JSON.parse(data.strip)
                  delta = json.dig('choices', 0, 'delta')

                  if delta&.key?('content') && delta['content']
                    content_buffer += delta['content']

                    # Send content update
                    yield({
                      id: generation_id,
                      event: 'generation.content',
                      created_at: Time.now.utc.iso8601,
                      content: delta['content']
                    })
                  end

                  # Handle tool call updates
                  if delta&.key?('tool_calls')
                    delta['tool_calls'].each do |tc|
                      tc_id = tc['index']

                      # Initialize tool call if it's new
                      current_tool_calls[tc_id] ||= {
                        'id' => SecureRandom.uuid,
                        'type' => 'function',
                        'function' => {
                          'name' => '',
                          'arguments' => ''
                        }
                      }

                      # Update function name
                      if tc.dig('function', 'name')
                        current_tool_calls[tc_id]['function']['name'] += tc['function']['name']
                      end

                      # Update arguments
                      if tc.dig('function', 'arguments')
                        current_tool_calls[tc_id]['function']['arguments'] += tc['function']['arguments']
                      end

                      # Send tool call update
                      yield({
                        id: generation_id,
                        event: 'generation.tool_call',
                        created_at: Time.now.utc.iso8601,
                        tool_calls: current_tool_calls
                      })
                    end
                  end
                rescue JSON::ParserError => e
                  # Skip invalid JSON
                  RubyMCP.logger.warn "Invalid JSON in OpenAI stream: #{e.message}"
                end
              end
            end
          end
        rescue Faraday::Error => e
          raise RubyMCP::Errors::ProviderError, "OpenAI streaming failed: #{e.message}"
        end

        # Final event
        if current_tool_calls.any?
          # Final tool calls event
          yield({
            id: generation_id,
            event: 'generation.complete',
            created_at: Time.now.utc.iso8601,
            tool_calls: current_tool_calls
          })
        else
          # Final content event
          yield({
            id: generation_id,
            event: 'generation.complete',
            created_at: Time.now.utc.iso8601,
            content: content_buffer
          })
        end
      end

      def abort_generation(_generation_id)
        # OpenAI doesn't support aborting generations yet
        raise RubyMCP::Errors::ProviderError, "OpenAI doesn't support aborting generations"
      end

      protected

      def default_api_base
        'https://api.openai.com/v1'
      end

      private

      def format_messages(context)
        context.messages.map do |msg|
          # Convert to OpenAI's message format
          message = { 'role' => msg.role, 'content' => msg.content }

          # Handle structured content
          if msg.content_type == 'array'
            content_parts = []

            msg.content.each do |part|
              if part.is_a?(String)
                content_parts << { 'type' => 'text', 'text' => part }
              elsif part.is_a?(Hash)
                if part[:type] == 'text'
                  content_parts << { 'type' => 'text', 'text' => part[:text] }
                elsif part[:type] == 'content_pointer'
                  # We don't have file IDs for OpenAI here
                  # In a real implementation, we would upload the file to OpenAI
                  content_parts << { 'type' => 'text', 'text' => "[Content reference: #{part[:content_id]}]" }
                end
              end
            end

            message['content'] = content_parts
          end

          message
        end
      end
    end
  end
end
```
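Unlike the Anthropic provider's single `tool_call` events, OpenAI streams each tool call as indexed fragments that must be concatenated across chunks, which is what the deleted code does inline with `current_tool_calls[tc['index']]`. The pattern in isolation, with illustrative deltas shaped like OpenAI stream chunks:

```ruby
# Sketch of the fragment-stitching pattern above, isolated from the transport.
deltas = [
  { 'index' => 0, 'function' => { 'name' => 'get_wea', 'arguments' => '' } },
  { 'index' => 0, 'function' => { 'name' => 'ther', 'arguments' => '{"city":' } },
  { 'index' => 0, 'function' => { 'arguments' => '"Paris"}' } }
]

calls = []
deltas.each do |tc|
  # Each index gets one accumulator; fragments append to name and arguments.
  slot = calls[tc['index']] ||= { 'name' => +'', 'arguments' => +'' }
  slot['name'] << tc['function']['name'].to_s
  slot['arguments'] << tc['function']['arguments'].to_s
end

calls.first # => {"name"=>"get_weather", "arguments"=>"{\"city\":\"Paris\"}"}
```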
data/lib/ruby_mcp/schemas.rb
DELETED
@@ -1,56 +0,0 @@
```ruby
# frozen_string_literal: true

require 'dry-schema'

module RubyMCP
  module Schemas
    # Define schemas using dry-schema

    ContextSchema = Dry::Schema.JSON do
      optional(:id).maybe(:string).filled(format?: /^ctx_[a-zA-Z0-9]+$/)

      optional(:messages).array(:hash) do
        required(:role).filled(:string, included_in?: %w[user assistant system tool])
        required(:content).filled
        optional(:id).maybe(:string)
        optional(:metadata).maybe(:hash)
      end

      optional(:metadata).maybe(:hash)
    end

    MessageSchema = Dry::Schema.JSON do
      required(:context_id).filled(:string, format?: /^ctx_[a-zA-Z0-9]+$/)
      required(:role).filled(:string, included_in?: %w[user assistant system tool])
      required(:content).filled
      optional(:id).maybe(:string)
      optional(:metadata).maybe(:hash)
    end

    GenerateSchema = Dry::Schema.JSON do
      required(:context_id).filled(:string, format?: /^ctx_[a-zA-Z0-9]+$/)
      required(:engine_id).filled(:string, format?: %r{^[a-z0-9-]+/[a-z0-9-]+$})

      optional(:max_tokens).maybe(:integer, gt?: 0)
      optional(:temperature).maybe(:float, gteq?: 0, lteq?: 2)
      optional(:top_p).maybe(:float, gteq?: 0, lteq?: 1)
      optional(:frequency_penalty).maybe(:float, gteq?: -2, lteq?: 2)
      optional(:presence_penalty).maybe(:float, gteq?: -2, lteq?: 2)
      optional(:stop).maybe(:string)
      optional(:update_context).maybe(:bool)

      # Tool calling support could be added here
    end

    ContentSchema = Dry::Schema.JSON do
      required(:context_id).filled(:string, format?: /^ctx_[a-zA-Z0-9]+$/)
      optional(:id).maybe(:string)
      optional(:type).maybe(:string)

      optional(:file_data).maybe(:string)
      optional(:filename).maybe(:string)
      optional(:content_type).maybe(:string)
      optional(:data).maybe(:hash)
    end
  end
end
```
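These dry-schema objects were applied by calling them with a request payload; `Dry::Schema.JSON` coerces JSON-style string keys before validating. A small usage sketch with an illustrative payload (the values are made up, but match the formats above):

```ruby
# Sketch of validating a generate request against the removed 0.3.0 schema.
# Assumes the 0.3.0 gem (or at least this file plus dry-schema) is loaded.
result = RubyMCP::Schemas::GenerateSchema.call(
  'context_id' => 'ctx_abc123',                # must match /^ctx_[a-zA-Z0-9]+$/
  'engine_id'  => 'anthropic/claude-3-haiku',  # must match provider/model
  'max_tokens' => 256
)

result.success?    # => true
result.errors.to_h # => {} on success; keyed messages on failure
```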