ruby_llm-mcp 0.4.1 → 0.5.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +313 -25
- data/lib/generators/ruby_llm/mcp/install_generator.rb +27 -0
- data/lib/generators/ruby_llm/mcp/templates/README.txt +32 -0
- data/lib/generators/ruby_llm/mcp/templates/initializer.rb +42 -0
- data/lib/generators/ruby_llm/mcp/templates/mcps.yml +9 -0
- data/lib/ruby_llm/chat.rb +2 -1
- data/lib/ruby_llm/mcp/client.rb +32 -13
- data/lib/ruby_llm/mcp/configuration.rb +123 -3
- data/lib/ruby_llm/mcp/coordinator.rb +108 -115
- data/lib/ruby_llm/mcp/errors.rb +3 -1
- data/lib/ruby_llm/mcp/notification_handler.rb +84 -0
- data/lib/ruby_llm/mcp/{requests/cancelled_notification.rb → notifications/cancelled.rb} +2 -2
- data/lib/ruby_llm/mcp/{requests/initialize_notification.rb → notifications/initialize.rb} +7 -3
- data/lib/ruby_llm/mcp/notifications/roots_list_change.rb +26 -0
- data/lib/ruby_llm/mcp/parameter.rb +19 -1
- data/lib/ruby_llm/mcp/progress.rb +3 -1
- data/lib/ruby_llm/mcp/prompt.rb +18 -0
- data/lib/ruby_llm/mcp/railtie.rb +20 -0
- data/lib/ruby_llm/mcp/requests/initialization.rb +8 -4
- data/lib/ruby_llm/mcp/requests/ping.rb +6 -2
- data/lib/ruby_llm/mcp/requests/prompt_list.rb +10 -2
- data/lib/ruby_llm/mcp/requests/resource_list.rb +12 -2
- data/lib/ruby_llm/mcp/requests/resource_template_list.rb +12 -2
- data/lib/ruby_llm/mcp/requests/shared/meta.rb +32 -0
- data/lib/ruby_llm/mcp/requests/shared/pagination.rb +17 -0
- data/lib/ruby_llm/mcp/requests/tool_call.rb +1 -1
- data/lib/ruby_llm/mcp/requests/tool_list.rb +10 -2
- data/lib/ruby_llm/mcp/resource.rb +17 -0
- data/lib/ruby_llm/mcp/response_handler.rb +58 -0
- data/lib/ruby_llm/mcp/responses/error.rb +33 -0
- data/lib/ruby_llm/mcp/{requests/ping_response.rb → responses/ping.rb} +2 -2
- data/lib/ruby_llm/mcp/responses/roots_list.rb +31 -0
- data/lib/ruby_llm/mcp/responses/sampling_create_message.rb +50 -0
- data/lib/ruby_llm/mcp/result.rb +21 -8
- data/lib/ruby_llm/mcp/roots.rb +45 -0
- data/lib/ruby_llm/mcp/sample.rb +148 -0
- data/lib/ruby_llm/mcp/{capabilities.rb → server_capabilities.rb} +1 -1
- data/lib/ruby_llm/mcp/tool.rb +35 -4
- data/lib/ruby_llm/mcp/transport.rb +58 -0
- data/lib/ruby_llm/mcp/transports/http_client.rb +26 -0
- data/lib/ruby_llm/mcp/{transport → transports}/sse.rb +25 -24
- data/lib/ruby_llm/mcp/{transport → transports}/stdio.rb +28 -26
- data/lib/ruby_llm/mcp/{transport → transports}/streamable_http.rb +25 -29
- data/lib/ruby_llm/mcp/transports/timeout.rb +32 -0
- data/lib/ruby_llm/mcp/version.rb +1 -1
- data/lib/ruby_llm/mcp.rb +60 -9
- metadata +27 -11
- data/lib/ruby_llm/mcp/requests/base.rb +0 -31
- data/lib/ruby_llm/mcp/requests/meta.rb +0 -30
# frozen_string_literal: true

module RubyLLM
  module MCP
    # Represents an MCP "sampling/createMessage" request received from the
    # server: it guards the request through the client's sampling callback,
    # picks a model, runs the chat, and reports the result back through the
    # coordinator.
    class Sample
      # Wraps the server-provided model name and "modelPreferences" hash,
      # exposing the priority fields and hinted model names.
      class Hint
        attr_reader :model, :cost_priority, :speed_priority, :intelligence_priority

        # model             - model name suggested by the server (may be nil).
        # model_preferences - raw "modelPreferences" hash from the request
        #                     params; may be nil when the server omits it.
        def initialize(model, model_preferences)
          @model = model
          @model_preferences = model_preferences

          # `&.fetch` returns nil when model_preferences itself is nil (the
          # default only applies to a missing key), so fall back to [] to keep
          # #hints safe to call.
          @hints = model_preferences&.fetch("hints", []) || []
          @cost_priority = model_preferences&.fetch("costPriority", nil)
          @speed_priority = model_preferences&.fetch("speedPriority", nil)
          @intelligence_priority = model_preferences&.fetch("intelligencePriority", nil)
        end

        # Model names hinted by the server, e.g. ["claude-3"].
        def hints
          @hints.map { |hint| hint["name"] }
        end

        def to_h
          {
            model: model,
            hints: hints,
            cost_priority: @cost_priority,
            speed_priority: @speed_priority,
            intelligence_priority: @intelligence_priority
          }
        end
      end

      REJECTED_MESSAGE = "Sampling request was rejected"

      attr_reader :model_preferences, :system_prompt, :max_tokens, :raw_messages

      # result      - the incoming MCP result/request carrying id and params.
      # coordinator - coordinator used to send responses back to the server.
      def initialize(result, coordinator)
        params = result.params
        @id = result.id
        @coordinator = coordinator

        @raw_messages = params["messages"] || []
        @model_preferences = Hint.new(params["model"], params["modelPreferences"])
        @system_prompt = params["systemPrompt"]
        @max_tokens = params["maxTokens"]
      end

      # Runs the full sampling flow. Returns early (without responding) when
      # the callback guard rejects or no model can be determined; the guard
      # and model lookup send their own error responses.
      def execute
        return unless callback_guard_success?

        model = preferred_model
        return unless model

        chat_message = chat(model)
        @coordinator.sampling_create_message_response(
          id: @id, message: chat_message, model: model
        )
      end

      # All message content texts joined with newlines; memoized.
      def message
        @message ||= raw_messages.map { |message| message.fetch("content")&.fetch("text") }.join("\n")
      end

      def to_h
        {
          id: @id,
          model_preferences: @model_preferences.to_h,
          system_prompt: @system_prompt,
          max_tokens: @max_tokens
        }
      end

      alias to_json to_h

      private

      # Returns true when processing may continue. When a sampling callback is
      # registered and rejects the request (or raises), an error response is
      # sent to the server and false is returned.
      def callback_guard_success?
        return true unless @coordinator.client.sampling_callback_enabled?

        unless @coordinator.client.on[:sampling].call(self)
          @coordinator.error_response(id: @id, message: REJECTED_MESSAGE)
          return false
        end

        true
      rescue StandardError => e
        RubyLLM::MCP.logger.error("Error in callback guard: #{e.message}, #{e.backtrace.join("\n")}")
        @coordinator.error_response(id: @id, message: "Error executing sampling request")
        false
      end

      # Builds a chat with the optional system prompt plus all raw messages
      # and returns the completion.
      def chat(model)
        chat = RubyLLM::Chat.new(
          model: model
        )
        if system_prompt
          formatted_system_message = create_message(system_message)
          chat.add_message(formatted_system_message)
        end
        raw_messages.each { |message| chat.add_message(create_message(message)) }

        chat.complete
      end

      # Resolves the preferred model from configuration; the configured value
      # may be a static model name or a callable receiving the Hint. On error,
      # sends an error response and returns false (falsy, halting #execute).
      def preferred_model
        @preferred_model ||= begin
          model = RubyLLM::MCP.config.sampling.preferred_model
          if model.respond_to?(:call)
            model.call(model_preferences)
          else
            model
          end
        end
      rescue StandardError => e
        RubyLLM::MCP.logger.error("Error in preferred model: #{e.message}, #{e.backtrace.join("\n")}")
        @coordinator.error_response(id: @id, message: "Failed to determine preferred model: #{e.message}")
        false
      end

      # Converts a raw MCP message hash into a RubyLLM::Message.
      def create_message(message)
        role = message["role"]
        content = create_content_for_message(message["content"])

        RubyLLM::Message.new({ role: role, content: content })
      end

      # Maps MCP content ("text", "image", "audio") onto MCP::Content;
      # raises InvalidFormatError for any other content type.
      def create_content_for_message(content)
        case content["type"]
        when "text"
          MCP::Content.new(text: content["text"])
        when "image", "audio"
          attachment = MCP::Attachment.new(content["data"], content["mimeType"])
          MCP::Content.new(text: nil, attachments: [attachment])
        else
          raise RubyLLM::MCP::Errors::InvalidFormatError.new(message: "Invalid content type: #{content['type']}")
        end
      end

      # Synthesizes a raw message hash carrying the system prompt.
      def system_message
        {
          "role" => "system",
          "content" => { "type" => "text", "text" => system_prompt }
        }
      end
    end
  end
end
data/lib/ruby_llm/mcp/tool.rb
CHANGED
@@ -12,16 +12,28 @@ module RubyLLM
|
|
12
12
|
@idempotent_hint = annotation["idempotentHint"] || false
|
13
13
|
@open_world_hint = annotation["openWorldHint"] || true
|
14
14
|
end
|
15
|
+
|
16
|
+
def to_h
|
17
|
+
{
|
18
|
+
title: @title,
|
19
|
+
readOnlyHint: @read_only_hint,
|
20
|
+
destructiveHint: @destructive_hint,
|
21
|
+
idempotentHint: @idempotent_hint,
|
22
|
+
openWorldHint: @open_world_hint
|
23
|
+
}
|
24
|
+
end
|
15
25
|
end
|
16
26
|
|
17
27
|
class Tool < RubyLLM::Tool
|
18
|
-
attr_reader :name, :description, :parameters, :coordinator, :tool_response
|
28
|
+
attr_reader :name, :description, :parameters, :coordinator, :tool_response, :with_prefix
|
19
29
|
|
20
|
-
def initialize(coordinator, tool_response)
|
30
|
+
def initialize(coordinator, tool_response, with_prefix: false)
|
21
31
|
super()
|
22
32
|
@coordinator = coordinator
|
23
33
|
|
24
|
-
@
|
34
|
+
@with_prefix = with_prefix
|
35
|
+
@name = format_name(tool_response["name"])
|
36
|
+
@mcp_name = tool_response["name"]
|
25
37
|
@description = tool_response["description"].to_s
|
26
38
|
@parameters = create_parameters(tool_response["inputSchema"])
|
27
39
|
@annotations = tool_response["annotations"] ? Annotation.new(tool_response["annotations"]) : nil
|
@@ -33,7 +45,7 @@ module RubyLLM
|
|
33
45
|
|
34
46
|
def execute(**params)
|
35
47
|
result = @coordinator.execute_tool(
|
36
|
-
name: @
|
48
|
+
name: @mcp_name,
|
37
49
|
parameters: params
|
38
50
|
)
|
39
51
|
|
@@ -54,6 +66,17 @@ module RubyLLM
|
|
54
66
|
end
|
55
67
|
end
|
56
68
|
|
69
|
+
def to_h
|
70
|
+
{
|
71
|
+
name: @name,
|
72
|
+
description: @description,
|
73
|
+
parameters: @parameters.to_h,
|
74
|
+
annotations: @annotations&.to_h
|
75
|
+
}
|
76
|
+
end
|
77
|
+
|
78
|
+
alias to_json to_h
|
79
|
+
|
57
80
|
private
|
58
81
|
|
59
82
|
def create_parameters(input_schema)
|
@@ -136,6 +159,14 @@ module RubyLLM
|
|
136
159
|
resource.to_content
|
137
160
|
end
|
138
161
|
end
|
162
|
+
|
163
|
+
def format_name(name)
|
164
|
+
if @with_prefix
|
165
|
+
"#{@coordinator.name}_#{name}"
|
166
|
+
else
|
167
|
+
name
|
168
|
+
end
|
169
|
+
end
|
139
170
|
end
|
140
171
|
end
|
141
172
|
end
|
# frozen_string_literal: true

module RubyLLM
  module MCP
    # Facade over the concrete transport implementations (SSE, stdio,
    # streamable HTTP). Resolves the transport class from a registry by
    # symbol, builds it lazily, and delegates the wire-level operations.
    class Transport
      class << self
        # Registry of transport_type Symbol => transport class.
        def transports
          @transports ||= {}
        end

        # Registers (or overrides) a transport class for the given type.
        def register_transport(transport_type, transport_class)
          transports[transport_type] = transport_class
        end
      end

      extend Forwardable

      register_transport(:sse, RubyLLM::MCP::Transports::SSE)
      register_transport(:stdio, RubyLLM::MCP::Transports::Stdio)
      register_transport(:streamable, RubyLLM::MCP::Transports::StreamableHTTP)
      register_transport(:streamable_http, RubyLLM::MCP::Transports::StreamableHTTP)

      attr_reader :transport_type, :coordinator, :config, :pid

      # transport_type - Symbol key into the registry (e.g. :stdio).
      # coordinator    - coordinator passed through to the transport.
      # config:        - keyword options splatted into the transport's new.
      def initialize(transport_type, coordinator, config:)
        @transport_type = transport_type
        @coordinator = coordinator
        @config = config
        # Remember the creating process so a fork can be detected later.
        @pid = Process.pid
      end

      def_delegators :transport_protocol, :request, :alive?, :close, :start, :set_protocol_version

      # Lazily builds and memoizes the underlying transport. After a fork the
      # inherited transport's sockets/threads belong to the parent process, so
      # rebuild it and ask the coordinator to restart.
      def transport_protocol
        if @pid != Process.pid
          @pid = Process.pid
          # Assign the rebuilt transport to the memoization ivar itself;
          # assigning to an unused ivar would leave the stale pre-fork
          # transport in place via the `||=` below.
          @transport_protocol = build_transport
          coordinator.restart_transport
        end

        @transport_protocol ||= build_transport
      end

      private

      # Instantiates the registered transport class, raising
      # Errors::InvalidTransportType for unknown types.
      def build_transport
        unless RubyLLM::MCP::Transport.transports.key?(transport_type)
          supported_types = RubyLLM::MCP::Transport.transports.keys.join(", ")
          message = "Invalid transport type: :#{transport_type}. Supported types are #{supported_types}"
          raise Errors::InvalidTransportType.new(message: message)
        end

        transport_klass = RubyLLM::MCP::Transport.transports[transport_type]
        transport_klass.new(coordinator: coordinator, **config)
      end
    end
  end
end
# frozen_string_literal: true

require "httpx"

module RubyLLM
  module MCP
    module Transports
      # Provides a shared HTTPX session for the HTTP-based transports.
      # Sessions are memoized per thread so connections are never shared
      # across threads.
      class HTTPClient
        CONNECTION_KEY = :ruby_llm_mcp_client_connection

        class << self
          # Returns this thread's memoized HTTPX session, building one on
          # first access.
          def connection
            Thread.current[CONNECTION_KEY] ||= build_connection
          end

          # Builds a fresh HTTPX session using the pool limits from the
          # global RubyLLM::MCP configuration.
          def build_connection
            pool_options = {
              max_connections: RubyLLM::MCP.config.max_connections,
              pool_timeout: RubyLLM::MCP.config.pool_timeout
            }
            HTTPX.with(pool_options: pool_options)
          end
        end
      end
    end
  end
end
@@ -8,11 +8,13 @@ require "securerandom"
|
|
8
8
|
|
9
9
|
module RubyLLM
|
10
10
|
module MCP
|
11
|
-
module
|
11
|
+
module Transports
|
12
12
|
class SSE
|
13
|
+
include Timeout
|
14
|
+
|
13
15
|
attr_reader :headers, :id, :coordinator
|
14
16
|
|
15
|
-
def initialize(url
|
17
|
+
def initialize(url:, coordinator:, request_timeout:, headers: {})
|
16
18
|
@event_url = url
|
17
19
|
@messages_url = nil
|
18
20
|
@coordinator = coordinator
|
@@ -36,13 +38,10 @@ module RubyLLM
|
|
36
38
|
@pending_requests = {}
|
37
39
|
@pending_mutex = Mutex.new
|
38
40
|
@connection_mutex = Mutex.new
|
39
|
-
@running =
|
41
|
+
@running = false
|
40
42
|
@sse_thread = nil
|
41
43
|
|
42
44
|
RubyLLM::MCP.logger.info "Initializing SSE transport to #{@event_url} with client ID #{@client_id}"
|
43
|
-
|
44
|
-
# Start the SSE listener thread
|
45
|
-
start_sse_listener
|
46
45
|
end
|
47
46
|
|
48
47
|
def request(body, add_id: true, wait_for_response: true) # rubocop:disable Metrics/MethodLength
|
@@ -60,7 +59,8 @@ module RubyLLM
|
|
60
59
|
end
|
61
60
|
|
62
61
|
begin
|
63
|
-
http_client =
|
62
|
+
http_client = HTTPClient.connection.with(timeout: { request_timeout: @request_timeout / 1000 },
|
63
|
+
headers: @headers)
|
64
64
|
response = http_client.post(@messages_url, body: JSON.generate(body))
|
65
65
|
|
66
66
|
unless response.status == 200
|
@@ -83,16 +83,13 @@ module RubyLLM
|
|
83
83
|
return unless wait_for_response
|
84
84
|
|
85
85
|
begin
|
86
|
-
|
86
|
+
with_timeout(@request_timeout / 1000, request_id: request_id) do
|
87
87
|
response_queue.pop
|
88
88
|
end
|
89
|
-
rescue
|
89
|
+
rescue RubyLLM::MCP::Errors::TimeoutError => e
|
90
90
|
@pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
|
91
91
|
RubyLLM::MCP.logger.error "SSE request timeout (ID: #{request_id}) after #{@request_timeout / 1000} seconds"
|
92
|
-
raise
|
93
|
-
message: "Request timed out after #{@request_timeout / 1000} seconds",
|
94
|
-
request_id: request_id
|
95
|
-
)
|
92
|
+
raise e
|
96
93
|
end
|
97
94
|
end
|
98
95
|
|
@@ -100,6 +97,13 @@ module RubyLLM
|
|
100
97
|
@running
|
101
98
|
end
|
102
99
|
|
100
|
+
def start
|
101
|
+
return if @running
|
102
|
+
|
103
|
+
@running = true
|
104
|
+
start_sse_listener
|
105
|
+
end
|
106
|
+
|
103
107
|
def close
|
104
108
|
RubyLLM::MCP.logger.info "Closing SSE transport connection"
|
105
109
|
@running = false
|
@@ -107,6 +111,10 @@ module RubyLLM
|
|
107
111
|
@sse_thread = nil
|
108
112
|
end
|
109
113
|
|
114
|
+
def set_protocol_version(version)
|
115
|
+
@protocol_version = version
|
116
|
+
end
|
117
|
+
|
110
118
|
private
|
111
119
|
|
112
120
|
def start_sse_listener
|
@@ -125,7 +133,7 @@ module RubyLLM
|
|
125
133
|
end
|
126
134
|
@sse_thread.abort_on_exception = true
|
127
135
|
|
128
|
-
|
136
|
+
with_timeout(@request_timeout / 1000) do
|
129
137
|
endpoint = response_queue.pop
|
130
138
|
set_message_endpoint(endpoint)
|
131
139
|
end
|
@@ -179,7 +187,7 @@ module RubyLLM
|
|
179
187
|
sleep 1
|
180
188
|
end
|
181
189
|
|
182
|
-
def process_event(raw_event)
|
190
|
+
def process_event(raw_event)
|
183
191
|
# Return if we believe that are getting a partial event
|
184
192
|
return if raw_event[:data].nil?
|
185
193
|
|
@@ -209,15 +217,8 @@ module RubyLLM
|
|
209
217
|
request_id = event["id"]&.to_s
|
210
218
|
result = RubyLLM::MCP::Result.new(event)
|
211
219
|
|
212
|
-
|
213
|
-
|
214
|
-
return
|
215
|
-
end
|
216
|
-
|
217
|
-
if result.request?
|
218
|
-
coordinator.process_request(result) if coordinator.alive?
|
219
|
-
return
|
220
|
-
end
|
220
|
+
result = @coordinator.process_result(result)
|
221
|
+
return if result.nil?
|
221
222
|
|
222
223
|
@pending_mutex.synchronize do
|
223
224
|
# You can receieve duplicate events for the same request id, and we will ignore thoses
|
@@ -7,11 +7,13 @@ require "securerandom"
|
|
7
7
|
|
8
8
|
module RubyLLM
|
9
9
|
module MCP
|
10
|
-
module
|
10
|
+
module Transports
|
11
11
|
class Stdio
|
12
|
+
include Timeout
|
13
|
+
|
12
14
|
attr_reader :command, :stdin, :stdout, :stderr, :id, :coordinator
|
13
15
|
|
14
|
-
def initialize(command
|
16
|
+
def initialize(command:, request_timeout:, coordinator:, args: [], env: {})
|
15
17
|
@request_timeout = request_timeout
|
16
18
|
@command = command
|
17
19
|
@coordinator = coordinator
|
@@ -23,11 +25,9 @@ module RubyLLM
|
|
23
25
|
@id_mutex = Mutex.new
|
24
26
|
@pending_requests = {}
|
25
27
|
@pending_mutex = Mutex.new
|
26
|
-
@running =
|
28
|
+
@running = false
|
27
29
|
@reader_thread = nil
|
28
30
|
@stderr_thread = nil
|
29
|
-
|
30
|
-
start_process
|
31
31
|
end
|
32
32
|
|
33
33
|
def request(body, add_id: true, wait_for_response: true)
|
@@ -58,15 +58,14 @@ module RubyLLM
|
|
58
58
|
return unless wait_for_response
|
59
59
|
|
60
60
|
begin
|
61
|
-
|
61
|
+
with_timeout(@request_timeout / 1000, request_id: request_id) do
|
62
62
|
response_queue.pop
|
63
63
|
end
|
64
|
-
rescue
|
64
|
+
rescue RubyLLM::MCP::Errors::TimeoutError => e
|
65
65
|
@pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
|
66
|
-
|
67
|
-
|
68
|
-
|
69
|
-
)
|
66
|
+
log_message = "Stdio request timeout (ID: #{request_id}) after #{@request_timeout / 1000} seconds"
|
67
|
+
RubyLLM::MCP.logger.error(log_message)
|
68
|
+
raise e
|
70
69
|
end
|
71
70
|
end
|
72
71
|
|
@@ -74,6 +73,11 @@ module RubyLLM
|
|
74
73
|
@running
|
75
74
|
end
|
76
75
|
|
76
|
+
def start
|
77
|
+
start_process unless @running
|
78
|
+
@running = true
|
79
|
+
end
|
80
|
+
|
77
81
|
def close # rubocop:disable Metrics/MethodLength
|
78
82
|
@running = false
|
79
83
|
|
@@ -121,6 +125,10 @@ module RubyLLM
|
|
121
125
|
@stderr_thread = nil
|
122
126
|
end
|
123
127
|
|
128
|
+
def set_protocol_version(version)
|
129
|
+
@protocol_version = version
|
130
|
+
end
|
131
|
+
|
124
132
|
private
|
125
133
|
|
126
134
|
def start_process
|
@@ -199,22 +207,16 @@ module RubyLLM
|
|
199
207
|
response = JSON.parse(line)
|
200
208
|
request_id = response["id"]&.to_s
|
201
209
|
result = RubyLLM::MCP::Result.new(response)
|
202
|
-
|
203
210
|
RubyLLM::MCP.logger.debug "Result Received: #{result.inspect}"
|
204
|
-
|
205
|
-
|
206
|
-
|
207
|
-
|
208
|
-
|
209
|
-
|
210
|
-
|
211
|
-
|
212
|
-
|
213
|
-
@pending_mutex.synchronize do
|
214
|
-
if result.matching_id?(request_id) && @pending_requests.key?(request_id)
|
215
|
-
response_queue = @pending_requests.delete(request_id)
|
216
|
-
response_queue&.push(result)
|
217
|
-
end
|
211
|
+
|
212
|
+
result = @coordinator.process_result(result)
|
213
|
+
return if result.nil?
|
214
|
+
|
215
|
+
# Handle regular responses (tool calls, etc.)
|
216
|
+
@pending_mutex.synchronize do
|
217
|
+
if result.matching_id?(request_id) && @pending_requests.key?(request_id)
|
218
|
+
response_queue = @pending_requests.delete(request_id)
|
219
|
+
response_queue&.push(result)
|
218
220
|
end
|
219
221
|
end
|
220
222
|
rescue JSON::ParserError => e
|
@@ -8,7 +8,7 @@ require "securerandom"
|
|
8
8
|
|
9
9
|
module RubyLLM
|
10
10
|
module MCP
|
11
|
-
module
|
11
|
+
module Transports
|
12
12
|
# Configuration options for reconnection behavior
|
13
13
|
class ReconnectionOptions
|
14
14
|
attr_reader :max_reconnection_delay, :initial_reconnection_delay,
|
@@ -40,10 +40,12 @@ module RubyLLM
|
|
40
40
|
|
41
41
|
# Main StreamableHTTP transport class
|
42
42
|
class StreamableHTTP
|
43
|
+
include Timeout
|
44
|
+
|
43
45
|
attr_reader :session_id, :protocol_version, :coordinator
|
44
46
|
|
45
47
|
def initialize( # rubocop:disable Metrics/ParameterLists
|
46
|
-
url
|
48
|
+
url:,
|
47
49
|
request_timeout:,
|
48
50
|
coordinator:,
|
49
51
|
headers: {},
|
@@ -110,6 +112,12 @@ module RubyLLM
|
|
110
112
|
@abort_controller = false
|
111
113
|
end
|
112
114
|
|
115
|
+
def set_protocol_version(version)
|
116
|
+
@protocol_version = version
|
117
|
+
end
|
118
|
+
|
119
|
+
private
|
120
|
+
|
113
121
|
def terminate_session
|
114
122
|
return unless @session_id
|
115
123
|
|
@@ -139,12 +147,6 @@ module RubyLLM
|
|
139
147
|
end
|
140
148
|
end
|
141
149
|
|
142
|
-
def set_protocol_version(version)
|
143
|
-
@protocol_version = version
|
144
|
-
end
|
145
|
-
|
146
|
-
private
|
147
|
-
|
148
150
|
def handle_httpx_error_response!(response, context:, allow_eof_for_sse: false)
|
149
151
|
return false unless response.is_a?(HTTPX::ErrorResponse)
|
150
152
|
|
@@ -200,7 +202,7 @@ module RubyLLM
|
|
200
202
|
end
|
201
203
|
|
202
204
|
def create_connection
|
203
|
-
client =
|
205
|
+
client = HTTPClient.connection.with(
|
204
206
|
timeout: {
|
205
207
|
connect_timeout: 10,
|
206
208
|
read_timeout: @request_timeout / 1000,
|
@@ -257,7 +259,7 @@ module RubyLLM
|
|
257
259
|
def create_connection_with_streaming_callbacks(request_id)
|
258
260
|
buffer = +""
|
259
261
|
|
260
|
-
client =
|
262
|
+
client = HTTPClient.connection.plugin(:callbacks).on_response_body_chunk do |request, _response, chunk|
|
261
263
|
next unless @running && !@abort_controller
|
262
264
|
|
263
265
|
RubyLLM::MCP.logger.debug "Received chunk: #{chunk.bytesize} bytes for #{request.uri}"
|
@@ -562,19 +564,14 @@ module RubyLLM
|
|
562
564
|
result = RubyLLM::MCP::Result.new(event_data, session_id: @session_id)
|
563
565
|
RubyLLM::MCP.logger.debug "SSE Result Received: #{result.inspect}"
|
564
566
|
|
565
|
-
|
566
|
-
if result.
|
567
|
-
|
568
|
-
|
569
|
-
|
570
|
-
|
571
|
-
|
572
|
-
|
573
|
-
if request_id
|
574
|
-
@pending_mutex.synchronize do
|
575
|
-
response_queue = @pending_requests.delete(request_id)
|
576
|
-
response_queue&.push(result)
|
577
|
-
end
|
567
|
+
result = @coordinator.process_result(result)
|
568
|
+
return if result.nil?
|
569
|
+
|
570
|
+
request_id = result.id&.to_s
|
571
|
+
if request_id
|
572
|
+
@pending_mutex.synchronize do
|
573
|
+
response_queue = @pending_requests.delete(request_id)
|
574
|
+
response_queue&.push(result)
|
578
575
|
end
|
579
576
|
end
|
580
577
|
rescue JSON::ParserError => e
|
@@ -591,15 +588,14 @@ module RubyLLM
|
|
591
588
|
end
|
592
589
|
|
593
590
|
def wait_for_response_with_timeout(request_id, response_queue)
|
594
|
-
|
591
|
+
with_timeout(@request_timeout / 1000, request_id: request_id) do
|
595
592
|
response_queue.pop
|
596
593
|
end
|
597
|
-
rescue
|
594
|
+
rescue RubyLLM::MCP::Errors::TimeoutError => e
|
595
|
+
log_message = "StreamableHTTP request timeout (ID: #{request_id}) after #{@request_timeout / 1000} seconds"
|
596
|
+
RubyLLM::MCP.logger.error(log_message)
|
598
597
|
@pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
|
599
|
-
raise
|
600
|
-
message: "Request timed out after #{@request_timeout / 1000} seconds",
|
601
|
-
request_id: request_id
|
602
|
-
)
|
598
|
+
raise e
|
603
599
|
end
|
604
600
|
|
605
601
|
def cleanup_sse_resources
|
# frozen_string_literal: true

module RubyLLM
  module MCP
    module Transports
      # Mixin giving transports a thread-based timeout helper for blocking
      # waits (e.g. popping a response queue).
      module Timeout
        # Runs the given block on a worker thread and waits up to +seconds+.
        # Returns the block's value, re-raises any StandardError the block
        # raised, or raises RubyLLM::MCP::Errors::TimeoutError (tagged with
        # +request_id+) when the deadline passes.
        def with_timeout(seconds, request_id: nil)
          captured_value = nil
          captured_error = nil

          runner = Thread.new do
            captured_value = yield
          rescue StandardError => e
            captured_error = e
          end

          # Thread#join returns nil when the wait times out.
          unless runner.join(seconds)
            # Best-effort stop; killing the thread can leave shared resources
            # in an inconsistent state.
            runner.kill
            raise RubyLLM::MCP::Errors::TimeoutError.new(
              message: "Request timed out after #{seconds} seconds",
              request_id: request_id
            )
          end

          raise captured_error if captured_error

          captured_value
        end
      end
    end
  end
end