ruby_llm-mcp 0.3.1 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (45) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +121 -2
  3. data/lib/ruby_llm/mcp/capabilities.rb +22 -2
  4. data/lib/ruby_llm/mcp/client.rb +106 -18
  5. data/lib/ruby_llm/mcp/configuration.rb +66 -0
  6. data/lib/ruby_llm/mcp/coordinator.rb +197 -33
  7. data/lib/ruby_llm/mcp/error.rb +34 -0
  8. data/lib/ruby_llm/mcp/errors.rb +37 -4
  9. data/lib/ruby_llm/mcp/logging.rb +16 -0
  10. data/lib/ruby_llm/mcp/parameter.rb +2 -0
  11. data/lib/ruby_llm/mcp/progress.rb +33 -0
  12. data/lib/ruby_llm/mcp/prompt.rb +12 -5
  13. data/lib/ruby_llm/mcp/providers/anthropic/complex_parameter_support.rb +5 -2
  14. data/lib/ruby_llm/mcp/providers/gemini/complex_parameter_support.rb +6 -3
  15. data/lib/ruby_llm/mcp/providers/openai/complex_parameter_support.rb +6 -3
  16. data/lib/ruby_llm/mcp/requests/base.rb +3 -3
  17. data/lib/ruby_llm/mcp/requests/cancelled_notification.rb +32 -0
  18. data/lib/ruby_llm/mcp/requests/completion_prompt.rb +3 -3
  19. data/lib/ruby_llm/mcp/requests/completion_resource.rb +3 -3
  20. data/lib/ruby_llm/mcp/requests/initialization.rb +24 -18
  21. data/lib/ruby_llm/mcp/requests/initialize_notification.rb +15 -9
  22. data/lib/ruby_llm/mcp/requests/logging_set_level.rb +28 -0
  23. data/lib/ruby_llm/mcp/requests/meta.rb +30 -0
  24. data/lib/ruby_llm/mcp/requests/ping.rb +20 -0
  25. data/lib/ruby_llm/mcp/requests/ping_response.rb +28 -0
  26. data/lib/ruby_llm/mcp/requests/prompt_call.rb +3 -3
  27. data/lib/ruby_llm/mcp/requests/prompt_list.rb +1 -1
  28. data/lib/ruby_llm/mcp/requests/resource_list.rb +1 -1
  29. data/lib/ruby_llm/mcp/requests/resource_read.rb +4 -4
  30. data/lib/ruby_llm/mcp/requests/resource_template_list.rb +1 -1
  31. data/lib/ruby_llm/mcp/requests/resources_subscribe.rb +30 -0
  32. data/lib/ruby_llm/mcp/requests/tool_call.rb +6 -3
  33. data/lib/ruby_llm/mcp/requests/tool_list.rb +17 -11
  34. data/lib/ruby_llm/mcp/resource.rb +26 -5
  35. data/lib/ruby_llm/mcp/resource_template.rb +11 -6
  36. data/lib/ruby_llm/mcp/result.rb +90 -0
  37. data/lib/ruby_llm/mcp/tool.rb +28 -3
  38. data/lib/ruby_llm/mcp/transport/sse.rb +81 -75
  39. data/lib/ruby_llm/mcp/transport/stdio.rb +33 -17
  40. data/lib/ruby_llm/mcp/transport/streamable_http.rb +647 -0
  41. data/lib/ruby_llm/mcp/version.rb +1 -1
  42. data/lib/ruby_llm/mcp.rb +18 -0
  43. data/lib/tasks/release.rake +23 -0
  44. metadata +20 -50
  45. data/lib/ruby_llm/mcp/transport/streamable.rb +0 -299
@@ -2,7 +2,7 @@
2
2
 
3
3
  require "json"
4
4
  require "uri"
5
- require "faraday"
5
+ require "httpx"
6
6
  require "timeout"
7
7
  require "securerandom"
8
8
 
@@ -10,11 +10,12 @@ module RubyLLM
10
10
  module MCP
11
11
  module Transport
12
12
  class SSE
13
- attr_reader :headers, :id
13
+ attr_reader :headers, :id, :coordinator
14
14
 
15
- def initialize(url, headers: {}, request_timeout: 8000)
15
+ def initialize(url, coordinator:, request_timeout:, headers: {})
16
16
  @event_url = url
17
17
  @messages_url = nil
18
+ @coordinator = coordinator
18
19
  @request_timeout = request_timeout
19
20
 
20
21
  uri = URI.parse(url)
@@ -24,6 +25,7 @@ module RubyLLM
24
25
  @client_id = SecureRandom.uuid
25
26
  @headers = headers.merge({
26
27
  "Accept" => "text/event-stream",
28
+ "Content-Type" => "application/json",
27
29
  "Cache-Control" => "no-cache",
28
30
  "Connection" => "keep-alive",
29
31
  "X-CLIENT-ID" => @client_id
@@ -37,19 +39,19 @@ module RubyLLM
37
39
  @running = true
38
40
  @sse_thread = nil
39
41
 
42
+ RubyLLM::MCP.logger.info "Initializing SSE transport to #{@event_url} with client ID #{@client_id}"
43
+
40
44
  # Start the SSE listener thread
41
45
  start_sse_listener
42
46
  end
43
47
 
44
48
  def request(body, add_id: true, wait_for_response: true) # rubocop:disable Metrics/MethodLength
45
- # Generate a unique request ID
46
49
  if add_id
47
50
  @id_mutex.synchronize { @id_counter += 1 }
48
51
  request_id = @id_counter
49
52
  body["id"] = request_id
50
53
  end
51
54
 
52
- # Create a queue for this request's response
53
55
  response_queue = Queue.new
54
56
  if wait_for_response
55
57
  @pending_mutex.synchronize do
@@ -57,28 +59,26 @@ module RubyLLM
57
59
  end
58
60
  end
59
61
 
60
- # Send the request using Faraday
61
62
  begin
62
- conn = Faraday.new do |f|
63
- f.options.timeout = @request_timeout / 1000
64
- f.options.open_timeout = 5
65
- end
66
-
67
- response = conn.post(@messages_url) do |req|
68
- @headers.each do |key, value|
69
- req.headers[key] = value
70
- end
71
- req.headers["Content-Type"] = "application/json"
72
- req.body = JSON.generate(body)
73
- end
63
+ http_client = HTTPX.with(timeout: { request_timeout: @request_timeout / 1000 }, headers: @headers)
64
+ response = http_client.post(@messages_url, body: JSON.generate(body))
74
65
 
75
66
  unless response.status == 200
76
67
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
77
- raise "Failed to request #{@messages_url}: #{response.status} - #{response.body}"
68
+ RubyLLM::MCP.logger.error "SSE request failed: #{response.status} - #{response.body}"
69
+ raise Errors::TransportError.new(
70
+ message: "Failed to request #{@messages_url}: #{response.status} - #{response.body}",
71
+ code: response.status
72
+ )
78
73
  end
79
74
  rescue StandardError => e
80
75
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
81
- raise e
76
+ RubyLLM::MCP.logger.error "SSE request error (ID: #{request_id}): #{e.message}"
77
+ raise RubyLLM::MCP::Errors::TransportError.new(
78
+ message: e.message,
79
+ code: -1,
80
+ error: e
81
+ )
82
82
  end
83
83
  return unless wait_for_response
84
84
 
@@ -88,8 +88,10 @@ module RubyLLM
88
88
  end
89
89
  rescue Timeout::Error
90
90
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
91
- raise RubyLLM::MCP::Errors::TimeoutError.new(
92
- message: "Request timed out after #{@request_timeout / 1000} seconds"
91
+ RubyLLM::MCP.logger.error "SSE request timeout (ID: #{request_id}) after #{@request_timeout / 1000} seconds"
92
+ raise Errors::TimeoutError.new(
93
+ message: "Request timed out after #{@request_timeout / 1000} seconds",
94
+ request_id: request_id
93
95
  )
94
96
  end
95
97
  end
@@ -99,6 +101,7 @@ module RubyLLM
99
101
  end
100
102
 
101
103
  def close
104
+ RubyLLM::MCP.logger.info "Closing SSE transport connection"
102
105
  @running = false
103
106
  @sse_thread&.join(1) # Give the thread a second to clean up
104
107
  @sse_thread = nil
@@ -110,6 +113,8 @@ module RubyLLM
110
113
  @connection_mutex.synchronize do
111
114
  return if sse_thread_running?
112
115
 
116
+ RubyLLM::MCP.logger.info "Starting SSE listener thread"
117
+
113
118
  response_queue = Queue.new
114
119
  @pending_mutex.synchronize do
115
120
  @pending_requests["endpoint"] = response_queue
@@ -120,10 +125,10 @@ module RubyLLM
120
125
  end
121
126
  @sse_thread.abort_on_exception = true
122
127
 
123
- endpoint = response_queue.pop
124
- set_message_endpoint(endpoint)
125
-
126
- @pending_mutex.synchronize { @pending_requests.delete("endpoint") }
128
+ Timeout.timeout(100) do
129
+ endpoint = response_queue.pop
130
+ set_message_endpoint(endpoint)
131
+ end
127
132
  end
128
133
  end
129
134
 
@@ -135,6 +140,8 @@ module RubyLLM
135
140
  else
136
141
  endpoint
137
142
  end
143
+
144
+ RubyLLM::MCP.logger.info "SSE message endpoint set to: #{@messages_url}"
138
145
  end
139
146
 
140
147
  def sse_thread_running?
@@ -143,84 +150,83 @@ module RubyLLM
143
150
 
144
151
  def listen_for_events
145
152
  stream_events_from_server
146
- rescue Faraday::Error => e
147
- handle_connection_error("SSE connection failed", e)
148
153
  rescue StandardError => e
149
154
  handle_connection_error("SSE connection error", e)
150
155
  end
151
156
 
152
157
  def stream_events_from_server
153
- buffer = +""
154
- create_sse_connection.get(@event_url) do |req|
155
- setup_request_headers(req)
156
- setup_streaming_callback(req, buffer)
157
- end
158
- end
159
-
160
- def create_sse_connection
161
- Faraday.new do |f|
162
- f.options.timeout = 300 # 5 minutes
163
- f.response :raise_error # raise errors on non-200 responses
164
- end
165
- end
166
-
167
- def setup_request_headers(request)
168
- @headers.each do |key, value|
169
- request.headers[key] = value
170
- end
171
- end
172
-
173
- def setup_streaming_callback(request, buffer)
174
- request.options.on_data = proc do |chunk, _size, _env|
175
- buffer << chunk
176
- process_buffer_events(buffer)
177
- end
178
- end
158
+ sse_client = HTTPX.plugin(:stream)
159
+ sse_client = sse_client.with(
160
+ headers: @headers
161
+ )
162
+ response = sse_client.get(@event_url, stream: true)
163
+ response.each_line do |event_line|
164
+ unless @running
165
+ response.body.close
166
+ next
167
+ end
179
168
 
180
- def process_buffer_events(buffer)
181
- while (event = extract_event(buffer))
182
- event_data, buffer = event
183
- process_event(event_data) if event_data
169
+ event = parse_event(event_line)
170
+ process_event(event)
184
171
  end
185
172
  end
186
173
 
187
174
  def handle_connection_error(message, error)
188
- puts "#{message}: #{error.message}. Reconnecting in 3 seconds..."
189
- sleep 3
175
+ return unless @running
176
+
177
+ error_message = "#{message}: #{error.message}"
178
+ RubyLLM::MCP.logger.error "#{error_message}. Reconnecting in 1 seconds..."
179
+ sleep 1
190
180
  end
191
181
 
192
- def process_event(raw_event)
182
+ def process_event(raw_event) # rubocop:disable Metrics/MethodLength
183
+ # Return if we believe that we are getting a partial event
193
184
  return if raw_event[:data].nil?
194
185
 
195
186
  if raw_event[:event] == "endpoint"
196
187
  request_id = "endpoint"
197
188
  event = raw_event[:data]
189
+ return if event.nil?
190
+
191
+ RubyLLM::MCP.logger.debug "Received endpoint event: #{event}"
192
+ @pending_mutex.synchronize do
193
+ response_queue = @pending_requests.delete(request_id)
194
+ response_queue&.push(event)
195
+ end
198
196
  else
199
197
  event = begin
200
198
  JSON.parse(raw_event[:data])
201
- rescue StandardError
199
+ rescue JSON::ParserError => e
200
+ # We can sometimes get partial endpoint events, so we will ignore them
201
+ unless @endpoint.nil?
202
+ RubyLLM::MCP.logger.info "Failed to parse SSE event data: #{raw_event[:data]} - #{e.message}"
203
+ end
204
+
202
205
  nil
203
206
  end
204
207
  return if event.nil?
205
208
 
206
209
  request_id = event["id"]&.to_s
207
- end
210
+ result = RubyLLM::MCP::Result.new(event)
208
211
 
209
- @pending_mutex.synchronize do
210
- if request_id && @pending_requests.key?(request_id)
211
- response_queue = @pending_requests.delete(request_id)
212
- response_queue&.push(event)
212
+ if result.notification?
213
+ coordinator.process_notification(result)
214
+ return
213
215
  end
214
- end
215
- rescue JSON::ParserError => e
216
- puts "Error parsing event data: #{e.message}"
217
- end
218
216
 
219
- def extract_event(buffer)
220
- return nil unless buffer.include?("\n\n")
217
+ if result.request?
218
+ coordinator.process_request(result) if coordinator.alive?
219
+ return
220
+ end
221
221
 
222
- raw, rest = buffer.split("\n\n", 2)
223
- [parse_event(raw), rest]
222
+ @pending_mutex.synchronize do
223
+ # You can receive duplicate events for the same request id, and we will ignore those
224
+ if result.matching_id?(request_id) && @pending_requests.key?(request_id)
225
+ response_queue = @pending_requests.delete(request_id)
226
+ response_queue&.push(result)
227
+ end
228
+ end
229
+ end
224
230
  end
225
231
 
226
232
  def parse_event(raw)
@@ -9,11 +9,12 @@ module RubyLLM
9
9
  module MCP
10
10
  module Transport
11
11
  class Stdio
12
- attr_reader :command, :stdin, :stdout, :stderr, :id
12
+ attr_reader :command, :stdin, :stdout, :stderr, :id, :coordinator
13
13
 
14
- def initialize(command, request_timeout:, args: [], env: {})
14
+ def initialize(command, request_timeout:, coordinator:, args: [], env: {})
15
15
  @request_timeout = request_timeout
16
16
  @command = command
17
+ @coordinator = coordinator
17
18
  @args = args
18
19
  @env = env || {}
19
20
  @client_id = SecureRandom.uuid
@@ -44,12 +45,14 @@ module RubyLLM
44
45
  end
45
46
 
46
47
  begin
47
- @stdin.puts(JSON.generate(body))
48
+ body = JSON.generate(body)
49
+ RubyLLM::MCP.logger.debug "Sending Request: #{body}"
50
+ @stdin.puts(body)
48
51
  @stdin.flush
49
52
  rescue IOError, Errno::EPIPE => e
50
53
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
51
54
  restart_process
52
- raise "Failed to send request: #{e.message}"
55
+ raise RubyLLM::MCP::Errors::TransportError.new(message: e.message, error: e)
53
56
  end
54
57
 
55
58
  return unless wait_for_response
@@ -61,7 +64,8 @@ module RubyLLM
61
64
  rescue Timeout::Error
62
65
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
63
66
  raise RubyLLM::MCP::Errors::TimeoutError.new(
64
- message: "Request timed out after #{@request_timeout / 1000} seconds"
67
+ message: "Request timed out after #{@request_timeout / 1000} seconds",
68
+ request_id: request_id
65
69
  )
66
70
  end
67
71
  end
@@ -133,7 +137,7 @@ module RubyLLM
133
137
  end
134
138
 
135
139
  def restart_process
136
- puts "Process connection lost. Restarting..."
140
+ RubyLLM::MCP.logger.error "Process connection lost. Restarting..."
137
141
  start_process
138
142
  end
139
143
 
@@ -152,11 +156,11 @@ module RubyLLM
152
156
 
153
157
  process_response(line.strip)
154
158
  rescue IOError, Errno::EPIPE => e
155
- puts "Reader error: #{e.message}. Restarting in 1 second..."
159
+ RubyLLM::MCP.logger.error "Reader error: #{e.message}. Restarting in 1 second..."
156
160
  sleep 1
157
161
  restart_process if @running
158
162
  rescue StandardError => e
159
- puts "Error in reader thread: #{e.message}, #{e.backtrace.join("\n")}"
163
+ RubyLLM::MCP.logger.error "Error in reader thread: #{e.message}, #{e.backtrace.join("\n")}"
160
164
  sleep 1
161
165
  end
162
166
  end
@@ -177,12 +181,12 @@ module RubyLLM
177
181
  line = @stderr.gets
178
182
  next unless line && !line.strip.empty?
179
183
 
180
- puts "STDERR: #{line.strip}"
184
+ RubyLLM::MCP.logger.info(line.strip)
181
185
  rescue IOError, Errno::EPIPE => e
182
- puts "Stderr reader error: #{e.message}"
186
+ RubyLLM::MCP.logger.error "Stderr reader error: #{e.message}"
183
187
  sleep 1
184
188
  rescue StandardError => e
185
- puts "Error in stderr thread: #{e.message}"
189
+ RubyLLM::MCP.logger.error "Error in stderr thread: #{e.message}"
186
190
  sleep 1
187
191
  end
188
192
  end
@@ -194,15 +198,27 @@ module RubyLLM
194
198
  def process_response(line)
195
199
  response = JSON.parse(line)
196
200
  request_id = response["id"]&.to_s
197
-
198
- @pending_mutex.synchronize do
199
- if request_id && @pending_requests.key?(request_id)
200
- response_queue = @pending_requests.delete(request_id)
201
- response_queue&.push(response)
201
+ result = RubyLLM::MCP::Result.new(response)
202
+
203
+ RubyLLM::MCP.logger.debug "Result Received: #{result.inspect}"
204
+ # Handle notifications (process but don't return - continue processing other responses)
205
+ if result.notification?
206
+ coordinator.process_notification(result)
207
+ # Don't return here - continue to process potential tool responses
208
+ elsif result.request?
209
+ coordinator.process_request(result)
210
+ nil
211
+ else
212
+ # Handle regular responses (tool calls, etc.)
213
+ @pending_mutex.synchronize do
214
+ if result.matching_id?(request_id) && @pending_requests.key?(request_id)
215
+ response_queue = @pending_requests.delete(request_id)
216
+ response_queue&.push(result)
217
+ end
202
218
  end
203
219
  end
204
220
  rescue JSON::ParserError => e
205
- RubyLLM.logger.error("Error parsing response as JSON: #{e.message}\nRaw response: #{line}")
221
+ RubyLLM::MCP.logger.error("Error parsing response as JSON: #{e.message}\nRaw response: #{line}")
206
222
  end
207
223
  end
208
224
  end