ruby_llm-mcp 0.3.0 → 0.4.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. checksums.yaml +4 -4
  2. data/README.md +121 -2
  3. data/lib/ruby_llm/mcp/capabilities.rb +22 -2
  4. data/lib/ruby_llm/mcp/client.rb +104 -136
  5. data/lib/ruby_llm/mcp/configuration.rb +66 -0
  6. data/lib/ruby_llm/mcp/coordinator.rb +276 -0
  7. data/lib/ruby_llm/mcp/error.rb +34 -0
  8. data/lib/ruby_llm/mcp/errors.rb +38 -3
  9. data/lib/ruby_llm/mcp/logging.rb +16 -0
  10. data/lib/ruby_llm/mcp/parameter.rb +5 -2
  11. data/lib/ruby_llm/mcp/progress.rb +33 -0
  12. data/lib/ruby_llm/mcp/prompt.rb +20 -13
  13. data/lib/ruby_llm/mcp/providers/anthropic/complex_parameter_support.rb +7 -3
  14. data/lib/ruby_llm/mcp/providers/gemini/complex_parameter_support.rb +8 -4
  15. data/lib/ruby_llm/mcp/providers/openai/complex_parameter_support.rb +8 -4
  16. data/lib/ruby_llm/mcp/requests/base.rb +3 -3
  17. data/lib/ruby_llm/mcp/requests/cancelled_notification.rb +32 -0
  18. data/lib/ruby_llm/mcp/requests/completion_prompt.rb +3 -3
  19. data/lib/ruby_llm/mcp/requests/completion_resource.rb +3 -3
  20. data/lib/ruby_llm/mcp/requests/initialization.rb +24 -18
  21. data/lib/ruby_llm/mcp/requests/initialize_notification.rb +20 -0
  22. data/lib/ruby_llm/mcp/requests/logging_set_level.rb +28 -0
  23. data/lib/ruby_llm/mcp/requests/meta.rb +30 -0
  24. data/lib/ruby_llm/mcp/requests/ping.rb +20 -0
  25. data/lib/ruby_llm/mcp/requests/ping_response.rb +28 -0
  26. data/lib/ruby_llm/mcp/requests/prompt_call.rb +3 -3
  27. data/lib/ruby_llm/mcp/requests/prompt_list.rb +1 -1
  28. data/lib/ruby_llm/mcp/requests/resource_list.rb +1 -1
  29. data/lib/ruby_llm/mcp/requests/resource_read.rb +4 -4
  30. data/lib/ruby_llm/mcp/requests/resource_template_list.rb +1 -1
  31. data/lib/ruby_llm/mcp/requests/resources_subscribe.rb +30 -0
  32. data/lib/ruby_llm/mcp/requests/tool_call.rb +6 -3
  33. data/lib/ruby_llm/mcp/requests/tool_list.rb +17 -11
  34. data/lib/ruby_llm/mcp/resource.rb +28 -7
  35. data/lib/ruby_llm/mcp/resource_template.rb +17 -12
  36. data/lib/ruby_llm/mcp/result.rb +90 -0
  37. data/lib/ruby_llm/mcp/tool.rb +36 -10
  38. data/lib/ruby_llm/mcp/transport/sse.rb +82 -75
  39. data/lib/ruby_llm/mcp/transport/stdio.rb +33 -17
  40. data/lib/ruby_llm/mcp/transport/streamable_http.rb +647 -0
  41. data/lib/ruby_llm/mcp/version.rb +1 -1
  42. data/lib/ruby_llm/mcp.rb +18 -0
  43. data/lib/tasks/release.rake +23 -0
  44. metadata +22 -51
  45. data/lib/ruby_llm/mcp/requests/notification.rb +0 -14
  46. data/lib/ruby_llm/mcp/transport/streamable.rb +0 -299
@@ -2,7 +2,7 @@
2
2
 
3
3
  require "json"
4
4
  require "uri"
5
- require "faraday"
5
+ require "httpx"
6
6
  require "timeout"
7
7
  require "securerandom"
8
8
 
@@ -10,11 +10,13 @@ module RubyLLM
10
10
  module MCP
11
11
  module Transport
12
12
  class SSE
13
- attr_reader :headers, :id
13
+ attr_reader :headers, :id, :coordinator
14
14
 
15
- def initialize(url, headers: {})
15
+ def initialize(url, coordinator:, request_timeout:, headers: {})
16
16
  @event_url = url
17
17
  @messages_url = nil
18
+ @coordinator = coordinator
19
+ @request_timeout = request_timeout
18
20
 
19
21
  uri = URI.parse(url)
20
22
  @root_url = "#{uri.scheme}://#{uri.host}"
@@ -23,6 +25,7 @@ module RubyLLM
23
25
  @client_id = SecureRandom.uuid
24
26
  @headers = headers.merge({
25
27
  "Accept" => "text/event-stream",
28
+ "Content-Type" => "application/json",
26
29
  "Cache-Control" => "no-cache",
27
30
  "Connection" => "keep-alive",
28
31
  "X-CLIENT-ID" => @client_id
@@ -36,19 +39,19 @@ module RubyLLM
36
39
  @running = true
37
40
  @sse_thread = nil
38
41
 
42
+ RubyLLM::MCP.logger.info "Initializing SSE transport to #{@event_url} with client ID #{@client_id}"
43
+
39
44
  # Start the SSE listener thread
40
45
  start_sse_listener
41
46
  end
42
47
 
43
48
  def request(body, add_id: true, wait_for_response: true) # rubocop:disable Metrics/MethodLength
44
- # Generate a unique request ID
45
49
  if add_id
46
50
  @id_mutex.synchronize { @id_counter += 1 }
47
51
  request_id = @id_counter
48
52
  body["id"] = request_id
49
53
  end
50
54
 
51
- # Create a queue for this request's response
52
55
  response_queue = Queue.new
53
56
  if wait_for_response
54
57
  @pending_mutex.synchronize do
@@ -56,28 +59,26 @@ module RubyLLM
56
59
  end
57
60
  end
58
61
 
59
- # Send the request using Faraday
60
62
  begin
61
- conn = Faraday.new do |f|
62
- f.options.timeout = @request_timeout / 1000
63
- f.options.open_timeout = 5
64
- end
65
-
66
- response = conn.post(@messages_url) do |req|
67
- @headers.each do |key, value|
68
- req.headers[key] = value
69
- end
70
- req.headers["Content-Type"] = "application/json"
71
- req.body = JSON.generate(body)
72
- end
63
+ http_client = HTTPX.with(timeout: { request_timeout: @request_timeout / 1000 }, headers: @headers)
64
+ response = http_client.post(@messages_url, body: JSON.generate(body))
73
65
 
74
66
  unless response.status == 200
75
67
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
76
- raise "Failed to request #{@messages_url}: #{response.status} - #{response.body}"
68
+ RubyLLM::MCP.logger.error "SSE request failed: #{response.status} - #{response.body}"
69
+ raise Errors::TransportError.new(
70
+ message: "Failed to request #{@messages_url}: #{response.status} - #{response.body}",
71
+ code: response.status
72
+ )
77
73
  end
78
74
  rescue StandardError => e
79
75
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
80
- raise e
76
+ RubyLLM::MCP.logger.error "SSE request error (ID: #{request_id}): #{e.message}"
77
+ raise RubyLLM::MCP::Errors::TransportError.new(
78
+ message: e.message,
79
+ code: -1,
80
+ error: e
81
+ )
81
82
  end
82
83
  return unless wait_for_response
83
84
 
@@ -87,8 +88,10 @@ module RubyLLM
87
88
  end
88
89
  rescue Timeout::Error
89
90
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
90
- raise RubyLLM::MCP::Errors::TimeoutError.new(
91
- message: "Request timed out after #{@request_timeout / 1000} seconds"
91
+ RubyLLM::MCP.logger.error "SSE request timeout (ID: #{request_id}) after #{@request_timeout / 1000} seconds"
92
+ raise Errors::TimeoutError.new(
93
+ message: "Request timed out after #{@request_timeout / 1000} seconds",
94
+ request_id: request_id
92
95
  )
93
96
  end
94
97
  end
@@ -98,6 +101,7 @@ module RubyLLM
98
101
  end
99
102
 
100
103
  def close
104
+ RubyLLM::MCP.logger.info "Closing SSE transport connection"
101
105
  @running = false
102
106
  @sse_thread&.join(1) # Give the thread a second to clean up
103
107
  @sse_thread = nil
@@ -109,6 +113,8 @@ module RubyLLM
109
113
  @connection_mutex.synchronize do
110
114
  return if sse_thread_running?
111
115
 
116
+ RubyLLM::MCP.logger.info "Starting SSE listener thread"
117
+
112
118
  response_queue = Queue.new
113
119
  @pending_mutex.synchronize do
114
120
  @pending_requests["endpoint"] = response_queue
@@ -119,10 +125,10 @@ module RubyLLM
119
125
  end
120
126
  @sse_thread.abort_on_exception = true
121
127
 
122
- endpoint = response_queue.pop
123
- set_message_endpoint(endpoint)
124
-
125
- @pending_mutex.synchronize { @pending_requests.delete("endpoint") }
128
+ Timeout.timeout(100) do
129
+ endpoint = response_queue.pop
130
+ set_message_endpoint(endpoint)
131
+ end
126
132
  end
127
133
  end
128
134
 
@@ -134,6 +140,8 @@ module RubyLLM
134
140
  else
135
141
  endpoint
136
142
  end
143
+
144
+ RubyLLM::MCP.logger.info "SSE message endpoint set to: #{@messages_url}"
137
145
  end
138
146
 
139
147
  def sse_thread_running?
@@ -142,84 +150,83 @@ module RubyLLM
142
150
 
143
151
  def listen_for_events
144
152
  stream_events_from_server
145
- rescue Faraday::Error => e
146
- handle_connection_error("SSE connection failed", e)
147
153
  rescue StandardError => e
148
154
  handle_connection_error("SSE connection error", e)
149
155
  end
150
156
 
151
157
  def stream_events_from_server
152
- buffer = +""
153
- create_sse_connection.get(@event_url) do |req|
154
- setup_request_headers(req)
155
- setup_streaming_callback(req, buffer)
156
- end
157
- end
158
-
159
- def create_sse_connection
160
- Faraday.new do |f|
161
- f.options.timeout = 300 # 5 minutes
162
- f.response :raise_error # raise errors on non-200 responses
163
- end
164
- end
165
-
166
- def setup_request_headers(request)
167
- @headers.each do |key, value|
168
- request.headers[key] = value
169
- end
170
- end
171
-
172
- def setup_streaming_callback(request, buffer)
173
- request.options.on_data = proc do |chunk, _size, _env|
174
- buffer << chunk
175
- process_buffer_events(buffer)
176
- end
177
- end
158
+ sse_client = HTTPX.plugin(:stream)
159
+ sse_client = sse_client.with(
160
+ headers: @headers
161
+ )
162
+ response = sse_client.get(@event_url, stream: true)
163
+ response.each_line do |event_line|
164
+ unless @running
165
+ response.body.close
166
+ next
167
+ end
178
168
 
179
- def process_buffer_events(buffer)
180
- while (event = extract_event(buffer))
181
- event_data, buffer = event
182
- process_event(event_data) if event_data
169
+ event = parse_event(event_line)
170
+ process_event(event)
183
171
  end
184
172
  end
185
173
 
186
174
  def handle_connection_error(message, error)
187
- puts "#{message}: #{error.message}. Reconnecting in 3 seconds..."
188
- sleep 3
175
+ return unless @running
176
+
177
+ error_message = "#{message}: #{error.message}"
178
+ RubyLLM::MCP.logger.error "#{error_message}. Reconnecting in 1 seconds..."
179
+ sleep 1
189
180
  end
190
181
 
191
- def process_event(raw_event)
182
+ def process_event(raw_event) # rubocop:disable Metrics/MethodLength
183
+ # Return if we believe that we are getting a partial event
192
184
  return if raw_event[:data].nil?
193
185
 
194
186
  if raw_event[:event] == "endpoint"
195
187
  request_id = "endpoint"
196
188
  event = raw_event[:data]
189
+ return if event.nil?
190
+
191
+ RubyLLM::MCP.logger.debug "Received endpoint event: #{event}"
192
+ @pending_mutex.synchronize do
193
+ response_queue = @pending_requests.delete(request_id)
194
+ response_queue&.push(event)
195
+ end
197
196
  else
198
197
  event = begin
199
198
  JSON.parse(raw_event[:data])
200
- rescue StandardError
199
+ rescue JSON::ParserError => e
200
+ # We can sometimes get partial endpoint events, so we will ignore them
201
+ unless @endpoint.nil?
202
+ RubyLLM::MCP.logger.info "Failed to parse SSE event data: #{raw_event[:data]} - #{e.message}"
203
+ end
204
+
201
205
  nil
202
206
  end
203
207
  return if event.nil?
204
208
 
205
209
  request_id = event["id"]&.to_s
206
- end
210
+ result = RubyLLM::MCP::Result.new(event)
207
211
 
208
- @pending_mutex.synchronize do
209
- if request_id && @pending_requests.key?(request_id)
210
- response_queue = @pending_requests.delete(request_id)
211
- response_queue&.push(event)
212
+ if result.notification?
213
+ coordinator.process_notification(result)
214
+ return
212
215
  end
213
- end
214
- rescue JSON::ParserError => e
215
- puts "Error parsing event data: #{e.message}"
216
- end
217
216
 
218
- def extract_event(buffer)
219
- return nil unless buffer.include?("\n\n")
217
+ if result.request?
218
+ coordinator.process_request(result) if coordinator.alive?
219
+ return
220
+ end
220
221
 
221
- raw, rest = buffer.split("\n\n", 2)
222
- [parse_event(raw), rest]
222
+ @pending_mutex.synchronize do
223
+ # You can receive duplicate events for the same request id, and we will ignore those
224
+ if result.matching_id?(request_id) && @pending_requests.key?(request_id)
225
+ response_queue = @pending_requests.delete(request_id)
226
+ response_queue&.push(result)
227
+ end
228
+ end
229
+ end
223
230
  end
224
231
 
225
232
  def parse_event(raw)
@@ -9,11 +9,12 @@ module RubyLLM
9
9
  module MCP
10
10
  module Transport
11
11
  class Stdio
12
- attr_reader :command, :stdin, :stdout, :stderr, :id
12
+ attr_reader :command, :stdin, :stdout, :stderr, :id, :coordinator
13
13
 
14
- def initialize(command, request_timeout:, args: [], env: {})
14
+ def initialize(command, request_timeout:, coordinator:, args: [], env: {})
15
15
  @request_timeout = request_timeout
16
16
  @command = command
17
+ @coordinator = coordinator
17
18
  @args = args
18
19
  @env = env || {}
19
20
  @client_id = SecureRandom.uuid
@@ -44,12 +45,14 @@ module RubyLLM
44
45
  end
45
46
 
46
47
  begin
47
- @stdin.puts(JSON.generate(body))
48
+ body = JSON.generate(body)
49
+ RubyLLM::MCP.logger.debug "Sending Request: #{body}"
50
+ @stdin.puts(body)
48
51
  @stdin.flush
49
52
  rescue IOError, Errno::EPIPE => e
50
53
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
51
54
  restart_process
52
- raise "Failed to send request: #{e.message}"
55
+ raise RubyLLM::MCP::Errors::TransportError.new(message: e.message, error: e)
53
56
  end
54
57
 
55
58
  return unless wait_for_response
@@ -61,7 +64,8 @@ module RubyLLM
61
64
  rescue Timeout::Error
62
65
  @pending_mutex.synchronize { @pending_requests.delete(request_id.to_s) }
63
66
  raise RubyLLM::MCP::Errors::TimeoutError.new(
64
- message: "Request timed out after #{@request_timeout / 1000} seconds"
67
+ message: "Request timed out after #{@request_timeout / 1000} seconds",
68
+ request_id: request_id
65
69
  )
66
70
  end
67
71
  end
@@ -133,7 +137,7 @@ module RubyLLM
133
137
  end
134
138
 
135
139
  def restart_process
136
- puts "Process connection lost. Restarting..."
140
+ RubyLLM::MCP.logger.error "Process connection lost. Restarting..."
137
141
  start_process
138
142
  end
139
143
 
@@ -152,11 +156,11 @@ module RubyLLM
152
156
 
153
157
  process_response(line.strip)
154
158
  rescue IOError, Errno::EPIPE => e
155
- puts "Reader error: #{e.message}. Restarting in 1 second..."
159
+ RubyLLM::MCP.logger.error "Reader error: #{e.message}. Restarting in 1 second..."
156
160
  sleep 1
157
161
  restart_process if @running
158
162
  rescue StandardError => e
159
- puts "Error in reader thread: #{e.message}, #{e.backtrace.join("\n")}"
163
+ RubyLLM::MCP.logger.error "Error in reader thread: #{e.message}, #{e.backtrace.join("\n")}"
160
164
  sleep 1
161
165
  end
162
166
  end
@@ -177,12 +181,12 @@ module RubyLLM
177
181
  line = @stderr.gets
178
182
  next unless line && !line.strip.empty?
179
183
 
180
- puts "STDERR: #{line.strip}"
184
+ RubyLLM::MCP.logger.info(line.strip)
181
185
  rescue IOError, Errno::EPIPE => e
182
- puts "Stderr reader error: #{e.message}"
186
+ RubyLLM::MCP.logger.error "Stderr reader error: #{e.message}"
183
187
  sleep 1
184
188
  rescue StandardError => e
185
- puts "Error in stderr thread: #{e.message}"
189
+ RubyLLM::MCP.logger.error "Error in stderr thread: #{e.message}"
186
190
  sleep 1
187
191
  end
188
192
  end
@@ -194,15 +198,27 @@ module RubyLLM
194
198
  def process_response(line)
195
199
  response = JSON.parse(line)
196
200
  request_id = response["id"]&.to_s
197
-
198
- @pending_mutex.synchronize do
199
- if request_id && @pending_requests.key?(request_id)
200
- response_queue = @pending_requests.delete(request_id)
201
- response_queue&.push(response)
201
+ result = RubyLLM::MCP::Result.new(response)
202
+
203
+ RubyLLM::MCP.logger.debug "Result Received: #{result.inspect}"
204
+ # Handle notifications (process but don't return - continue processing other responses)
205
+ if result.notification?
206
+ coordinator.process_notification(result)
207
+ # Don't return here - continue to process potential tool responses
208
+ elsif result.request?
209
+ coordinator.process_request(result)
210
+ nil
211
+ else
212
+ # Handle regular responses (tool calls, etc.)
213
+ @pending_mutex.synchronize do
214
+ if result.matching_id?(request_id) && @pending_requests.key?(request_id)
215
+ response_queue = @pending_requests.delete(request_id)
216
+ response_queue&.push(result)
217
+ end
202
218
  end
203
219
  end
204
220
  rescue JSON::ParserError => e
205
- RubyLLM.logger.error("Error parsing response as JSON: #{e.message}\nRaw response: #{line}")
221
+ RubyLLM::MCP.logger.error("Error parsing response as JSON: #{e.message}\nRaw response: #{line}")
206
222
  end
207
223
  end
208
224
  end