ruby_llm-mcp 0.4.1 → 0.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +296 -25
- data/lib/ruby_llm/chat.rb +2 -1
- data/lib/ruby_llm/mcp/client.rb +32 -13
- data/lib/ruby_llm/mcp/configuration.rb +123 -3
- data/lib/ruby_llm/mcp/coordinator.rb +108 -115
- data/lib/ruby_llm/mcp/errors.rb +3 -1
- data/lib/ruby_llm/mcp/notification_handler.rb +84 -0
- data/lib/ruby_llm/mcp/{requests/cancelled_notification.rb → notifications/cancelled.rb} +2 -2
- data/lib/ruby_llm/mcp/{requests/initialize_notification.rb → notifications/initialize.rb} +7 -3
- data/lib/ruby_llm/mcp/notifications/roots_list_change.rb +26 -0
- data/lib/ruby_llm/mcp/parameter.rb +19 -1
- data/lib/ruby_llm/mcp/progress.rb +3 -1
- data/lib/ruby_llm/mcp/prompt.rb +18 -0
- data/lib/ruby_llm/mcp/railtie.rb +20 -0
- data/lib/ruby_llm/mcp/requests/initialization.rb +8 -4
- data/lib/ruby_llm/mcp/requests/ping.rb +6 -2
- data/lib/ruby_llm/mcp/requests/prompt_list.rb +10 -2
- data/lib/ruby_llm/mcp/requests/resource_list.rb +12 -2
- data/lib/ruby_llm/mcp/requests/resource_template_list.rb +12 -2
- data/lib/ruby_llm/mcp/requests/shared/meta.rb +32 -0
- data/lib/ruby_llm/mcp/requests/shared/pagination.rb +17 -0
- data/lib/ruby_llm/mcp/requests/tool_call.rb +1 -1
- data/lib/ruby_llm/mcp/requests/tool_list.rb +10 -2
- data/lib/ruby_llm/mcp/resource.rb +17 -0
- data/lib/ruby_llm/mcp/response_handler.rb +58 -0
- data/lib/ruby_llm/mcp/responses/error.rb +33 -0
- data/lib/ruby_llm/mcp/{requests/ping_response.rb → responses/ping.rb} +2 -2
- data/lib/ruby_llm/mcp/responses/roots_list.rb +31 -0
- data/lib/ruby_llm/mcp/responses/sampling_create_message.rb +50 -0
- data/lib/ruby_llm/mcp/result.rb +21 -8
- data/lib/ruby_llm/mcp/roots.rb +45 -0
- data/lib/ruby_llm/mcp/sample.rb +148 -0
- data/lib/ruby_llm/mcp/{capabilities.rb → server_capabilities.rb} +1 -1
- data/lib/ruby_llm/mcp/tool.rb +35 -4
- data/lib/ruby_llm/mcp/transport.rb +58 -0
- data/lib/ruby_llm/mcp/transports/http_client.rb +26 -0
- data/lib/ruby_llm/mcp/{transport → transports}/sse.rb +25 -24
- data/lib/ruby_llm/mcp/{transport → transports}/stdio.rb +28 -26
- data/lib/ruby_llm/mcp/{transport → transports}/streamable_http.rb +25 -29
- data/lib/ruby_llm/mcp/transports/timeout.rb +32 -0
- data/lib/ruby_llm/mcp/version.rb +1 -1
- data/lib/ruby_llm/mcp.rb +50 -9
- metadata +23 -12
- data/lib/ruby_llm/mcp/requests/base.rb +0 -31
- data/lib/ruby_llm/mcp/requests/meta.rb +0 -30
- data/lib/tasks/release.rake +0 -23
data/lib/ruby_llm/mcp/requests/resource_template_list.rb

@@ -3,11 +3,21 @@
 module RubyLLM
   module MCP
     module Requests
-      class ResourceTemplateList
+      class ResourceTemplateList
+        include Shared::Pagination
+
+        def initialize(coordinator, cursor: nil)
+          @coordinator = coordinator
+          @cursor = cursor
+        end
+
         def call
-
+          body = merge_pagination(resource_template_list_body)
+          @coordinator.request(body)
         end
 
+        private
+
         def resource_template_list_body
           {
             jsonrpc: "2.0",
data/lib/ruby_llm/mcp/requests/shared/meta.rb (new file)

@@ -0,0 +1,32 @@
+# frozen_string_literal: true
+
+require "securerandom"
+
+module RubyLLM
+  module MCP
+    module Requests
+      module Shared
+        module Meta
+          def merge_meta(body)
+            meta = {}
+            meta.merge!(progress_token) if @coordinator.client.tracking_progress?
+
+            body[:params] ||= {}
+            body[:params].merge!({ _meta: meta }) unless meta.empty?
+            body
+          end
+
+          private
+
+          def progress_token
+            { progressToken: generate_progress_token }
+          end
+
+          def generate_progress_token
+            SecureRandom.uuid
+          end
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/mcp/requests/shared/pagination.rb (new file)

@@ -0,0 +1,17 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    module Requests
+      module Shared
+        module Pagination
+          def merge_pagination(body)
+            body[:params] ||= {}
+            body[:params].merge!({ cursor: @cursor }) if @cursor
+            body
+          end
+        end
+      end
+    end
+  end
+end
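The two `Shared` mixins above are small helpers that the request classes use to fold optional JSON-RPC fields into a request body: `Meta` adds a `_meta.progressToken` when the client is tracking progress, and `Pagination` adds a `cursor` when one was supplied. A minimal sketch of `Pagination` in use, with a hypothetical request class and cursor value (not part of the gem):

```ruby
# Hypothetical list request mixing in Shared::Pagination, mirroring ToolList below.
class ExampleList
  include RubyLLM::MCP::Requests::Shared::Pagination

  def initialize(cursor: nil)
    @cursor = cursor # read by merge_pagination
  end

  def body
    merge_pagination({ jsonrpc: "2.0", method: "tools/list", params: {} })
  end
end

ExampleList.new(cursor: "abc123").body
# => { jsonrpc: "2.0", method: "tools/list", params: { cursor: "abc123" } }
ExampleList.new.body
# => { jsonrpc: "2.0", method: "tools/list", params: {} }
```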
data/lib/ruby_llm/mcp/requests/tool_list.rb

@@ -3,9 +3,17 @@
 module RubyLLM
   module MCP
     module Requests
-      class ToolList
+      class ToolList
+        include Shared::Pagination
+
+        def initialize(coordinator, cursor: nil)
+          @coordinator = coordinator
+          @cursor = cursor
+        end
+
         def call
-
+          body = merge_pagination(tool_list_body)
+          @coordinator.request(body)
         end
 
         private
data/lib/ruby_llm/mcp/resource.rb

@@ -31,6 +31,10 @@ module RubyLLM
         @content = @content_response["text"] || @content_response["blob"]
       end
 
+      def content_loaded?
+        !@content.nil?
+      end
+
       def subscribe!
         if @coordinator.capabilities.resource_subscribe?
           @coordinator.resources_subscribe(uri: @uri)

@@ -67,6 +71,19 @@ module RubyLLM
         end
       end
 
+      def to_h
+        {
+          uri: @uri,
+          name: @name,
+          description: @description,
+          mime_type: @mime_type,
+          contented_loaded: content_loaded?,
+          content: @content
+        }
+      end
+
+      alias to_json to_h
+
       private
 
       def content_type
data/lib/ruby_llm/mcp/response_handler.rb (new file)

@@ -0,0 +1,58 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    class ResponseHandler
+      attr_reader :coordinator, :client
+
+      def initialize(coordinator)
+        @coordinator = coordinator
+        @client = coordinator.client
+      end
+
+      def execute(result) # rubocop:disable Naming/PredicateMethod
+        if result.ping?
+          coordinator.ping_response(id: result.id)
+          true
+        elsif result.roots?
+          handle_roots_response(result)
+          true
+        elsif result.sampling?
+          handle_sampling_response(result)
+          true
+        else
+          handle_unknown_request(result)
+          RubyLLM::MCP.logger.error("MCP client was sent unknown method type and could not respond: #{result.inspect}")
+          false
+        end
+      end
+
+      private
+
+      def handle_roots_response(result)
+        if client.roots.active?
+          coordinator.roots_list_response(id: result.id, roots: client.roots)
+        else
+          coordinator.error_response(id: result.id, message: "Roots are not enabled", code: -32_000)
+        end
+      end
+
+      def handle_sampling_response(result)
+        unless MCP.config.sampling.enabled?
+          RubyLLM::MCP.logger.info("Sampling is disabled, yet server requested sampling")
+          coordinator.error_response(id: result.id, message: "Sampling is disabled", code: -32_000)
+          return
+        end
+
+        RubyLLM::MCP.logger.info("Sampling response: #{result.inspect}")
+        Sample.new(result, coordinator).execute
+      end
+
+      def handle_unknown_request(result)
+        coordinator.error_response(id: result.id,
+                                   message: "Unknown method and could not respond: #{result.method}",
+                                   code: -32_000)
+      end
+    end
+  end
+end
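`ResponseHandler` is the new dispatch point for requests that the MCP server initiates toward the client (`ping`, `roots/list`, `sampling/createMessage`). A rough sketch of how an incoming message might be routed through it, assuming a `coordinator` instance is in scope; the exact wiring inside `Coordinator` is not shown in this diff:

```ruby
# Assumed wiring sketch: build a Result from a raw server request and dispatch it.
raw = { "jsonrpc" => "2.0", "id" => 7, "method" => "ping" }
result = RubyLLM::MCP::Result.new(raw)

if result.request?
  RubyLLM::MCP::ResponseHandler.new(coordinator).execute(result)
  # ping -> coordinator.ping_response(id: 7), returns true
end
```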
data/lib/ruby_llm/mcp/responses/error.rb (new file)

@@ -0,0 +1,33 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    module Responses
+      class Error
+        def initialize(coordinator, id:, message:, code: -32_000)
+          @coordinator = coordinator
+          @id = id
+          @message = message
+          @code = code
+        end
+
+        def call
+          @coordinator.request(sampling_error_body, add_id: false, wait_for_response: false)
+        end
+
+        private
+
+        def sampling_error_body
+          {
+            jsonrpc: "2.0",
+            id: @id,
+            error: {
+              code: @code,
+              message: @message
+            }
+          }
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/mcp/responses/roots_list.rb (new file)

@@ -0,0 +1,31 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    module Responses
+      class RootsList
+        def initialize(coordinator, roots:, id:)
+          @coordinator = coordinator
+          @roots = roots
+          @id = id
+        end
+
+        def call
+          @coordinator.request(roots_list_body, add_id: false, wait_for_response: false)
+        end
+
+        private
+
+        def roots_list_body
+          {
+            jsonrpc: "2.0",
+            id: @id,
+            result: {
+              roots: @roots.to_request
+            }
+          }
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/mcp/responses/sampling_create_message.rb (new file)

@@ -0,0 +1,50 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    module Responses
+      class SamplingCreateMessage
+        def initialize(coordinator, id:, message:, model:)
+          @coordinator = coordinator
+          @id = id
+          @message = message
+          @model = model
+        end
+
+        def call
+          @coordinator.request(sampling_create_message_body, add_id: false, wait_for_response: false)
+        end
+
+        private
+
+        def sampling_create_message_body
+          {
+            jsonrpc: "2.0",
+            id: @id,
+            result: {
+              role: @message.role,
+              content: format_content(@message.content),
+              model: @model,
+              # TODO: We are going to assume it was a endTurn
+              # Look into getting RubyLLM to expose stopReason in message response
+              stopReason: "endTurn"
+            }
+          }
+        end
+
+        def format_content(content)
+          if content.is_a?(RubyLLM::Content)
+            if context.text.none? && content.attachments.any?
+              attachment = content.attachments.first
+              { type: attachment.type, data: attachment.content, mime_type: attachment.mime_type }
+            else
+              { type: "text", text: content.text }
+            end
+          else
+            { type: "text", text: content }
+          end
+        end
+      end
+    end
+  end
+end
data/lib/ruby_llm/mcp/result.rb (CHANGED)

@@ -12,7 +12,13 @@ module RubyLLM
     end
 
     class Result
-      attr_reader :
+      attr_reader :response, :session_id, :id, :method, :result, :params, :error, :next_cursor
+
+      REQUEST_METHODS = {
+        ping: "ping",
+        roots: "roots/list",
+        sampling: "sampling/createMessage"
+      }.freeze
 
       def initialize(response, session_id: nil)
         @response = response

@@ -24,6 +30,13 @@ module RubyLLM
         @error = response["error"] || {}
 
         @result_is_error = response.dig("result", "isError") || false
+        @next_cursor = response.dig("result", "nextCursor")
+      end
+
+      REQUEST_METHODS.each do |method_name, method_value|
+        define_method "#{method_name}?" do
+          @method == method_value
+        end
       end
 
       alias value result

@@ -47,23 +60,23 @@ module RubyLLM
      end
 
      def matching_id?(request_id)
-        @id&.to_s == request_id
-      end
-
-      def ping?
-        @method == "ping"
+        @id&.to_s == request_id.to_s
      end
 
      def notification?
        @method&.include?("notifications") || false
      end
 
+      def next_cursor?
+        !@next_cursor.nil?
+      end
+
      def request?
-
+        !@method.nil? && !notification? && @result.none? && @error.none?
      end
 
      def response?
-
+        !@id.nil? && (@result || @error.any?) && !@method
      end
 
      def success?
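The `REQUEST_METHODS` hash plus `define_method` replaces the hand-written `ping?` predicate with generated `ping?`, `roots?`, and `sampling?` methods. A small illustration with a made-up message (assuming `@result` and `@error` default to empty hashes, as the `none?` checks above imply):

```ruby
result = RubyLLM::MCP::Result.new(
  { "jsonrpc" => "2.0", "id" => "42", "method" => "sampling/createMessage", "params" => {} }
)

result.sampling?        # => true   (generated from REQUEST_METHODS)
result.request?         # => true   (has a method, no result or error payload)
result.matching_id?(42) # => true   (both sides are now compared with to_s)
```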
data/lib/ruby_llm/mcp/roots.rb (new file)

@@ -0,0 +1,45 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    class Roots
+      attr_reader :paths
+
+      def initialize(paths: [], coordinator: nil)
+        @paths = paths
+        @coordinator = coordinator
+      end
+
+      def active?
+        @paths.any?
+      end
+
+      def add(path)
+        @paths << path
+        @coordinator.roots_list_change_notification
+      end
+
+      def remove(path)
+        @paths.delete(path)
+        @coordinator.roots_list_change_notification
+      end
+
+      def to_request
+        @paths.map do |path|
+          name = File.basename(path, ".*")
+
+          {
+            uri: "file://#{path}",
+            name: name
+          }
+        end
+      end
+
+      def to_h
+        {
+          paths: to_request
+        }
+      end
+    end
+  end
+end
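`Roots` backs the new client-side `roots/list` capability; `to_request` produces the payload handed to `Responses::RootsList`. A short sketch with an example path (the coordinator argument is omitted here, so the change notifications fired by `add`/`remove` are not exercised):

```ruby
roots = RubyLLM::MCP::Roots.new(paths: ["/home/dev/my_project"])

roots.active?    # => true
roots.to_request # => [{ uri: "file:///home/dev/my_project", name: "my_project" }]
```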
data/lib/ruby_llm/mcp/sample.rb (new file)

@@ -0,0 +1,148 @@
+# frozen_string_literal: true
+
+module RubyLLM
+  module MCP
+    class Sample
+      class Hint
+        attr_reader :model, :cost_priority, :speed_priority, :intelligence_priority
+
+        def initialize(model, model_preferences)
+          @model = model
+          @model_preferences = model_preferences
+
+          @hints = model_preferences&.fetch("hints", [])
+          @cost_priority = model_preferences&.fetch("costPriority", nil)
+          @speed_priority = model_preferences&.fetch("speedPriority", nil)
+          @intelligence_priority = model_preferences&.fetch("intelligencePriority", nil)
+        end
+
+        def hints
+          @hints.map { |hint| hint["name"] }
+        end
+
+        def to_h
+          {
+            model: model,
+            hints: hints,
+            cost_priority: @cost_priority,
+            speed_priority: @speed_priority,
+            intelligence_priority: @intelligence_priority
+          }
+        end
+      end
+
+      REJECTED_MESSAGE = "Sampling request was rejected"
+
+      attr_reader :model_preferences, :system_prompt, :max_tokens, :raw_messages
+
+      def initialize(result, coordinator)
+        params = result.params
+        @id = result.id
+        @coordinator = coordinator
+
+        @raw_messages = params["messages"] || []
+        @model_preferences = Hint.new(params["model"], params["modelPreferences"])
+        @system_prompt = params["systemPrompt"]
+        @max_tokens = params["maxTokens"]
+      end
+
+      def execute
+        return unless callback_guard_success?
+
+        model = preferred_model
+        return unless model
+
+        chat_message = chat(model)
+        @coordinator.sampling_create_message_response(
+          id: @id, message: chat_message, model: model
+        )
+      end
+
+      def message
+        @message ||= raw_messages.map { |message| message.fetch("content")&.fetch("text") }.join("\n")
+      end
+
+      def to_h
+        {
+          id: @id,
+          model_preferences: @model_preferences.to_h,
+          system_prompt: @system_prompt,
+          max_tokens: @max_tokens
+        }
+      end
+
+      alias to_json to_h
+
+      private
+
+      def callback_guard_success?
+        return true unless @coordinator.client.sampling_callback_enabled?
+
+        unless @coordinator.client.on[:sampling].call(self)
+          @coordinator.error_response(id: @id, message: REJECTED_MESSAGE)
+          return false
+        end
+
+        true
+      rescue StandardError => e
+        RubyLLM::MCP.logger.error("Error in callback guard: #{e.message}, #{e.backtrace.join("\n")}")
+        @coordinator.error_response(id: @id, message: "Error executing sampling request")
+        false
+      end
+
+      def chat(model)
+        chat = RubyLLM::Chat.new(
+          model: model
+        )
+        if system_prompt
+          formated_system_message = create_message(system_message)
+          chat.add_message(formated_system_message)
+        end
+        raw_messages.each { |message| chat.add_message(create_message(message)) }
+
+        chat.complete
+      end
+
+      def preferred_model
+        @preferred_model ||= begin
+          model = RubyLLM::MCP.config.sampling.preferred_model
+          if model.respond_to?(:call)
+            model.call(model_preferences)
+          else
+            model
+          end
+        end
+      rescue StandardError => e
+        RubyLLM::MCP.logger.error("Error in preferred model: #{e.message}, #{e.backtrace.join("\n")}")
+        @coordinator.error_response(id: @id, message: "Failed to determine preferred model: #{e.message}")
+        false
+      end
+
+      def create_message(message)
+        role = message["role"]
+        content = create_content_for_message(message["content"])
+
+        RubyLLM::Message.new({ role: role, content: content })
+      end
+
+      def create_content_for_message(content)
+        case content["type"]
+        when "text"
+          MCP::Content.new(text: content["text"])
+        when "image", "audio"
+          attachment = MCP::Attachment.new(content["data"], content["mimeType"])
+          MCP::Content.new(text: nil, attachments: [attachment])
+        else
+          raise RubyLLM::MCP::Errors::InvalidFormatError.new(message: "Invalid content type: #{content['type']}")
+        end
+      end
+
+      def system_message
+        {
+          "role" => "system",
+          "content" => { "type" => "text", "text" => system_prompt }
+        }
+      end
+    end
+  end
+end
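`Sample` consumes the params of a server-initiated `sampling/createMessage` request, runs the prompt through `RubyLLM::Chat`, and replies via `Responses::SamplingCreateMessage`. The field names it reads follow the MCP sampling request shape; the values below are purely illustrative:

```ruby
# Illustrative sampling/createMessage params, matching the keys Sample#initialize reads.
params = {
  "messages" => [
    { "role" => "user", "content" => { "type" => "text", "text" => "Summarize the README" } }
  ],
  "modelPreferences" => {
    "hints"         => [{ "name" => "claude-3" }],
    "speedPriority" => 0.8
  },
  "systemPrompt" => "You are a concise assistant.",
  "maxTokens"    => 256
}
```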
data/lib/ruby_llm/mcp/tool.rb (CHANGED)

@@ -12,16 +12,28 @@ module RubyLLM
        @idempotent_hint = annotation["idempotentHint"] || false
        @open_world_hint = annotation["openWorldHint"] || true
      end
+
+      def to_h
+        {
+          title: @title,
+          readOnlyHint: @read_only_hint,
+          destructiveHint: @destructive_hint,
+          idempotentHint: @idempotent_hint,
+          openWorldHint: @open_world_hint
+        }
+      end
    end
 
    class Tool < RubyLLM::Tool
-      attr_reader :name, :description, :parameters, :coordinator, :tool_response
+      attr_reader :name, :description, :parameters, :coordinator, :tool_response, :with_prefix
 
-      def initialize(coordinator, tool_response)
+      def initialize(coordinator, tool_response, with_prefix: false)
        super()
        @coordinator = coordinator
 
-        @
+        @with_prefix = with_prefix
+        @name = format_name(tool_response["name"])
+        @mcp_name = tool_response["name"]
        @description = tool_response["description"].to_s
        @parameters = create_parameters(tool_response["inputSchema"])
        @annotations = tool_response["annotations"] ? Annotation.new(tool_response["annotations"]) : nil

@@ -33,7 +45,7 @@ module RubyLLM
 
      def execute(**params)
        result = @coordinator.execute_tool(
-          name: @
+          name: @mcp_name,
          parameters: params
        )
 

@@ -54,6 +66,17 @@ module RubyLLM
        end
      end
 
+      def to_h
+        {
+          name: @name,
+          description: @description,
+          parameters: @parameters.to_h,
+          annotations: @annotations&.to_h
+        }
+      end
+
+      alias to_json to_h
+
      private
 
      def create_parameters(input_schema)

@@ -136,6 +159,14 @@ module RubyLLM
          resource.to_content
        end
      end
+
+      def format_name(name)
+        if @with_prefix
+          "#{@coordinator.name}_#{name}"
+        else
+          name
+        end
+      end
    end
  end
end
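The new `with_prefix:` option namespaces tool names by the MCP server's name (via `@coordinator.name`), which avoids collisions when several servers expose a tool with the same name; the original server-side name is kept in `@mcp_name` and still used for the actual `tools/call`. A hedged sketch with an illustrative tool payload and an assumed in-scope `coordinator` whose name is `"github"`:

```ruby
tool_response = {
  "name"        => "search_issues",
  "description" => "Search issues",
  "inputSchema" => { "properties" => {} } # minimal schema, assumed acceptable to create_parameters
}

tool = RubyLLM::MCP::Tool.new(coordinator, tool_response, with_prefix: true)
tool.name # => "github_search_issues" (while tools/call still uses "search_issues")
```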