llm.rb 4.7.0 → 4.9.0
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +335 -587
- data/data/anthropic.json +770 -0
- data/data/deepseek.json +75 -0
- data/data/google.json +1050 -0
- data/data/openai.json +1421 -0
- data/data/xai.json +792 -0
- data/data/zai.json +330 -0
- data/lib/llm/agent.rb +42 -41
- data/lib/llm/bot.rb +1 -263
- data/lib/llm/buffer.rb +7 -0
- data/lib/llm/{session → context}/deserializer.rb +4 -3
- data/lib/llm/context.rb +292 -0
- data/lib/llm/cost.rb +26 -0
- data/lib/llm/error.rb +8 -0
- data/lib/llm/eventstream/parser.rb +0 -5
- data/lib/llm/function/array.rb +61 -0
- data/lib/llm/function/fiber_group.rb +91 -0
- data/lib/llm/function/task_group.rb +89 -0
- data/lib/llm/function/thread_group.rb +94 -0
- data/lib/llm/function.rb +75 -10
- data/lib/llm/mcp/command.rb +108 -0
- data/lib/llm/mcp/error.rb +31 -0
- data/lib/llm/mcp/pipe.rb +82 -0
- data/lib/llm/mcp/rpc.rb +118 -0
- data/lib/llm/mcp/transport/stdio.rb +85 -0
- data/lib/llm/mcp.rb +102 -0
- data/lib/llm/message.rb +13 -11
- data/lib/llm/model.rb +115 -0
- data/lib/llm/prompt.rb +17 -7
- data/lib/llm/provider.rb +60 -32
- data/lib/llm/providers/anthropic/error_handler.rb +1 -1
- data/lib/llm/providers/anthropic/files.rb +3 -3
- data/lib/llm/providers/anthropic/models.rb +1 -1
- data/lib/llm/providers/anthropic/request_adapter.rb +20 -3
- data/lib/llm/providers/anthropic/response_adapter/models.rb +13 -0
- data/lib/llm/providers/anthropic/response_adapter.rb +2 -0
- data/lib/llm/providers/anthropic.rb +21 -5
- data/lib/llm/providers/deepseek.rb +10 -3
- data/lib/llm/providers/{gemini → google}/audio.rb +6 -6
- data/lib/llm/providers/{gemini → google}/error_handler.rb +20 -5
- data/lib/llm/providers/{gemini → google}/files.rb +11 -11
- data/lib/llm/providers/{gemini → google}/images.rb +7 -7
- data/lib/llm/providers/{gemini → google}/models.rb +5 -5
- data/lib/llm/providers/{gemini → google}/request_adapter/completion.rb +7 -3
- data/lib/llm/providers/{gemini → google}/request_adapter.rb +1 -1
- data/lib/llm/providers/{gemini → google}/response_adapter/completion.rb +7 -7
- data/lib/llm/providers/{gemini → google}/response_adapter/embedding.rb +1 -1
- data/lib/llm/providers/{gemini → google}/response_adapter/file.rb +1 -1
- data/lib/llm/providers/{gemini → google}/response_adapter/files.rb +1 -1
- data/lib/llm/providers/{gemini → google}/response_adapter/image.rb +1 -1
- data/lib/llm/providers/google/response_adapter/models.rb +13 -0
- data/lib/llm/providers/{gemini → google}/response_adapter/web_search.rb +2 -2
- data/lib/llm/providers/{gemini → google}/response_adapter.rb +8 -8
- data/lib/llm/providers/{gemini → google}/stream_parser.rb +3 -3
- data/lib/llm/providers/{gemini.rb → google.rb} +41 -26
- data/lib/llm/providers/llamacpp.rb +10 -3
- data/lib/llm/providers/ollama/error_handler.rb +1 -1
- data/lib/llm/providers/ollama/models.rb +1 -1
- data/lib/llm/providers/ollama/response_adapter/models.rb +13 -0
- data/lib/llm/providers/ollama/response_adapter.rb +2 -0
- data/lib/llm/providers/ollama.rb +19 -4
- data/lib/llm/providers/openai/error_handler.rb +18 -3
- data/lib/llm/providers/openai/files.rb +3 -3
- data/lib/llm/providers/openai/images.rb +17 -11
- data/lib/llm/providers/openai/models.rb +1 -1
- data/lib/llm/providers/openai/response_adapter/completion.rb +9 -1
- data/lib/llm/providers/openai/response_adapter/models.rb +13 -0
- data/lib/llm/providers/openai/response_adapter/responds.rb +9 -1
- data/lib/llm/providers/openai/response_adapter.rb +2 -0
- data/lib/llm/providers/openai/responses.rb +16 -1
- data/lib/llm/providers/openai/stream_parser.rb +2 -0
- data/lib/llm/providers/openai.rb +28 -6
- data/lib/llm/providers/xai/images.rb +7 -6
- data/lib/llm/providers/xai.rb +10 -3
- data/lib/llm/providers/zai.rb +9 -2
- data/lib/llm/registry.rb +81 -0
- data/lib/llm/schema/enum.rb +16 -0
- data/lib/llm/schema/parser.rb +109 -0
- data/lib/llm/schema.rb +5 -0
- data/lib/llm/server_tool.rb +5 -5
- data/lib/llm/session.rb +10 -1
- data/lib/llm/tool/param.rb +1 -1
- data/lib/llm/tool.rb +86 -5
- data/lib/llm/tracer/langsmith.rb +144 -0
- data/lib/llm/tracer/logger.rb +9 -1
- data/lib/llm/tracer/null.rb +8 -0
- data/lib/llm/tracer/telemetry.rb +98 -78
- data/lib/llm/tracer.rb +108 -4
- data/lib/llm/usage.rb +5 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +40 -6
- data/llm.gemspec +45 -8
- metadata +87 -28
- data/lib/llm/providers/gemini/response_adapter/models.rb +0 -15
data/lib/llm/function/array.rb
ADDED
@@ -0,0 +1,61 @@
+ # frozen_string_literal: true
+
+ class LLM::Function
+   ##
+   # The {LLM::Function::Array} module extends the array
+   # returned by {LLM::Context#functions} with methods
+   # that can call all pending functions sequentially or
+   # concurrently. The return values can be reported back
+   # to the LLM on the next turn.
+   module Array
+     ##
+     # Calls all functions in a collection sequentially.
+     # @return [Array<LLM::Function::Return>]
+     #   Returns values to be reported back to the LLM.
+     def call
+       map(&:call)
+     end
+
+     ##
+     # Calls all functions in a collection concurrently.
+     # This method returns an {LLM::Function::ThreadGroup},
+     # {LLM::Function::TaskGroup}, or {LLM::Function::FiberGroup}
+     # that can be waited on to access the return values.
+     #
+     # @param [Symbol] strategy
+     #   Controls concurrency strategy:
+     #   - `:thread`: Use threads
+     #   - `:task`: Use async tasks (requires async gem)
+     #   - `:fiber`: Use raw fibers
+     #
+     # @return [LLM::Function::ThreadGroup, LLM::Function::TaskGroup, LLM::Function::FiberGroup]
+     def spawn(strategy)
+       case strategy
+       when :task
+         TaskGroup.new(map { |fn| fn.spawn(:task) })
+       when :thread
+         ThreadGroup.new(map { |fn| fn.spawn(:thread) })
+       when :fiber
+         FiberGroup.new(map { |fn| fn.spawn(:fiber) })
+       else
+         raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
+       end
+     end
+
+     ##
+     # Calls all functions in a collection concurrently
+     # and waits for the return values.
+     #
+     # @param [Symbol] strategy
+     #   Controls concurrency strategy:
+     #   - `:thread`: Use threads
+     #   - `:task`: Use async tasks (requires async gem)
+     #   - `:fiber`: Use raw fibers
+     #
+     # @return [Array<LLM::Function::Return>]
+     #   Returns values to be reported back to the LLM.
+     def wait(strategy)
+       spawn(strategy).wait
+     end
+   end
+ end
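The flow these methods enable, as a minimal sketch (the Weather, News, and Stocks tool classes are placeholders borrowed from the @example blocks later in this diff):

    llm = LLM.openai(key: ENV["KEY"])
    ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
    ctx.talk "Summarize the weather, headlines, and stock price."

    # Pick one entry point per turn:
    ctx.talk ctx.functions.call           # sequential
    ctx.talk ctx.functions.wait(:thread)  # concurrent, block for the results
    grp = ctx.functions.spawn(:fiber)     # concurrent, collect the results later
    ctx.talk grp.wait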
data/lib/llm/function/fiber_group.rb
ADDED
@@ -0,0 +1,91 @@
+ # frozen_string_literal: true
+
+ class LLM::Function
+   ##
+   # The {LLM::Function::FiberGroup} class wraps an array of
+   # {Fiber} objects that are running {LLM::Function} calls
+   # concurrently using raw fibers.
+   #
+   # This class provides the same interface as {LLM::Function::ThreadGroup}
+   # but uses raw fibers for lightweight concurrency without the async gem.
+   #
+   # @example
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+   #   ctx.talk "Summarize the weather, headlines, and stock price."
+   #   grp = ctx.functions.spawn(:fiber)
+   #   # do other work while tools run...
+   #   ctx.talk(grp.wait)
+   #
+   # @see LLM::Function::Array#spawn
+   # @see LLM::Function::ThreadGroup
+   # @see LLM::Function::TaskGroup
+   class FiberGroup
+     ##
+     # Creates a new {LLM::Function::FiberGroup} from an array
+     # of fiber objects.
+     #
+     # @param [Array<Fiber>] fibers
+     #   An array of fibers, each running an {LLM::Function#spawn_fiber} call.
+     #
+     # @return [LLM::Function::FiberGroup]
+     #   Returns a new fiber group.
+     def initialize(fibers)
+       @fibers = fibers
+     end
+
+     ##
+     # Returns whether any fiber in the group is still alive.
+     #
+     # This method checks if any of the fibers in the group are
+     # still running. It can be useful for monitoring concurrent
+     # tool execution without blocking.
+     #
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+     #   ctx.talk "Summarize the weather, headlines, and stock price."
+     #   grp = ctx.functions.spawn(:fiber)
+     #   while grp.alive?
+     #     puts "Tools are still running..."
+     #     sleep 1
+     #   end
+     #   ctx.talk(grp.wait)
+     #
+     # @return [Boolean]
+     #   Returns true if any fiber in the group is still alive,
+     #   false otherwise.
+     def alive?
+       @fibers.any?(&:alive?)
+     end
+
+     ##
+     # Waits for all fibers in the group to finish and returns
+     # their {LLM::Function::Return} values.
+     #
+     # This method blocks until every fiber in the group has
+     # completed. If a fiber raised an exception, the exception
+     # is caught and wrapped in an {LLM::Function::Return} with
+     # error information.
+     #
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+     #   ctx.talk "Summarize the weather, headlines, and stock price."
+     #   grp = ctx.functions.spawn(:fiber)
+     #   returns = grp.wait
+     #   # returns is now an array of LLM::Function::Return objects
+     #   ctx.talk(returns)
+     #
+     # @return [Array<LLM::Function::Return>]
+     #   Returns an array of function return values, in the same
+     #   order as the original fibers.
+     def wait
+       @fibers.map do |fiber|
+         fiber.resume if fiber.alive?
+         fiber.value
+       end
+     end
+     alias_method :value, :wait
+   end
+ end
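A self-contained sketch of the raw-fiber handshake behind spawn(:fiber): the body runs during the first resume, parks itself with Fiber.yield in its ensure block, and a second resume (issued by the group's wait) finishes the fiber. Note that without a fiber scheduler the body runs to completion on the first resume, so the :fiber strategy buys deferred collection rather than true parallelism:

    fiber = Fiber.new do
      6 * 7          # stands in for call_function
    ensure
      Fiber.yield    # park so the caller can collect the value later
    end
    fiber.resume     # runs the body, stops at the Fiber.yield
    fiber.resume     # => 42; the fiber finishes and returns the body's value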
data/lib/llm/function/task_group.rb
ADDED
@@ -0,0 +1,89 @@
+ # frozen_string_literal: true
+
+ class LLM::Function
+   ##
+   # The {LLM::Function::TaskGroup} class wraps an array of
+   # {Async::Task} objects that are running {LLM::Function} calls
+   # concurrently using the async gem.
+   #
+   # This class provides the same interface as {LLM::Function::ThreadGroup}
+   # but uses async tasks for lightweight concurrency with automatic
+   # scheduling and I/O management.
+   #
+   # @example
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+   #   ctx.talk "Summarize the weather, headlines, and stock price."
+   #   grp = ctx.functions.spawn(:task)
+   #   # do other work while tools run...
+   #   ctx.talk(grp.wait)
+   #
+   # @see LLM::Function::Array#spawn
+   # @see LLM::Function::ThreadGroup
+   # @see LLM::Function::FiberGroup
+   class TaskGroup
+     ##
+     # Creates a new {LLM::Function::TaskGroup} from an array
+     # of async task objects.
+     #
+     # @param [Array<Async::Task>] tasks
+     #   An array of async tasks, each running an {LLM::Function#spawn_async} call.
+     #
+     # @return [LLM::Function::TaskGroup]
+     #   Returns a new task group.
+     def initialize(tasks)
+       @tasks = tasks
+     end
+
+     ##
+     # Returns whether any task in the group is still alive.
+     #
+     # This method checks if any of the tasks in the group are
+     # still running. It can be useful for monitoring concurrent
+     # tool execution without blocking.
+     #
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+     #   ctx.talk "Summarize the weather, headlines, and stock price."
+     #   grp = ctx.functions.spawn(:task)
+     #   while grp.alive?
+     #     puts "Tools are still running..."
+     #     sleep 1
+     #   end
+     #   ctx.talk(grp.wait)
+     #
+     # @return [Boolean]
+     #   Returns true if any task in the group is still alive,
+     #   false otherwise.
+     def alive?
+       @tasks.any?(&:alive?)
+     end
+
+     ##
+     # Waits for all tasks in the group to finish and returns
+     # their {LLM::Function::Return} values.
+     #
+     # This method blocks until every task in the group has
+     # completed. If a task raised an exception, the exception
+     # is caught and wrapped in an {LLM::Function::Return} with
+     # error information.
+     #
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+     #   ctx.talk "Summarize the weather, headlines, and stock price."
+     #   grp = ctx.functions.spawn(:task)
+     #   returns = grp.wait
+     #   # returns is now an array of LLM::Function::Return objects
+     #   ctx.talk(returns)
+     #
+     # @return [Array<LLM::Function::Return>]
+     #   Returns an array of function return values, in the same
+     #   order as the original tasks.
+     def wait
+       @tasks.map(&:wait)
+     end
+     alias_method :value, :wait
+   end
+ end
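A hedged sketch of the async side (assuming the async gem, which provides Kernel#Sync and Kernel#Async): inside a running reactor, Async { } spawns concurrent child tasks and Async::Task#wait returns each block's value. Invoked outside a reactor, Async { } starts its own event loop and blocks until the task finishes, so spawn(:task) pays off most inside an existing reactor:

    require "async"

    Sync do
      tasks = 3.times.map { |i| Async { sleep 0.1; i * i } }
      tasks.map(&:wait) # => [0, 1, 4] after roughly 0.1s in total
    end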
data/lib/llm/function/thread_group.rb
ADDED
@@ -0,0 +1,94 @@
+ # frozen_string_literal: true
+
+ class LLM::Function
+   ##
+   # The {LLM::Function::ThreadGroup} class wraps an array of
+   # {Thread} objects that are running {LLM::Function} calls
+   # concurrently. It provides a single {#wait} method that
+   # collects the {LLM::Function::Return} values from those
+   # threads.
+   #
+   # This class is returned by {LLM::Function::Array#spawn}
+   # when you call `ctx.functions.spawn` on the collection
+   # returned by {LLM::Context#functions}. It is a lightweight
+   # wrapper that does not inherit from Ruby's built-in
+   # {::ThreadGroup}.
+   #
+   # @example
+   #   llm = LLM.openai(key: ENV["KEY"])
+   #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+   #   ctx.talk "Summarize the weather, headlines, and stock price."
+   #   grp = ctx.functions.spawn
+   #   # do other work while tools run...
+   #   ctx.talk(grp.wait)
+   #
+   # @see LLM::Function::Array#spawn
+   # @see LLM::Function::Array#wait
+   class ThreadGroup
+     ##
+     # Creates a new {LLM::Function::ThreadGroup} from an array
+     # of {Thread} objects.
+     #
+     # @param [Array<Thread>] threads
+     #   An array of threads, each running an {LLM::Function#spawn}
+     #   call. The thread's {Thread#value} will be an
+     #   {LLM::Function::Return}.
+     #
+     # @return [LLM::Function::ThreadGroup]
+     #   Returns a new thread group.
+     def initialize(threads)
+       @threads = threads
+     end
+
+     ##
+     # Returns whether any thread in the group is still alive.
+     #
+     # This method checks if any of the threads in the group are
+     # still running. It can be useful for monitoring concurrent
+     # tool execution without blocking.
+     #
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+     #   ctx.talk "Summarize the weather, headlines, and stock price."
+     #   grp = ctx.functions.spawn
+     #   while grp.alive?
+     #     puts "Tools are still running..."
+     #     sleep 1
+     #   end
+     #   ctx.talk(grp.wait)
+     #
+     # @return [Boolean]
+     #   Returns true if any thread in the group is still alive,
+     #   false otherwise.
+     def alive?
+       @threads.any?(&:alive?)
+     end
+
+     ##
+     # Waits for all threads in the group to finish and returns
+     # their {LLM::Function::Return} values.
+     #
+     # This method blocks until every thread in the group has
+     # completed. If a thread raised an exception, the exception
+     # is caught and wrapped in an {LLM::Function::Return} with
+     # error information.
+     #
+     # @example
+     #   llm = LLM.openai(key: ENV["KEY"])
+     #   ctx = LLM::Context.new(llm, tools: [Weather, News, Stocks])
+     #   ctx.talk "Summarize the weather, headlines, and stock price."
+     #   grp = ctx.functions.spawn
+     #   returns = grp.wait
+     #   # returns is now an array of LLM::Function::Return objects
+     #   ctx.talk(returns)
+     #
+     # @return [Array<LLM::Function::Return>]
+     #   Returns an array of function return values, in the same
+     #   order as the original threads.
+     def wait
+       @threads.map(&:value)
+     end
+     alias_method :value, :wait
+   end
+ end
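A small sketch of why ThreadGroup#wait can lean on Thread#value: Thread#value normally re-raises an exception that escaped the thread, but each spawned thread here runs call_function, which rescues exceptions and returns them as data, so #value always yields an LLM::Function::Return:

    t = Thread.new do
      raise "boom"                        # a failing tool body
    rescue => ex
      {error: true, message: ex.message}  # what call_function returns instead
    end
    t.value # => {error: true, message: "boom"}; nothing is re-raised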
data/lib/llm/function.rb
CHANGED
@@ -30,9 +30,14 @@
  # end
  class LLM::Function
    require_relative "function/tracing"
+   require_relative "function/array"
+   require_relative "function/thread_group"
+   require_relative "function/fiber_group"
+   require_relative "function/task_group"
+
    prepend LLM::Function::Tracing

-
+   Return = Struct.new(:id, :name, :value) do
      ##
      # Returns a Hash representation of {LLM::Function::Return}
      # @return [Hash]
@@ -105,13 +110,15 @@
    ##
    # Set (or get) the function parameters
    # @yieldparam [LLM::Schema] schema The schema object
-   # @return [
+   # @return [LLM::Schema::Leaf, nil]
    def params
      if block_given?
+       params = yield(@schema)
+       params = LLM::Schema.parse(params) if Hash === params
        if @params
-         @params.merge!(
+         @params.merge!(params)
        else
-         @params =
+         @params = params
        end
      else
        @params
@@ -131,8 +138,51 @@
    # Call the function
    # @return [LLM::Function::Return] The result of the function call
    def call
-
-
+     call_function
+   ensure
+     @called = true
+   end
+
+   ##
+   # Calls the function in a separate thread.
+   #
+   # This is the low-level method that powers concurrent tool execution.
+   # Prefer the collection methods on {LLM::Context#functions} for most
+   # use cases: {LLM::Function::Array#call}, {LLM::Function::Array#wait},
+   # or {LLM::Function::Array#spawn}.
+   #
+   # @example
+   #   # Normal usage (via collection)
+   #   ctx.talk(ctx.functions.wait)
+   #
+   #   # Direct usage (uncommon)
+   #   thread = tool.spawn
+   #   result = thread.value
+   #
+   # @param [Symbol] strategy
+   #   Controls concurrency strategy:
+   #   - `:thread`: Use threads
+   #   - `:task`: Use async tasks (requires async gem)
+   #   - `:fiber`: Use raw fibers
+   #
+   # @return [Thread, Async::Task, Fiber]
+   #   Returns a thread, async task, or fiber whose `#value` is an {LLM::Function::Return}.
+   def spawn(strategy)
+     case strategy
+     when :task
+       require "async" unless defined?(::Async)
+       Async { call_function }
+     when :thread
+       Thread.new { call_function }
+     when :fiber
+       Fiber.new do
+         call_function
+       ensure
+         Fiber.yield
+       end.tap(&:resume)
+     else
+       raise ArgumentError, "Unknown strategy: #{strategy.inspect}. Expected :thread, :task, or :fiber"
+     end
    ensure
      @called = true
    end
@@ -141,9 +191,9 @@
    # Returns a value that communicates that the function call was cancelled
    # @example
    #   llm = LLM.openai(key: ENV["KEY"])
-   #
-   #
-   #
+   #   ctx = LLM::Context.new(llm, tools: [fn1, fn2])
+   #   ctx.talk "I want to run the functions"
+   #   ctx.talk ctx.functions.map(&:cancel)
    # @return [LLM::Function::Return]
    def cancel(reason: "function call cancelled")
      Return.new(id, name, {cancelled: true, reason:})
@@ -176,7 +226,7 @@
    # @return [Hash]
    def adapt(provider)
      case provider.class.to_s
-     when "LLM::
+     when "LLM::Google"
        {name: @name, description: @description, parameters: @params}.compact
      when "LLM::Anthropic"
        {name: @name, description: @description, input_schema: @params}.compact
@@ -185,6 +235,8 @@
      end
    end

+   private
+
    def format_openai(provider)
      case provider.class.to_s
      when "LLM::OpenAI::Responses"
@@ -199,4 +251,17 @@
      }.compact
    end
  end
+
+   ##
+   # Internal method that calls the function and returns a Return object.
+   # Handles both class-based and proc-based runners, and rescues exceptions.
+   #
+   # @return [LLM::Function::Return]
+   #   Returns a Return object with either the function result or error information.
+   def call_function
+     runner = ((Class === @runner) ? @runner.new : @runner)
+     Return.new(id, name, runner.call(**arguments))
+   rescue => ex
+     Return.new(id, name, {error: true, type: ex.class.name, message: ex.message})
+   end
  end
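A hedged sketch of the cancellation path next to the error path (ctx and its functions as in the @example above; :dangerous_tool is a hypothetical tool name, and Function#name is assumed from the Return construction shown above): cancel builds a Return without invoking the runner, while a raising runner is converted by call_function into an error-shaped Return:

    returns = ctx.functions.map do |fn|
      fn.name == :dangerous_tool ? fn.cancel(reason: "not allowed") : fn.call
    end
    # A cancelled tool reports {cancelled: true, reason: "not allowed"};
    # a failing tool reports {error: true, type: "...", message: "..."}.
    ctx.talk(returns)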
data/lib/llm/mcp/command.rb
ADDED
@@ -0,0 +1,108 @@
+ # frozen_string_literal: true
+
+ class LLM::MCP
+   ##
+   # The {LLM::MCP::Command} class manages the lifecycle of an MCP process
+   # by wrapping a system command. It provides methods to start the process,
+   # write to its stdin, read from its stdout and stderr, and wait for it
+   # to exit.
+   class Command
+     ##
+     # @return [Integer, nil]
+     #   The PID of the running command, or nil if it's not running
+     attr_reader :pid
+
+     attr_reader :stdin, :stdout, :stderr
+
+     ##
+     # @param [Array<String>] argv The command to run for the MCP process
+     # @param [Hash] env The environment variables to set for the MCP process
+     # @param [String, nil] cwd The working directory for the MCP process
+     # @return [LLM::MCP::Command] A new Command instance
+     def initialize(argv:, env: {}, cwd: nil)
+       @argv = argv
+       @env = env
+       @cwd = cwd
+       @pid = nil
+       @buffers = {}
+     end
+
+     ##
+     # Starts a command.
+     # @raise [LLM::Error]
+     #   When the command is already running
+     # @return [void]
+     def start
+       raise LLM::MCP::Error, "MCP command is already running" if alive?
+       @stdout, @stderr, @stdin = 3.times.map { Pipe.new }
+       @buffers.clear
+       @pid = Process.spawn(env.to_h, *argv, {chdir: cwd, out: stdout.w, err: stderr.w, in: stdin.r}.compact)
+       [stdin.close_reader, [stdout, stderr].each(&:close_writer)]
+     end
+
+     ##
+     # Stops the command if it's running.
+     # @return [void]
+     def stop
+       return nil unless alive?
+       [stdin.close_writer, [stdout, stderr].each(&:close_reader)]
+       Process.kill("TERM", pid)
+       @buffers.clear
+       wait
+     end
+
+     ##
+     # Returns true when command is running.
+     # @return [Boolean]
+     def alive?
+       !@pid.nil?
+     end
+
+     ##
+     # Writes to the command's stdin
+     # @param [String] message The message to write
+     # @return [void]
+     def write(message)
+       stdin.write(message)
+       stdin.write("\n")
+       stdin.flush
+     end
+
+     ##
+     # Reads from the command's IO without blocking.
+     # @param [Symbol] io
+     #   The IO stream to read from (:stdout, :stderr)
+     # @raise [LLM::Error]
+     #   When the command is not running
+     # @raise [IO::WaitReadable]
+     #   When no complete message is available to read
+     # @return [String]
+     #   The next complete line from the specified IO stream
+     def read_nonblock(io = :stdout)
+       raise LLM::MCP::Error, "MCP command is not running" unless alive?
+       io = public_send(io)
+       @buffers[io] ||= +""
+       loop do
+         if (index = @buffers[io].index("\n"))
+           return @buffers[io].slice!(0, index + 1)
+         end
+         @buffers[io] << io.read_nonblock(4096)
+       end
+     end
+
+     ##
+     # Waits for the command to exit and returns its exit status.
+     # @return [Process::Status, nil]
+     #   The exit status of the command, or nil
+     def wait
+       Process.wait(pid)
+       @pid = nil
+     rescue Errno::ECHILD
+       nil
+     end
+
+     private
+
+     attr_reader :argv, :env, :cwd
+   end
+ end
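A minimal sketch of driving an MCP server over stdio with this class ("mcp-server" is a placeholder executable): read_nonblock raises IO::WaitReadable until a full line has been buffered, so a caller polls or selects before retrying:

    cmd = LLM::MCP::Command.new(argv: ["mcp-server"], env: {"DEBUG" => "1"})
    cmd.start
    cmd.write('{"jsonrpc":"2.0","id":1,"method":"initialize","params":{}}')
    begin
      puts cmd.read_nonblock(:stdout)  # next complete line from the server
    rescue IO::WaitReadable
      sleep 0.05
      retry
    end
    cmd.stop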
data/lib/llm/mcp/error.rb
ADDED
@@ -0,0 +1,31 @@
+ # frozen_string_literal: true
+
+ class LLM::MCP
+   class Error < LLM::Error
+     attr_reader :code, :data
+
+     ##
+     # @param [Hash] response
+     #   The full response from the MCP process, including the error object
+     # @return [LLM::MCP::Error]
+     def self.from(response:)
+       error = response.fetch("error")
+       new(*error.values_at("message", "code", "data"))
+     end
+
+     ##
+     # @param [String] message
+     #   The error message
+     # @param [Integer] code
+     #   The error code
+     # @param [Object] data
+     #   Additional error data provided by the MCP process
+     def initialize(message, code = nil, data = nil)
+       super(message)
+       @code = code
+       @data = data
+     end
+   end
+
+   TimeoutError = Class.new(Error)
+ end
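For reference, the shape Error.from expects matches a JSON-RPC 2.0 error response; a sketch with the standard "method not found" code (the payload values here are illustrative):

    response = {
      "jsonrpc" => "2.0", "id" => 1,
      "error" => {"code" => -32601, "message" => "Method not found", "data" => {"method" => "tools/foo"}}
    }
    err = LLM::MCP::Error.from(response: response)
    err.message # => "Method not found"
    err.code    # => -32601
    err.data    # => {"method" => "tools/foo"}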
data/lib/llm/mcp/pipe.rb
ADDED
@@ -0,0 +1,82 @@
+ # frozen_string_literal: true
+
+ class LLM::MCP
+   ##
+   # The {LLM::MCP::Pipe LLM::MCP::Pipe} class wraps a pair of IO
+   # objects created by {IO.pipe}. It is used by
+   # {LLM::MCP::Transport::Stdio LLM::MCP::Transport::Stdio} to manage
+   # the stdin, stdout, and stderr streams of an MCP process through
+   # one small interface.
+   class Pipe
+     ##
+     # @return [IO]
+     #   Returns the reader
+     attr_reader :r
+
+     ##
+     # @return [IO]
+     #   Returns the writer
+     attr_reader :w
+
+     ##
+     # Returns a new pipe.
+     # @return [LLM::MCP::Pipe]
+     def initialize
+       @r, @w = IO.pipe
+     end
+
+     ##
+     # Reads from the reader end without blocking.
+     # @raise [IO::WaitReadable]
+     #   When no data is available to read
+     # @return [String]
+     def read_nonblock(...)
+       @r.read_nonblock(...)
+     end
+
+     ##
+     # Writes to the writer.
+     # @return [Integer]
+     def write(...)
+       @w.write(...)
+     end
+
+     ##
+     # Flushes the writer.
+     # @return [void]
+     def flush
+       @w.flush
+     end
+
+     ##
+     # Returns true when both ends are closed.
+     # @return [Boolean]
+     def closed?
+       [@r, @w].all?(&:closed?)
+     end
+
+     ##
+     # Closes both ends of the pipe.
+     # @return [void]
+     def close
+       [@r, @w].each(&:close)
+     rescue IOError
+     end
+
+     ##
+     # Closes the reader.
+     # @return [void]
+     def close_reader
+       @r.close
+     rescue IOError
+     end
+
+     ##
+     # Closes the writer.
+     # @return [void]
+     def close_writer
+       @w.close
+     rescue IOError
+     end
+   end
+ end
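A self-contained sketch of the half-close discipline Command#start applies to these pipes (POSIX-only; "cat" stands in for a child process): the parent closes the end it does not use so that EOF propagates correctly:

    pipe = LLM::MCP::Pipe.new
    pid  = Process.spawn("cat", in: pipe.r, out: IO::NULL)
    pipe.close_reader   # the parent only writes; the child keeps its own copy
    pipe.write("hello\n")
    pipe.flush
    pipe.close_writer   # the child sees EOF and exits
    Process.wait(pid)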