llm.rb 4.7.0 → 4.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (95)
  1. checksums.yaml +4 -4
  2. data/README.md +335 -587
  3. data/data/anthropic.json +770 -0
  4. data/data/deepseek.json +75 -0
  5. data/data/google.json +1050 -0
  6. data/data/openai.json +1421 -0
  7. data/data/xai.json +792 -0
  8. data/data/zai.json +330 -0
  9. data/lib/llm/agent.rb +42 -41
  10. data/lib/llm/bot.rb +1 -263
  11. data/lib/llm/buffer.rb +7 -0
  12. data/lib/llm/{session → context}/deserializer.rb +4 -3
  13. data/lib/llm/context.rb +292 -0
  14. data/lib/llm/cost.rb +26 -0
  15. data/lib/llm/error.rb +8 -0
  16. data/lib/llm/eventstream/parser.rb +0 -5
  17. data/lib/llm/function/array.rb +61 -0
  18. data/lib/llm/function/fiber_group.rb +91 -0
  19. data/lib/llm/function/task_group.rb +89 -0
  20. data/lib/llm/function/thread_group.rb +94 -0
  21. data/lib/llm/function.rb +75 -10
  22. data/lib/llm/mcp/command.rb +108 -0
  23. data/lib/llm/mcp/error.rb +31 -0
  24. data/lib/llm/mcp/pipe.rb +82 -0
  25. data/lib/llm/mcp/rpc.rb +118 -0
  26. data/lib/llm/mcp/transport/stdio.rb +85 -0
  27. data/lib/llm/mcp.rb +102 -0
  28. data/lib/llm/message.rb +13 -11
  29. data/lib/llm/model.rb +115 -0
  30. data/lib/llm/prompt.rb +17 -7
  31. data/lib/llm/provider.rb +60 -32
  32. data/lib/llm/providers/anthropic/error_handler.rb +1 -1
  33. data/lib/llm/providers/anthropic/files.rb +3 -3
  34. data/lib/llm/providers/anthropic/models.rb +1 -1
  35. data/lib/llm/providers/anthropic/request_adapter.rb +20 -3
  36. data/lib/llm/providers/anthropic/response_adapter/models.rb +13 -0
  37. data/lib/llm/providers/anthropic/response_adapter.rb +2 -0
  38. data/lib/llm/providers/anthropic.rb +21 -5
  39. data/lib/llm/providers/deepseek.rb +10 -3
  40. data/lib/llm/providers/{gemini → google}/audio.rb +6 -6
  41. data/lib/llm/providers/{gemini → google}/error_handler.rb +20 -5
  42. data/lib/llm/providers/{gemini → google}/files.rb +11 -11
  43. data/lib/llm/providers/{gemini → google}/images.rb +7 -7
  44. data/lib/llm/providers/{gemini → google}/models.rb +5 -5
  45. data/lib/llm/providers/{gemini → google}/request_adapter/completion.rb +7 -3
  46. data/lib/llm/providers/{gemini → google}/request_adapter.rb +1 -1
  47. data/lib/llm/providers/{gemini → google}/response_adapter/completion.rb +7 -7
  48. data/lib/llm/providers/{gemini → google}/response_adapter/embedding.rb +1 -1
  49. data/lib/llm/providers/{gemini → google}/response_adapter/file.rb +1 -1
  50. data/lib/llm/providers/{gemini → google}/response_adapter/files.rb +1 -1
  51. data/lib/llm/providers/{gemini → google}/response_adapter/image.rb +1 -1
  52. data/lib/llm/providers/google/response_adapter/models.rb +13 -0
  53. data/lib/llm/providers/{gemini → google}/response_adapter/web_search.rb +2 -2
  54. data/lib/llm/providers/{gemini → google}/response_adapter.rb +8 -8
  55. data/lib/llm/providers/{gemini → google}/stream_parser.rb +3 -3
  56. data/lib/llm/providers/{gemini.rb → google.rb} +41 -26
  57. data/lib/llm/providers/llamacpp.rb +10 -3
  58. data/lib/llm/providers/ollama/error_handler.rb +1 -1
  59. data/lib/llm/providers/ollama/models.rb +1 -1
  60. data/lib/llm/providers/ollama/response_adapter/models.rb +13 -0
  61. data/lib/llm/providers/ollama/response_adapter.rb +2 -0
  62. data/lib/llm/providers/ollama.rb +19 -4
  63. data/lib/llm/providers/openai/error_handler.rb +18 -3
  64. data/lib/llm/providers/openai/files.rb +3 -3
  65. data/lib/llm/providers/openai/images.rb +17 -11
  66. data/lib/llm/providers/openai/models.rb +1 -1
  67. data/lib/llm/providers/openai/response_adapter/completion.rb +9 -1
  68. data/lib/llm/providers/openai/response_adapter/models.rb +13 -0
  69. data/lib/llm/providers/openai/response_adapter/responds.rb +9 -1
  70. data/lib/llm/providers/openai/response_adapter.rb +2 -0
  71. data/lib/llm/providers/openai/responses.rb +16 -1
  72. data/lib/llm/providers/openai/stream_parser.rb +2 -0
  73. data/lib/llm/providers/openai.rb +28 -6
  74. data/lib/llm/providers/xai/images.rb +7 -6
  75. data/lib/llm/providers/xai.rb +10 -3
  76. data/lib/llm/providers/zai.rb +9 -2
  77. data/lib/llm/registry.rb +81 -0
  78. data/lib/llm/schema/enum.rb +16 -0
  79. data/lib/llm/schema/parser.rb +109 -0
  80. data/lib/llm/schema.rb +5 -0
  81. data/lib/llm/server_tool.rb +5 -5
  82. data/lib/llm/session.rb +10 -1
  83. data/lib/llm/tool/param.rb +1 -1
  84. data/lib/llm/tool.rb +86 -5
  85. data/lib/llm/tracer/langsmith.rb +144 -0
  86. data/lib/llm/tracer/logger.rb +9 -1
  87. data/lib/llm/tracer/null.rb +8 -0
  88. data/lib/llm/tracer/telemetry.rb +98 -78
  89. data/lib/llm/tracer.rb +108 -4
  90. data/lib/llm/usage.rb +5 -0
  91. data/lib/llm/version.rb +1 -1
  92. data/lib/llm.rb +40 -6
  93. data/llm.gemspec +45 -8
  94. metadata +87 -28
  95. data/lib/llm/providers/gemini/response_adapter/models.rb +0 -15
data/lib/llm/mcp/rpc.rb ADDED
@@ -0,0 +1,118 @@
+# frozen_string_literal: true
+
+class LLM::MCP
+  ##
+  # The {LLM::MCP::RPC} module provides the JSON-RPC interface used by
+  # {LLM::MCP}. MCP uses JSON-RPC to exchange messages between a client
+  # and a server. A client sends a method name and its parameters as a
+  # request, and the server replies with either a result or an error.
+  #
+  # This module is responsible for composing those requests, applying
+  # the defaults needed by built-in MCP methods such as initialize,
+  # and reading responses for request methods. Notifications are sent
+  # without waiting for a response, and errors are raised as
+  # {LLM::MCP::Error}.
+  # @private
+  module RPC
+    ##
+    # Sends a method over the transport.
+    # @param [LLM::MCP::Transport] transport
+    #  The transport to write to
+    # @param [String] method
+    #  The method name to call
+    # @param [Hash] params
+    #  The parameters to send with the method call
+    # @return [Object, nil]
+    #  The result of the method call, or nil if it's a notification
+    def call(transport, method, params = {})
+      message = {jsonrpc: "2.0", method:, params: default_params(method).merge(params)}
+      if notification?(method)
+        transport.write(message)
+        nil
+      else
+        @request_id = (@request_id || -1) + 1
+        id = @request_id
+        transport.write(message.merge(id:))
+        recv(transport, id)
+      end
+    end
+
+    private
+
+    ##
+    # Reads a response from the transport.
+    # @param [LLM::MCP::Transport] transport
+    #  The transport to read from
+    # @param [Integer] id
+    #  The request id to wait for
+    # @raise [LLM::MCP::Error]
+    #  When the MCP process returns an error
+    # @return [Object, nil]
+    #  The result returned by the MCP process
+    def recv(transport, id)
+      poll(timeout:, ex: [IO::WaitReadable]) do
+        loop do
+          res = transport.read_nonblock
+          next unless res["id"] == id
+          if res["error"]
+            raise LLM::MCP::Error.from(response: res)
+          else
+            break res["result"]
+          end
+        end
+      end
+    end
+
+    ##
+    # Returns default parameters for built-in methods.
+    # @param [String] method
+    #  The method name
+    # @return [Hash]
+    def default_params(method)
+      case method
+      when "initialize"
+        {protocolVersion: "2025-03-26", capabilities: {}}
+      else
+        {}
+      end
+    end
+
+    ##
+    # Returns true when the method is a notification.
+    # @param [String] method
+    #  The method name
+    # @return [Boolean]
+    def notification?(method)
+      method.to_s.start_with?("notifications/")
+    end
+
+    ##
+    # Returns the maximum amount of time to wait when reading from an MCP process.
+    # @return [Integer]
+    def timeout
+      @timeout ||= 5
+    end
+
+    ##
+    # Runs a block until it succeeds, times out, or raises an unhandled exception.
+    # @param [Integer] timeout
+    #  The timeout for the block, in seconds
+    # @param [Array<Class>] ex
+    #  The exceptions to retry when raised
+    # @yield
+    #  The block to run
+    # @raise [LLM::MCP::TimeoutError]
+    #  When the block takes longer than the timeout
+    # @return [Object]
+    def poll(timeout:, ex: [])
+      start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
+      loop do
+        return yield
+      rescue *ex
+        duration = Process.clock_gettime(Process::CLOCK_MONOTONIC) - start
+        raise LLM::MCP::TimeoutError, "MCP process timed out" if duration > timeout
+        sleep 0.05
+      end
+    end
+  end
+end
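For orientation, the envelopes RPC#call composes look like this on the wire: a request for the built-in initialize method carries an id and the default params, while a notifications/* method is written without an id and without waiting for a reply. A minimal sketch derived from the code above; the literal values mirror the defaults in the diff:

    # Sketch of the JSON-RPC envelopes RPC#call writes, per the logic above.
    request = {
      jsonrpc: "2.0",
      method: "initialize",
      params: {protocolVersion: "2025-03-26", capabilities: {}},
      id: 0 # @request_id starts at -1 and increments once per request
    }
    notification = {
      jsonrpc: "2.0",
      method: "notifications/initialized",
      params: {} # no id: notification?("notifications/initialized") is true
    }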
data/lib/llm/mcp/transport/stdio.rb ADDED
@@ -0,0 +1,85 @@
+# frozen_string_literal: true
+
+module LLM::MCP::Transport
+  ##
+  # The {LLM::MCP::Transport::Stdio LLM::MCP::Transport::Stdio} class
+  # provides a stdio transport for {LLM::MCP LLM::MCP}. It sends JSON-RPC
+  # messages to an MCP process over stdin and stdout and delegates process
+  # lifecycle management to {LLM::MCP::Command LLM::MCP::Command}.
+  class Stdio
+    ##
+    # Returns a new Stdio transport instance.
+    # @param command [LLM::MCP::Command]
+    #  The command to run for the MCP process
+    # @return [LLM::MCP::Transport::Stdio]
+    def initialize(command:)
+      @command = command
+    end
+
+    ##
+    # Starts an MCP process over a stdio transport.
+    # This method is non-blocking and returns immediately.
+    # @raise [LLM::Error]
+    #  When the transport is already running
+    # @return [void]
+    def start
+      if command.alive?
+        raise LLM::MCP::Error, "MCP transport is already running"
+      else
+        command.start
+      end
+    end
+
+    ##
+    # Closes the connection to the MCP process.
+    # This method is idempotent and can be called multiple times without error.
+    # @return [void]
+    def stop
+      command.stop
+    end
+
+    ##
+    # Writes a message to the MCP process.
+    # @param [Hash] message
+    #  The message to write
+    # @raise [LLM::Error]
+    #  When the transport is not running
+    # @return [void]
+    def write(message)
+      if command.alive?
+        command.write(LLM.json.dump(message))
+      else
+        raise LLM::MCP::Error, "MCP transport is not running"
+      end
+    end
+
+    ##
+    # Reads a message from the MCP process without blocking.
+    # @raise [LLM::Error]
+    #  When the transport is not running
+    # @raise [IO::WaitReadable]
+    #  When no complete message is available to read
+    # @return [Hash]
+    #  The next message from the MCP process
+    def read_nonblock
+      if command.alive?
+        LLM.json.load(command.read_nonblock)
+      else
+        raise LLM::MCP::Error, "MCP transport is not running"
+      end
+    end
+
+    ##
+    # Waits for the command to exit.
+    # This method is blocking and will return only after the
+    # process has exited.
+    # @return [void]
+    def wait
+      command.wait
+    end
+
+    private
+
+    attr_reader :command, :stdin, :stdout, :stderr
+  end
+end
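The non-blocking read is what RPC#poll builds on: read_nonblock either returns a parsed message or raises IO::WaitReadable when no complete message is buffered yet, and poll rescues that and retries until its deadline. A caller-side sketch, assuming transport is a started Stdio instance:

    begin
      message = transport.read_nonblock # => Hash, e.g. {"jsonrpc" => "2.0", "id" => 0, ...}
    rescue IO::WaitReadable
      sleep 0.05 # retry shortly, as RPC#poll does until its timeout elapses
      retry
    end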
data/lib/llm/mcp.rb ADDED
@@ -0,0 +1,102 @@
+# frozen_string_literal: true
+
+##
+# The {LLM::MCP LLM::MCP} class provides access to servers that
+# implement the Model Context Protocol. MCP defines a standard way for
+# clients and servers to exchange capabilities such as tools, prompts,
+# resources, and other structured interactions.
+#
+# In llm.rb, {LLM::MCP LLM::MCP} currently supports stdio servers and
+# focuses on discovering tools that can be used through
+# {LLM::Context LLM::Context} and {LLM::Agent LLM::Agent}.
+class LLM::MCP
+  require "monitor"
+  require_relative "mcp/error"
+  require_relative "mcp/command"
+  require_relative "mcp/rpc"
+  require_relative "mcp/pipe"
+  require_relative "mcp/transport/stdio"
+
+  include RPC
+
+  ##
+  # @param [LLM::Provider, nil] llm
+  #  The provider to use for MCP transports that need one
+  # @param [Hash] stdio The configuration for the stdio transport
+  # @option stdio [Array<String>] :argv
+  #  The command to run for the MCP process
+  # @option stdio [Hash] :env
+  #  The environment variables to set for the MCP process
+  # @option stdio [String, nil] :cwd
+  #  The working directory for the MCP process
+  # @param [Integer] timeout The maximum amount of time to wait when reading from an MCP process
+  # @return [LLM::MCP] A new MCP instance
+  def initialize(llm = nil, stdio:, timeout: 30)
+    @llm = llm
+    @command = Command.new(**stdio)
+    @monitor = Monitor.new
+    @transport = Transport::Stdio.new(command:)
+    @timeout = timeout
+  end
+
+  ##
+  # Starts the MCP process.
+  # @return [void]
+  def start
+    lock do
+      transport.start
+      call(transport, "initialize", {clientInfo: {name: "llm.rb", version: LLM::VERSION}})
+      call(transport, "notifications/initialized")
+    end
+  end
+
+  ##
+  # Stops the MCP process.
+  # @return [void]
+  def stop
+    lock do
+      transport.stop
+      nil
+    end
+  end
+
+  ##
+  # Returns the tools provided by the MCP process.
+  # @return [Array<Class<LLM::Tool>>]
+  def tools
+    lock do
+      res = call(transport, "tools/list")
+      res["tools"].map { LLM::Tool.mcp(self, _1) }
+    end
+  end
+
+  ##
+  # Calls a tool by name with the given arguments
+  # @param [String] name The name of the tool to call
+  # @param [Hash] arguments The arguments to pass to the tool
+  # @return [Object] The result of the tool call
+  def call_tool(name, arguments = {})
+    lock do
+      res = call(transport, "tools/call", {name:, arguments:})
+      adapt_tool_result(res)
+    end
+  end
+
+  private
+
+  attr_reader :llm, :command, :transport, :timeout
+
+  def adapt_tool_result(result)
+    if result["structuredContent"]
+      result["structuredContent"]
+    elsif result["content"]
+      {content: result["content"]}
+    else
+      result
+    end
+  end
+
+  def lock(&)
+    @monitor.synchronize(&)
+  end
+end
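Taken together, a session with a stdio MCP server might look like the sketch below. The argv (a Node-based filesystem server) and the tool name are illustrative assumptions, not values from the library:

    llm = LLM.openai(key: ENV["KEY"])
    mcp = LLM::MCP.new(llm, stdio: {argv: %w[npx -y @modelcontextprotocol/server-filesystem /tmp]})
    mcp.start                         # sends "initialize", then "notifications/initialized"
    tools = mcp.tools                 # Array<Class<LLM::Tool>> via "tools/list"
    result = mcp.call_tool("list_directory", {path: "/tmp"}) # "tools/call"; tool name is hypothetical
    mcp.stop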
data/lib/llm/message.rb CHANGED
@@ -26,7 +26,7 @@ module LLM
     def initialize(role, content, extra = {})
       @role = role.to_s
       @content = content
-      @extra = extra
+      @extra = LLM::Object.from(extra)
     end
 
     ##
@@ -34,8 +34,9 @@ module LLM
     # @return [Hash]
     def to_h
       {role:, content:,
-       tools: @extra[:tool_calls],
-       original_tool_calls: extra[:original_tool_calls]}.compact
+       tools: extra.tool_calls,
+       usage:,
+       original_tool_calls: extra.original_tool_calls}.compact
     end
 
     ##
@@ -69,8 +70,9 @@ module LLM
     ##
     # @return [Array<LLM::Function>]
     def functions
-      @functions ||= tool_calls.map do |fn|
-        function = available_tools.find { _1.name.to_s == fn["name"] }.dup
+      @functions ||= tool_calls.filter_map do |fn|
+        function = available_tools.find { _1.name.to_s == fn["name"] } || next
+        function = function.dup
         function.tap { _1.id = fn.id }
        function.tap { _1.arguments = fn.arguments }
       end
@@ -119,7 +121,7 @@ module LLM
     # @return [LLM::Response, nil]
     #  Returns the response associated with the message, or nil
     def response
-      extra[:response]
+      extra.response
     end
 
     ##
@@ -129,7 +131,7 @@ module LLM
     # Returns annotations associated with the message
     # @return [Array<LLM::Object>]
     def annotations
-      @annotations ||= LLM::Object.from(extra["annotations"] || [])
+      @annotations ||= LLM::Object.from(extra.annotations || [])
     end
 
     ##
@@ -139,8 +141,7 @@ module LLM
     # Returns token usage statistics
     # @return [LLM::Object, nil]
     def usage
-      return nil unless response
-      @usage ||= response.usage
+      @usage ||= extra.usage || response&.usage
     end
     alias_method :token_usage, :usage
 
@@ -163,11 +164,12 @@ module LLM
     private
 
     def tool_calls
-      @tool_calls ||= LLM::Object.from(@extra[:tool_calls] || [])
+      @tool_calls ||= LLM::Object.from(extra.tool_calls || [])
     end
 
     def available_tools
-      response&.__tools__ || []
+      tools = extra.tools || response&.__tools__ || []
+      tools.map { _1.respond_to?(:function) ? _1.function : _1 }
     end
   end
 end
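The practical effect of @extra = LLM::Object.from(extra) is that the mixed hash access seen before this change (@extra[:tool_calls] in one place, extra["annotations"] in another) collapses into a single method-style access path. A small sketch, assuming LLM::Object.from accepts either string or symbol keys, as those earlier call sites suggest:

    extra = LLM::Object.from({"annotations" => [], tool_calls: []})
    extra.annotations # => [] (previously extra["annotations"])
    extra.tool_calls  # => [] (previously @extra[:tool_calls])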
data/lib/llm/model.rb ADDED
@@ -0,0 +1,115 @@
+# frozen_string_literal: true
+
+##
+# The {LLM::Model LLM::Model} class provides a normalized view of
+# a provider model record returned by the models API.
+class LLM::Model
+  ##
+  # The provider-specific model payload.
+  # @return [LLM::Object]
+  attr_reader :raw
+
+  ##
+  # @param [LLM::Object, Hash] raw
+  def initialize(raw)
+    @raw = raw
+  end
+
+  ##
+  # Returns a normalized identifier suitable for API calls.
+  # @return [String, nil]
+  def id
+    normalize_id(raw.id || raw.model || raw.name)
+  end
+
+  ##
+  # Returns a display-friendly model name.
+  # @return [String, nil]
+  def name
+    raw.display_name || raw.displayName || id
+  end
+
+  ##
+  # Best-effort predicate for chat support.
+  # @return [Boolean]
+  def chat?
+    return true if anthropic?
+    return [*(raw.supportedGenerationMethods || [])].include?("generateContent") if google?
+    openai_compatible_chat?
+  end
+
+  ##
+  # Returns a Hash representation of the normalized model.
+  # @return [Hash]
+  def to_h
+    {id:, name:, chat?: chat?}.compact
+  end
+
+  ##
+  # @private
+  module Collection
+    include ::Enumerable
+
+    ##
+    # @yield [model]
+    # @yieldparam [LLM::Model] model
+    # @return [Enumerator, void]
+    def each(&)
+      return enum_for(:each) unless block_given?
+      models.each(&)
+    end
+
+    ##
+    # Returns an element, or a slice, or nil.
+    # @return [Object, Array<Object>, nil]
+    def [](*pos, **kw)
+      models[*pos, **kw]
+    end
+
+    ##
+    # @return [Boolean]
+    def empty?
+      models.empty?
+    end
+
+    ##
+    # @return [Integer]
+    def size
+      models.size
+    end
+
+    ##
+    # Returns normalized models.
+    # @return [Array<LLM::Model>]
+    def models
+      @models ||= raw_models.map { LLM::Model.new(_1) }
+    end
+  end
+
+  private
+
+  def normalize_id(value)
+    value&.sub(%r{\Amodels/}, "")
+  end
+
+  def anthropic?
+    raw.type == "model" && raw.key?(:display_name) && raw.key?(:created_at)
+  end
+
+  def google?
+    raw.key?(:supportedGenerationMethods)
+  end
+
+  def openai_compatible_chat?
+    value = [id, raw.name, raw.model].compact.join(" ").downcase
+    return false if value.include?("embedding")
+    return false if value.include?("moderation")
+    return false if value.include?("tts")
+    return false if value.include?("transcrib")
+    return false if value.include?("image")
+    return false if value.include?("whisper")
+    return false if value.include?("dall")
+    return false if value.include?("omni-moderation")
+    true
+  end
+end
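As a concrete illustration of the normalization, here is how a Google-style model record (which prefixes ids with models/ and reports supportedGenerationMethods) would pass through the class above. The record values are invented, and this assumes LLM::Object returns nil for absent keys:

    record = LLM::Object.from({
      name: "models/gemini-2.0-flash",                # invented example record
      displayName: "Gemini 2.0 Flash",
      supportedGenerationMethods: ["generateContent"]
    })
    model = LLM::Model.new(record)
    model.id    # => "gemini-2.0-flash" (normalize_id strips the "models/" prefix)
    model.name  # => "Gemini 2.0 Flash" (displayName fallback)
    model.chat? # => true ("generateContent" is listed)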
data/lib/llm/prompt.rb CHANGED
@@ -5,20 +5,20 @@
 # a single request from multiple role-aware messages.
 # A prompt is not just a string. It is an ordered chain of
 # messages with explicit roles (for example `system` and `user`).
-# Use {LLM::Session#prompt} when building a prompt inside a session.
+# Use {LLM::Context#prompt} when building a prompt inside a session.
 # Use `LLM::Prompt.new(provider)` directly when you want to construct
 # or pass prompt objects around explicitly.
 #
 # @example
 #   llm = LLM.openai(key: ENV["KEY"])
-#   ses = LLM::Session.new(llm)
+#   ctx = LLM::Context.new(llm)
 #
-#   prompt = ses.prompt do
+#   prompt = ctx.prompt do
 #     system "Your task is to assist the user"
 #     user "Hello. Can you assist me?"
 #   end
 #
-#   res = ses.talk(prompt)
+#   res = ctx.talk(prompt)
 class LLM::Prompt
   ##
   # @param [LLM::Provider] provider
@@ -57,7 +57,7 @@ class LLM::Prompt
   #  The message content
   # @return [void]
   def user(content)
-    chat(content, role: @provider.user_role)
+    talk(content, role: @provider.user_role)
   end
 
   ##
@@ -65,7 +65,7 @@
   #  The message content
   # @return [void]
   def system(content)
-    chat(content, role: @provider.system_role)
+    talk(content, role: @provider.system_role)
   end
 
   ##
@@ -73,7 +73,7 @@
   #  The message content
   # @return [void]
   def developer(content)
-    chat(content, role: @provider.developer_role)
+    talk(content, role: @provider.developer_role)
   end
 
   ##
@@ -82,4 +82,14 @@
   def to_a
     @buffer.dup
   end
+
+  ##
+  # Returns true when two prompts have the same buffer
+  # @param [LLM::Prompt] other
+  # @return [Boolean]
+  def ==(other)
+    return false unless LLM::Prompt === other
+    @buffer == other.to_a
+  end
+  alias_method :eql?, :==
 end
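The new == makes prompt equality structural rather than identity-based, which helps with caching and test assertions. A minimal sketch, assuming llm is a configured provider and the role helpers shown above are public:

    a, b = LLM::Prompt.new(llm), LLM::Prompt.new(llm)
    [a, b].each do |prompt|
      prompt.system "Your task is to assist the user"
      prompt.user "Hello. Can you assist me?"
    end
    a == b # => true: equality compares the message buffers, not object identity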