llm.rb 0.4.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. checksums.yaml +4 -4
  2. data/README.md +173 -115
  3. data/lib/json/schema/array.rb +5 -0
  4. data/lib/json/schema/boolean.rb +4 -0
  5. data/lib/json/schema/integer.rb +23 -1
  6. data/lib/json/schema/leaf.rb +11 -0
  7. data/lib/json/schema/null.rb +4 -0
  8. data/lib/json/schema/number.rb +23 -1
  9. data/lib/json/schema/object.rb +6 -2
  10. data/lib/json/schema/string.rb +26 -1
  11. data/lib/json/schema/version.rb +2 -0
  12. data/lib/json/schema.rb +10 -10
  13. data/lib/llm/buffer.rb +31 -12
  14. data/lib/llm/chat.rb +56 -29
  15. data/lib/llm/core_ext/ostruct.rb +14 -8
  16. data/lib/llm/file.rb +6 -1
  17. data/lib/llm/function.rb +86 -0
  18. data/lib/llm/message.rb +54 -2
  19. data/lib/llm/provider.rb +32 -46
  20. data/lib/llm/providers/anthropic/format/completion_format.rb +73 -0
  21. data/lib/llm/providers/anthropic/format.rb +8 -33
  22. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +51 -0
  23. data/lib/llm/providers/anthropic/response_parser.rb +1 -9
  24. data/lib/llm/providers/anthropic.rb +14 -14
  25. data/lib/llm/providers/gemini/audio.rb +9 -9
  26. data/lib/llm/providers/gemini/files.rb +11 -10
  27. data/lib/llm/providers/gemini/format/completion_format.rb +54 -0
  28. data/lib/llm/providers/gemini/format.rb +20 -27
  29. data/lib/llm/providers/gemini/images.rb +12 -7
  30. data/lib/llm/providers/gemini/models.rb +3 -3
  31. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +46 -0
  32. data/lib/llm/providers/gemini/response_parser.rb +13 -20
  33. data/lib/llm/providers/gemini.rb +10 -20
  34. data/lib/llm/providers/ollama/format/completion_format.rb +72 -0
  35. data/lib/llm/providers/ollama/format.rb +11 -30
  36. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +42 -0
  37. data/lib/llm/providers/ollama/response_parser.rb +8 -11
  38. data/lib/llm/providers/ollama.rb +9 -17
  39. data/lib/llm/providers/openai/audio.rb +6 -6
  40. data/lib/llm/providers/openai/files.rb +3 -3
  41. data/lib/llm/providers/openai/format/completion_format.rb +83 -0
  42. data/lib/llm/providers/openai/format/respond_format.rb +69 -0
  43. data/lib/llm/providers/openai/format.rb +27 -58
  44. data/lib/llm/providers/openai/images.rb +4 -2
  45. data/lib/llm/providers/openai/response_parser/completion_parser.rb +55 -0
  46. data/lib/llm/providers/openai/response_parser/respond_parser.rb +56 -0
  47. data/lib/llm/providers/openai/response_parser.rb +8 -44
  48. data/lib/llm/providers/openai/responses.rb +13 -14
  49. data/lib/llm/providers/openai.rb +11 -23
  50. data/lib/llm/providers/voyageai.rb +4 -4
  51. data/lib/llm/response/{output.rb → respond.rb} +2 -2
  52. data/lib/llm/response.rb +1 -1
  53. data/lib/llm/version.rb +1 -1
  54. data/lib/llm.rb +38 -10
  55. data/llm.gemspec +1 -0
  56. metadata +28 -3
data/lib/json/schema/string.rb CHANGED
@@ -1,9 +1,34 @@
 # frozen_string_literal: true
 
 class JSON::Schema
+  ##
+  # The {JSON::Schema::String JSON::Schema::String} class represents a
+  # string value in a JSON schema. It is a subclass of
+  # {JSON::Schema::Leaf JSON::Schema::Leaf} and provides methods that
+  # can act as constraints.
   class String < Leaf
+    ##
+    # Constrain the string to a minimum length
+    # @param [Integer] i The minimum length
+    # @return [JSON::Schema::String] Returns self
+    def min(i)
+      tap { @minimum = i }
+    end
+
+    ##
+    # Constrain the string to a maximum length
+    # @param [Integer] i The maximum length
+    # @return [JSON::Schema::String] Returns self
+    def max(i)
+      tap { @maximum = i }
+    end
+
     def to_h
-      super.merge!({type: "string"})
+      super.merge!({
+        type: "string",
+        minLength: @minimum,
+        maxLength: @maximum
+      }).compact
     end
   end
 end
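
Both constraints return self via tap, so they chain, and compact drops whichever bound is left unset. A minimal sketch of the resulting behaviour, assuming the JSON::Schema#string helper shown further down in this diff:

    schema = JSON::Schema.new
    str = schema.string.min(2).max(64)
    str.to_h # includes {type: "string", minLength: 2, maxLength: 64}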
data/lib/json/schema/version.rb CHANGED
@@ -1,3 +1,5 @@
+# frozen_string_literal: true
+
 module JSON
 end unless defined?(JSON)
 
data/lib/json/schema.rb CHANGED
@@ -50,35 +50,35 @@ class JSON::Schema
   ##
   # Returns a string
   # @return [JSON::Schema::String]
-  def string(...)
-    String.new(...)
+  def string
+    String.new
   end
 
   ##
   # Returns a number
   # @return [JSON::Schema::Number] a number
-  def number(...)
-    Number.new(...)
+  def number
+    Number.new
   end
 
   ##
   # Returns an integer
   # @return [JSON::Schema::Integer]
-  def integer(...)
-    Integer.new(...)
+  def integer
+    Integer.new
   end
 
   ##
   # Returns a boolean
   # @return [JSON::Schema::Boolean]
-  def boolean(...)
-    Boolean.new(...)
+  def boolean
+    Boolean.new
   end
 
   ##
   # Returns null
   # @return [JSON::Schema::Null]
-  def null(...)
-    Null.new(...)
+  def null
+    Null.new
   end
 end
data/lib/llm/buffer.rb CHANGED
@@ -28,6 +28,22 @@ module LLM
       @completed.each { yield(_1) }
     end
 
+    ##
+    # Returns an array of unread messages
+    # @see LLM::Message#read?
+    # @see LLM::Message#read!
+    # @return [Array<LLM::Message>]
+    def unread
+      reject(&:read?)
+    end
+
+    ##
+    # Find a message (in descending order)
+    # @return [LLM::Message, nil]
+    def find(...)
+      reverse_each.find(...)
+    end
+
     ##
     # @param [[LLM::Message, Hash]] item
     #  A message and its parameters
@@ -48,7 +64,7 @@ module LLM
     private
 
     def empty!
-      message, params, method = @pending[-1]
+      message, params, method = @pending.pop
       if method == :complete
         complete!(message, params)
       elsif method == :respond
@@ -59,24 +75,27 @@ module LLM
     end
 
     def complete!(message, params)
-      messages = @pending[0..-2].map { _1[0] }
+      pendings = @pending.map { _1[0] }
+      messages = [*@completed, *pendings]
+      role = message.role
       completion = @provider.complete(
         message.content,
-        message.role,
-        **params.merge(messages:)
+        params.merge(role:, messages:)
       )
-      @completed.concat([*messages, message, completion.choices[0]])
+      @completed.concat([*pendings, message, completion.choices[0]])
       @pending.clear
     end
 
     def respond!(message, params)
-      input = @pending[0..-2].map { _1[0] }
-      @response = @provider.responses.create(
-        message.content,
-        message.role,
-        **params.merge(input:).merge(@response ? {previous_response_id: @response.id} : {})
-      )
-      @completed.concat([*input, message, @response.outputs[0]])
+      pendings = @pending.map { _1[0] }
+      input = [*pendings]
+      role = message.role
+      params = [
+        params.merge(input:),
+        @response ? {previous_response_id: @response.id} : {}
+      ].inject({}, &:merge!)
+      @response = @provider.responses.create(message.content, params.merge(role:))
+      @completed.concat([*pendings, message, @response.outputs[0]])
       @pending.clear
     end
   end
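
The buffer gains the same unread/find helpers that LLM::Chat exposes on its message array; a short sketch (find searches newest-first via reverse_each, and unread relies on the LLM::Message#read? flag added later in this diff):

    bot = LLM::Chat.new(llm).lazy
    bot.chat "Why is the sky blue ?", role: :user
    bot.messages.find(&:assistant?) # most recent assistant message, if any
    bot.messages.unread             # messages not yet marked with #read!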
data/lib/llm/chat.rb CHANGED
@@ -13,11 +13,10 @@ module LLM
   #
   #   llm = LLM.openai(ENV["KEY"])
   #   bot = LLM::Chat.new(llm).lazy
-  #   bot.chat("Your task is to answer all of my questions", :system)
-  #   bot.chat("Your answers should be short and concise", :system)
-  #   bot.chat("What is 5 + 7 ?", :user)
-  #   bot.chat("Why is the sky blue ?", :user)
-  #   bot.chat("Why did the chicken cross the road ?", :user)
+  #   bot.chat("Provide short and concise answers", role: :system)
+  #   bot.chat("What is 5 + 7 ?", role: :user)
+  #   bot.chat("Why is the sky blue ?", role: :user)
+  #   bot.chat("Why did the chicken cross the road ?", role: :user)
   #   bot.messages.map { print "[#{_1.role}]", _1.content, "\n" }
   class Chat
     ##
@@ -27,31 +26,34 @@ module LLM
     ##
     # @param [LLM::Provider] provider
     #  A provider
-    # @param [to_json] schema
-    #  The JSON schema to maintain throughout the conversation
-    # @param [String] model
-    #  The model to maintain throughout the conversation
     # @param [Hash] params
-    #  Other parameters to maintain throughout the conversation
-    def initialize(provider, model: provider.default_model, schema: nil, **params)
+    #  The parameters to maintain throughout the conversation.
+    #  Any parameter the provider supports can be included and
+    #  not only those listed here.
+    # @option params [String] :model Defaults to the provider's default model
+    # @option params [#to_json, nil] :schema Defaults to nil
+    # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
+    def initialize(provider, params = {})
       @provider = provider
-      @params = params.merge!(model:, schema:)
+      @params = {model: provider.default_model, schema: nil}.compact.merge!(params)
       @lazy = false
-      @messages = []
+      @messages = [].extend(Array)
     end
 
     ##
     # Maintain a conversation via the chat completions API
-    # @param prompt (see LLM::Provider#prompt)
-    # @param role (see LLM::Provider#prompt)
-    # @param params (see LLM::Provider#prompt)
+    # @param prompt (see LLM::Provider#complete)
+    # @param params (see LLM::Provider#complete)
     # @return [LLM::Chat]
-    def chat(prompt, role = :user, **params)
+    def chat(prompt, params = {})
+      params = {role: :user}.merge!(params)
       if lazy?
+        role = params.delete(:role)
        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :complete]
        self
      else
-        completion = complete!(prompt, role, params)
+        role = params[:role]
+        completion = complete!(prompt, params)
        @messages.concat [Message.new(role, prompt), completion.choices[0]]
        self
      end
@@ -60,16 +62,18 @@ module LLM
     ##
     # Maintain a conversation via the responses API
     # @note Not all LLM providers support this API
-    # @param prompt (see LLM::Provider#prompt)
-    # @param role (see LLM::Provider#prompt)
-    # @param params (see LLM::Provider#prompt)
+    # @param prompt (see LLM::Provider#complete)
+    # @param params (see LLM::Provider#complete)
     # @return [LLM::Chat]
-    def respond(prompt, role = :user, **params)
+    def respond(prompt, params = {})
+      params = {role: :user}.merge!(params)
       if lazy?
+        role = params.delete(:role)
        @messages << [LLM::Message.new(role, prompt), @params.merge(params), :respond]
        self
      else
-        @response = respond!(prompt, role, params)
+        role = params[:role]
+        @response = respond!(prompt, params)
        @messages.concat [Message.new(role, prompt), @response.outputs[0]]
        self
      end
@@ -108,27 +112,50 @@ module LLM
       @lazy
     end
 
+    ##
+    # @return [String]
     def inspect
       "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
       "@provider=#{@provider.class}, @params=#{@params.inspect}, " \
       "@messages=#{@messages.inspect}, @lazy=#{@lazy.inspect}>"
     end
 
+    ##
+    # Returns an array of functions that have yet to be called
+    # @return [Array<LLM::Function>]
+    def functions
+      messages
+        .select(&:assistant?)
+        .flat_map(&:functions)
+        .reject(&:called?)
+    end
+
     private
 
-    def respond!(prompt, role, params)
+    ##
+    # @private
+    module Array
+      def find(...)
+        reverse_each.find(...)
+      end
+
+      def unread
+        reject(&:read?)
+      end
+    end
+    private_constant :Array
+
+    def respond!(prompt, params)
       @provider.responses.create(
         prompt,
-        role,
-        **@params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
+        @params.merge(params.merge(@response ? {previous_response_id: @response.id} : {}))
       )
     end
 
-    def complete!(prompt, role, params)
+    def complete!(prompt, params)
       @provider.complete(
         prompt,
-        role,
-        **@params.merge(params.merge(messages:))
+        @params.merge(params.merge(messages:))
       )
     end
   end
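
For callers, the role argument moves out of the positional slot and into the params hash, with :user as the default; model, schema, tools, and any other provider-supported parameter travel in the same hash. A sketch of the migration:

    # 0.4.2
    bot.chat("What is 5 + 7 ?", :user)
    # 0.6.0
    bot.chat("What is 5 + 7 ?", role: :user)
    bot.chat("Why is the sky blue ?") # role defaults to :user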
data/lib/llm/core_ext/ostruct.rb CHANGED
@@ -8,17 +8,19 @@ class OpenStruct
   #  obj = OpenStruct.from_hash(person: {name: 'John'})
   #  obj.person.name  # => 'John'
   #  obj.person.class # => OpenStruct
-  # @param [Hash] hash_obj
+  # @param [Hash, Array] obj
   #  A Hash object
   # @return [OpenStruct]
-  #  An OpenStruct object initialized by visiting `hash_obj` with
-  #  recursion
-  def from_hash(hash_obj)
-    visited_object = {}
-    hash_obj.each do |key, value|
-      visited_object[key] = walk(value)
+  #  An OpenStruct object initialized by visiting `obj` with recursion
+  def from_hash(obj)
+    case obj
+    when self then from_hash(obj.to_h)
+    when Array then obj.map { |v| from_hash(v) }
+    else
+      visited = {}
+      obj.each { visited[_1] = walk(_2) }
+      new(visited)
     end
-    new(visited_object)
   end
 
   private
@@ -34,4 +36,8 @@ class OpenStruct
     end
   end
   extend FromHash
+
+  def to_json(...)
+    to_h.to_json(...)
+  end
 end
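
from_hash now recurses into arrays and unwraps existing OpenStruct instances, and to_json serializes through to_h. A small sketch:

    people = OpenStruct.from_hash([{name: "Ada"}, {name: "Grace"}])
    people[0].name    # => "Ada"
    people[0].to_json # => "{\"name\":\"Ada\"}"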
data/lib/llm/file.rb CHANGED
@@ -72,5 +72,10 @@ end
 #  The path to a file
 # @return [LLM::File]
 def LLM.File(path)
-  LLM::File.new(path)
+  case path
+  when LLM::File, LLM::Response::File
+    path
+  else
+    LLM::File.new(path)
+  end
 end
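
LLM.File() is now idempotent: an LLM::File (or an already-uploaded LLM::Response::File) passes through untouched, so code that accepts either a path or a file object can normalize without double wrapping. A sketch with a hypothetical path:

    file = LLM.File("/tmp/cat.png") # hypothetical path
    LLM.File(file).equal?(file)     # => true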
data/lib/llm/function.rb ADDED
@@ -0,0 +1,86 @@
+# frozen_string_literal: true
+
+class LLM::Function
+  class Return < Struct.new(:id, :value)
+  end
+
+  ##
+  # Returns the function name
+  # @return [String]
+  attr_reader :name
+
+  ##
+  # Returns function arguments
+  # @return [Array, nil]
+  attr_accessor :arguments
+
+  ##
+  # Returns the function ID
+  # @return [String, nil]
+  attr_accessor :id
+
+  ##
+  # @param [String] name The function name
+  # @yieldparam [LLM::Function] self The function object
+  def initialize(name, &b)
+    @name = name
+    @schema = JSON::Schema.new
+    yield(self)
+  end
+
+  ##
+  # Set the function description
+  # @param [String] str The function description
+  # @return [void]
+  def description(str)
+    @description = str
+  end
+
+  ##
+  # @yieldparam [JSON::Schema] schema The schema object
+  # @return [void]
+  def params
+    @params = yield(@schema)
+  end
+
+  ##
+  # Set the function implementation
+  # @param [Proc] b The function implementation
+  # @return [void]
+  def define(&b)
+    @runner = b
+  end
+
+  ##
+  # Call the function
+  # @note The arguments are read from {#arguments}
+  # @return [LLM::Function::Return] The result of the function call
+  def call
+    Return.new id, @runner.call(arguments)
+  ensure
+    @called = true
+  end
+
+  ##
+  # Returns true when a function has been called
+  # @return [Boolean]
+  def called?
+    @called
+  end
+
+  ##
+  # @return [Hash]
+  def format(provider)
+    case provider.class.to_s
+    when "LLM::Gemini"
+      {name: @name, description: @description, parameters: @params}.compact
+    when "LLM::Anthropic"
+      {name: @name, description: @description, input_schema: @params}.compact
+    else
+      {
+        type: "function", name: @name,
+        function: {name: @name, description: @description, parameters: @params}
+      }.compact
+    end
+  end
+end
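
A sketch of how the builder DSL fits together. The tool name and body are illustrative, the schema.object helper is assumed from data/lib/json/schema/object.rb rather than shown in this section, and how tools get registered into LLM.functions lives in lib/llm.rb:

    tool = LLM::Function.new("system_time") do |fn|
      fn.description "Returns the current system time"
      fn.params { |schema| schema.object(timezone: schema.string.min(1)) } # object() assumed
      fn.define { |args| Time.now.getlocal.to_s }
    end
    tool.format(llm) # provider-specific tool definition (OpenAI-style unless Gemini/Anthropic)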
data/lib/llm/message.rb CHANGED
@@ -58,18 +58,70 @@ module LLM
     end
 
     ##
-    # Returns true when the message is from the LLM
+    # @return [Array<LLM::Function>]
+    def functions
+      @functions ||= tool_calls.map do |fn|
+        function = LLM.functions[fn.name].dup
+        function.tap { _1.id = fn.id }
+        function.tap { _1.arguments = fn.arguments }
+      end
+    end
+
+    ##
+    # Marks the message as read
+    # @return [void]
+    def read!
+      @read = true
+    end
+
+    ##
+    # Returns true when the message has been read
+    # @return [Boolean]
+    def read?
+      @read
+    end
+
+    ##
+    # Returns true when the message is an assistant message
     # @return [Boolean]
     def assistant?
       role == "assistant" || role == "model"
     end
 
+    ##
+    # Returns true when the message is a system message
+    # @return [Boolean]
+    def system?
+      role == "system"
+    end
+
+    ##
+    # Returns true when the message is a user message
+    # @return [Boolean]
+    def user?
+      role == "user"
+    end
+
+    ##
+    # @return [Boolean]
+    #  Returns true when the message requests a function call
+    def tool_call?
+      tool_calls.any?
+    end
+
     ##
     # Returns a string representation of the message
     # @return [String]
     def inspect
       "#<#{self.class.name}:0x#{object_id.to_s(16)} " \
-      "role=#{role.inspect} content=#{content.inspect}>"
+      "tool_call=#{tool_calls.any?} role=#{role.inspect} " \
+      "content=#{content.inspect}>"
+    end
+
+    private
+
+    def tool_calls
+      @tool_calls ||= OpenStruct.from_hash(@extra[:tool_calls] || [])
     end
   end
 end
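
Combined with LLM::Chat#functions, the new predicates enable a simple tool-call round trip; a sketch, assuming a bot created with tools: [tool] as in the LLM::Function example above (returning the Return values to the provider is handled outside this hunk):

    bot.chat "What time is it ?", role: :user
    if bot.messages.find(&:tool_call?)
      returns = bot.functions.map(&:call) # => [LLM::Function::Return, ...]
    end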
data/lib/llm/provider.rb CHANGED
@@ -9,7 +9,7 @@ class LLM::Provider
   require "net/http"
 
   ##
-  # @param [String] secret
+  # @param [String, nil] key
   #  The secret key for authentication
   # @param [String] host
   #  The host address of the LLM provider
@@ -17,8 +17,10 @@ class LLM::Provider
   #  The port number
   # @param [Integer] timeout
   #  The number of seconds to wait for a response
-  def initialize(secret, host:, port: 443, timeout: 60, ssl: true)
-    @secret = secret
+  # @param [Boolean] ssl
+  #  Whether to use SSL for the connection
+  def initialize(key:, host:, port: 443, timeout: 60, ssl: true)
+    @key = key
     @http = Net::HTTP.new(host, port).tap do |http|
       http.use_ssl = ssl
       http.read_timeout = timeout
@@ -30,7 +32,7 @@ class LLM::Provider
   # @return [String]
   # @note The secret key is redacted in inspect for security reasons
   def inspect
-    "#<#{self.class.name}:0x#{object_id.to_s(16)} @secret=[REDACTED] @http=#{@http.inspect}>"
+    "#<#{self.class.name}:0x#{object_id.to_s(16)} @key=[REDACTED] @http=#{@http.inspect}>"
   end
 
   ##
@@ -52,26 +54,23 @@ class LLM::Provider
   # Provides an interface to the chat completions API
   # @example
   #   llm = LLM.openai(ENV["KEY"])
-  #   messages = [
-  #     {role: "system", content: "Your task is to answer all of my questions"},
-  #     {role: "system", content: "Your answers should be short and concise"},
-  #   ]
-  #   res = llm.complete("Hello. What is the answer to 5 + 2 ?", :user, messages:)
+  #   messages = [{role: "system", content: "Your task is to answer all of my questions"}]
+  #   res = llm.complete("5 + 2 ?", messages:)
   #   print "[#{res.choices[0].role}]", res.choices[0].content, "\n"
   # @param [String] prompt
   #  The input prompt to be completed
-  # @param [Symbol] role
-  #  The role of the prompt (e.g. :user, :system)
-  # @param [String] model
-  #  The model to use for the completion
-  # @param [#to_json, nil] schema
-  #  The schema that describes the expected response format
   # @param [Hash] params
-  #  Other completion parameters
+  #  The parameters to maintain throughout the conversation.
+  #  Any parameter the provider supports can be included and
+  #  not only those listed here.
+  # @option params [Symbol] :role Defaults to the provider's default role
+  # @option params [String] :model Defaults to the provider's default model
+  # @option params [#to_json, nil] :schema Defaults to nil
+  # @option params [Array<LLM::Function>, nil] :tools Defaults to nil
  # @raise [NotImplementedError]
  #  When the method is not implemented by a subclass
  # @return [LLM::Response::Completion]
-  def complete(prompt, role = :user, model: default_model, schema: nil, **params)
+  def complete(prompt, params = {})
    raise NotImplementedError
  end

@@ -81,15 +80,11 @@
   #  This method creates a lazy version of a
   #  {LLM::Chat LLM::Chat} object.
   # @param prompt (see LLM::Provider#complete)
-  # @param role (see LLM::Provider#complete)
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
-  # @raise (see LLM::Provider#complete)
+  # @param params (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def chat(prompt, role = :user, model: default_model, schema: nil, **params)
-    LLM::Chat.new(self, **params.merge(model:, schema:)).lazy.chat(prompt, role)
+  def chat(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).lazy.chat(prompt, role:)
   end
 
@@ -98,15 +93,12 @@
   #  This method creates a non-lazy version of a
   #  {LLM::Chat LLM::Chat} object.
   # @param prompt (see LLM::Provider#complete)
-  # @param role (see LLM::Provider#complete)
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
+  # @param params (see LLM::Provider#complete)
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def chat!(prompt, role = :user, model: default_model, schema: nil, **params)
-    LLM::Chat.new(self, **params.merge(model:, schema:)).chat(prompt, role)
+  def chat!(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).chat(prompt, role:)
   end
 
   ##
@@ -115,15 +107,12 @@
   #  This method creates a lazy variant of a
   #  {LLM::Chat LLM::Chat} object.
   # @param prompt (see LLM::Provider#complete)
-  # @param role (see LLM::Provider#complete)
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
+  # @param params (see LLM::Provider#complete)
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def respond(prompt, role = :user, model: default_model, schema: nil, **params)
-    LLM::Chat.new(self, **params.merge(model:, schema:)).lazy.respond(prompt, role)
+  def respond(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).lazy.respond(prompt, role:)
   end
 
   ##
@@ -132,15 +121,12 @@
   #  This method creates a non-lazy variant of a
   #  {LLM::Chat LLM::Chat} object.
   # @param prompt (see LLM::Provider#complete)
-  # @param role (see LLM::Provider#complete)
-  # @param model (see LLM::Provider#complete)
-  # @param schema (see LLM::Provider#complete)
-  # @param [Hash] params
-  #  Other completion parameters to maintain throughout a chat
+  # @param params (see LLM::Provider#complete)
   # @raise (see LLM::Provider#complete)
   # @return [LLM::Chat]
-  def respond!(prompt, role = :user, model: default_model, schema: nil, **params)
-    LLM::Chat.new(self, **params.merge(model:, schema:)).respond(prompt, role)
+  def respond!(prompt, params = {})
+    role = params.delete(:role)
+    LLM::Chat.new(self, params).respond(prompt, role:)
   end
 
   ##