llm.rb 0.4.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Files changed (56)
  1. checksums.yaml +4 -4
  2. data/README.md +173 -115
  3. data/lib/json/schema/array.rb +5 -0
  4. data/lib/json/schema/boolean.rb +4 -0
  5. data/lib/json/schema/integer.rb +23 -1
  6. data/lib/json/schema/leaf.rb +11 -0
  7. data/lib/json/schema/null.rb +4 -0
  8. data/lib/json/schema/number.rb +23 -1
  9. data/lib/json/schema/object.rb +6 -2
  10. data/lib/json/schema/string.rb +26 -1
  11. data/lib/json/schema/version.rb +2 -0
  12. data/lib/json/schema.rb +10 -10
  13. data/lib/llm/buffer.rb +31 -12
  14. data/lib/llm/chat.rb +56 -29
  15. data/lib/llm/core_ext/ostruct.rb +14 -8
  16. data/lib/llm/file.rb +6 -1
  17. data/lib/llm/function.rb +86 -0
  18. data/lib/llm/message.rb +54 -2
  19. data/lib/llm/provider.rb +32 -46
  20. data/lib/llm/providers/anthropic/format/completion_format.rb +73 -0
  21. data/lib/llm/providers/anthropic/format.rb +8 -33
  22. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +51 -0
  23. data/lib/llm/providers/anthropic/response_parser.rb +1 -9
  24. data/lib/llm/providers/anthropic.rb +14 -14
  25. data/lib/llm/providers/gemini/audio.rb +9 -9
  26. data/lib/llm/providers/gemini/files.rb +11 -10
  27. data/lib/llm/providers/gemini/format/completion_format.rb +54 -0
  28. data/lib/llm/providers/gemini/format.rb +20 -27
  29. data/lib/llm/providers/gemini/images.rb +12 -7
  30. data/lib/llm/providers/gemini/models.rb +3 -3
  31. data/lib/llm/providers/gemini/response_parser/completion_parser.rb +46 -0
  32. data/lib/llm/providers/gemini/response_parser.rb +13 -20
  33. data/lib/llm/providers/gemini.rb +10 -20
  34. data/lib/llm/providers/ollama/format/completion_format.rb +72 -0
  35. data/lib/llm/providers/ollama/format.rb +11 -30
  36. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +42 -0
  37. data/lib/llm/providers/ollama/response_parser.rb +8 -11
  38. data/lib/llm/providers/ollama.rb +9 -17
  39. data/lib/llm/providers/openai/audio.rb +6 -6
  40. data/lib/llm/providers/openai/files.rb +3 -3
  41. data/lib/llm/providers/openai/format/completion_format.rb +83 -0
  42. data/lib/llm/providers/openai/format/respond_format.rb +69 -0
  43. data/lib/llm/providers/openai/format.rb +27 -58
  44. data/lib/llm/providers/openai/images.rb +4 -2
  45. data/lib/llm/providers/openai/response_parser/completion_parser.rb +55 -0
  46. data/lib/llm/providers/openai/response_parser/respond_parser.rb +56 -0
  47. data/lib/llm/providers/openai/response_parser.rb +8 -44
  48. data/lib/llm/providers/openai/responses.rb +13 -14
  49. data/lib/llm/providers/openai.rb +11 -23
  50. data/lib/llm/providers/voyageai.rb +4 -4
  51. data/lib/llm/response/{output.rb → respond.rb} +2 -2
  52. data/lib/llm/response.rb +1 -1
  53. data/lib/llm/version.rb +1 -1
  54. data/lib/llm.rb +38 -10
  55. data/llm.gemspec +1 -0
  56. metadata +28 -3

data/lib/llm/providers/openai/format.rb CHANGED
@@ -4,6 +4,9 @@ class LLM::OpenAI
   ##
   # @private
   module Format
+    require_relative "format/completion_format"
+    require_relative "format/respond_format"
+
     ##
     # @param [Array<LLM::Message>] messages
     #  The messages to format
@@ -11,11 +14,11 @@ class LLM::OpenAI
     #  The mode to format the messages for
     # @return [Array<Hash>]
     def format(messages, mode)
-      messages.map do
-        if Hash === _1
-          {role: _1[:role], content: format_content(_1[:content], mode)}
+      messages.filter_map do |message|
+        if mode == :complete
+          CompletionFormat.new(message).format
         else
-          {role: _1.role, content: format_content(_1.content, mode)}
+          RespondFormat.new(message).format
         end
       end
     end
@@ -23,62 +26,28 @@ class LLM::OpenAI
     private

     ##
-    # @param [String, URI] content
-    #  The content to format
-    # @return [String, Hash]
-    #  The formatted content
-    def format_content(content, mode)
-      if mode == :complete
-        format_complete(content)
-      elsif mode == :response
-        format_response(content)
-      end
+    # @param [JSON::Schema] schema
+    #  The schema to format
+    # @return [Hash]
+    def format_schema(params)
+      return {} unless params and params[:schema]
+      schema = params.delete(:schema)
+      {
+        response_format: {
+          type: "json_schema",
+          json_schema: {name: "JSONSchema", schema:}
+        }
+      }
     end

-    def format_complete(content)
-      case content
-      when Array
-        content.flat_map { format_complete(_1) }
-      when URI
-        [{type: :image_url, image_url: {url: content.to_s}}]
-      when LLM::File
-        file = content
-        if file.image?
-          [{type: :image_url, image_url: {url: file.to_data_uri}}]
-        else
-          [{type: :file, file: {filename: file.basename, file_data: file.to_data_uri}}]
-        end
-      when LLM::Response::File
-        [{type: :file, file: {file_id: content.id}}]
-      when String
-        [{type: :text, text: content.to_s}]
-      when LLM::Message
-        format_complete(content.content)
-      else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-                                       "is not supported by the OpenAI chat completions API"
-      end
-    end
-
-    def format_response(content)
-      case content
-      when Array
-        content.flat_map { format_response(_1) }
-      when LLM::Response::File
-        file = LLM::File(content.filename)
-        if file.image?
-          [{type: :input_image, file_id: content.id}]
-        else
-          [{type: :input_file, file_id: content.id}]
-        end
-      when String
-        [{type: :input_text, text: content.to_s}]
-      when LLM::Message
-        format_response(content.content)
-      else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-                                       "is not supported by the OpenAI responses API"
-      end
+    ##
+    # @param [Array<LLM::Function>] tools
+    #  The tools to format
+    # @return [Hash]
+    def format_tools(params)
+      return {} unless params and params[:tools]&.any?
+      tools = params[:tools]
+      {tools: tools.map { _1.format(self) }}
     end
   end
 end
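
Per-message content formatting moves out into the new CompletionFormat and RespondFormat classes, while the module gains request-level parameter formatting. As a rough sketch of what format_schema does, a plain hash stands in below for the JSON::Schema objects the gem normally builds; this is an illustration, not code from the release:

    params = {schema: {type: "object", properties: {answer: {type: "string"}}}}
    # format_schema deletes :schema from params and returns a hash that gets
    # merged into the request body:
    # {response_format: {
    #    type: "json_schema",
    #    json_schema: {name: "JSONSchema", schema: {type: "object", ...}}}}
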
data/lib/llm/providers/openai/images.rb CHANGED
@@ -57,7 +57,7 @@ class LLM::OpenAI
     # Create image variations
     # @example
     #   llm = LLM.openai(ENV["KEY"])
-    #   res = llm.images.create_variation(image: LLM::File("/images/hat.png"), n: 5)
+    #   res = llm.images.create_variation(image: "/images/hat.png", n: 5)
     #   p res.urls
     # @see https://platform.openai.com/docs/api-reference/images/createVariation OpenAI docs
     # @param [File] image The image to create variations from
@@ -66,6 +66,7 @@ class LLM::OpenAI
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response::Image]
     def create_variation(image:, model: "dall-e-2", **params)
+      image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, model:))
       req = Net::HTTP::Post.new("/v1/images/variations", headers)
       req["content-type"] = multi.content_type
@@ -78,7 +79,7 @@ class LLM::OpenAI
     # Edit an image
     # @example
     #   llm = LLM.openai(ENV["KEY"])
-    #   res = llm.images.edit(image: LLM::File("/images/hat.png"), prompt: "A cat wearing this hat")
+    #   res = llm.images.edit(image: "/images/hat.png", prompt: "A cat wearing this hat")
     #   p res.urls
     # @see https://platform.openai.com/docs/api-reference/images/createEdit OpenAI docs
     # @param [File] image The image to edit
@@ -88,6 +89,7 @@ class LLM::OpenAI
     # @raise (see LLM::Provider#request)
     # @return [LLM::Response::Image]
     def edit(image:, prompt:, model: "dall-e-2", **params)
+      image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
       req = Net::HTTP::Post.new("/v1/images/edits", headers)
       req["content-type"] = multi.content_type
data/lib/llm/providers/openai/response_parser/completion_parser.rb ADDED
@@ -0,0 +1,55 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::ResponseParser
+  ##
+  # @private
+  class CompletionParser
+    def initialize(body)
+      @body = OpenStruct.from_hash(body)
+    end
+
+    def format(response)
+      {
+        model:,
+        prompt_tokens:,
+        completion_tokens:,
+        total_tokens:,
+        choices: format_choices(response)
+      }
+    end
+
+    private
+
+    def format_choices(response)
+      choices.map.with_index do |choice, index|
+        message = choice.message
+        extra = {
+          index:, response:,
+          logprobs: choice.logprobs,
+          tool_calls: format_tool_calls(message.tool_calls),
+          original_tool_calls: message.tool_calls
+        }
+        LLM::Message.new(message.role, message.content, extra)
+      end
+    end
+
+    def format_tool_calls(tools)
+      (tools || []).filter_map do |tool|
+        next unless tool.function
+        tool = {
+          id: tool.id,
+          name: tool.function.name,
+          arguments: JSON.parse(tool.function.arguments)
+        }
+        OpenStruct.new(tool)
+      end
+    end
+
+    def body = @body
+    def model = body.model
+    def prompt_tokens = body.usage.prompt_tokens
+    def completion_tokens = body.usage.completion_tokens
+    def total_tokens = body.usage.total_tokens
+    def choices = body.choices
+  end
+end
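
Each parsed choice is an LLM::Message whose extra hash carries tool calls as OpenStructs with already-parsed JSON arguments. A sketch of how a caller might read them, assuming the completion response exposes the parsed :choices through a choices method:

    message = res.choices.first
    message.extra[:tool_calls].each do |call|
      # call.id, call.name, call.arguments (a Hash parsed from JSON)
      puts "#{call.name}(#{call.arguments.inspect})"
    end
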
data/lib/llm/providers/openai/response_parser/respond_parser.rb ADDED
@@ -0,0 +1,56 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::ResponseParser
+  ##
+  # @private
+  class RespondParser
+    def initialize(body)
+      @body = OpenStruct.from_hash(body)
+    end
+
+    def format(response)
+      {
+        id:,
+        model:,
+        input_tokens:,
+        output_tokens:,
+        total_tokens:,
+        outputs: [format_message(response)]
+      }
+    end
+
+    private
+
+    def format_message(response)
+      message = LLM::Message.new("assistant", +"", {response:, tool_calls: []})
+      choices.each.with_index do |choice, index|
+        if choice.type == "function_call"
+          message.extra[:tool_calls] << format_tool(choice)
+        elsif choice.content
+          choice.content.each do |c|
+            next unless c["type"] == "output_text"
+            message.content << c["text"] << "\n"
+          end
+        end
+      end
+      message
+    end
+
+    def format_tool(tool)
+      OpenStruct.new(
+        id: tool.call_id,
+        name: tool.name,
+        arguments: JSON.parse(tool.arguments)
+      )
+    end
+
+    def body = @body
+    def id = body.id
+    def model = body.model
+    def input_tokens = body.usage.input_tokens
+    def output_tokens = body.usage.output_tokens
+    def total_tokens = body.usage.total_tokens
+    def choices = body.output
+    def tools = output.select { _1.type == "function_call" }
+  end
+end
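
The accessors at the bottom imply the body shape this parser consumes: usage token counts plus an output array whose items are either function_call entries or messages carrying output_text content parts. An abridged, assumed example of the round trip:

    body = {
      "id" => "resp_123", "model" => "gpt-4o-mini",
      "usage" => {"input_tokens" => 10, "output_tokens" => 5, "total_tokens" => 15},
      "output" => [{"type" => "message", "role" => "assistant",
                    "content" => [{"type" => "output_text", "text" => "Hello"}]}]
    }
    parsed = LLM::OpenAI::ResponseParser::RespondParser.new(body).format(res)
    # res is the enclosing LLM::Response::Respond instance
    parsed[:outputs].first.content # => "Hello\n" (one trailing newline per text part)
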
data/lib/llm/providers/openai/response_parser.rb CHANGED
@@ -8,55 +8,28 @@ class LLM::OpenAI
     # @param [Hash] body
     #  The response body from the LLM provider
     # @return [Hash]
-    def parse_embedding(body)
-      {
-        model: body["model"],
-        embeddings: body["data"].map { _1["embedding"] },
-        prompt_tokens: body.dig("usage", "prompt_tokens"),
-        total_tokens: body.dig("usage", "total_tokens")
-      }
+    def parse_completion(body)
+      CompletionParser.new(body).format(self)
     end

     ##
     # @param [Hash] body
     #  The response body from the LLM provider
     # @return [Hash]
-    def parse_completion(body)
-      {
-        model: body["model"],
-        choices: body["choices"].map.with_index do
-          extra = {
-            index: _2, response: self,
-            logprobs: _1["logprobs"]
-          }
-          LLM::Message.new(*_1["message"].values_at("role", "content"), extra)
-        end,
-        prompt_tokens: body.dig("usage", "prompt_tokens"),
-        completion_tokens: body.dig("usage", "completion_tokens"),
-        total_tokens: body.dig("usage", "total_tokens")
-      }
+    def parse_respond_response(body)
+      RespondParser.new(body).format(self)
     end

     ##
     # @param [Hash] body
     #  The response body from the LLM provider
     # @return [Hash]
-    def parse_output_response(body)
+    def parse_embedding(body)
       {
-        id: body["id"],
         model: body["model"],
-        input_tokens: body.dig("usage", "input_tokens"),
-        output_tokens: body.dig("usage", "output_tokens"),
-        total_tokens: body.dig("usage", "total_tokens"),
-        outputs: body["output"].filter_map.with_index do |output, index|
-          next unless output["content"]
-          extra = {
-            index:, response: self,
-            contents: output["content"],
-            annotations: output["annotations"]
-          }
-          LLM::Message.new(output["role"], text(output), extra)
-        end
+        embeddings: body["data"].map { _1["embedding"] },
+        prompt_tokens: body.dig("usage", "prompt_tokens"),
+        total_tokens: body.dig("usage", "total_tokens")
       }
     end

@@ -73,14 +46,5 @@ class LLM::OpenAI
         end
       }
     end
-
-    private
-
-    def text(output)
-      output["content"]
-        .select { _1["type"] == "output_text" }
-        .map { _1["text"] }
-        .join("\n")
-    end
   end
 end
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -21,14 +21,14 @@ class LLM::OpenAI
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   file = llm.files.create file: LLM::File("/images/hat.png")
+  #   file = llm.files.create file: "/images/hat.png"
   #   res = llm.responses.create ["Describe the image", file]
   # @example
   #   #!/usr/bin/env ruby
   #   require "llm"
   #
   #   llm = LLM.openai(ENV["KEY"])
-  #   file = llm.files.create file: LLM::File("/documents/freebsd.pdf")
+  #   file = llm.files.create file: "/documents/freebsd.pdf"
   #   res = llm.responses.create ["Describe the document", file]
   class Responses
     include Format
@@ -45,24 +45,21 @@ class LLM::OpenAI
     # Create a response
     # @see https://platform.openai.com/docs/api-reference/responses/create OpenAI docs
     # @param prompt (see LLM::Provider#complete)
-    # @param role (see LLM::Provider#complete)
-    # @param model (see LLM::Provider#complete)
-    # @param [Hash] params Response params
+    # @param params (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return [LLM::Response::Output]
-    def create(prompt, role = :user, model: @provider.default_model, schema: nil, **params)
-      params = {model:}
-        .merge!(expand_schema(schema))
-        .merge!(params)
-        .compact
+    def create(prompt, params = {})
+      params = {role: :user, model: @provider.default_model}.merge!(params)
+      params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
+      role = params.delete(:role)
       req = Net::HTTP::Post.new("/v1/responses", headers)
       messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
-      body = JSON.dump({input: format(messages, :response)}.merge!(params))
+      body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = request(http, req)
-      LLM::Response::Output.new(res).extend(response_parser)
+      LLM::Response::Respond.new(res).extend(response_parser)
     end

@@ -76,7 +73,7 @@ class LLM::OpenAI
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
       res = request(http, req)
-      LLM::Response::Output.new(res).extend(response_parser)
+      LLM::Response::Respond.new(res).extend(response_parser)
     end

@@ -98,7 +95,9 @@ class LLM::OpenAI
       @provider.instance_variable_get(:@http)
     end

-    [:response_parser, :headers, :request, :set_body_stream, :expand_schema].each do |m|
+    [:response_parser, :headers,
+     :request, :set_body_stream,
+     :format_schema, :format_tools].each do |m|
       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
     end
   end
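
For callers, role and model stop being a positional argument and a dedicated keyword; they ride in the params hash alongside the new :schema and :tools params. A hedged before and after:

    # 0.4.2
    res = llm.responses.create("Hello", :user, model: "gpt-4o-mini")
    # 0.6.0
    res = llm.responses.create("Hello", role: :user, model: "gpt-4o-mini")
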
data/lib/llm/providers/openai.rb CHANGED
@@ -8,6 +8,8 @@ module LLM
     require_relative "openai/format"
     require_relative "openai/error_handler"
     require_relative "openai/response_parser"
+    require_relative "openai/response_parser/completion_parser"
+    require_relative "openai/response_parser/respond_parser"
     require_relative "openai/responses"
     require_relative "openai/images"
     require_relative "openai/audio"
@@ -18,9 +20,9 @@ module LLM
     HOST = "api.openai.com"

     ##
-    # @param secret (see LLM::Provider#initialize)
-    def initialize(secret, **)
-      super(secret, host: HOST, **)
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
     end

     ##
@@ -42,23 +44,19 @@ module LLM
     # Provides an interface to the chat completions API
     # @see https://platform.openai.com/docs/api-reference/chat/create OpenAI docs
     # @param prompt (see LLM::Provider#complete)
-    # @param role (see LLM::Provider#complete)
-    # @param model (see LLM::Provider#complete)
-    # @param schema (see LLM::Provider#complete)
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
     # @raise [LLM::Error::PromptError]
     #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
-      params = {model:}
-        .merge!(expand_schema(schema))
-        .merge!(params)
-        .compact
+    def complete(prompt, params = {})
+      params = {role: :user, model: default_model}.merge!(params)
+      params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
+      role = params.delete(:role)
       req = Net::HTTP::Post.new("/v1/chat/completions", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      body = JSON.dump({messages: format(messages, :complete)}.merge!(params))
+      body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
@@ -123,7 +121,7 @@ module LLM
     def headers
       {
         "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@secret}"
+        "Authorization" => "Bearer #{@key}"
       }
     end

@@ -134,15 +132,5 @@ module LLM
     def error_handler
       LLM::OpenAI::ErrorHandler
     end
-
-    def expand_schema(schema)
-      return {} unless schema
-      {
-        response_format: {
-          type: "json_schema",
-          json_schema: {name: "JSONSchema", schema:}
-        }
-      }
-    end
   end
 end
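
Two breaking changes land here, mirrored by the VoyageAI hunk below: the API key moves from a positional secret to the key: keyword, and complete folds role into its params hash. A hedged migration sketch:

    # 0.4.2
    llm = LLM.openai(ENV["KEY"])
    llm.complete("Hello", :user, model: "gpt-4o-mini")
    # 0.6.0
    llm = LLM.openai(key: ENV["KEY"])
    llm.complete("Hello", role: :user, model: "gpt-4o-mini")
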
data/lib/llm/providers/voyageai.rb CHANGED
@@ -7,9 +7,9 @@ module LLM
     HOST = "api.voyageai.com"

     ##
-    # @param secret (see LLM::Provider#initialize)
-    def initialize(secret, **)
-      super(secret, host: HOST, **)
+    # @param key (see LLM::Provider#initialize)
+    def initialize(**)
+      super(host: HOST, **)
     end

     ##
@@ -29,7 +29,7 @@ module LLM
     def headers
       {
         "Content-Type" => "application/json",
-        "Authorization" => "Bearer #{@secret}"
+        "Authorization" => "Bearer #{@key}"
       }
     end
data/lib/llm/response/{output.rb → respond.rb} RENAMED
@@ -1,7 +1,7 @@
 # frozen_string_literal: true

 module LLM
-  class Response::Output < Response
+  class Response::Respond < Response
     ##
     # @return [String]
     #  Returns the id of the response
@@ -50,7 +50,7 @@ module LLM
     # @return [Hash]
     #  Returns the parsed response from the provider
     def parsed
-      @parsed ||= parse_output_response(body)
+      @parsed ||= parse_respond_response(body)
     end
   end
 end
data/lib/llm/response.rb CHANGED
@@ -5,7 +5,7 @@ module LLM
   require "json"
   require_relative "response/completion"
   require_relative "response/embedding"
-  require_relative "response/output"
+  require_relative "response/respond"
   require_relative "response/image"
   require_relative "response/audio"
   require_relative "response/audio_transcription"
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
 # frozen_string_literal: true

 module LLM
-  VERSION = "0.4.2"
+  VERSION = "0.6.0"
 end
data/lib/llm.rb CHANGED
@@ -2,6 +2,7 @@

 module LLM
   require "stringio"
+  require_relative "json/schema"
   require_relative "llm/core_ext/ostruct"
   require_relative "llm/version"
   require_relative "llm/utils"
@@ -15,47 +16,74 @@ module LLM
   require_relative "llm/provider"
   require_relative "llm/chat"
   require_relative "llm/buffer"
+  require_relative "llm/function"

   module_function

   ##
   # @param secret (see LLM::Anthropic#initialize)
   # @return (see LLM::Anthropic#initialize)
-  def anthropic(secret, options = {})
+  def anthropic(**)
     require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
     require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
-    LLM::Anthropic.new(secret, **options)
+    LLM::Anthropic.new(**)
   end

   ##
   # @param secret (see LLM::VoyageAI#initialize)
   # @return (see LLM::VoyageAI#initialize)
-  def voyageai(secret, options = {})
+  def voyageai(**)
     require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
-    LLM::VoyageAI.new(secret, **options)
+    LLM::VoyageAI.new(**)
   end

   ##
   # @param secret (see LLM::Gemini#initialize)
   # @return (see LLM::Gemini#initialize)
-  def gemini(secret, options = {})
+  def gemini(**)
     require_relative "llm/providers/gemini" unless defined?(LLM::Gemini)
-    LLM::Gemini.new(secret, **options)
+    LLM::Gemini.new(**)
   end

   ##
   # @param host (see LLM::Ollama#initialize)
   # @return (see LLM::Ollama#initialize)
-  def ollama(secret, options = {})
+  def ollama(key: nil, **)
     require_relative "llm/providers/ollama" unless defined?(LLM::Ollama)
-    LLM::Ollama.new(secret, **options)
+    LLM::Ollama.new(key:, **)
   end

   ##
   # @param secret (see LLM::OpenAI#initialize)
   # @return (see LLM::OpenAI#initialize)
-  def openai(secret, options = {})
+  def openai(**)
     require_relative "llm/providers/openai" unless defined?(LLM::OpenAI)
-    LLM::OpenAI.new(secret, **options)
+    LLM::OpenAI.new(**)
+  end
+
+  ##
+  # Define a function
+  # @example
+  #   LLM.function(:system) do |fn|
+  #     fn.description "Run system command"
+  #     fn.params do |schema|
+  #       schema.object(command: schema.string.required)
+  #     end
+  #     fn.define do |params|
+  #       system(params.command)
+  #     end
+  #   end
+  # @param [Symbol] name The name of the function
+  # @param [Proc] b The block to define the function
+  # @return [LLM::Function] The function object
+  def function(name, &b)
+    functions[name.to_s] = LLM::Function.new(name, &b)
+  end
+
+  ##
+  # Returns all known functions
+  # @return [Hash<String,LLM::Function>]
+  def functions
+    @functions ||= {}
   end
 end
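
Tying the new pieces together: a function registered with LLM.function can be handed to complete through the :tools param, which format_tools serializes for the provider, and any resulting tool calls come back through CompletionParser. A hedged end-to-end sketch built from the docstring example above:

    tool = LLM.function(:system) do |fn|
      fn.description "Run system command"
      fn.params { |schema| schema.object(command: schema.string.required) }
      fn.define { |params| system(params.command) }
    end
    llm = LLM.openai(key: ENV["KEY"])
    res = llm.complete("List the current directory", role: :user, tools: [tool])
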
data/llm.gemspec CHANGED
@@ -36,4 +36,5 @@ Gem::Specification.new do |spec|
   spec.add_development_dependency "rspec", "~> 3.0"
   spec.add_development_dependency "standard", "~> 1.40"
   spec.add_development_dependency "vcr", "~> 6.0"
+  spec.add_development_dependency "dotenv", "~> 2.8"
 end