llm.rb 0.10.1 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/README.md +120 -119
  3. data/lib/llm/bot/builder.rb +2 -2
  4. data/lib/llm/bot.rb +13 -22
  5. data/lib/llm/buffer.rb +7 -0
  6. data/lib/llm/file.rb +22 -12
  7. data/lib/llm/function.rb +8 -7
  8. data/lib/llm/message.rb +8 -0
  9. data/lib/llm/multipart.rb +0 -1
  10. data/lib/llm/object/kernel.rb +8 -0
  11. data/lib/llm/object.rb +9 -3
  12. data/lib/llm/provider.rb +10 -12
  13. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  14. data/lib/llm/providers/anthropic/models.rb +4 -9
  15. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  16. data/lib/llm/providers/anthropic.rb +13 -25
  17. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  18. data/lib/llm/providers/deepseek.rb +16 -1
  19. data/lib/llm/providers/gemini/audio.rb +9 -13
  20. data/lib/llm/providers/gemini/files.rb +19 -34
  21. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  22. data/lib/llm/providers/gemini/images.rb +12 -11
  23. data/lib/llm/providers/gemini/models.rb +4 -10
  24. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  25. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  26. data/lib/llm/providers/gemini/response/file.rb +11 -0
  27. data/lib/llm/providers/gemini/response/image.rb +26 -0
  28. data/lib/llm/providers/gemini.rb +18 -29
  29. data/lib/llm/providers/llamacpp.rb +18 -1
  30. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  31. data/lib/llm/providers/ollama/models.rb +2 -8
  32. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  33. data/lib/llm/providers/ollama/response/embedding.rb +9 -0
  34. data/lib/llm/providers/ollama.rb +13 -19
  35. data/lib/llm/providers/openai/audio.rb +10 -10
  36. data/lib/llm/providers/openai/files.rb +22 -34
  37. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  38. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  39. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  40. data/lib/llm/providers/openai/images.rb +18 -17
  41. data/lib/llm/providers/openai/models.rb +4 -9
  42. data/lib/llm/providers/openai/moderations.rb +9 -11
  43. data/lib/llm/providers/openai/response/audio.rb +7 -0
  44. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +14 -30
  45. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  46. data/lib/llm/providers/openai/response/file.rb +7 -0
  47. data/lib/llm/providers/openai/response/image.rb +16 -0
  48. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  49. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -29
  50. data/lib/llm/providers/openai/responses.rb +16 -34
  51. data/lib/llm/providers/openai/stream_parser.rb +1 -0
  52. data/lib/llm/providers/openai/vector_stores.rb +188 -0
  53. data/lib/llm/providers/openai.rb +24 -9
  54. data/lib/llm/providers/xai/images.rb +58 -0
  55. data/lib/llm/providers/xai.rb +72 -0
  56. data/lib/llm/response.rb +42 -13
  57. data/lib/llm/version.rb +1 -1
  58. data/lib/llm.rb +12 -13
  59. data/llm.gemspec +5 -5
  60. metadata +29 -38
  61. data/lib/llm/model.rb +0 -32
  62. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  63. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  64. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  65. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  66. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  67. data/lib/llm/providers/openai/response_parser.rb +0 -65
  68. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  69. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  70. data/lib/llm/providers/voyageai.rb +0 -44
  71. data/lib/llm/response/audio.rb +0 -13
  72. data/lib/llm/response/audio_transcription.rb +0 -14
  73. data/lib/llm/response/audio_translation.rb +0 -14
  74. data/lib/llm/response/completion.rb +0 -51
  75. data/lib/llm/response/download_file.rb +0 -15
  76. data/lib/llm/response/embedding.rb +0 -23
  77. data/lib/llm/response/file.rb +0 -42
  78. data/lib/llm/response/filelist.rb +0 -18
  79. data/lib/llm/response/image.rb +0 -29
  80. data/lib/llm/response/modellist.rb +0 -18
  81. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  82. data/lib/llm/response/moderationlist.rb +0 -51
  83. data/lib/llm/response/respond.rb +0 -56
  84. /data/lib/llm/{event_handler.rb → eventhandler.rb} +0 -0
@@ -32,10 +32,13 @@ module LLM::OpenAI::Format
32
32
  case content
33
33
  when URI
34
34
  [{type: :image_url, image_url: {url: content.to_s}}]
35
+ when File
36
+ content.close unless content.closed?
37
+ format_content(LLM.File(content.path))
35
38
  when LLM::File
36
39
  format_file(content)
37
- when LLM::Response::File
38
- [{type: :file, file: {file_id: content.id}}]
40
+ when LLM::Response
41
+ content.file? ? [{type: :file, file: {file_id: content.id}}] : prompt_error!(content)
39
42
  when String
40
43
  [{type: :text, text: content.to_s}]
41
44
  when LLM::Message
@@ -43,8 +46,7 @@ module LLM::OpenAI::Format
43
46
  when LLM::Function::Return
44
47
  throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
45
48
  else
46
- raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
47
- "is not supported by the OpenAI chat completions API"
49
+ prompt_error!(content)
48
50
  end
49
51
  end
50
52
 
@@ -76,6 +78,11 @@ module LLM::OpenAI::Format
76
78
  end
77
79
  end
78
80
 
81
+ def prompt_error!(content)
82
+ raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
83
+ "is not supported by the OpenAI chat completions API"
84
+ end
85
+
79
86
  def message = @message
80
87
  def content = message.content
81
88
  def returns = content.grep(LLM::Function::Return)
@@ -22,8 +22,8 @@ module LLM::OpenAI::Format
22
22
  elsif URI === input
23
23
  {type: :image_url, url: input.to_s}
24
24
  else
25
- raise LLM::Error::FormatError, "The given object (an instance of #{input.class}) " \
26
- "is not supported by OpenAI moderations API"
25
+ raise LLM::FormatError, "The given object (an instance of #{input.class}) " \
26
+ "is not supported by OpenAI moderations API"
27
27
  end
28
28
  end
29
29
  end
@@ -22,15 +22,14 @@ module LLM::OpenAI::Format
22
22
 
23
23
  def format_content(content)
24
24
  case content
25
- when LLM::Response::File
26
- format_file(content)
25
+ when LLM::Response
26
+ content.file? ? format_file(content) : prompt_error!(content)
27
27
  when String
28
28
  [{type: :input_text, text: content.to_s}]
29
29
  when LLM::Message
30
30
  format_content(content.content)
31
31
  else
32
- raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
33
- "is not supported by the OpenAI responses API"
32
+ prompt_error!(content)
34
33
  end
35
34
  end
36
35
 
@@ -62,6 +61,10 @@ module LLM::OpenAI::Format
62
61
  end
63
62
  end
64
63
 
64
+ def prompt_error!(content)
65
+ raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
66
+ "is not supported by the OpenAI responses API"
67
+ end
65
68
  def message = @message
66
69
  def content = message.content
67
70
  def returns = content.grep(LLM::Function::Return)
@@ -2,31 +2,32 @@
2
2
 
3
3
  class LLM::OpenAI
4
4
  ##
5
- # The {LLM::OpenAI::Images LLM::OpenAI::Images} class provides an images
6
- # object for interacting with [OpenAI's images API](https://platform.openai.com/docs/api-reference/images).
5
+ # The {LLM::OpenAI::Images LLM::OpenAI::Images} class provides an interface
6
+ # for [OpenAI's images API](https://platform.openai.com/docs/api-reference/images).
7
7
  # OpenAI supports multiple response formats: temporary URLs, or binary strings
8
8
  # encoded in base64. The default is to return temporary URLs.
9
9
  #
10
- # @example example #1
10
+ # @example Temporary URLs
11
11
  # #!/usr/bin/env ruby
12
12
  # require "llm"
13
13
  # require "open-uri"
14
14
  # require "fileutils"
15
15
  #
16
- # llm = LLM.openai(ENV["KEY"])
16
+ # llm = LLM.openai(key: ENV["KEY"])
17
17
  # res = llm.images.create prompt: "A dog on a rocket to the moon"
18
18
  # FileUtils.mv OpenURI.open_uri(res.urls[0]).path,
19
19
  # "rocket.png"
20
20
  #
21
- # @example example #2
21
+ # @example Binary strings
22
22
  # #!/usr/bin/env ruby
23
23
  # require "llm"
24
24
  #
25
- # llm = LLM.openai(ENV["KEY"])
25
+ # llm = LLM.openai(key: ENV["KEY"])
26
26
  # res = llm.images.create prompt: "A dog on a rocket to the moon",
27
27
  # response_format: "b64_json"
28
28
  # IO.copy_stream res.images[0], "rocket.png"
29
29
  class Images
30
+ require_relative "response/image"
30
31
  ##
31
32
  # Returns a new Images object
32
33
  # @param provider [LLM::Provider]
@@ -38,26 +39,26 @@ class LLM::OpenAI
38
39
  ##
39
40
  # Create an image
40
41
  # @example
41
- # llm = LLM.openai(ENV["KEY"])
42
+ # llm = LLM.openai(key: ENV["KEY"])
42
43
  # res = llm.images.create prompt: "A dog on a rocket to the moon"
43
- # p res.urls
44
+ # res.urls.each { print _1, "\n" }
44
45
  # @see https://platform.openai.com/docs/api-reference/images/create OpenAI docs
45
46
  # @param [String] prompt The prompt
46
47
  # @param [String] model The model to use
47
48
  # @param [Hash] params Other parameters (see OpenAI docs)
48
49
  # @raise (see LLM::Provider#request)
49
- # @return [LLM::Response::Image]
50
+ # @return [LLM::Response]
50
51
  def create(prompt:, model: "dall-e-3", **params)
51
52
  req = Net::HTTP::Post.new("/v1/images/generations", headers)
52
53
  req.body = JSON.dump({prompt:, n: 1, model:}.merge!(params))
53
54
  res = execute(request: req)
54
- LLM::Response::Image.new(res).extend(response_parser)
55
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
55
56
  end
56
57
 
57
58
  ##
58
59
  # Create image variations
59
60
  # @example
60
- # llm = LLM.openai(ENV["KEY"])
61
+ # llm = LLM.openai(key: ENV["KEY"])
61
62
  # res = llm.images.create_variation(image: "/images/hat.png", n: 5)
62
63
  # p res.urls
63
64
  # @see https://platform.openai.com/docs/api-reference/images/createVariation OpenAI docs
@@ -65,7 +66,7 @@ class LLM::OpenAI
65
66
  # @param [String] model The model to use
66
67
  # @param [Hash] params Other parameters (see OpenAI docs)
67
68
  # @raise (see LLM::Provider#request)
68
- # @return [LLM::Response::Image]
69
+ # @return [LLM::Response]
69
70
  def create_variation(image:, model: "dall-e-2", **params)
70
71
  image = LLM.File(image)
71
72
  multi = LLM::Multipart.new(params.merge!(image:, model:))
@@ -73,13 +74,13 @@ class LLM::OpenAI
73
74
  req["content-type"] = multi.content_type
74
75
  set_body_stream(req, multi.body)
75
76
  res = execute(request: req)
76
- LLM::Response::Image.new(res).extend(response_parser)
77
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
77
78
  end
78
79
 
79
80
  ##
80
81
  # Edit an image
81
82
  # @example
82
- # llm = LLM.openai(ENV["KEY"])
83
+ # llm = LLM.openai(key: ENV["KEY"])
83
84
  # res = llm.images.edit(image: "/images/hat.png", prompt: "A cat wearing this hat")
84
85
  # p res.urls
85
86
  # @see https://platform.openai.com/docs/api-reference/images/createEdit OpenAI docs
@@ -88,7 +89,7 @@ class LLM::OpenAI
88
89
  # @param [String] model The model to use
89
90
  # @param [Hash] params Other parameters (see OpenAI docs)
90
91
  # @raise (see LLM::Provider#request)
91
- # @return [LLM::Response::Image]
92
+ # @return [LLM::Response]
92
93
  def edit(image:, prompt:, model: "dall-e-2", **params)
93
94
  image = LLM.File(image)
94
95
  multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
@@ -96,12 +97,12 @@ class LLM::OpenAI
96
97
  req["content-type"] = multi.content_type
97
98
  set_body_stream(req, multi.body)
98
99
  res = execute(request: req)
99
- LLM::Response::Image.new(res).extend(response_parser)
100
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
100
101
  end
101
102
 
102
103
  private
103
104
 
104
- [:response_parser, :headers, :execute, :set_body_stream].each do |m|
105
+ [:headers, :execute, :set_body_stream].each do |m|
105
106
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
106
107
  end
107
108
  end
@@ -11,7 +11,7 @@ class LLM::OpenAI
11
11
  # #!/usr/bin/env ruby
12
12
  # require "llm"
13
13
  #
14
- # llm = LLM.openai(ENV["KEY"])
14
+ # llm = LLM.openai(key: ENV["KEY"])
15
15
  # res = llm.models.all
16
16
  # res.each do |model|
17
17
  # print "id: ", model.id, "\n"
@@ -28,7 +28,7 @@ class LLM::OpenAI
28
28
  ##
29
29
  # List all models
30
30
  # @example
31
- # llm = LLM.openai(ENV["KEY"])
31
+ # llm = LLM.openai(key: ENV["KEY"])
32
32
  # res = llm.models.all
33
33
  # res.each do |model|
34
34
  # print "id: ", model.id, "\n"
@@ -36,17 +36,12 @@ class LLM::OpenAI
36
36
  # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
37
37
  # @param [Hash] params Other parameters (see OpenAI docs)
38
38
  # @raise (see LLM::Provider#request)
39
- # @return [LLM::Response::FileList]
39
+ # @return [LLM::Response]
40
40
  def all(**params)
41
41
  query = URI.encode_www_form(params)
42
42
  req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
43
43
  res = execute(request: req)
44
- LLM::Response::ModelList.new(res).tap { |modellist|
45
- models = modellist.body["data"].map do |model|
46
- LLM::Model.from_hash(model).tap { _1.provider = @provider }
47
- end
48
- modellist.models = models
49
- }
44
+ LLM::Response.new(res)
50
45
  end
51
46
 
52
47
  private
@@ -13,7 +13,8 @@ class LLM::OpenAI
13
13
  # require "llm"
14
14
  #
15
15
  # llm = LLM.openai(key: ENV["KEY"])
16
- # mod = llm.moderations.create input: "I hate you"
16
+ # res = llm.moderations.create input: "I hate you"
17
+ # mod = res.moderations[0]
17
18
  # print "categories: #{mod.categories}", "\n"
18
19
  # print "scores: #{mod.scores}", "\n"
19
20
  #
@@ -22,13 +23,16 @@ class LLM::OpenAI
22
23
  # require "llm"
23
24
  #
24
25
  # llm = LLM.openai(key: ENV["KEY"])
25
- # mod = llm.moderations.create input: URI.parse("https://example.com/image.png")
26
+ # res = llm.moderations.create input: URI.parse("https://example.com/image.png")
27
+ # mod = res.moderations[0]
26
28
  # print "categories: #{mod.categories}", "\n"
27
29
  # print "scores: #{mod.scores}", "\n"
28
30
  #
29
31
  # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
30
32
  # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
31
33
  class Moderations
34
+ require_relative "response/moderations"
35
+
32
36
  ##
33
37
  # Returns a new Moderations object
34
38
  # @param [LLM::Provider] provider
@@ -41,26 +45,20 @@ class LLM::OpenAI
41
45
  # Create a moderation
42
46
  # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
43
47
  # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
44
- # @note
45
- # Although OpenAI mentions an array as a valid input, and that it can return one
46
- # or more moderations, in practice the API only returns one moderation object. We
47
- # recommend using a single input string or URI, and to keep in mind that llm.rb
48
- # returns a Moderation object but has code in place to return multiple objects in
49
- # the future (in case OpenAI documentation ever matches the actual API).
50
48
  # @param [String, URI, Array<String, URI>] input
51
49
  # @param [String, LLM::Model] model The model to use
52
- # @return [LLM::Response::ModerationList::Moderation]
50
+ # @return [LLM::Response]
53
51
  def create(input:, model: "omni-moderation-latest", **params)
54
52
  req = Net::HTTP::Post.new("/v1/moderations", headers)
55
53
  input = Format::ModerationFormat.new(input).format
56
54
  req.body = JSON.dump({input:, model:}.merge!(params))
57
55
  res = execute(request: req)
58
- LLM::Response::ModerationList.new(res).extend(response_parser).first
56
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Moderations)
59
57
  end
60
58
 
61
59
  private
62
60
 
63
- [:response_parser, :headers, :execute].each do |m|
61
+ [:headers, :execute].each do |m|
64
62
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
65
63
  end
66
64
  end
@@ -0,0 +1,7 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::OpenAI::Response
4
+ module Audio
5
+ def audio = body.audio
6
+ end
7
+ end
@@ -1,30 +1,13 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- module LLM::OpenAI::ResponseParser
4
- ##
5
- # @private
6
- class CompletionParser
7
- def initialize(body)
8
- @body = LLM::Object.from_hash(body)
9
- end
10
-
11
- def format(response)
12
- {
13
- model:,
14
- prompt_tokens:,
15
- completion_tokens:,
16
- total_tokens:,
17
- choices: format_choices(response)
18
- }
19
- end
20
-
21
- private
22
-
23
- def format_choices(response)
24
- choices.map.with_index do |choice, index|
3
+ module LLM::OpenAI::Response
4
+ module Completion
5
+ def choices
6
+ body.choices.map.with_index do |choice, index|
7
+ choice = LLM::Object.from_hash(choice)
25
8
  message = choice.message
26
9
  extra = {
27
- index:, response:,
10
+ index:, response: self,
28
11
  logprobs: choice.logprobs,
29
12
  tool_calls: format_tool_calls(message.tool_calls),
30
13
  original_tool_calls: message.tool_calls
@@ -32,6 +15,14 @@ module LLM::OpenAI::ResponseParser
32
15
  LLM::Message.new(message.role, message.content, extra)
33
16
  end
34
17
  end
18
+ alias_method :messages, :choices
19
+
20
+ def model = body.model
21
+ def prompt_tokens = body.usage&.prompt_tokens
22
+ def completion_tokens = body.usage&.completion_tokens
23
+ def total_tokens = body.usage&.total_tokens
24
+
25
+ private
35
26
 
36
27
  def format_tool_calls(tools)
37
28
  (tools || []).filter_map do |tool|
@@ -44,12 +35,5 @@ module LLM::OpenAI::ResponseParser
44
35
  LLM::Object.new(tool)
45
36
  end
46
37
  end
47
-
48
- def body = @body
49
- def model = body.model
50
- def prompt_tokens = body.usage&.prompt_tokens
51
- def completion_tokens = body.usage&.completion_tokens
52
- def total_tokens = body.usage&.total_tokens
53
- def choices = body.choices
54
38
  end
55
39
  end
@@ -0,0 +1,9 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::OpenAI::Response
4
+ module Embedding
5
+ def embeddings = data.map { _1["embedding"] }
6
+ def prompt_tokens = data.dig(0, "usage", "prompt_tokens")
7
+ def total_tokens = data.dig(0, "usage", "total_tokens")
8
+ end
9
+ end
@@ -0,0 +1,7 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::OpenAI::Response
4
+ module File
5
+ def file? = true
6
+ end
7
+ end
@@ -0,0 +1,16 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::OpenAI::Response
4
+ module Image
5
+ def urls
6
+ data.filter_map { _1["url"] }
7
+ end
8
+
9
+ def images
10
+ data.filter_map do
11
+ next unless _1["b64_json"]
12
+ StringIO.new(_1["b64_json"].unpack1("m0"))
13
+ end
14
+ end
15
+ end
16
+ end
@@ -0,0 +1,34 @@
1
+ # frozen_string_literal: true
2
+
3
+ module LLM::OpenAI::Response
4
+ module Moderations
5
+ ##
6
+ # @return [Array<LLM::Response>]
7
+ def moderations
8
+ @moderations ||= body.results.map { _1.extend(Moderation) }
9
+ end
10
+ end
11
+
12
+ module Moderation
13
+ ##
14
+ # Returns true if the moderation is flagged
15
+ # @return [Boolean]
16
+ def flagged?
17
+ body.flagged
18
+ end
19
+
20
+ ##
21
+ # Returns the moderation categories
22
+ # @return [Array<String>]
23
+ def categories
24
+ self["categories"].filter_map { _2 ? _1 : nil }
25
+ end
26
+
27
+ ##
28
+ # Returns the moderation scores
29
+ # @return [Hash]
30
+ def scores
31
+ self["category_scores"].select { |(key, _)| categories.include?(key) }.to_h
32
+ end
33
+ end
34
+ end
@@ -1,28 +1,15 @@
1
1
  # frozen_string_literal: true
2
2
 
3
- module LLM::OpenAI::ResponseParser
4
- ##
5
- # @private
6
- class RespondParser
7
- def initialize(body)
8
- @body = LLM::Object.from_hash(body)
9
- end
10
-
11
- def format(response)
12
- {
13
- id:,
14
- model:,
15
- input_tokens:,
16
- output_tokens:,
17
- total_tokens:,
18
- outputs: [format_message(response)]
19
- }
20
- end
3
+ module LLM::OpenAI::Response
4
+ module Responds
5
+ def outputs = [format_message]
6
+ def choices = body.output
7
+ def tools = output.select { _1.type == "function_call" }
21
8
 
22
9
  private
23
10
 
24
- def format_message(response)
25
- message = LLM::Message.new("assistant", +"", {response:, tool_calls: []})
11
+ def format_message
12
+ message = LLM::Message.new("assistant", +"", {response: self, tool_calls: []})
26
13
  choices.each.with_index do |choice, index|
27
14
  if choice.type == "function_call"
28
15
  message.extra[:tool_calls] << format_tool(choice)
@@ -43,14 +30,5 @@ module LLM::OpenAI::ResponseParser
43
30
  arguments: JSON.parse(tool.arguments)
44
31
  )
45
32
  end
46
-
47
- def body = @body
48
- def id = body.id
49
- def model = body.model
50
- def input_tokens = body.usage.input_tokens
51
- def output_tokens = body.usage.output_tokens
52
- def total_tokens = body.usage.total_tokens
53
- def choices = body.output
54
- def tools = output.select { _1.type == "function_call" }
55
33
  end
56
34
  end
@@ -2,37 +2,19 @@
2
2
 
3
3
  class LLM::OpenAI
4
4
  ##
5
- # The {LLM::OpenAI::Responses LLM::OpenAI::Responses} class provides a responses
6
- # object for interacting with [OpenAI's response API](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
7
- # The responses API is similar to the chat completions API but it can maintain
8
- # conversation state across multiple requests. This is useful when you want to
9
- # save bandwidth and/or not maintain the message thread by yourself.
5
+ # The {LLM::OpenAI::Responses LLM::OpenAI::Responses} class provides
6
+ # an interface for [OpenAI's response API](https://platform.openai.com/docs/guides/conversation-state?api-mode=responses).
10
7
  #
11
8
  # @example example #1
12
9
  # #!/usr/bin/env ruby
13
10
  # require "llm"
14
11
  #
15
- # llm = LLM.openai(ENV["KEY"])
16
- # res1 = llm.responses.create "Your task is to help me with math", role: :developer
17
- # res2 = llm.responses.create "5 + 5 = ?", role: :user, previous_response_id: res1.id
18
- # [res1,res2].each { llm.responses.delete(_1) }
19
- #
20
- # @example example #2
21
- # #!/usr/bin/env ruby
22
- # require "llm"
23
- #
24
- # llm = LLM.openai(ENV["KEY"])
25
- # file = llm.files.create file: "/images/hat.png"
26
- # res = llm.responses.create ["Describe the image", file]
27
- #
28
- # @example example #3
29
- # #!/usr/bin/env ruby
30
- # require "llm"
31
- #
32
- # llm = LLM.openai(ENV["KEY"])
33
- # file = llm.files.create file: "/documents/freebsd.pdf"
34
- # res = llm.responses.create ["Describe the document, file]
12
+ # llm = LLM.openai(key: ENV["KEY"])
13
+ # res1 = llm.responses.create "Your task is to answer the user's questions", role: :developer
14
+ # res2 = llm.responses.create "5 + 5 = X ?", role: :user, previous_response_id: res1.id
15
+ # [res1, res2].each { llm.responses.delete(_1) }
35
16
  class Responses
17
+ require_relative "response/responds"
36
18
  include Format
37
19
 
38
20
  ##
@@ -49,9 +31,9 @@ class LLM::OpenAI
49
31
  # @param prompt (see LLM::Provider#complete)
50
32
  # @param params (see LLM::Provider#complete)
51
33
  # @raise (see LLM::Provider#request)
52
- # @raise [LLM::Error::PromptError]
34
+ # @raise [LLM::PromptError]
53
35
  # When given an object a provider does not understand
54
- # @return [LLM::Response::Output]
36
+ # @return [LLM::Response]
55
37
  def create(prompt, params = {})
56
38
  params = {role: :user, model: @provider.default_model}.merge!(params)
57
39
  params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
@@ -61,7 +43,7 @@ class LLM::OpenAI
61
43
  body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
62
44
  set_body_stream(req, StringIO.new(body))
63
45
  res = execute(request: req)
64
- LLM::Response::Respond.new(res).extend(response_parser)
46
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Responds)
65
47
  end
66
48
 
67
49
  ##
@@ -69,13 +51,13 @@ class LLM::OpenAI
69
51
  # @see https://platform.openai.com/docs/api-reference/responses/get OpenAI docs
70
52
  # @param [#id, #to_s] response Response ID
71
53
  # @raise (see LLM::Provider#request)
72
- # @return [LLM::Response::Output]
54
+ # @return [LLM::Response]
73
55
  def get(response, **params)
74
56
  response_id = response.respond_to?(:id) ? response.id : response
75
57
  query = URI.encode_www_form(params)
76
58
  req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
77
59
  res = execute(request: req)
78
- LLM::Response::Respond.new(res).extend(response_parser)
60
+ LLM::Response.new(res).extend(LLM::OpenAI::Response::Responds)
79
61
  end
80
62
 
81
63
  ##
@@ -88,14 +70,14 @@ class LLM::OpenAI
88
70
  response_id = response.respond_to?(:id) ? response.id : response
89
71
  req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
90
72
  res = execute(request: req)
91
- LLM::Object.from_hash JSON.parse(res.body)
73
+ LLM::Response.new(res)
92
74
  end
93
75
 
94
76
  private
95
77
 
96
- [:response_parser, :headers,
97
- :execute, :set_body_stream,
98
- :format_schema, :format_tools].each do |m|
78
+ [:headers, :execute,
79
+ :set_body_stream, :format_schema,
80
+ :format_tools].each do |m|
99
81
  define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
100
82
  end
101
83
  end
@@ -52,6 +52,7 @@ class LLM::OpenAI
52
52
  target[key] = value
53
53
  end
54
54
  else
55
+ @io << value if @io.respond_to?(:<<)
55
56
  target[key] = value
56
57
  end
57
58
  end