llm.rb 0.10.1 → 0.11.0

Files changed (115)
  1. checksums.yaml +4 -4
  2. data/LICENSE +0 -0
  3. data/README.md +81 -117
  4. data/lib/llm/bot/builder.rb +2 -2
  5. data/lib/llm/bot/conversable.rb +0 -0
  6. data/lib/llm/bot/prompt/completion.rb +0 -0
  7. data/lib/llm/bot/prompt/respond.rb +0 -0
  8. data/lib/llm/bot.rb +9 -11
  9. data/lib/llm/buffer.rb +0 -0
  10. data/lib/llm/error.rb +0 -0
  11. data/lib/llm/event_handler.rb +0 -0
  12. data/lib/llm/eventstream/event.rb +0 -0
  13. data/lib/llm/eventstream/parser.rb +0 -0
  14. data/lib/llm/eventstream.rb +0 -0
  15. data/lib/llm/file.rb +18 -9
  16. data/lib/llm/function.rb +6 -5
  17. data/lib/llm/json/schema/array.rb +0 -0
  18. data/lib/llm/json/schema/boolean.rb +0 -0
  19. data/lib/llm/json/schema/integer.rb +0 -0
  20. data/lib/llm/json/schema/leaf.rb +0 -0
  21. data/lib/llm/json/schema/null.rb +0 -0
  22. data/lib/llm/json/schema/number.rb +0 -0
  23. data/lib/llm/json/schema/object.rb +0 -0
  24. data/lib/llm/json/schema/string.rb +0 -0
  25. data/lib/llm/json/schema/version.rb +0 -0
  26. data/lib/llm/json/schema.rb +0 -0
  27. data/lib/llm/message.rb +8 -0
  28. data/lib/llm/mime.rb +0 -0
  29. data/lib/llm/multipart.rb +0 -0
  30. data/lib/llm/object/builder.rb +0 -0
  31. data/lib/llm/object/kernel.rb +8 -0
  32. data/lib/llm/object.rb +7 -0
  33. data/lib/llm/provider.rb +9 -11
  34. data/lib/llm/providers/anthropic/error_handler.rb +0 -0
  35. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  36. data/lib/llm/providers/anthropic/format.rb +0 -0
  37. data/lib/llm/providers/anthropic/models.rb +2 -7
  38. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  39. data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
  40. data/lib/llm/providers/anthropic.rb +3 -24
  41. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  42. data/lib/llm/providers/deepseek/format.rb +0 -0
  43. data/lib/llm/providers/deepseek.rb +6 -0
  44. data/lib/llm/providers/gemini/audio.rb +6 -10
  45. data/lib/llm/providers/gemini/error_handler.rb +0 -0
  46. data/lib/llm/providers/gemini/files.rb +11 -14
  47. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  48. data/lib/llm/providers/gemini/format.rb +0 -0
  49. data/lib/llm/providers/gemini/images.rb +8 -7
  50. data/lib/llm/providers/gemini/models.rb +2 -8
  51. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  52. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  53. data/lib/llm/providers/gemini/response/file.rb +11 -0
  54. data/lib/llm/providers/gemini/response/image.rb +26 -0
  55. data/lib/llm/providers/gemini/stream_parser.rb +0 -0
  56. data/lib/llm/providers/gemini.rb +5 -8
  57. data/lib/llm/providers/llamacpp.rb +6 -0
  58. data/lib/llm/providers/ollama/error_handler.rb +0 -0
  59. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  60. data/lib/llm/providers/ollama/format.rb +0 -0
  61. data/lib/llm/providers/ollama/models.rb +2 -8
  62. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  63. data/lib/llm/providers/ollama/response/embedding.rb +10 -0
  64. data/lib/llm/providers/ollama/stream_parser.rb +0 -0
  65. data/lib/llm/providers/ollama.rb +5 -8
  66. data/lib/llm/providers/openai/audio.rb +6 -6
  67. data/lib/llm/providers/openai/error_handler.rb +0 -0
  68. data/lib/llm/providers/openai/files.rb +14 -15
  69. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  70. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  71. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  72. data/lib/llm/providers/openai/format.rb +0 -0
  73. data/lib/llm/providers/openai/images.rb +8 -7
  74. data/lib/llm/providers/openai/models.rb +2 -7
  75. data/lib/llm/providers/openai/moderations.rb +9 -11
  76. data/lib/llm/providers/openai/response/audio.rb +7 -0
  77. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +15 -31
  78. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  79. data/lib/llm/providers/openai/response/file.rb +7 -0
  80. data/lib/llm/providers/openai/response/image.rb +16 -0
  81. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  82. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -28
  83. data/lib/llm/providers/openai/responses.rb +10 -9
  84. data/lib/llm/providers/openai/stream_parser.rb +0 -0
  85. data/lib/llm/providers/openai/vector_stores.rb +106 -0
  86. data/lib/llm/providers/openai.rb +14 -8
  87. data/lib/llm/response.rb +37 -13
  88. data/lib/llm/utils.rb +0 -0
  89. data/lib/llm/version.rb +1 -1
  90. data/lib/llm.rb +2 -12
  91. data/llm.gemspec +1 -1
  92. metadata +18 -29
  93. data/lib/llm/model.rb +0 -32
  94. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  95. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  96. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  97. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  98. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  99. data/lib/llm/providers/openai/response_parser.rb +0 -65
  100. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  101. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  102. data/lib/llm/providers/voyageai.rb +0 -44
  103. data/lib/llm/response/audio.rb +0 -13
  104. data/lib/llm/response/audio_transcription.rb +0 -14
  105. data/lib/llm/response/audio_translation.rb +0 -14
  106. data/lib/llm/response/completion.rb +0 -51
  107. data/lib/llm/response/download_file.rb +0 -15
  108. data/lib/llm/response/embedding.rb +0 -23
  109. data/lib/llm/response/file.rb +0 -42
  110. data/lib/llm/response/filelist.rb +0 -18
  111. data/lib/llm/response/image.rb +0 -29
  112. data/lib/llm/response/modellist.rb +0 -18
  113. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  114. data/lib/llm/response/moderationlist.rb +0 -51
  115. data/lib/llm/response/respond.rb +0 -56
data/lib/llm/providers/ollama.rb
@@ -19,10 +19,11 @@ module LLM
   #  bot.chat "Describe the image"
   #  bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class Ollama < Provider
+    require_relative "ollama/response/embedding"
+    require_relative "ollama/response/completion"
     require_relative "ollama/error_handler"
     require_relative "ollama/format"
     require_relative "ollama/stream_parser"
-    require_relative "ollama/response_parser"
     require_relative "ollama/models"
 
     include Format
@@ -47,7 +48,7 @@ module LLM
       req = Net::HTTP::Post.new("/v1/embeddings", headers)
      req.body = JSON.dump({input:}.merge!(params))
       res = execute(request: req)
-      Response::Embedding.new(res).extend(response_parser)
+      LLM::Response.new(res).extend(LLM::Ollama::Response::Embedding)
     end
 
     ##
@@ -57,7 +58,7 @@ module LLM
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
-    # @raise [LLM::Error::PromptError]
+    # @raise [LLM::PromptError]
    #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
    def complete(prompt, params = {})
@@ -70,7 +71,7 @@ module LLM
       body = JSON.dump({messages: [format(messages)].flatten}.merge!(params))
       set_body_stream(req, StringIO.new(body))
       res = execute(request: req, stream:)
-      Response::Completion.new(res).extend(response_parser)
+      LLM::Response.new(res).extend(LLM::Ollama::Response::Completion)
     end
 
     ##
@@ -104,10 +105,6 @@ module LLM
       )
     end
 
-    def response_parser
-      LLM::Ollama::ResponseParser
-    end
-
    def stream_parser
      LLM::Ollama::StreamParser
    end
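The swap from `Response::Completion.new(res).extend(response_parser)` to `LLM::Response.new(res).extend(LLM::Ollama::Response::Completion)` is the refactor this release repeats across every provider: dedicated `Response::*` subclasses give way to one generic `LLM::Response` that gains endpoint-specific readers at runtime via `Object#extend`, which is why the `response_parser` plumbing above is deleted. A minimal, self-contained sketch of that pattern (every name below is a hypothetical stand-in, not the llm.rb API):

    require "json"

    # Hypothetical stand-ins that mirror the extend-based pattern in the diff.
    FakeHTTPResponse = Struct.new(:body)

    class GenericResponse
      attr_reader :body

      def initialize(res)
        # Parse the raw HTTP body once; endpoint-specific readers are mixed in later.
        @body = JSON.parse(res.body)
      end
    end

    module EmbeddingReaders
      # Interprets the parsed body for one endpoint, instead of a Response subclass.
      def embeddings = body["embeddings"]
    end

    raw = FakeHTTPResponse.new('{"embeddings":[[0.1,0.2,0.3]]}')
    res = GenericResponse.new(raw).extend(EmbeddingReaders)
    p res.embeddings #=> [[0.1, 0.2, 0.3]]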
data/lib/llm/providers/openai/audio.rb
@@ -30,13 +30,13 @@ class LLM::OpenAI
     # @param [String] response_format The response format
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::Audio]
+    # @return [LLM::Response]
     def create_speech(input:, voice: "alloy", model: "gpt-4o-mini-tts", response_format: "mp3", **params)
       req = Net::HTTP::Post.new("/v1/audio/speech", headers)
       req.body = JSON.dump({input:, voice:, model:, response_format:}.merge!(params))
       io = StringIO.new("".b)
       res = execute(request: req) { _1.read_body { |chunk| io << chunk } }
-      LLM::Response::Audio.new(res).tap { _1.audio = io }
+      LLM::Response.new(res).tap { _1.define_singleton_method(:audio) { io } }
     end
 
     ##
@@ -50,14 +50,14 @@ class LLM::OpenAI
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::AudioTranscription]
+    # @return [LLM::Response]
     def create_transcription(file:, model: "whisper-1", **params)
       multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), model:))
       req = Net::HTTP::Post.new("/v1/audio/transcriptions", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res = execute(request: req)
-      LLM::Response::AudioTranscription.new(res).tap { _1.text = _1.body["text"] }
+      LLM::Response.new(res)
     end
 
     ##
@@ -72,14 +72,14 @@ class LLM::OpenAI
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::AudioTranslation]
+    # @return [LLM::Response]
     def create_translation(file:, model: "whisper-1", **params)
       multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), model:))
       req = Net::HTTP::Post.new("/v1/audio/translations", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res = execute(request: req)
-      LLM::Response::AudioTranslation.new(res).tap { _1.text = _1.body["text"] }
+      LLM::Response.new(res)
     end
 
     private
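Where a response still needs a one-off attribute, such as the streamed speech bytes, the new code attaches it with `define_singleton_method` rather than an `attr_accessor` on a subclass. The technique in isolation (a plain `Object` stand-in, not the llm.rb classes):

    require "stringio"

    # Attach a reader to a single object, as the diff does for streamed audio.
    response = Object.new
    io = StringIO.new("fake audio bytes".b)
    response.define_singleton_method(:audio) { io }
    p response.audio.string #=> "fake audio bytes"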
data/lib/llm/providers/openai/files.rb
@@ -29,6 +29,8 @@ class LLM::OpenAI
   #  bot.chat(["Describe the document I sent to you", file])
   #  bot.messages.select(&:assistant?).each { print "[#{_1.role}]", _1.content, "\n" }
   class Files
+    require_relative "response/file"
+
     ##
     # Returns a new Files object
     # @param provider [LLM::Provider]
@@ -48,15 +50,12 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/files/list OpenAI docs
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::FileList]
+    # @return [LLM::Response]
     def all(**params)
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/files?#{query}", headers)
       res = execute(request: req)
-      LLM::Response::FileList.new(res).tap { |filelist|
-        files = filelist.body["data"].map { LLM::Object.from_hash(_1) }
-        filelist.files = files
-      }
+      LLM::Response.new(res)
     end
 
     ##
@@ -65,18 +64,18 @@ class LLM::OpenAI
     #  llm = LLM.openai(ENV["KEY"])
     #  res = llm.files.create file: "/documents/haiku.txt"
     # @see https://platform.openai.com/docs/api-reference/files/create OpenAI docs
-    # @param [File] file The file
+    # @param [File, LLM::File, String] file The file
     # @param [String] purpose The purpose of the file (see OpenAI docs)
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::File]
+    # @return [LLM::Response]
     def create(file:, purpose: "assistants", **params)
-      multi = LLM::Multipart.new(params.merge!(file:, purpose:))
+      multi = LLM::Multipart.new(params.merge!(file: LLM.File(file), purpose:))
       req = Net::HTTP::Post.new("/v1/files", headers)
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res = execute(request: req)
-      LLM::Response::File.new(res)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::File)
     end
 
     ##
@@ -89,13 +88,13 @@ class LLM::OpenAI
     # @param [#id, #to_s] file The file ID
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::File]
+    # @return [LLM::Response]
     def get(file:, **params)
       file_id = file.respond_to?(:id) ? file.id : file
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/files/#{file_id}?#{query}", headers)
       res = execute(request: req)
-      LLM::Response::File.new(res)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::File)
     end
 
     ##
@@ -109,14 +108,14 @@ class LLM::OpenAI
     # @param [#id, #to_s] file The file ID
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::DownloadFile]
+    # @return [LLM::Response]
     def download(file:, **params)
       query = URI.encode_www_form(params)
       file_id = file.respond_to?(:id) ? file.id : file
       req = Net::HTTP::Get.new("/v1/files/#{file_id}/content?#{query}", headers)
       io = StringIO.new("".b)
       res = execute(request: req) { |res| res.read_body { |chunk| io << chunk } }
-      LLM::Response::DownloadFile.new(res).tap { _1.file = io }
+      LLM::Response.new(res).tap { _1.define_singleton_method(:file) { io } }
     end
 
     ##
@@ -128,12 +127,12 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/files/delete OpenAI docs
     # @param [#id, #to_s] file The file ID
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Object] Response body
+    # @return [LLM::Response]
     def delete(file:)
       file_id = file.respond_to?(:id) ? file.id : file
       req = Net::HTTP::Delete.new("/v1/files/#{file_id}", headers)
       res = execute(request: req)
-      LLM::Object.from_hash JSON.parse(res.body)
+      LLM::Response.new(res)
     end
 
     private
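`get`, `download`, and `delete` all normalize their `file:` argument with the same duck-typed line, `file.respond_to?(:id) ? file.id : file`, so callers can pass either a previously returned response or a bare ID string. A sketch of that idiom (`FileStub` and `file_id_for` are hypothetical stand-ins):

    # FileStub stands in for a response object that knows its ID.
    FileStub = Struct.new(:id)

    def file_id_for(file)
      # Accept either an object with an #id reader or a raw ID string.
      file.respond_to?(:id) ? file.id : file
    end

    p file_id_for(FileStub.new("file-abc123")) #=> "file-abc123"
    p file_id_for("file-abc123")               #=> "file-abc123"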
data/lib/llm/providers/openai/format/completion_format.rb
@@ -32,10 +32,13 @@ module LLM::OpenAI::Format
       case content
       when URI
         [{type: :image_url, image_url: {url: content.to_s}}]
+      when File
+        content.close unless content.closed?
+        format_content(LLM.File(content.path))
       when LLM::File
         format_file(content)
-      when LLM::Response::File
-        [{type: :file, file: {file_id: content.id}}]
+      when LLM::Response
+        content.file? ? [{type: :file, file: {file_id: content.id}}] : prompt_error!(content)
       when String
         [{type: :text, text: content.to_s}]
       when LLM::Message
@@ -43,8 +46,7 @@ module LLM::OpenAI::Format
       when LLM::Function::Return
         throw(:abort, {role: "tool", tool_call_id: content.id, content: JSON.dump(content.value)})
       else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-          "is not supported by the OpenAI chat completions API"
+        prompt_error!(content)
       end
     end
 
@@ -76,6 +78,11 @@ module LLM::OpenAI::Format
       end
     end
 
+    def prompt_error!(content)
+      raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+        "is not supported by the OpenAI chat completions API"
+    end
+
    def message = @message
    def content = message.content
    def returns = content.grep(LLM::Function::Return)
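The new `when File` branch closes the handle and re-enters `format_content` with `LLM.File(content.path)`, so a raw `File` is picked up by the existing `LLM::File` branch on the second pass. A self-contained sketch of that re-dispatch (`PathRef` and `describe` are hypothetical stand-ins; `LLM.File`/`LLM::File` are the real llm.rb names):

    # PathRef stands in for the object LLM.File would return.
    PathRef = Struct.new(:path)

    def describe(content)
      case content
      when File
        content.close unless content.closed?
        describe(PathRef.new(content.path)) # re-enter the case with the converted object
      when PathRef
        "file reference: #{content.path}"
      else
        raise ArgumentError, "unsupported: #{content.class}"
      end
    end

    p describe(File.open(__FILE__)) #=> "file reference: ..."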
data/lib/llm/providers/openai/format/moderation_format.rb
@@ -22,8 +22,8 @@ module LLM::OpenAI::Format
       elsif URI === input
         {type: :image_url, url: input.to_s}
       else
-        raise LLM::Error::FormatError, "The given object (an instance of #{input.class}) " \
-          "is not supported by OpenAI moderations API"
+        raise LLM::FormatError, "The given object (an instance of #{input.class}) " \
+              "is not supported by OpenAI moderations API"
       end
     end
   end
data/lib/llm/providers/openai/format/respond_format.rb
@@ -22,15 +22,14 @@ module LLM::OpenAI::Format
 
    def format_content(content)
      case content
-      when LLM::Response::File
-        format_file(content)
+      when LLM::Response
+        content.file? ? format_file(content) : prompt_error!(content)
      when String
        [{type: :input_text, text: content.to_s}]
      when LLM::Message
        format_content(content.content)
      else
-        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
-          "is not supported by the OpenAI responses API"
+        prompt_error!(content)
      end
    end
 
@@ -62,6 +61,10 @@ module LLM::OpenAI::Format
       end
     end
 
+    def prompt_error!(content)
+      raise LLM::PromptError, "The given object (an instance of #{content.class}) " \
+        "is not supported by the OpenAI responses API"
+    end
    def message = @message
    def content = message.content
    def returns = content.grep(LLM::Function::Return)
data/lib/llm/providers/openai/images.rb
@@ -27,6 +27,7 @@ class LLM::OpenAI
   #  response_format: "b64_json"
   #  IO.copy_stream res.images[0], "rocket.png"
   class Images
+    require_relative "response/image"
     ##
     # Returns a new Images object
     # @param provider [LLM::Provider]
@@ -46,12 +47,12 @@ class LLM::OpenAI
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::Image]
+    # @return [LLM::Response]
     def create(prompt:, model: "dall-e-3", **params)
       req = Net::HTTP::Post.new("/v1/images/generations", headers)
       req.body = JSON.dump({prompt:, n: 1, model:}.merge!(params))
       res = execute(request: req)
-      LLM::Response::Image.new(res).extend(response_parser)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
     end
 
     ##
@@ -65,7 +66,7 @@ class LLM::OpenAI
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::Image]
+    # @return [LLM::Response]
     def create_variation(image:, model: "dall-e-2", **params)
       image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, model:))
@@ -73,7 +74,7 @@ class LLM::OpenAI
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res = execute(request: req)
-      LLM::Response::Image.new(res).extend(response_parser)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
     end
 
     ##
@@ -88,7 +89,7 @@ class LLM::OpenAI
     # @param [String] model The model to use
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::Image]
+    # @return [LLM::Response]
     def edit(image:, prompt:, model: "dall-e-2", **params)
       image = LLM.File(image)
       multi = LLM::Multipart.new(params.merge!(image:, prompt:, model:))
@@ -96,12 +97,12 @@ class LLM::OpenAI
       req["content-type"] = multi.content_type
       set_body_stream(req, multi.body)
       res = execute(request: req)
-      LLM::Response::Image.new(res).extend(response_parser)
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::Image)
     end
 
     private
 
-    [:response_parser, :headers, :execute, :set_body_stream].each do |m|
+    [:headers, :execute, :set_body_stream].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
data/lib/llm/providers/openai/models.rb
@@ -36,17 +36,12 @@ class LLM::OpenAI
     # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
     # @param [Hash] params Other parameters (see OpenAI docs)
     # @raise (see LLM::Provider#request)
-    # @return [LLM::Response::FileList]
+    # @return [LLM::Response]
     def all(**params)
       query = URI.encode_www_form(params)
       req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
       res = execute(request: req)
-      LLM::Response::ModelList.new(res).tap { |modellist|
-        models = modellist.body["data"].map do |model|
-          LLM::Model.from_hash(model).tap { _1.provider = @provider }
-        end
-        modellist.models = models
-      }
+      LLM::Response.new(res)
     end
 
     private
data/lib/llm/providers/openai/moderations.rb
@@ -13,7 +13,8 @@ class LLM::OpenAI
   #  require "llm"
   #
   #  llm = LLM.openai(key: ENV["KEY"])
-  #  mod = llm.moderations.create input: "I hate you"
+  #  res = llm.moderations.create input: "I hate you"
+  #  mod = res.moderations[0]
   #  print "categories: #{mod.categories}", "\n"
   #  print "scores: #{mod.scores}", "\n"
   #
@@ -22,13 +23,16 @@ class LLM::OpenAI
   #  require "llm"
   #
   #  llm = LLM.openai(key: ENV["KEY"])
-  #  mod = llm.moderations.create input: URI.parse("https://example.com/image.png")
+  #  res = llm.moderations.create input: URI.parse("https://example.com/image.png")
+  #  mod = res.moderations[0]
   #  print "categories: #{mod.categories}", "\n"
   #  print "scores: #{mod.scores}", "\n"
   #
   # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
   # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
   class Moderations
+    require_relative "response/moderations"
+
     ##
     # Returns a new Moderations object
     # @param [LLM::Provider] provider
@@ -41,26 +45,20 @@ class LLM::OpenAI
     # Create a moderation
     # @see https://platform.openai.com/docs/api-reference/moderations/create OpenAI docs
     # @see https://platform.openai.com/docs/models#moderation OpenAI moderation models
-    # @note
-    #  Although OpenAI mentions an array as a valid input, and that it can return one
-    #  or more moderations, in practice the API only returns one moderation object. We
-    #  recommend using a single input string or URI, and to keep in mind that llm.rb
-    #  returns a Moderation object but has code in place to return multiple objects in
-    #  the future (in case OpenAI documentation ever matches the actual API).
     # @param [String, URI, Array<String, URI>] input
     # @param [String, LLM::Model] model The model to use
-    # @return [LLM::Response::ModerationList::Moderation]
+    # @return [LLM::Response]
     def create(input:, model: "omni-moderation-latest", **params)
       req = Net::HTTP::Post.new("/v1/moderations", headers)
       input = Format::ModerationFormat.new(input).format
       req.body = JSON.dump({input:, model:}.merge!(params))
       res = execute(request: req)
-      LLM::Response::ModerationList.new(res).extend(response_parser).first
+      LLM::Response.new(res).extend(LLM::OpenAI::Response::Moderations)
     end
 
     private
 
-    [:response_parser, :headers, :execute].each do |m|
+    [:headers, :execute].each do |m|
       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
     end
   end
data/lib/llm/providers/openai/response/audio.rb (new file)
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Response
+  module Audio
+    def audio = body.audio
+  end
+end
data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb}
@@ -1,30 +1,13 @@
 # frozen_string_literal: true
 
-module LLM::OpenAI::ResponseParser
-  ##
-  # @private
-  class CompletionParser
-    def initialize(body)
-      @body = LLM::Object.from_hash(body)
-    end
-
-    def format(response)
-      {
-        model:,
-        prompt_tokens:,
-        completion_tokens:,
-        total_tokens:,
-        choices: format_choices(response)
-      }
-    end
-
-    private
-
-    def format_choices(response)
-      choices.map.with_index do |choice, index|
+module LLM::OpenAI::Response
+  module Completion
+    def choices
+      body.choices.map.with_index do |choice, index|
+        choice = LLM::Object.from_hash(choice)
         message = choice.message
         extra = {
-          index:, response:,
+          index:, response: self,
           logprobs: choice.logprobs,
           tool_calls: format_tool_calls(message.tool_calls),
           original_tool_calls: message.tool_calls
@@ -32,6 +15,14 @@ module LLM::OpenAI::ResponseParser
         LLM::Message.new(message.role, message.content, extra)
       end
     end
+    alias_method :messages, :choices
+
+    def model = body.model
+    def prompt_tokens = body.usage&.prompt_tokens
+    def completion_tokens = body.usage&.completion_tokens
+    def total_tokens = body.usage&.total_tokens
+
+    private
 
     def format_tool_calls(tools)
       (tools || []).filter_map do |tool|
@@ -44,12 +35,5 @@ module LLM::OpenAI::ResponseParser
         LLM::Object.new(tool)
       end
     end
-
-    def body = @body
-    def model = body.model
-    def prompt_tokens = body.usage&.prompt_tokens
-    def completion_tokens = body.usage&.completion_tokens
-    def total_tokens = body.usage&.total_tokens
-    def choices = body.choices
   end
-end
+end
data/lib/llm/providers/openai/response/embedding.rb (new file)
@@ -0,0 +1,9 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Response
+  module Embedding
+    def embeddings = data.map { _1["embedding"] }
+    def prompt_tokens = data.dig(0, "usage", "prompt_tokens")
+    def total_tokens = data.dig(0, "usage", "total_tokens")
+  end
+end
data/lib/llm/providers/openai/response/file.rb (new file)
@@ -0,0 +1,7 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Response
+  module File
+    def file? = true
+  end
+end
data/lib/llm/providers/openai/response/image.rb (new file)
@@ -0,0 +1,16 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Response
+  module Image
+    def urls
+      data.filter_map { _1["url"] }
+    end
+
+    def images
+      data.filter_map do
+        next unless _1["b64_json"]
+        StringIO.new(_1["b64_json"].unpack1("m0"))
+      end
+    end
+  end
+end
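`images` decodes each `b64_json` payload with `unpack1("m0")`, Ruby's strict base64 directive. The round trip, shown with hypothetical data:

    require "stringio"

    # "m0" packs/unpacks strict base64 (no line breaks); pack and unpack are inverses.
    png_header = "\x89PNG\r\n".b
    b64_json = [png_header].pack("m0")
    io = StringIO.new(b64_json.unpack1("m0"))
    p io.string == png_header #=> true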
data/lib/llm/providers/openai/response/moderations.rb (new file)
@@ -0,0 +1,34 @@
+# frozen_string_literal: true
+
+module LLM::OpenAI::Response
+  module Moderations
+    ##
+    # @return [Array<LLM::Response]
+    def moderations
+      @moderations ||= body.results.map { _1.extend(Moderation) }
+    end
+  end
+
+  module Moderation
+    ##
+    # Returns true if the moderation is flagged
+    # @return [Boolean]
+    def flagged?
+      body.flagged
+    end
+
+    ##
+    # Returns the moderation categories
+    # @return [Array<String>]
+    def categories
+      self["categories"].filter_map { _2 ? _1 : nil }
+    end
+
+    ##
+    # Returns the moderation scores
+    # @return [Hash]
+    def scores
+      self["category_scores"].select { |(key, _)| categories.include?(key) }.to_h
+    end
+  end
+end
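`categories` keeps only the names whose boolean is true, and `scores` then narrows the score hash to those names. The same two lines on hypothetical data shaped like the moderation body:

    # Hypothetical moderation result, shaped like the body the module above reads.
    categories = {"hate" => true, "harassment" => false, "violence" => true}
    category_scores = {"hate" => 0.91, "harassment" => 0.05, "violence" => 0.77}

    # Keep the flagged category names, then narrow the scores to those names.
    flagged = categories.filter_map { _2 ? _1 : nil }
    scores  = category_scores.select { |(key, _)| flagged.include?(key) }.to_h

    p flagged #=> ["hate", "violence"]
    p scores  #=> {"hate"=>0.91, "violence"=>0.77}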
data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb}
@@ -1,28 +1,16 @@
 # frozen_string_literal: true
 
-module LLM::OpenAI::ResponseParser
-  ##
-  # @private
-  class RespondParser
-    def initialize(body)
-      @body = LLM::Object.from_hash(body)
-    end
+module LLM::OpenAI::Response
+  module Responds
 
-    def format(response)
-      {
-        id:,
-        model:,
-        input_tokens:,
-        output_tokens:,
-        total_tokens:,
-        outputs: [format_message(response)]
-      }
-    end
+    def outputs = [format_message]
+    def choices = body.output
+    def tools = output.select { _1.type == "function_call" }
 
     private
 
-    def format_message(response)
-      message = LLM::Message.new("assistant", +"", {response:, tool_calls: []})
+    def format_message
+      message = LLM::Message.new("assistant", +"", {response: self, tool_calls: []})
       choices.each.with_index do |choice, index|
         if choice.type == "function_call"
           message.extra[:tool_calls] << format_tool(choice)
@@ -43,14 +31,5 @@ module LLM::OpenAI::ResponseParser
         arguments: JSON.parse(tool.arguments)
       )
     end
-
-    def body = @body
-    def id = body.id
-    def model = body.model
-    def input_tokens = body.usage.input_tokens
-    def output_tokens = body.usage.output_tokens
-    def total_tokens = body.usage.total_tokens
-    def choices = body.output
-    def tools = output.select { _1.type == "function_call" }
   end
 end