llm.rb 0.10.1 → 0.12.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. checksums.yaml +4 -4
  2. data/README.md +120 -119
  3. data/lib/llm/bot/builder.rb +2 -2
  4. data/lib/llm/bot.rb +13 -22
  5. data/lib/llm/buffer.rb +7 -0
  6. data/lib/llm/file.rb +22 -12
  7. data/lib/llm/function.rb +8 -7
  8. data/lib/llm/message.rb +8 -0
  9. data/lib/llm/multipart.rb +0 -1
  10. data/lib/llm/object/kernel.rb +8 -0
  11. data/lib/llm/object.rb +9 -3
  12. data/lib/llm/provider.rb +10 -12
  13. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  14. data/lib/llm/providers/anthropic/models.rb +4 -9
  15. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  16. data/lib/llm/providers/anthropic.rb +13 -25
  17. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  18. data/lib/llm/providers/deepseek.rb +16 -1
  19. data/lib/llm/providers/gemini/audio.rb +9 -13
  20. data/lib/llm/providers/gemini/files.rb +19 -34
  21. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  22. data/lib/llm/providers/gemini/images.rb +12 -11
  23. data/lib/llm/providers/gemini/models.rb +4 -10
  24. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  25. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  26. data/lib/llm/providers/gemini/response/file.rb +11 -0
  27. data/lib/llm/providers/gemini/response/image.rb +26 -0
  28. data/lib/llm/providers/gemini.rb +18 -29
  29. data/lib/llm/providers/llamacpp.rb +18 -1
  30. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  31. data/lib/llm/providers/ollama/models.rb +2 -8
  32. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  33. data/lib/llm/providers/ollama/response/embedding.rb +9 -0
  34. data/lib/llm/providers/ollama.rb +13 -19
  35. data/lib/llm/providers/openai/audio.rb +10 -10
  36. data/lib/llm/providers/openai/files.rb +22 -34
  37. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  38. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  39. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  40. data/lib/llm/providers/openai/images.rb +18 -17
  41. data/lib/llm/providers/openai/models.rb +4 -9
  42. data/lib/llm/providers/openai/moderations.rb +9 -11
  43. data/lib/llm/providers/openai/response/audio.rb +7 -0
  44. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +14 -30
  45. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  46. data/lib/llm/providers/openai/response/file.rb +7 -0
  47. data/lib/llm/providers/openai/response/image.rb +16 -0
  48. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  49. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -29
  50. data/lib/llm/providers/openai/responses.rb +16 -34
  51. data/lib/llm/providers/openai/stream_parser.rb +1 -0
  52. data/lib/llm/providers/openai/vector_stores.rb +188 -0
  53. data/lib/llm/providers/openai.rb +24 -9
  54. data/lib/llm/providers/xai/images.rb +58 -0
  55. data/lib/llm/providers/xai.rb +72 -0
  56. data/lib/llm/response.rb +42 -13
  57. data/lib/llm/version.rb +1 -1
  58. data/lib/llm.rb +12 -13
  59. data/llm.gemspec +5 -5
  60. metadata +29 -38
  61. data/lib/llm/model.rb +0 -32
  62. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  63. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  64. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  65. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  66. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  67. data/lib/llm/providers/openai/response_parser.rb +0 -65
  68. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  69. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  70. data/lib/llm/providers/voyageai.rb +0 -44
  71. data/lib/llm/response/audio.rb +0 -13
  72. data/lib/llm/response/audio_transcription.rb +0 -14
  73. data/lib/llm/response/audio_translation.rb +0 -14
  74. data/lib/llm/response/completion.rb +0 -51
  75. data/lib/llm/response/download_file.rb +0 -15
  76. data/lib/llm/response/embedding.rb +0 -23
  77. data/lib/llm/response/file.rb +0 -42
  78. data/lib/llm/response/filelist.rb +0 -18
  79. data/lib/llm/response/image.rb +0 -29
  80. data/lib/llm/response/modellist.rb +0 -18
  81. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  82. data/lib/llm/response/moderationlist.rb +0 -51
  83. data/lib/llm/response/respond.rb +0 -56
  84. /data/lib/llm/{event_handler.rb → eventhandler.rb} +0 -0
@@ -1,42 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module LLM
4
- ##
5
- # The {LLM::Response::File LLM::Response::File} class represents a file
6
- # that has been uploaded to a provider. Its properties are delegated
7
- # to the underlying response body, and vary by provider.
8
- class Response::File < Response
9
- ##
10
- # Returns a normalized response body
11
- # @return [Hash]
12
- def body
13
- @_body ||= if super["file"]
14
- super["file"].transform_keys { snakecase(_1) }
15
- else
16
- super.transform_keys { snakecase(_1) }
17
- end
18
- end
19
-
20
- ##
21
- # @return [String]
22
- def inspect
23
- "#<#{self.class}:0x#{object_id.to_s(16)} body=#{body}>"
24
- end
25
-
26
- private
27
-
28
- include LLM::Utils
29
-
30
- def respond_to_missing?(m, _)
31
- body.key?(m.to_s) || super
32
- end
33
-
34
- def method_missing(m, *args, &block)
35
- if body.key?(m.to_s)
36
- body[m.to_s]
37
- else
38
- super
39
- end
40
- end
41
- end
42
- end
@@ -1,18 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module LLM
4
- ##
5
- # The {LLM::Response::FileList LLM::Response::FileList} class represents a
6
- # list of file objects that are returned by a provider. It is an Enumerable
7
- # object, and can be used to iterate over the file objects in a way that is
8
- # similar to an array. Each element is an instance of LLM::Object.
9
- class Response::FileList < Response
10
- include Enumerable
11
-
12
- attr_accessor :files
13
-
14
- def each(&)
15
- @files.each(&)
16
- end
17
- end
18
- end
@@ -1,29 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module LLM
4
- ##
5
- # The {LLM::Response::Image LLM::Response::Image} class represents
6
- # an image response. An image response might encapsulate one or more
7
- # URLs, or a base64 encoded image -- depending on the provider.
8
- class Response::Image < Response
9
- ##
10
- # Returns one or more image objects, or nil
11
- # @return [Array<LLM::Object>, nil]
12
- def images
13
- parsed[:images].any? ? parsed[:images] : nil
14
- end
15
-
16
- ##
17
- # Returns one or more image URLs, or nil
18
- # @return [Array<String>, nil]
19
- def urls
20
- parsed[:urls].any? ? parsed[:urls] : nil
21
- end
22
-
23
- private
24
-
25
- def parsed
26
- @parsed ||= parse_image(body)
27
- end
28
- end
29
- end
@@ -1,18 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module LLM
4
- ##
5
- # The {LLM::Response::ModelList LLM::Response::ModelList} class represents a
6
- # list of model objects that are returned by a provider. It is an Enumerable
7
- # object, and can be used to iterate over the model objects in a way that is
8
- # similar to an array. Each element is an instance of LLM::Object.
9
- class Response::ModelList < Response
10
- include Enumerable
11
-
12
- attr_accessor :models
13
-
14
- def each(&)
15
- @models.each(&)
16
- end
17
- end
18
- end
@@ -1,47 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- class LLM::Response::ModerationList
4
- ##
5
- # The {LLM::Response::ModerationList::Moderation Moderation}
6
- # class represents a moderation object that is returned by
7
- # the moderations interface.
8
- # @see LLM::Response::ModerationList
9
- # @see LLM::OpenAI::Moderations
10
- class Moderation
11
- ##
12
- # @param [Hash] moderation
13
- # @return [LLM::Response::ModerationList::Moderation]
14
- def initialize(moderation)
15
- @moderation = moderation
16
- end
17
-
18
- ##
19
- # Returns true if the moderation is flagged
20
- # @return [Boolean]
21
- def flagged?
22
- @moderation["flagged"]
23
- end
24
-
25
- ##
26
- # Returns the moderation categories
27
- # @return [Array<String>]
28
- def categories
29
- @moderation["categories"].filter_map { _2 ? _1 : nil }
30
- end
31
-
32
- ##
33
- # Returns the moderation scores
34
- # @return [Hash]
35
- def scores
36
- @moderation["category_scores"].select { categories.include?(_1) }
37
- end
38
-
39
- ##
40
- # @return [String]
41
- def inspect
42
- "#<#{self.class}:0x#{object_id.to_s(16)} " \
43
- "categories=#{categories} " \
44
- "scores=#{scores}>"
45
- end
46
- end
47
- end
@@ -1,51 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module LLM
4
- ##
5
- # The {LLM::Response::ModerationList LLM::Response::ModerationList} class
6
- # represents a response from the moderations API. It is an Enumerable that
7
- # yields an instance of {LLM::Response::ModerationList::Moderation LLM::Response::ModerationList::Moderation},
8
- # and each moderation object contains the categories and scores for a given
9
- # input.
10
- # @see LLM::OpenAI::Moderations LLM::OpenAI::Moderations
11
- class Response::ModerationList < Response
12
- require_relative "moderationlist/moderation"
13
- include Enumerable
14
-
15
- ##
16
- # Returns the moderation ID
17
- # @return [String]
18
- def id
19
- parsed[:id]
20
- end
21
-
22
- ##
23
- # Returns the moderation model
24
- # @return [String]
25
- def model
26
- parsed[:model]
27
- end
28
-
29
- ##
30
- # Yields each moderation object
31
- # @yieldparam [OpenStruct] moderation
32
- # @yieldreturn [void]
33
- # @return [void]
34
- def each(&)
35
- moderations.each(&)
36
- end
37
-
38
- private
39
-
40
- def parsed
41
- @parsed ||= parse_moderation_list(body)
42
- end
43
-
44
- ##
45
- # Returns an array of moderation objects
46
- # @return [Array<OpenStruct>]
47
- def moderations
48
- parsed[:moderations]
49
- end
50
- end
51
- end
@@ -1,56 +0,0 @@
1
- # frozen_string_literal: true
2
-
3
- module LLM
4
- class Response::Respond < Response
5
- ##
6
- # @return [String]
7
- # Returns the id of the response
8
- def id
9
- parsed[:id]
10
- end
11
-
12
- ##
13
- # @return [String]
14
- # Returns the model name
15
- def model
16
- parsed[:model]
17
- end
18
-
19
- ##
20
- # @return [Array<LLM::Message>]
21
- def outputs
22
- parsed[:outputs]
23
- end
24
-
25
- ##
26
- # @return [Integer]
27
- # Returns the input token count
28
- def input_tokens
29
- parsed[:input_tokens]
30
- end
31
-
32
- ##
33
- # @return [Integer]
34
- # Returns the output token count
35
- def output_tokens
36
- parsed[:output_tokens]
37
- end
38
-
39
- ##
40
- # @return [Integer]
41
- # Returns the total count of tokens
42
- def total_tokens
43
- parsed[:total_tokens]
44
- end
45
-
46
- private
47
-
48
- ##
49
- # @private
50
- # @return [Hash]
51
- # Returns the parsed response from the provider
52
- def parsed
53
- @parsed ||= parse_respond_response(body)
54
- end
55
- end
56
- end
File without changes