llm.rb 0.10.1 → 0.11.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (115)
  1. checksums.yaml +4 -4
  2. data/LICENSE +0 -0
  3. data/README.md +81 -117
  4. data/lib/llm/bot/builder.rb +2 -2
  5. data/lib/llm/bot/conversable.rb +0 -0
  6. data/lib/llm/bot/prompt/completion.rb +0 -0
  7. data/lib/llm/bot/prompt/respond.rb +0 -0
  8. data/lib/llm/bot.rb +9 -11
  9. data/lib/llm/buffer.rb +0 -0
  10. data/lib/llm/error.rb +0 -0
  11. data/lib/llm/event_handler.rb +0 -0
  12. data/lib/llm/eventstream/event.rb +0 -0
  13. data/lib/llm/eventstream/parser.rb +0 -0
  14. data/lib/llm/eventstream.rb +0 -0
  15. data/lib/llm/file.rb +18 -9
  16. data/lib/llm/function.rb +6 -5
  17. data/lib/llm/json/schema/array.rb +0 -0
  18. data/lib/llm/json/schema/boolean.rb +0 -0
  19. data/lib/llm/json/schema/integer.rb +0 -0
  20. data/lib/llm/json/schema/leaf.rb +0 -0
  21. data/lib/llm/json/schema/null.rb +0 -0
  22. data/lib/llm/json/schema/number.rb +0 -0
  23. data/lib/llm/json/schema/object.rb +0 -0
  24. data/lib/llm/json/schema/string.rb +0 -0
  25. data/lib/llm/json/schema/version.rb +0 -0
  26. data/lib/llm/json/schema.rb +0 -0
  27. data/lib/llm/message.rb +8 -0
  28. data/lib/llm/mime.rb +0 -0
  29. data/lib/llm/multipart.rb +0 -0
  30. data/lib/llm/object/builder.rb +0 -0
  31. data/lib/llm/object/kernel.rb +8 -0
  32. data/lib/llm/object.rb +7 -0
  33. data/lib/llm/provider.rb +9 -11
  34. data/lib/llm/providers/anthropic/error_handler.rb +0 -0
  35. data/lib/llm/providers/anthropic/format/completion_format.rb +10 -5
  36. data/lib/llm/providers/anthropic/format.rb +0 -0
  37. data/lib/llm/providers/anthropic/models.rb +2 -7
  38. data/lib/llm/providers/anthropic/response/completion.rb +39 -0
  39. data/lib/llm/providers/anthropic/stream_parser.rb +0 -0
  40. data/lib/llm/providers/anthropic.rb +3 -24
  41. data/lib/llm/providers/deepseek/format/completion_format.rb +3 -3
  42. data/lib/llm/providers/deepseek/format.rb +0 -0
  43. data/lib/llm/providers/deepseek.rb +6 -0
  44. data/lib/llm/providers/gemini/audio.rb +6 -10
  45. data/lib/llm/providers/gemini/error_handler.rb +0 -0
  46. data/lib/llm/providers/gemini/files.rb +11 -14
  47. data/lib/llm/providers/gemini/format/completion_format.rb +20 -5
  48. data/lib/llm/providers/gemini/format.rb +0 -0
  49. data/lib/llm/providers/gemini/images.rb +8 -7
  50. data/lib/llm/providers/gemini/models.rb +2 -8
  51. data/lib/llm/providers/gemini/{response_parser/completion_parser.rb → response/completion.rb} +10 -24
  52. data/lib/llm/providers/gemini/response/embedding.rb +8 -0
  53. data/lib/llm/providers/gemini/response/file.rb +11 -0
  54. data/lib/llm/providers/gemini/response/image.rb +26 -0
  55. data/lib/llm/providers/gemini/stream_parser.rb +0 -0
  56. data/lib/llm/providers/gemini.rb +5 -8
  57. data/lib/llm/providers/llamacpp.rb +6 -0
  58. data/lib/llm/providers/ollama/error_handler.rb +0 -0
  59. data/lib/llm/providers/ollama/format/completion_format.rb +8 -5
  60. data/lib/llm/providers/ollama/format.rb +0 -0
  61. data/lib/llm/providers/ollama/models.rb +2 -8
  62. data/lib/llm/providers/ollama/response/completion.rb +28 -0
  63. data/lib/llm/providers/ollama/response/embedding.rb +10 -0
  64. data/lib/llm/providers/ollama/stream_parser.rb +0 -0
  65. data/lib/llm/providers/ollama.rb +5 -8
  66. data/lib/llm/providers/openai/audio.rb +6 -6
  67. data/lib/llm/providers/openai/error_handler.rb +0 -0
  68. data/lib/llm/providers/openai/files.rb +14 -15
  69. data/lib/llm/providers/openai/format/completion_format.rb +11 -4
  70. data/lib/llm/providers/openai/format/moderation_format.rb +2 -2
  71. data/lib/llm/providers/openai/format/respond_format.rb +7 -4
  72. data/lib/llm/providers/openai/format.rb +0 -0
  73. data/lib/llm/providers/openai/images.rb +8 -7
  74. data/lib/llm/providers/openai/models.rb +2 -7
  75. data/lib/llm/providers/openai/moderations.rb +9 -11
  76. data/lib/llm/providers/openai/response/audio.rb +7 -0
  77. data/lib/llm/providers/openai/{response_parser/completion_parser.rb → response/completion.rb} +15 -31
  78. data/lib/llm/providers/openai/response/embedding.rb +9 -0
  79. data/lib/llm/providers/openai/response/file.rb +7 -0
  80. data/lib/llm/providers/openai/response/image.rb +16 -0
  81. data/lib/llm/providers/openai/response/moderations.rb +34 -0
  82. data/lib/llm/providers/openai/{response_parser/respond_parser.rb → response/responds.rb} +7 -28
  83. data/lib/llm/providers/openai/responses.rb +10 -9
  84. data/lib/llm/providers/openai/stream_parser.rb +0 -0
  85. data/lib/llm/providers/openai/vector_stores.rb +106 -0
  86. data/lib/llm/providers/openai.rb +14 -8
  87. data/lib/llm/response.rb +37 -13
  88. data/lib/llm/utils.rb +0 -0
  89. data/lib/llm/version.rb +1 -1
  90. data/lib/llm.rb +2 -12
  91. data/llm.gemspec +1 -1
  92. metadata +18 -29
  93. data/lib/llm/model.rb +0 -32
  94. data/lib/llm/providers/anthropic/response_parser/completion_parser.rb +0 -51
  95. data/lib/llm/providers/anthropic/response_parser.rb +0 -24
  96. data/lib/llm/providers/gemini/response_parser.rb +0 -46
  97. data/lib/llm/providers/ollama/response_parser/completion_parser.rb +0 -42
  98. data/lib/llm/providers/ollama/response_parser.rb +0 -30
  99. data/lib/llm/providers/openai/response_parser.rb +0 -65
  100. data/lib/llm/providers/voyageai/error_handler.rb +0 -32
  101. data/lib/llm/providers/voyageai/response_parser.rb +0 -13
  102. data/lib/llm/providers/voyageai.rb +0 -44
  103. data/lib/llm/response/audio.rb +0 -13
  104. data/lib/llm/response/audio_transcription.rb +0 -14
  105. data/lib/llm/response/audio_translation.rb +0 -14
  106. data/lib/llm/response/completion.rb +0 -51
  107. data/lib/llm/response/download_file.rb +0 -15
  108. data/lib/llm/response/embedding.rb +0 -23
  109. data/lib/llm/response/file.rb +0 -42
  110. data/lib/llm/response/filelist.rb +0 -18
  111. data/lib/llm/response/image.rb +0 -29
  112. data/lib/llm/response/modellist.rb +0 -18
  113. data/lib/llm/response/moderationlist/moderation.rb +0 -47
  114. data/lib/llm/response/moderationlist.rb +0 -51
  115. data/lib/llm/response/respond.rb +0 -56
data/lib/llm/providers/openai/responses.rb CHANGED
@@ -33,6 +33,7 @@ class LLM::OpenAI
    #   file = llm.files.create file: "/documents/freebsd.pdf"
    #   res = llm.responses.create ["Describe the document", file]
    class Responses
+     require_relative "response/responds"
      include Format

      ##
@@ -49,9 +50,9 @@ class LLM::OpenAI
      # @param prompt (see LLM::Provider#complete)
      # @param params (see LLM::Provider#complete)
      # @raise (see LLM::Provider#request)
-     # @raise [LLM::Error::PromptError]
+     # @raise [LLM::PromptError]
      #  When given an object a provider does not understand
-     # @return [LLM::Response::Output]
+     # @return [LLM::Response]
      def create(prompt, params = {})
        params = {role: :user, model: @provider.default_model}.merge!(params)
        params = [params, format_schema(params), format_tools(params)].inject({}, &:merge!).compact
@@ -61,7 +62,7 @@ class LLM::OpenAI
        body = JSON.dump({input: [format(messages, :response)].flatten}.merge!(params))
        set_body_stream(req, StringIO.new(body))
        res = execute(request: req)
-       LLM::Response::Respond.new(res).extend(response_parser)
+       LLM::Response.new(res).extend(LLM::OpenAI::Response::Responds)
      end

      ##
@@ -69,13 +70,13 @@ class LLM::OpenAI
      # @see https://platform.openai.com/docs/api-reference/responses/get OpenAI docs
      # @param [#id, #to_s] response Response ID
      # @raise (see LLM::Provider#request)
-     # @return [LLM::Response::Output]
+     # @return [LLM::Response]
      def get(response, **params)
        response_id = response.respond_to?(:id) ? response.id : response
        query = URI.encode_www_form(params)
        req = Net::HTTP::Get.new("/v1/responses/#{response_id}?#{query}", headers)
        res = execute(request: req)
-       LLM::Response::Respond.new(res).extend(response_parser)
+       LLM::Response.new(res).extend(LLM::OpenAI::Response::Responds)
      end

      ##
@@ -88,14 +89,14 @@ class LLM::OpenAI
        response_id = response.respond_to?(:id) ? response.id : response
        req = Net::HTTP::Delete.new("/v1/responses/#{response_id}", headers)
        res = execute(request: req)
-       LLM::Object.from_hash JSON.parse(res.body)
+       LLM::Response.new(res)
      end

      private

-     [:response_parser, :headers,
-      :execute, :set_body_stream,
-      :format_schema, :format_tools].each do |m|
+     [:headers, :execute,
+      :set_body_stream, :format_schema,
+      :format_tools].each do |m|
        define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
      end
    end
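Taken together, these changes mean every Responses call now returns a plain `LLM::Response`. A minimal usage sketch, assuming a valid key in `ENV["OPENAI_SECRET"]` (the key name is hypothetical; method names come from the diff above):

```ruby
require "llm"

llm = LLM.openai(key: ENV["OPENAI_SECRET"])   # hypothetical env var
res = llm.responses.create("Hello, world")    # => LLM::Response (was LLM::Response::Respond)
res = llm.responses.get(res)                  # accepts the response itself or a raw ID
llm.responses.delete(res)                     # now also wraps the raw HTTP response
```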
data/lib/llm/providers/openai/vector_stores.rb ADDED
@@ -0,0 +1,106 @@
+ # frozen_string_literal: true
+
+ class LLM::OpenAI
+   ##
+   # The {LLM::OpenAI::VectorStores LLM::OpenAI::VectorStores} class provides
+   # an interface to OpenAI's vector stores API
+   # @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
+   class VectorStores
+     ##
+     # @param [LLM::Provider] provider
+     #  An OpenAI provider
+     def initialize(provider)
+       @provider = provider
+     end
+
+     ##
+     # List all vector stores
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @return [LLM::Response]
+     def all(**params)
+       query = URI.encode_www_form(params)
+       req = Net::HTTP::Get.new("/v1/vector_stores?#{query}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Create a vector store
+     # @param [String] name The name of the vector store
+     # @param [Array<String>] file_ids The IDs of the files to include in the vector store
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/create OpenAI docs
+     def create(name:, file_ids: [], **params)
+       req = Net::HTTP::Post.new("/v1/vector_stores", headers)
+       req.body = JSON.dump(params.merge({name:, file_ids:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Get a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/retrieve OpenAI docs
+     def get(vector:)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Get.new("/v1/vector_stores/#{vector_id}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Modify an existing vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param [String] name The new name of the vector store
+     # @param [Hash] params Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/modify OpenAI docs
+     def modify(vector:, name: nil, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}", headers)
+       req.body = JSON.dump(params.merge({name:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Delete a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/delete OpenAI docs
+     def delete(vector:)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Delete.new("/v1/vector_stores/#{vector_id}", headers)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     ##
+     # Search a vector store
+     # @param [String, #id] vector The ID of the vector store
+     # @param query [String] The query to search for
+     # @param params [Hash] Other parameters (see OpenAI docs)
+     # @raise (see LLM::Provider#request)
+     # @return [LLM::Response]
+     # @see https://platform.openai.com/docs/api-reference/vector_stores/search OpenAI docs
+     def search(vector:, query:, **params)
+       vector_id = vector.respond_to?(:id) ? vector.id : vector
+       req = Net::HTTP::Post.new("/v1/vector_stores/#{vector_id}/search", headers)
+       req.body = JSON.dump(params.merge({query:}).compact)
+       res = execute(request: req)
+       LLM::Response.new(res)
+     end
+
+     private
+
+     [:headers, :execute, :set_body_stream].each do |m|
+       define_method(m) { |*args, **kwargs, &b| @provider.send(m, *args, **kwargs, &b) }
+     end
+   end
+ end
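A short sketch of how the new vector stores interface might be used, assuming an existing provider instance `llm` and a previously uploaded file (the `"file-abc123"` ID is hypothetical):

```ruby
# Create, fetch, search, and delete a vector store via the new endpoints.
store = llm.vector_stores.create(name: "docs", file_ids: ["file-abc123"])
store = llm.vector_stores.get(vector: store)   # accepts an object with #id or a raw ID
hits  = llm.vector_stores.search(vector: store, query: "freebsd")
llm.vector_stores.delete(vector: store)
```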
data/lib/llm/providers/openai.rb CHANGED
@@ -5,16 +5,18 @@ module LLM
  # The OpenAI class implements a provider for
  # [OpenAI](https://platform.openai.com/)
  class OpenAI < Provider
+   require_relative "openai/response/embedding"
+   require_relative "openai/response/completion"
    require_relative "openai/error_handler"
    require_relative "openai/format"
    require_relative "openai/stream_parser"
-   require_relative "openai/response_parser"
    require_relative "openai/models"
    require_relative "openai/responses"
    require_relative "openai/images"
    require_relative "openai/audio"
    require_relative "openai/files"
    require_relative "openai/moderations"
+   require_relative "openai/vector_stores"

    include Format

@@ -38,7 +40,7 @@ module LLM
      req = Net::HTTP::Post.new("/v1/embeddings", headers)
      req.body = JSON.dump({input:, model:}.merge!(params))
      res = execute(request: req)
-     Response::Embedding.new(res).extend(response_parser)
+     LLM::Response.new(res).extend(LLM::OpenAI::Response::Embedding)
    end

    ##
@@ -48,7 +50,7 @@ module LLM
    # @param params (see LLM::Provider#complete)
    # @example (see LLM::Provider#complete)
    # @raise (see LLM::Provider#request)
-   # @raise [LLM::Error::PromptError]
+   # @raise [LLM::PromptError]
    #  When given an object a provider does not understand
    # @return (see LLM::Provider#complete)
    def complete(prompt, params = {})
@@ -61,7 +63,7 @@ module LLM
      body = JSON.dump({messages: format(messages, :complete).flatten}.merge!(params))
      set_body_stream(req, StringIO.new(body))
      res = execute(request: req, stream:)
-     Response::Completion.new(res).extend(response_parser)
+     LLM::Response.new(res).extend(LLM::OpenAI::Response::Completion)
    end

    ##
@@ -113,6 +115,14 @@ module LLM
      LLM::OpenAI::Moderations.new(self)
    end

+   ##
+   # Provides an interface to OpenAI's vector store API
+   # @see https://platform.openai.com/docs/api-reference/vector-stores/create OpenAI docs
+   # @return [LLM::OpenAI::VectorStores]
+   def vector_stores
+     LLM::OpenAI::VectorStores.new(self)
+   end
+
    ##
    # @return (see LLM::Provider#assistant_role)
    def assistant_role
@@ -136,10 +146,6 @@ module LLM
      )
    end

-   def response_parser
-     LLM::OpenAI::ResponseParser
-   end
-
    def stream_parser
      LLM::OpenAI::StreamParser
    end
data/lib/llm/response.rb CHANGED
@@ -3,18 +3,11 @@
  module LLM
    class Response
      require "json"
-     require_relative "response/completion"
-     require_relative "response/embedding"
-     require_relative "response/respond"
-     require_relative "response/image"
-     require_relative "response/audio"
-     require_relative "response/audio_transcription"
-     require_relative "response/audio_translation"
-     require_relative "response/file"
-     require_relative "response/filelist"
-     require_relative "response/download_file"
-     require_relative "response/modellist"
-     require_relative "response/moderationlist"
+
+     ##
+     # Returns the HTTP response
+     # @return [Net::HTTPResponse]
+     attr_reader :res

      ##
      # @param [Net::HTTPResponse] res
@@ -30,9 +23,40 @@ module LLM
      # @return [Hash, String]
      def body
        @body ||= case @res["content-type"]
-       when %r|\Aapplication/json\s*| then JSON.parse(@res.body)
+       when %r|\Aapplication/json\s*| then LLM::Object.from_hash(JSON.parse(@res.body))
        else @res.body
        end
      end
+
+     ##
+     # Returns an inspection of the response object
+     # @return [String]
+     def inspect
+       "#<#{self.class.name}:0x#{object_id.to_s(16)} @body=#{body.inspect} @res=#{@res.inspect}>"
+     end
+
+     ##
+     # Returns true if the response is successful
+     # @return [Boolean]
+     def ok?
+       Net::HTTPSuccess === @res
+     end
+
+     ##
+     # Returns true if the response is from the Files API
+     # @return [Boolean]
+     def file?
+       false
+     end
+
+     private
+
+     def method_missing(m, *args, **kwargs, &b)
+       body.respond_to?(m) ? body[m.to_s] : super
+     end
+
+     def respond_to_missing?(m, include_private = false)
+       body.respond_to?(m) || super
+     end
    end
  end
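Since `body` now returns an `LLM::Object` and unknown methods fall through to it, response fields can be read directly off the response object. A sketch, assuming a JSON response whose body contains `id` and `model` keys (illustrative field names):

```ruby
res = llm.complete("Hello")  # any LLM::Response works here
res.ok?    # => true when the underlying Net::HTTPResponse is a 2xx
res.body   # => LLM::Object parsed from the JSON body
res.model  # unknown methods are looked up on the body via method_missing
```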
data/lib/llm/utils.rb CHANGED — file without changes
data/lib/llm/version.rb CHANGED
@@ -1,5 +1,5 @@
  # frozen_string_literal: true

  module LLM
-   VERSION = "0.10.1"
+   VERSION = "0.11.0"
  end
data/lib/llm.rb CHANGED
@@ -12,7 +12,6 @@ module LLM
  require_relative "llm/mime"
  require_relative "llm/multipart"
  require_relative "llm/file"
- require_relative "llm/model"
  require_relative "llm/provider"
  require_relative "llm/bot"
  require_relative "llm/buffer"
@@ -27,18 +26,9 @@ module LLM
  # @return (see LLM::Anthropic#initialize)
  def anthropic(**)
    require_relative "llm/providers/anthropic" unless defined?(LLM::Anthropic)
-   require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
    LLM::Anthropic.new(**)
  end

- ##
- # @param (see LLM::Provider#initialize)
- # @return (see LLM::VoyageAI#initialize)
- def voyageai(**)
-   require_relative "llm/providers/voyageai" unless defined?(LLM::VoyageAI)
-   LLM::VoyageAI.new(**)
- end
-
  ##
  # @param (see LLM::Provider#initialize)
  # @return (see LLM::Gemini#initialize)
@@ -87,8 +77,8 @@ module LLM
  #   fn.params do |schema|
  #     schema.object(command: schema.string.required)
  #   end
- #   fn.define do |params|
- #     system(params.command)
+ #   fn.define do |command:|
+ #     system(command)
  #   end
  # end
  # @param [Symbol] name The name of the function
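For reference, here is the updated `LLM.function` example from the doc comment above, assembled into a complete definition (the block form matches the documented API):

```ruby
fn = LLM.function(:system) do |fn|
  fn.params do |schema|
    schema.object(command: schema.string.required)
  end
  # The define block now receives keyword arguments instead of a params object
  fn.define do |command:|
    system(command)
  end
end
```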
data/llm.gemspec CHANGED
@@ -17,7 +17,7 @@ Gem::Specification.new do |spec|

  spec.description = spec.summary
  spec.homepage = "https://github.com/llmrb/llm"
- spec.license = "0BSDL"
+ spec.license = "0BSD"
  spec.required_ruby_version = ">= 3.2.0"

  spec.metadata["homepage_uri"] = spec.homepage
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: llm.rb
  version: !ruby/object:Gem::Version
-   version: 0.10.1
+   version: 0.11.0
  platform: ruby
  authors:
  - Antar Azri
@@ -189,7 +189,6 @@ files:
  - lib/llm/json/schema/version.rb
  - lib/llm/message.rb
  - lib/llm/mime.rb
- - lib/llm/model.rb
  - lib/llm/multipart.rb
  - lib/llm/object.rb
  - lib/llm/object/builder.rb
@@ -200,8 +199,7 @@ files:
  - lib/llm/providers/anthropic/format.rb
  - lib/llm/providers/anthropic/format/completion_format.rb
  - lib/llm/providers/anthropic/models.rb
- - lib/llm/providers/anthropic/response_parser.rb
- - lib/llm/providers/anthropic/response_parser/completion_parser.rb
+ - lib/llm/providers/anthropic/response/completion.rb
  - lib/llm/providers/anthropic/stream_parser.rb
  - lib/llm/providers/deepseek.rb
  - lib/llm/providers/deepseek/format.rb
@@ -214,8 +212,10 @@ files:
  - lib/llm/providers/gemini/format/completion_format.rb
  - lib/llm/providers/gemini/images.rb
  - lib/llm/providers/gemini/models.rb
- - lib/llm/providers/gemini/response_parser.rb
- - lib/llm/providers/gemini/response_parser/completion_parser.rb
+ - lib/llm/providers/gemini/response/completion.rb
+ - lib/llm/providers/gemini/response/embedding.rb
+ - lib/llm/providers/gemini/response/file.rb
+ - lib/llm/providers/gemini/response/image.rb
  - lib/llm/providers/gemini/stream_parser.rb
  - lib/llm/providers/llamacpp.rb
  - lib/llm/providers/ollama.rb
@@ -223,8 +223,8 @@ files:
  - lib/llm/providers/ollama/format.rb
  - lib/llm/providers/ollama/format/completion_format.rb
  - lib/llm/providers/ollama/models.rb
- - lib/llm/providers/ollama/response_parser.rb
- - lib/llm/providers/ollama/response_parser/completion_parser.rb
+ - lib/llm/providers/ollama/response/completion.rb
+ - lib/llm/providers/ollama/response/embedding.rb
  - lib/llm/providers/ollama/stream_parser.rb
  - lib/llm/providers/openai.rb
  - lib/llm/providers/openai/audio.rb
@@ -237,34 +237,23 @@ files:
  - lib/llm/providers/openai/images.rb
  - lib/llm/providers/openai/models.rb
  - lib/llm/providers/openai/moderations.rb
- - lib/llm/providers/openai/response_parser.rb
- - lib/llm/providers/openai/response_parser/completion_parser.rb
- - lib/llm/providers/openai/response_parser/respond_parser.rb
+ - lib/llm/providers/openai/response/audio.rb
+ - lib/llm/providers/openai/response/completion.rb
+ - lib/llm/providers/openai/response/embedding.rb
+ - lib/llm/providers/openai/response/file.rb
+ - lib/llm/providers/openai/response/image.rb
+ - lib/llm/providers/openai/response/moderations.rb
+ - lib/llm/providers/openai/response/responds.rb
  - lib/llm/providers/openai/responses.rb
  - lib/llm/providers/openai/stream_parser.rb
- - lib/llm/providers/voyageai.rb
- - lib/llm/providers/voyageai/error_handler.rb
- - lib/llm/providers/voyageai/response_parser.rb
+ - lib/llm/providers/openai/vector_stores.rb
  - lib/llm/response.rb
- - lib/llm/response/audio.rb
- - lib/llm/response/audio_transcription.rb
- - lib/llm/response/audio_translation.rb
- - lib/llm/response/completion.rb
- - lib/llm/response/download_file.rb
- - lib/llm/response/embedding.rb
- - lib/llm/response/file.rb
- - lib/llm/response/filelist.rb
- - lib/llm/response/image.rb
- - lib/llm/response/modellist.rb
- - lib/llm/response/moderationlist.rb
- - lib/llm/response/moderationlist/moderation.rb
- - lib/llm/response/respond.rb
  - lib/llm/utils.rb
  - lib/llm/version.rb
  - llm.gemspec
  homepage: https://github.com/llmrb/llm
  licenses:
- - 0BSDL
+ - 0BSD
  metadata:
    homepage_uri: https://github.com/llmrb/llm
    source_code_uri: https://github.com/llmrb/llm
@@ -282,7 +271,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
    version: '0'
  requirements: []
- rubygems_version: 3.7.1
+ rubygems_version: 3.6.9
  specification_version: 4
  summary: llm.rb is a zero-dependency Ruby toolkit for Large Language Models that includes
    OpenAI, Gemini, Anthropic, DeepSeek, Ollama, and LlamaCpp. The toolkit includes
data/lib/llm/model.rb DELETED
@@ -1,32 +0,0 @@
- # frozen_string_literal: true
-
- ##
- # The {LLM::Model LLM::Model} class represents an LLM model that
- # is available to use. Its properties are delegated to the underlying
- # response body, and vary by provider.
- class LLM::Model < LLM::Object
-   ##
-   # Returns a subclass of {LLM::Provider LLM::Provider}
-   # @return [LLM::Provider]
-   attr_accessor :provider
-
-   ##
-   # Returns the model ID
-   # @return [String]
-   def id
-     case @provider.class.to_s
-     when "LLM::Ollama"
-       self["name"]
-     when "LLM::Gemini"
-       self["name"].sub(%r|\Amodels/|, "")
-     else
-       self["id"]
-     end
-   end
-
-   ##
-   # @return [String]
-   def to_json(*)
-     id.to_json(*)
-   end
- end
data/lib/llm/providers/anthropic/response_parser/completion_parser.rb DELETED
@@ -1,51 +0,0 @@
- # frozen_string_literal: true
-
- module LLM::Anthropic::ResponseParser
-   ##
-   # @private
-   class CompletionParser
-     def initialize(body)
-       @body = LLM::Object.from_hash(body)
-     end
-
-     def format(response)
-       {
-         model:,
-         prompt_tokens:,
-         completion_tokens:,
-         total_tokens:,
-         choices: format_choices(response)
-       }
-     end
-
-     private
-
-     def format_choices(response)
-       texts.map.with_index do |choice, index|
-         extra = {index:, response:, tool_calls: format_tool_calls(tools), original_tool_calls: tools}
-         LLM::Message.new(role, choice.text, extra)
-       end
-     end
-
-     def format_tool_calls(tools)
-       (tools || []).filter_map do |tool|
-         tool = {
-           id: tool.id,
-           name: tool.name,
-           arguments: tool.input
-         }
-         LLM::Object.new(tool)
-       end
-     end
-
-     def body = @body
-     def role = body.role
-     def model = body.model
-     def prompt_tokens = body.usage&.input_tokens
-     def completion_tokens = body.usage&.output_tokens
-     def total_tokens = body.usage&.total_tokens
-     def parts = body.content
-     def texts = parts.select { _1["type"] == "text" }
-     def tools = parts.select { _1["type"] == "tool_use" }
-   end
- end
data/lib/llm/providers/anthropic/response_parser.rb DELETED
@@ -1,24 +0,0 @@
- # frozen_string_literal: true
-
- class LLM::Anthropic
-   ##
-   # @private
-   module ResponseParser
-     require_relative "response_parser/completion_parser"
-     def parse_embedding(body)
-       {
-         model: body["model"],
-         embeddings: body["data"].map { _1["embedding"] },
-         total_tokens: body.dig("usage", "total_tokens")
-       }
-     end
-
-     ##
-     # @param [Hash] body
-     #  The response body from the LLM provider
-     # @return [Hash]
-     def parse_completion(body)
-       CompletionParser.new(body).format(self)
-     end
-   end
- end
data/lib/llm/providers/gemini/response_parser.rb DELETED
@@ -1,46 +0,0 @@
- # frozen_string_literal: true
-
- class LLM::Gemini
-   ##
-   # @private
-   module ResponseParser
-     require_relative "response_parser/completion_parser"
-
-     ##
-     # @param [Hash] body
-     #  The response body from the LLM provider
-     # @return [Hash]
-     def parse_completion(body)
-       CompletionParser.new(body).format(self)
-     end
-
-     ##
-     # @param [Hash] body
-     #  The response body from the LLM provider
-     # @return [Hash]
-     def parse_embedding(body)
-       {
-         model: "text-embedding-004",
-         embeddings: body.dig("embedding", "values")
-       }
-     end
-
-     ##
-     # @param [Hash] body
-     #  The response body from the LLM provider
-     # @return [Hash]
-     def parse_image(body)
-       {
-         urls: [],
-         images: body["candidates"].flat_map do |c|
-           parts = c["content"]["parts"]
-           parts.filter_map do
-             data = _1.dig("inlineData", "data")
-             next unless data
-             StringIO.new(data.unpack1("m0"))
-           end
-         end
-       }
-     end
-   end
- end
data/lib/llm/providers/ollama/response_parser/completion_parser.rb DELETED
@@ -1,42 +0,0 @@
- # frozen_string_literal: true
-
- module LLM::Ollama::ResponseParser
-   ##
-   # @private
-   class CompletionParser
-     def initialize(body)
-       @body = LLM::Object.from_hash(body)
-     end
-
-     def format(response)
-       {
-         model:,
-         choices: [format_choices(response)],
-         prompt_tokens:,
-         completion_tokens:
-       }
-     end
-
-     private
-
-     def format_choices(response)
-       role, content, calls = message.to_h.values_at(:role, :content, :tool_calls)
-       extra = {response:, tool_calls: format_tool_calls(calls)}
-       LLM::Message.new(role, content, extra)
-     end
-
-     def format_tool_calls(tools)
-       return [] unless tools
-       tools.filter_map do |tool|
-         next unless tool["function"]
-         LLM::Object.new(tool["function"])
-       end
-     end
-
-     def body = @body
-     def model = body.model
-     def prompt_tokens = body.prompt_eval_count
-     def completion_tokens = body.eval_count
-     def message = body.message
-   end
- end