llm.rb 0.3.2 → 0.4.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/README.md +81 -8
- data/lib/json/schema/array.rb +22 -0
- data/lib/json/schema/boolean.rb +9 -0
- data/lib/json/schema/integer.rb +21 -0
- data/lib/json/schema/leaf.rb +40 -0
- data/lib/json/schema/null.rb +9 -0
- data/lib/json/schema/number.rb +21 -0
- data/lib/json/schema/object.rb +26 -0
- data/lib/json/schema/string.rb +9 -0
- data/lib/json/schema.rb +73 -0
- data/lib/llm/chat.rb +7 -3
- data/lib/llm/core_ext/ostruct.rb +1 -1
- data/lib/llm/file.rb +8 -1
- data/lib/llm/message.rb +7 -0
- data/lib/llm/model.rb +27 -2
- data/lib/llm/provider.rb +36 -28
- data/lib/llm/providers/anthropic/format.rb +19 -6
- data/lib/llm/providers/anthropic/models.rb +62 -0
- data/lib/llm/providers/anthropic.rb +22 -8
- data/lib/llm/providers/gemini/format.rb +6 -1
- data/lib/llm/providers/gemini/images.rb +3 -3
- data/lib/llm/providers/gemini/models.rb +69 -0
- data/lib/llm/providers/gemini/response_parser.rb +1 -5
- data/lib/llm/providers/gemini.rb +30 -5
- data/lib/llm/providers/ollama/format.rb +11 -3
- data/lib/llm/providers/ollama/models.rb +66 -0
- data/lib/llm/providers/ollama.rb +30 -8
- data/lib/llm/providers/openai/audio.rb +0 -2
- data/lib/llm/providers/openai/format.rb +6 -1
- data/lib/llm/providers/openai/images.rb +1 -1
- data/lib/llm/providers/openai/models.rb +62 -0
- data/lib/llm/providers/openai/response_parser.rb +1 -5
- data/lib/llm/providers/openai/responses.rb +12 -6
- data/lib/llm/providers/openai.rb +37 -7
- data/lib/llm/response/modellist.rb +18 -0
- data/lib/llm/response.rb +1 -0
- data/lib/llm/version.rb +1 -1
- data/lib/llm.rb +2 -1
- data/spec/anthropic/completion_spec.rb +36 -0
- data/spec/anthropic/models_spec.rb +21 -0
- data/spec/gemini/images_spec.rb +4 -12
- data/spec/gemini/models_spec.rb +21 -0
- data/spec/llm/conversation_spec.rb +71 -3
- data/spec/ollama/models_spec.rb +20 -0
- data/spec/openai/completion_spec.rb +19 -0
- data/spec/openai/images_spec.rb +2 -6
- data/spec/openai/models_spec.rb +21 -0
- metadata +20 -6
- data/share/llm/models/anthropic.yml +0 -35
- data/share/llm/models/gemini.yml +0 -35
- data/share/llm/models/ollama.yml +0 -155
- data/share/llm/models/openai.yml +0 -46
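
Taken together, the headline changes in 0.4.0 are: a models API for every provider (replacing the static YAML model lists removed from data/share/llm/models), an optional schema: argument for structured output, per-provider default_model methods, and image responses that are plain IO objects. A minimal sketch of the models API, pieced together from the doc comments below ("KEY" is a placeholder environment variable):

    #!/usr/bin/env ruby
    require "llm"

    llm = LLM.openai(ENV["KEY"])
    llm.models.all.each do |model|
      print "id: ", model.id, "\n"
    end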

data/lib/llm/providers/anthropic/models.rb
ADDED

@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+class LLM::Anthropic
+  ##
+  # The {LLM::Anthropic::Models LLM::Anthropic::Models} class provides a model
+  # object for interacting with [Anthropic's models API](https://docs.anthropic.com/en/api/models-list).
+  # The models API allows a client to query Anthropic for a list of models
+  # that are available for use with the Anthropic API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.anthropic(ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Anthropic::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.anthropic(ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://docs.anthropic.com/en/api/models-list Anthropic docs
+    # @param [Hash] params Other parameters (see Anthropic docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::ModelList]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["data"].map do |model|
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:headers, :request].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
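
A usage sketch for the class above. The all method URL-encodes whatever params it is given, so pagination options such as limit (an assumption taken from Anthropic's API docs, not from this diff) pass straight through:

    llm = LLM.anthropic(ENV["KEY"])
    res = llm.models.all(limit: 5)  # "limit" is assumed per Anthropic's pagination docs
    res.models.each { |model| print "id: ", model.id, "\n" }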

data/lib/llm/providers/anthropic.rb
CHANGED

@@ -8,6 +8,7 @@ module LLM
     require_relative "anthropic/error_handler"
     require_relative "anthropic/response_parser"
     require_relative "anthropic/format"
+    require_relative "anthropic/models"
     include Format
 
     HOST = "api.anthropic.com"

@@ -45,16 +46,27 @@ module LLM
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: …
-      params …
-      req …
+    def complete(prompt, role = :user, model: default_model, max_tokens: 1024, **params)
+      params = {max_tokens:, model:}.merge!(params)
+      req = Net::HTTP::Post.new("/v1/messages", headers)
       messages = [*(params.delete(:messages) || []), Message.new(role, prompt)]
-      …
-      …
+      body = JSON.dump({messages: format(messages)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # Provides an interface to Anthropic's models API
+    # @see https://docs.anthropic.com/en/api/models-list
+    # @return [LLM::Anthropic::Models]
+    def models
+      LLM::Anthropic::Models.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role
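
A sketch of the new complete signature: the model defaults to default_model (below) and max_tokens to 1024, both overridable per call:

    llm = LLM.anthropic(ENV["KEY"])
    res = llm.complete("Hello, world", :user, max_tokens: 512)
    # model: defaulted to "claude-3-5-sonnet-20240620" via default_model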

@@ -62,9 +74,11 @@ module LLM
     end
 
     ##
-    # …
-    …
-    …
+    # Returns the default model for chat completions
+    # @see https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table claude-3-5-sonnet-20240620
+    # @return [String]
+    def default_model
+      "claude-3-5-sonnet-20240620"
     end
 
     private

data/lib/llm/providers/gemini/format.rb
CHANGED

@@ -35,8 +35,13 @@ class LLM::Gemini
       when LLM::File
         file = content
         {inline_data: {mime_type: file.mime_type, data: file.to_b64}}
-      …
+      when String
         {text: content}
+      when LLM::Message
+        format_content(content.content)
+      else
+        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                       "is not supported by the Gemini API"
       end
     end
   end
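
The practical effect of the case expression above: strings, LLM::File objects, and nested LLM::Message objects are all formattable, and anything else now fails fast with LLM::Error::PromptError instead of producing a malformed request. A sketch:

    llm = LLM.gemini(ENV["KEY"])
    llm.complete(LLM::File("cat.png"), :user)  # sent as inline_data
    llm.complete(Object.new, :user)            # raises LLM::Error::PromptError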

data/lib/llm/providers/gemini/images.rb
CHANGED

@@ -13,7 +13,7 @@ class LLM::Gemini
   #
   #   llm = LLM.gemini(ENV["KEY"])
   #   res = llm.images.create prompt: "A dog on a rocket to the moon"
-  #   …
+  #   IO.copy_stream res.images[0], "rocket.png"
   class Images
     include Format
 

@@ -30,7 +30,7 @@ class LLM::Gemini
     # @example
     #   llm = LLM.gemini(ENV["KEY"])
     #   res = llm.images.create prompt: "A dog on a rocket to the moon"
-    #   …
+    #   IO.copy_stream res.images[0], "rocket.png"
     # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
     # @param [String] prompt The prompt
     # @param [Hash] params Other parameters (see Gemini docs)

@@ -56,7 +56,7 @@ class LLM::Gemini
     # @example
     #   llm = LLM.gemini(ENV["KEY"])
     #   res = llm.images.edit image: LLM::File("cat.png"), prompt: "Add a hat to the cat"
-    #   …
+    #   IO.copy_stream res.images[0], "hatoncat.png"
     # @see https://ai.google.dev/gemini-api/docs/image-generation Gemini docs
     # @param [LLM::File] image The image to edit
     # @param [String] prompt The prompt

data/lib/llm/providers/gemini/models.rb
ADDED

@@ -0,0 +1,69 @@
+# frozen_string_literal: true
+
+class LLM::Gemini
+  ##
+  # The {LLM::Gemini::Models LLM::Gemini::Models} class provides a model
+  # object for interacting with [Gemini's models API](https://ai.google.dev/api/models?hl=en#method:-models.list).
+  # The models API allows a client to query Gemini for a list of models
+  # that are available for use with the Gemini API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.gemini(ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    include LLM::Utils
+
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Gemini::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.gemini(ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://ai.google.dev/api/models?hl=en#method:-models.list Gemini docs
+    # @param [Hash] params Other parameters (see Gemini docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::ModelList]
+    def all(**params)
+      query = URI.encode_www_form(params.merge!(key: secret))
+      req = Net::HTTP::Get.new("/v1beta/models?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["models"].map do |model|
+          model = model.transform_keys { snakecase(_1) }
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    def secret
+      @provider.instance_variable_get(:@secret)
+    end
+
+    [:headers, :request].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
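
Note the transform_keys { snakecase(_1) } step: Gemini reports camelCase attributes (displayName and so on), and snake-casing them first keeps LLM::Model's readers consistent across providers. A sketch, assuming display_name is among the attributes Gemini returns:

    llm = LLM.gemini(ENV["KEY"])
    llm.models.all.each do |model|
      # display_name is assumed (Gemini's displayName, snake-cased)
      print model.id, ": ", model.display_name, "\n"
    end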

data/lib/llm/providers/gemini/response_parser.rb
CHANGED

@@ -44,11 +44,7 @@ class LLM::Gemini
         images: body["candidates"].flat_map do |candidate|
           candidate["content"]["parts"].filter_map do
             next unless _1.dig("inlineData", "data")
-            …
-              mime_type: _1["inlineData"]["mimeType"],
-              encoded: _1["inlineData"]["data"],
-              binary: _1["inlineData"]["data"].unpack1("m0")
-            )
+            StringIO.new(_1["inlineData"]["data"].unpack1("m0"))
           end
         end
       }
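
Images are now plain StringIO objects holding the decoded binary data, rather than structs with mime_type/encoded/binary attributes. As the updated doc comments show, they can be streamed straight to disk:

    llm = LLM.gemini(ENV["KEY"])
    res = llm.images.create prompt: "A dog on a rocket to the moon"
    IO.copy_stream res.images[0], "rocket.png"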
    
data/lib/llm/providers/gemini.rb
CHANGED
    
@@ -34,6 +34,7 @@ module LLM
     require_relative "gemini/images"
     require_relative "gemini/files"
     require_relative "gemini/audio"
+    require_relative "gemini/models"
     include Format
 
     HOST = "generativelanguage.googleapis.com"

@@ -52,6 +53,7 @@ module LLM
     # @raise (see LLM::Provider#request)
     # @return (see LLM::Provider#embed)
     def embed(input, model: "text-embedding-004", **params)
+      model = model.respond_to?(:id) ? model.id : model
       path = ["/v1beta/models/#{model}", "embedContent?key=#{@secret}"].join(":")
       req = Net::HTTP::Post.new(path, headers)
       req.body = JSON.dump({content: {parts: [{text: input}]}})

@@ -65,15 +67,19 @@ module LLM
     # @param prompt (see LLM::Provider#complete)
     # @param role (see LLM::Provider#complete)
     # @param model (see LLM::Provider#complete)
+    # @param schema (see LLM::Provider#complete)
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: …
+    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
+      model = model.respond_to?(:id) ? model.id : model
       path = ["/v1beta/models/#{model}", "generateContent?key=#{@secret}"].join(":")
       req  = Net::HTTP::Post.new(path, headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-      body = JSON.dump({contents: format(messages)})
+      body = JSON.dump({contents: format(messages)}.merge!(expand_schema(schema)))
       set_body_stream(req, StringIO.new(body))
       res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)

@@ -101,6 +107,13 @@ module LLM
       LLM::Gemini::Files.new(self)
     end
 
+    ##
+    # Provides an interface to Gemini's models API
+    # @see https://ai.google.dev/gemini-api/docs/models Gemini docs
+    def models
+      LLM::Gemini::Models.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role

@@ -108,9 +121,11 @@ module LLM
     end
 
     ##
-    # …
-    …
-    …
+    # Returns the default model for chat completions
+    # @see https://ai.google.dev/gemini-api/docs/models#gemini-1.5-flash gemini-1.5-flash
+    # @return [String]
+    def default_model
+      "gemini-1.5-flash"
     end
 
     private

@@ -121,6 +136,16 @@ module LLM
       }
     end
 
+    def expand_schema(schema)
+      return {} unless schema
+      {
+        "generationConfig" => {
+          "response_mime_type" => "application/json",
+          "response_schema" => schema
+        }
+      }
+    end
+
     def response_parser
       LLM::Gemini::ResponseParser
     end
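
With expand_schema in place, passing schema: to complete switches Gemini into JSON mode via generationConfig. A sketch assuming a plain Hash in Gemini's response_schema format (the gem also ships a schema builder under lib/json/schema, which this hunk does not show):

    llm = LLM.gemini(ENV["KEY"])
    schema = {type: "OBJECT", properties: {answer: {type: "STRING"}}}
    res = llm.complete("What is 2+2?", :user, schema: schema)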

data/lib/llm/providers/ollama/format.rb
CHANGED

@@ -28,14 +28,22 @@ class LLM::Ollama
     # @return [String, Hash]
     #  The formatted content
     def format_content(content)
-      …
+      case content
+      when LLM::File
         if content.image?
           {content: "This message has an image associated with it", images: [content.to_b64]}
         else
-          raise …
+          raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                         "is not an image, and therefore not supported by the " \
+                                         "Ollama API"
         end
-      …
+      when String
         {content:}
+      when LLM::Message
+        format_content(content.content)
+      else
+        raise LLM::Error::PromptError, "The given object (an instance of #{content.class}) " \
+                                       "is not supported by the Ollama API"
       end
     end
   end
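
As with Gemini, the formatter now spells out what it accepts: image files ride along in an images array, while non-image files and unknown objects raise LLM::Error::PromptError. A sketch, assuming a vision-capable model such as llava is pulled locally (the model name is a placeholder, not from this diff):

    llm = LLM.ollama(nil)
    res = llm.complete(LLM::File("cat.png"), :user, model: "llava")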

data/lib/llm/providers/ollama/models.rb
ADDED

@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+class LLM::Ollama
+  ##
+  # The {LLM::Ollama::Models LLM::Ollama::Models} class provides a model
+  # object for interacting with [Ollama's models API](https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models).
+  # The models API allows a client to query Ollama for a list of models
+  # that are available for use with the Ollama API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.ollama(nil)
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    include LLM::Utils
+
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::Ollama::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.ollama(nil)
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+    # @see https://ollama.com/library Ollama library
+    # @param [Hash] params Other parameters (see Ollama docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::ModelList]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/api/tags?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["models"].map do |model|
+          model = model.transform_keys { snakecase(_1) }
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:headers, :request].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end
    
data/lib/llm/providers/ollama.rb
CHANGED
    
@@ -22,6 +22,7 @@ module LLM
     require_relative "ollama/error_handler"
     require_relative "ollama/response_parser"
     require_relative "ollama/format"
+    require_relative "ollama/models"
     include Format
 
     HOST = "localhost"

@@ -56,16 +57,30 @@ module LLM
     # @param params (see LLM::Provider#complete)
     # @example (see LLM::Provider#complete)
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return (see LLM::Provider#complete)
-    def complete(prompt, role = :user, model: …
-      params …
-      …
+    def complete(prompt, role = :user, model: default_model, schema: nil, **params)
+      params = {model:, stream: false}
+                 .merge!(expand_schema(schema))
+                 .merge!(params)
+                 .compact
+      req = Net::HTTP::Post.new("/api/chat", headers)
       messages = [*(params.delete(:messages) || []), LLM::Message.new(role, prompt)]
-      …
-      …
+      body = JSON.dump({messages: format(messages)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+      res = request(@http, req)
       Response::Completion.new(res).extend(response_parser)
     end
 
+    ##
+    # Provides an interface to Ollama's models API
+    # @see https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models Ollama docs
+    # @return [LLM::Ollama::Models]
+    def models
+      LLM::Ollama::Models.new(self)
+    end
+
     ##
     # @return (see LLM::Provider#assistant_role)
     def assistant_role

@@ -73,9 +88,11 @@ module LLM
     end
 
     ##
-    # …
-    …
-    …
+    # Returns the default model for chat completions
+    # @see https://ollama.com/library llama3.2
+    # @return [String]
+    def default_model
+      "llama3.2"
     end
 
     private

@@ -87,6 +104,11 @@ module LLM
       }
     end
 
+    def expand_schema(schema)
+      return {} unless schema
+      {format: schema}
+    end
+
     def response_parser
       LLM::Ollama::ResponseParser
     end
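
Ollama's structured output is the simplest of the three: the schema is passed through verbatim as the format field of the chat request. A sketch with a plain JSON-schema Hash:

    llm = LLM.ollama(nil)
    schema = {type: "object", properties: {answer: {type: "string"}}, required: ["answer"]}
    res = llm.complete("What is 2+2? Reply in JSON.", :user, schema: schema)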

data/lib/llm/providers/openai/format.rb
CHANGED

@@ -42,7 +42,12 @@ class LLM::OpenAI
       when URI
         [{type: :image_url, image_url: {url: content.to_s}}]
       when LLM::File
-        …
+        file = content
+        if file.image?
+          [{type: :image_url, image_url: {url: file.to_data_uri}}]
+        else
+          [{type: :file, file: {filename: file.basename, file_data: file.to_data_uri}}]
+        end
       when LLM::Response::File
         [{type: :file, file: {file_id: content.id}}]
       when String
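
The branch above means an LLM::File is no longer assumed to be an image: images become image_url parts carrying a data URI, and anything else (a PDF, say) becomes a file part with a filename and file_data. A sketch:

    llm = LLM.openai(ENV["KEY"])
    llm.complete(LLM::File("photo.png"), :user)   # => image_url part
    llm.complete(LLM::File("report.pdf"), :user)  # => file part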

data/lib/llm/providers/openai/images.rb
CHANGED

@@ -24,7 +24,7 @@ class LLM::OpenAI
   #   llm = LLM.openai(ENV["KEY"])
   #   res = llm.images.create prompt: "A dog on a rocket to the moon",
   #                           response_format: "b64_json"
-  #   …
+  #   IO.copy_stream res.images[0], "rocket.png"
   class Images
     ##
     # Returns a new Images object

data/lib/llm/providers/openai/models.rb
ADDED

@@ -0,0 +1,62 @@
+# frozen_string_literal: true
+
+class LLM::OpenAI
+  ##
+  # The {LLM::OpenAI::Models LLM::OpenAI::Models} class provides a model
+  # object for interacting with [OpenAI's models API](https://platform.openai.com/docs/api-reference/models/list).
+  # The models API allows a client to query OpenAI for a list of models
+  # that are available for use with the OpenAI API.
+  #
+  # @example
+  #   #!/usr/bin/env ruby
+  #   require "llm"
+  #
+  #   llm = LLM.openai(ENV["KEY"])
+  #   res = llm.models.all
+  #   res.each do |model|
+  #     print "id: ", model.id, "\n"
+  #   end
+  class Models
+    ##
+    # Returns a new Models object
+    # @param provider [LLM::Provider]
+    # @return [LLM::OpenAI::Models]
+    def initialize(provider)
+      @provider = provider
+    end
+
+    ##
+    # List all models
+    # @example
+    #   llm = LLM.openai(ENV["KEY"])
+    #   res = llm.models.all
+    #   res.each do |model|
+    #     print "id: ", model.id, "\n"
+    #   end
+    # @see https://platform.openai.com/docs/api-reference/models/list OpenAI docs
+    # @param [Hash] params Other parameters (see OpenAI docs)
+    # @raise (see LLM::Provider#request)
+    # @return [LLM::Response::ModelList]
+    def all(**params)
+      query = URI.encode_www_form(params)
+      req = Net::HTTP::Get.new("/v1/models?#{query}", headers)
+      res = request(http, req)
+      LLM::Response::ModelList.new(res).tap { |modellist|
+        models = modellist.body["data"].map do |model|
+          LLM::Model.from_hash(model).tap { _1.provider = @provider }
+        end
+        modellist.models = models
+      }
+    end
+
+    private
+
+    def http
+      @provider.instance_variable_get(:@http)
+    end
+
+    [:headers, :request, :set_body_stream].each do |m|
+      define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
+    end
+  end
+end

data/lib/llm/providers/openai/response_parser.rb
CHANGED

@@ -69,11 +69,7 @@ class LLM::OpenAI
       urls: body["data"].filter_map { _1["url"] },
       images: body["data"].filter_map do
         next unless _1["b64_json"]
-        …
-          mime_type: nil,
-          encoded: _1["b64_json"],
-          binary: _1["b64_json"].unpack1("m0")
-        )
+        StringIO.new(_1["b64_json"].unpack1("m0"))
       end
     }
   end

data/lib/llm/providers/openai/responses.rb
CHANGED

@@ -49,13 +49,19 @@ class LLM::OpenAI
     # @param model (see LLM::Provider#complete)
     # @param [Hash] params Response params
     # @raise (see LLM::Provider#request)
+    # @raise [LLM::Error::PromptError]
+    #  When given an object a provider does not understand
     # @return [LLM::Response::Output]
-    def create(prompt, role = :user, model: …
-      params …
+    def create(prompt, role = :user, model: @provider.default_model, schema: nil, **params)
+      params = {model:}
+                 .merge!(expand_schema(schema))
+                 .merge!(params)
+                 .compact
+      req = Net::HTTP::Post.new("/v1/responses", headers)
       messages = [*(params.delete(:input) || []), LLM::Message.new(role, prompt)]
-      …
-      …
+      body = JSON.dump({input: format(messages, :response)}.merge!(params))
+      set_body_stream(req, StringIO.new(body))
+      res = request(http, req)
       LLM::Response::Output.new(res).extend(response_parser)
     end
 
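
The Responses API gets the same treatment as complete: the model defaults to the provider's default_model and an optional schema: is accepted. A sketch; how the provider's expand_schema maps the schema onto the request body is not shown in this diff:

    llm = LLM.openai(ENV["KEY"])
    schema = {type: "object", properties: {answer: {type: "string"}}}
    res = llm.responses.create("What is 2+2?", :user, schema: schema)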

@@ -92,7 +98,7 @@ class LLM::OpenAI
       @provider.instance_variable_get(:@http)
     end
 
-    [:response_parser, :headers, :request].each do |m|
+    [:response_parser, :headers, :request, :set_body_stream, :expand_schema].each do |m|
       define_method(m) { |*args, &b| @provider.send(m, *args, &b) }
     end
   end