ollama-ruby 1.5.0 → 1.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.contexts/code_comment.rb +25 -0
- data/.contexts/full.rb +43 -0
- data/.contexts/info.rb +17 -0
- data/.contexts/lib.rb +27 -0
- data/.contexts/yard.md +93 -0
- data/CHANGES.md +22 -0
- data/README.md +54 -0
- data/Rakefile +3 -2
- data/bin/ollama_cli +31 -4
- data/bin/ollama_console +18 -0
- data/lib/ollama/client/command.rb +29 -3
- data/lib/ollama/client/configuration/config.rb +114 -3
- data/lib/ollama/client/doc.rb +18 -0
- data/lib/ollama/client.rb +131 -2
- data/lib/ollama/commands/chat.rb +96 -1
- data/lib/ollama/commands/copy.rb +59 -1
- data/lib/ollama/commands/create.rb +112 -1
- data/lib/ollama/commands/delete.rb +53 -1
- data/lib/ollama/commands/embed.rb +82 -1
- data/lib/ollama/commands/embeddings.rb +72 -1
- data/lib/ollama/commands/generate.rb +118 -2
- data/lib/ollama/commands/ps.rb +55 -0
- data/lib/ollama/commands/pull.rb +72 -1
- data/lib/ollama/commands/push.rb +65 -1
- data/lib/ollama/commands/show.rb +64 -1
- data/lib/ollama/commands/tags.rb +50 -0
- data/lib/ollama/commands/version.rb +50 -1
- data/lib/ollama/dto.rb +98 -1
- data/lib/ollama/errors.rb +50 -0
- data/lib/ollama/handlers/collector.rb +34 -0
- data/lib/ollama/handlers/concern.rb +60 -2
- data/lib/ollama/handlers/dump_json.rb +20 -0
- data/lib/ollama/handlers/dump_yaml.rb +22 -0
- data/lib/ollama/handlers/markdown.rb +28 -0
- data/lib/ollama/handlers/nop.rb +20 -0
- data/lib/ollama/handlers/print.rb +27 -0
- data/lib/ollama/handlers/progress.rb +38 -0
- data/lib/ollama/handlers/say.rb +66 -0
- data/lib/ollama/handlers/single.rb +35 -0
- data/lib/ollama/handlers.rb +9 -0
- data/lib/ollama/image.rb +67 -0
- data/lib/ollama/json_loader.rb +17 -0
- data/lib/ollama/message.rb +46 -1
- data/lib/ollama/options.rb +27 -2
- data/lib/ollama/response.rb +17 -0
- data/lib/ollama/tool/function/parameters/property.rb +41 -1
- data/lib/ollama/tool/function/parameters.rb +40 -1
- data/lib/ollama/tool/function.rb +44 -1
- data/lib/ollama/tool.rb +37 -1
- data/lib/ollama/version.rb +1 -1
- data/lib/ollama.rb +26 -0
- data/ollama-ruby.gemspec +6 -5
- data/spec/ollama/client/doc_spec.rb +1 -1
- data/spec/ollama/client_spec.rb +19 -1
- data/spec/ollama/commands/chat_spec.rb +1 -1
- data/spec/ollama/commands/copy_spec.rb +1 -1
- data/spec/ollama/commands/create_spec.rb +1 -1
- data/spec/ollama/commands/delete_spec.rb +1 -1
- data/spec/ollama/commands/embed_spec.rb +1 -1
- data/spec/ollama/commands/embeddings_spec.rb +1 -1
- data/spec/ollama/commands/generate_spec.rb +1 -1
- data/spec/ollama/commands/ps_spec.rb +1 -1
- data/spec/ollama/commands/pull_spec.rb +1 -1
- data/spec/ollama/commands/push_spec.rb +1 -1
- data/spec/ollama/commands/show_spec.rb +1 -1
- data/spec/ollama/commands/tags_spec.rb +1 -1
- data/spec/ollama/commands/version_spec.rb +1 -1
- data/spec/ollama/handlers/collector_spec.rb +1 -1
- data/spec/ollama/handlers/dump_json_spec.rb +1 -1
- data/spec/ollama/handlers/dump_yaml_spec.rb +1 -1
- data/spec/ollama/handlers/markdown_spec.rb +1 -1
- data/spec/ollama/handlers/nop_spec.rb +2 -2
- data/spec/ollama/handlers/print_spec.rb +1 -1
- data/spec/ollama/handlers/progress_spec.rb +1 -1
- data/spec/ollama/handlers/say_spec.rb +1 -1
- data/spec/ollama/handlers/single_spec.rb +1 -1
- data/spec/ollama/image_spec.rb +1 -1
- data/spec/ollama/message_spec.rb +1 -1
- data/spec/ollama/options_spec.rb +1 -1
- data/spec/ollama/tool_spec.rb +1 -1
- data/spec/spec_helper.rb +2 -6
- metadata +24 -5
data/lib/ollama/commands/embed.rb
CHANGED
@@ -1,20 +1,101 @@
+# A command class that represents the embed API endpoint for Ollama.
+#
+# This class is used to interact with the Ollama API's embed endpoint, which
+# generates embeddings for text input using a specified model. It inherits from
+# the base command structure and provides the necessary functionality to execute
+# embedding requests for generating vector representations of text.
+#
+# @example Generating embeddings for a single text
+#   embed = ollama.embed(model: 'all-minilm', input: 'Why is the sky blue?')
+#
+# @example Generating embeddings for multiple texts
+#   embed = ollama.embed(model: 'all-minilm', input: ['Why is the sky blue?', 'Why is the grass green?'])
 class Ollama::Commands::Embed
   include Ollama::DTO

+  # The path method returns the API endpoint path for embed requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's embed endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for generating embeddings.
+  #
+  # @return [ String ] the API endpoint path '/api/embed' for embed requests
   def self.path
     '/api/embed'
   end

+  # The initialize method sets up a new instance with streaming disabled.
+  #
+  # This method is responsible for initializing a new object instance and
+  # configuring it with parameters required for embedding operations. It sets
+  # up the model, input text(s), and optional parameters while explicitly
+  # disabling streaming since embedding operations are typically non-streaming.
+  #
+  # @param model [ String ] the name of the model to use for generating embeddings
+  # @param input [ String, Array<String> ] the text input(s) to generate embeddings for
+  # @param options [ Ollama::Options, nil ] optional configuration parameters for the model
+  # @param truncate [ Boolean, nil ] whether to truncate the input if it exceeds context length
+  # @param keep_alive [ String, nil ] duration to keep the model loaded in memory
   def initialize(model:, input:, options: nil, truncate: nil, keep_alive: nil)
     @model, @input, @options, @truncate, @keep_alive =
       model, input, options, truncate, keep_alive
     @stream = false
   end

-
+  # The model attribute reader returns the model name associated with the object.
+  #
+  # @return [ String ] the name of the model used by the command instance
+  attr_reader :model

+  # The input attribute reader returns the text input(s) associated with the object.
+  #
+  # @return [ String, Array<String> ] the text input(s) to generate embeddings for
+  attr_reader :input
+
+  # The options attribute reader returns the model configuration options associated with the object.
+  #
+  # @return [ Ollama::Options, nil ] optional configuration parameters for the model
+  attr_reader :options
+
+  # The truncate attribute reader returns the truncate setting associated with the object.
+  #
+  # @return [ Boolean, nil ] whether to truncate the input if it exceeds context length
+  attr_reader :truncate
+
+  # The keep_alive attribute reader returns the keep-alive duration associated with the object.
+  #
+  # @return [ String, nil ] duration to keep the model loaded in memory
+  attr_reader :keep_alive
+
+  # The stream attribute reader returns the streaming behavior setting
+  # associated with the object.
+  #
+  # @return [ FalseClass ] the streaming behavior flag, indicating whether
+  #   streaming is enabled for the command execution (always false for embed commands)
+  attr_reader :stream
+
+
+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes a command request using the specified handler.
+  #
+  # This method initiates a POST request to the Ollama API's embed endpoint,
+  # utilizing the client instance to send the request and process responses
+  # through the provided handler. It handles non-streaming scenarios since
+  # embedding commands do not support streaming.
+  #
+  # @param handler [ Ollama::Handler ] the handler object responsible for processing API
+  #   responses
+  #
+  # @return [ self ] returns the current instance after initiating the request
   def perform(handler)
     @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
   end
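For orientation, a minimal usage sketch of the embed command documented above, assuming a local Ollama server at the default address; the Ollama::Client constructor follows the gem's README, and the `embeddings` accessor on the response is an assumption mirroring the /api/embed JSON, not part of this diff:

require 'ollama'

# Assumed local endpoint; adjust base_url to your Ollama server.
ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

# A single input yields one vector; an array of inputs yields one vector per entry.
response = ollama.embed(model: 'all-minilm', input: 'Why is the sky blue?')
vector   = response.embeddings.first # assumption: response exposes an `embeddings` array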
data/lib/ollama/commands/embeddings.rb
CHANGED
@@ -1,19 +1,90 @@
+# A command class that represents the embeddings API endpoint for Ollama.
+#
+# This class is used to interact with the Ollama API's embeddings endpoint, which
+# generates embeddings for text input using a specified model. It inherits from
+# the base command structure and provides the necessary functionality to execute
+# embedding requests for generating vector representations of text.
+#
+# @example Generating embeddings for a prompt
+#   embeddings = ollama.embeddings(model: 'mxbai-embed-large', prompt: 'The sky is blue because of rayleigh scattering')
 class Ollama::Commands::Embeddings
   include Ollama::DTO

+  # The path method returns the API endpoint path for embeddings requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's embeddings endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for generating embeddings.
+  #
+  # @return [ String ] the API endpoint path '/api/embeddings' for embeddings requests
   def self.path
     '/api/embeddings'
   end

+  # The initialize method sets up a new instance with streaming disabled.
+  #
+  # This method is responsible for initializing a new object instance and
+  # configuring it with parameters required for embedding operations. It sets
+  # up the model, prompt text, and optional parameters while explicitly
+  # disabling streaming since embedding operations are typically non-streaming.
+  #
+  # @param model [ String ] the name of the model to use for generating embeddings
+  # @param prompt [ String ] the text prompt to generate embeddings for
+  # @param options [ Ollama::Options, nil ] optional configuration parameters for the model
+  # @param keep_alive [ String, nil ] duration to keep the model loaded in memory
   def initialize(model:, prompt:, options: nil, keep_alive: nil)
     @model, @prompt, @options, @keep_alive, @stream =
       model, prompt, options, keep_alive, false
   end

-
+  # The model attribute reader returns the model name associated with the object.
+  #
+  # @return [ String ] the name of the model used by the command instance
+  attr_reader :model

+  # The prompt attribute reader returns the text prompt associated with the object.
+  #
+  # @return [ String ] the text prompt to generate embeddings for
+  attr_reader :prompt
+
+  # The options attribute reader returns the model configuration options associated with the object.
+  #
+  # @return [ Ollama::Options, nil ] optional configuration parameters for the model
+  attr_reader :options
+
+  # The keep_alive attribute reader returns the keep-alive duration associated with the object.
+  #
+  # @return [ String, nil ] duration to keep the model loaded in memory
+  attr_reader :keep_alive
+
+  # The stream attribute reader returns the streaming behavior setting
+  # associated with the object.
+  #
+  # @return [ FalseClass ] the streaming behavior flag, indicating whether
+  #   streaming is enabled for the command execution (always false for embeddings commands)
+  attr_reader :stream
+
+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes a command request using the specified handler.
+  #
+  # This method initiates a POST request to the Ollama API's embeddings endpoint,
+  # utilizing the client instance to send the request and process responses
+  # through the provided handler. It handles non-streaming scenarios since
+  # embeddings commands do not support streaming.
+  #
+  # @param handler [ Ollama::Handler ] the handler object responsible for processing API
+  #   responses
+  #
+  # @return [ self ] returns the current instance after initiating the request
   def perform(handler)
     @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
   end
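The older embeddings endpoint takes a single prompt rather than input(s). A hedged sketch under the same assumptions as above (local server, README-style client); the singular `embedding` accessor mirrors the /api/embeddings JSON and is an assumption:

require 'ollama'

ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

response = ollama.embeddings(
  model:  'mxbai-embed-large',
  prompt: 'The sky is blue because of rayleigh scattering'
)
vector = response.embedding # assumption: singular field, as in the /api/embeddings JSON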
data/lib/ollama/commands/generate.rb
CHANGED
@@ -1,20 +1,136 @@
+# A command class that represents the generate API endpoint for Ollama.
+#
+# This class is used to interact with the Ollama API's generate endpoint, which
+# generates text completions using a specified model. It inherits from the base
+# command structure and provides the necessary functionality to execute
+# generation requests for text completion tasks.
+#
+# @example Generating a text completion
+#   generate = ollama.generate(model: 'llama3.1', prompt: 'Why is the sky blue?')
+#
+# @example Generating with streaming enabled
+#   generate = ollama.generate(model: 'llama3.1', prompt: 'Why is the sky blue?', stream: true)
 class Ollama::Commands::Generate
   include Ollama::DTO

+  # The path method returns the API endpoint path for generate requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's generate endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for text generation operations.
+  #
+  # @return [ String ] the API endpoint path '/api/generate' for generate requests
   def self.path
     '/api/generate'
   end

+  # The initialize method sets up a new instance with default streaming behavior.
+  #
+  # This method is responsible for initializing a Generate command object with
+  # all the necessary parameters for text generation. It handles optional
+  # parameters and ensures proper data types (e.g., converting images to arrays).
+  #
+  # @param model [ String ] the name of the model to use for generation
+  # @param prompt [ String ] the text prompt to generate completions for
+  # @param suffix [ String, nil ] optional suffix to append to the generated text
+  # @param images [ Ollama::Image, Array<Ollama::Image>, nil ] optional image(s) to include in the request
+  # @param format [ String, nil ] optional format specification for the response
+  # @param options [ Ollama::Options, nil ] optional configuration parameters for the model
+  # @param system [ String, nil ] optional system message to set context for generation
+  # @param template [ String, nil ] optional template to use for formatting the prompt
+  # @param context [ Array<Integer>, nil ] optional context vector for continuation
+  # @param stream [ Boolean, nil ] whether to stream responses (default: false)
+  # @param raw [ Boolean, nil ] whether to return raw output without formatting
+  # @param keep_alive [ String, nil ] duration to keep the model loaded in memory
+  # @param think [ Boolean, nil ] whether to enable thinking mode for generation
   def initialize(model:, prompt:, suffix: nil, images: nil, format: nil, options: nil, system: nil, template: nil, context: nil, stream: nil, raw: nil, keep_alive: nil, think: nil)
     @model, @prompt, @suffix, @images, @format, @options, @system, @template, @context, @stream, @raw, @keep_alive, @think =
       model, prompt, suffix, (Array(images) if images), format, options, system, template, context, stream, raw, keep_alive, think
   end

-
-
+  # The model attribute reader returns the model name associated with the generate command.
+  #
+  # @return [ String ] the name of the model used for generation
+  attr_reader :model

+  # The prompt attribute reader returns the text prompt used for generation.
+  #
+  # @return [ String ] the text prompt to generate completions for
+  attr_reader :prompt
+
+  # The suffix attribute reader returns any suffix that was appended to the generated text.
+  #
+  # @return [ String, nil ] optional suffix to append to the generated text
+  attr_reader :suffix
+
+  # The images attribute reader returns image objects associated with the generate command.
+  #
+  # @return [ Array<Ollama::Image>, nil ] array of image objects, or nil if none provided
+  attr_reader :images
+
+  # The format attribute reader returns the format specification for the response.
+  #
+  # @return [ String, nil ] optional format specification for the response
+  attr_reader :format
+
+  # The options attribute reader returns configuration parameters for the model.
+  #
+  # @return [ Ollama::Options, nil ] optional configuration parameters for the model
+  attr_reader :options
+
+  # The system attribute reader returns the system message that sets context for generation.
+  #
+  # @return [ String, nil ] optional system message to set context for generation
+  attr_reader :system
+
+  # The template attribute reader returns the template used for formatting the prompt.
+  #
+  # @return [ String, nil ] optional template to use for formatting the prompt
+  attr_reader :template
+
+  # The context attribute reader returns the context vector for continuation.
+  #
+  # @return [ Array<Integer>, nil ] optional context vector for continuation
+  attr_reader :context
+
+  # The stream attribute reader returns whether responses will be streamed.
+  #
+  # @return [ Boolean, nil ] whether to stream responses (default: false)
+  attr_reader :stream
+
+  # The raw attribute reader returns whether raw output without formatting should be returned.
+  #
+  # @return [ Boolean, nil ] whether to return raw output without formatting
+  attr_reader :raw
+
+  # The keep_alive attribute reader returns the duration to keep the model loaded in memory.
+  #
+  # @return [ String, nil ] duration to keep the model loaded in memory
+  attr_reader :keep_alive
+
+  # The think attribute reader returns whether thinking mode is enabled for generation.
+  #
+  # @return [ Boolean, nil ] whether to enable thinking mode for generation
+  attr_reader :think
+
+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes the generate command using the specified handler.
+  #
+  # This method sends a POST request to the Ollama API's generate endpoint with
+  # the command parameters serialized as JSON. It delegates to the client's request
+  # method for actual HTTP communication.
+  #
+  # @param handler [ Ollama::Handler ] the handler to process responses from the API
+  # @return [ void ]
   def perform(handler)
     @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
   end
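Since generate supports both modes, here is a short sketch of a non-streaming and a streaming call; it assumes a local server and that command methods accept a block as the response handler, in the style of the gem's README examples:

require 'ollama'

ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

# Non-streaming: the default handler returns one response object.
puts ollama.generate(model: 'llama3.1', prompt: 'Why is the sky blue?').response

# Streaming: each partial response is yielded to the block as it arrives.
ollama.generate(model: 'llama3.1', prompt: 'Why is the sky blue?', stream: true) do |response|
  print response.response
end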
data/lib/ollama/commands/ps.rb
CHANGED
@@ -1,18 +1,73 @@
+# A command class that represents the ps API endpoint for Ollama.
+#
+# This class is used to interact with the Ollama API's ps endpoint, which
+# retrieves information about running models. It inherits from the base command
+# structure and provides the necessary functionality to execute ps requests
+# for monitoring active model processes.
+#
+# @example Retrieving information about running models
+#   ps = ollama.ps
+#   ps.models # => array of running model information
 class Ollama::Commands::Ps
+  # The path method returns the API endpoint path for ps requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's ps endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for retrieving information
+  # about running models.
+  #
+  # @return [ String ] the API endpoint path '/api/ps' for ps requests
   def self.path
     '/api/ps'
   end

+  # The initialize method sets up a new instance with streaming disabled.
+  #
+  # This method is responsible for initializing a new object instance and
+  # configuring it with a default setting that disables streaming behavior.
+  # It is typically called during the object creation process to establish
+  # the initial state of the instance.
+  #
+  # @param parameters [ Hash ] a hash containing initialization parameters
+  #   (must be empty for this command)
+  #
+  # @raise [ ArgumentError ] if any parameters are provided (ps endpoint
+  #   does not accept parameters)
   def initialize(**parameters)
     parameters.empty? or raise ArgumentError,
       "Invalid parameters: #{parameters.keys * ' '}"
     @stream = false
   end

+  # The stream attribute reader returns the streaming behavior setting
+  # associated with the object.
+  #
+  # @return [ TrueClass, FalseClass ] the streaming behavior flag, indicating
+  #   whether streaming is enabled for the command execution
+  #   (always false for ps commands)
   attr_reader :stream

+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes a command request using the specified handler.
+  #
+  # This method initiates a GET request to the Ollama API's ps endpoint,
+  # utilizing the client instance to send the request and process responses
+  # through the provided handler. It handles non-streaming scenarios since
+  # ps commands do not support streaming.
+  #
+  # @param handler [ Ollama::Handler ] the handler object responsible for processing API
+  #   responses
+  #
+  # @return [ self ] returns the current instance after initiating the request
   def perform(handler)
     @client.request(method: :get, path: self.class.path, stream:, handler:)
   end
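A sketch of the ps command under the same assumptions; `ps.models` comes from the @example above, while the per-model fields (`name`, `size`, `expires_at`) are assumptions mirroring the /api/ps JSON:

require 'ollama'

ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

# Inspect the models currently loaded in memory.
ollama.ps.models.each do |model|
  puts "#{model.name}: #{model.size} bytes, expires #{model.expires_at}" # assumed field names
end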
data/lib/ollama/commands/pull.rb
CHANGED
@@ -1,18 +1,89 @@
+# The command method creates a command method for the Ollama client
+#
+# Defines a new command method that corresponds to an Ollama API endpoint. The
+# command method can be invoked with parameters and an optional handler to
+# process responses. It determines which handler to use based on whether the
+# command supports streaming and the presence of an explicit handler.
+#
+# @param name [ Symbol ] the name of the command to define
+# @param default_handler [ Class ] the default handler class to use when no explicit handler is provided
+# @param stream_handler [ Class, nil ] the handler class to use for streaming responses, if applicable
+#
+# @note Create Command `name`, if `stream` was true, set `stream_handler`
+#   as default, otherwise `default_handler`.
+#
+# @return [ self ] returns the receiver after defining the command method
 class Ollama::Commands::Pull
   include Ollama::DTO

+  # The path method returns the API endpoint path for pull requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's pull endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for downloading models from a
+  # remote registry.
+  #
+  # @return [ String ] the API endpoint path '/api/pull' for pull requests
   def self.path
     '/api/pull'
   end

+  # The initialize method sets up a new instance with streaming enabled by default.
+  #
+  # This method is responsible for initializing a new object instance and
+  # configuring it with a default setting that enables streaming behavior.
+  # It is typically called during the object creation process to establish
+  # the initial state of the instance.
+  #
+  # @param model [ String ] the name of the model to be pushed
+  # @param insecure [ TrueClass, FalseClass, nil ] whether to allow insecure
+  #   connections, or nil to use default
+  # @param stream [ TrueClass, FalseClass ] whether to enable streaming for
+  #   the operation, defaults to true
   def initialize(model:, insecure: nil, stream: true)
     @model, @insecure, @stream = model, insecure, stream
   end

-
+  # The model attribute reader returns the model name associated with the object.
+  #
+  # @return [ String ] the name of the model used by the command instance
+  attr_reader :model

+  # The insecure attribute reader returns the insecure connection setting
+  # associated with the object.
+  #
+  # @return [ TrueClass, FalseClass, nil ] the insecure flag indicating whether
+  #   insecure connections are allowed, or nil if not set
+  attr_reader :insecure
+
+  # The stream attribute reader returns the streaming behavior setting
+  # associated with the object.
+  #
+  # @return [ TrueClass, FalseClass ] the streaming behavior flag, indicating
+  #   whether streaming is enabled for the command execution
+  attr_reader :stream
+
+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes a command request using the specified handler.
+  #
+  # This method initiates a request to the Ollama API endpoint associated with
+  # the command, utilizing the client instance to send the request and process
+  # responses through the provided handler. It handles both streaming and
+  # non-streaming scenarios based on the command's configuration.
+  #
+  # @param handler [ Ollama::Handler ] the handler object responsible for processing API
+  #   responses
+  #
+  # @return [ self ] returns the current instance after initiating the request
   def perform(handler)
     @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
   end
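Pull streams status updates by default, so a handler or block sees each progress message. A sketch assuming block-style handlers and /api/pull-style `status`/`completed`/`total` fields; the gem also ships an Ollama::Handlers::Progress handler (listed in the file changes above) intended for exactly this kind of streaming output:

require 'ollama'

ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

# Download a model; each streamed status update is yielded to the block.
ollama.pull(model: 'llama3.1') do |response|
  if response.total # assumed field names mirroring the /api/pull JSON
    puts "#{response.status}: #{response.completed}/#{response.total}"
  else
    puts response.status
  end
end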
data/lib/ollama/commands/push.rb
CHANGED
@@ -1,18 +1,82 @@
+# A command class that represents the push API endpoint for Ollama.
+#
+# This class is used to interact with the Ollama API's push endpoint, which
+# uploads a model to a remote registry. It inherits from the base command
+# structure and provides the necessary functionality to execute push requests
+# for model deployment.
+#
+# @example Pushing a model to a remote registry
+#   push = ollama.push(model: 'user/llama3.1')
 class Ollama::Commands::Push
   include Ollama::DTO

+  # The path method returns the API endpoint path for push requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's push endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for uploading models to a
+  # remote registry.
+  #
+  # @return [ String ] the API endpoint path '/api/push' for push requests
   def self.path
     '/api/push'
   end

+  # The initialize method sets up a new instance with streaming enabled by default.
+  #
+  # This method is responsible for initializing a new object instance and
+  # configuring it with a default setting that enables streaming behavior.
+  # It is typically called during the object creation process to establish
+  # the initial state of the instance.
+  #
+  # @param model [ String ] the name of the model to be pushed
+  # @param insecure [ TrueClass, FalseClass, nil ] whether to allow insecure
+  #   connections, or nil to use default
+  # @param stream [ TrueClass, FalseClass ] whether to enable streaming for
+  #   the operation, defaults to true
   def initialize(model:, insecure: nil, stream: true)
     @model, @insecure, @stream = model, insecure, stream
   end

-
+  # The model attribute reader returns the model name associated with the object.
+  #
+  # @return [ String ] the name of the model to be pushed
+  attr_reader :model

+  # The insecure attribute reader returns the insecure connection setting
+  # associated with the object.
+  #
+  # @return [ TrueClass, FalseClass, nil ] the insecure flag indicating whether
+  #   insecure connections are allowed, or nil if not set
+  attr_reader :insecure
+
+  # The stream attribute reader returns the streaming behavior setting
+  # associated with the object.
+  #
+  # @return [ TrueClass, FalseClass ] the streaming behavior flag, indicating
+  #   whether streaming is enabled for the command execution
+  attr_reader :stream
+
+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes a command request using the specified handler.
+  #
+  # This method initiates a request to the Ollama API endpoint associated with
+  # the command, utilizing the client instance to send the request and process
+  # responses through the provided handler. It handles both streaming and
+  # non-streaming scenarios based on the command's configuration.
+  #
+  # @param handler [ Ollama::Handler ] the handler object responsible for processing API responses
+  #
+  # @return [ self ] returns the current instance after initiating the request
   def perform(handler)
     @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
   end
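Push mirrors pull but uploads a namespaced model; the block-style handler and the `status` field are assumptions along the same lines as the pull sketch:

require 'ollama'

ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

# Upload a model to a registry namespace you control.
ollama.push(model: 'user/llama3.1') do |response|
  puts response.status # assumed field name mirroring the /api/push JSON
end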
data/lib/ollama/commands/show.rb
CHANGED
@@ -1,19 +1,82 @@
+# A command class that represents the show API endpoint for Ollama.
+#
+# This class is used to interact with the Ollama API's show endpoint, which
+# retrieves detailed information about a specific model. It inherits from the
+# base command structure and provides the necessary functionality to execute
+# show requests for model details.
+#
+# @example Retrieving detailed information about a model
+#   show = ollama.show(model: 'llama3.1')
+#   show.model_info # => hash containing model details
 class Ollama::Commands::Show
   include Ollama::DTO

+  # The path method returns the API endpoint path for show requests.
+  #
+  # This class method provides the specific URL path used to interact with the
+  # Ollama API's show endpoint. It is utilized internally by the command
+  # structure to determine the correct API route for retrieving detailed
+  # information about a specific model.
+  #
+  # @return [ String ] the API endpoint path '/api/show' for show requests
   def self.path
     '/api/show'
   end

+  # The initialize method sets up a new instance with streaming disabled.
+  #
+  # This method is responsible for initializing a new object instance and
+  # configuring it with a default setting that disables streaming behavior.
+  # It is typically called during the object creation process to establish
+  # the initial state of the instance.
+  #
+  # @param model [ String ] the name of the model to be used
+  # @param verbose [ TrueClass, FalseClass, nil ] whether to enable verbose
+  #   output, or nil to use default
   def initialize(model:, verbose: nil)
     @model, @verbose = model, verbose
     @stream = false
   end

-
+  # The model attribute reader returns the model name associated with the object.
+  #
+  # @return [ String ] the name of the model used by the command instance
+  attr_reader :model

+  # The verbose attribute reader returns the verbose setting associated with
+  # the object.
+  #
+  # @return [ TrueClass, FalseClass, nil ] the verbose flag indicating whether
+  #   verbose output is enabled, or nil if not set
+  attr_reader :verbose
+
+  # The stream attribute reader returns the streaming behavior setting
+  # associated with the object.
+  #
+  # @return [ TrueClass, FalseClass ] the streaming behavior flag, indicating
+  #   whether streaming is enabled for the command execution
+  attr_reader :stream
+
+  # The client attribute writer allows setting the client instance associated
+  # with the object.
+  #
+  # This method assigns the client that will be used to perform requests and
+  # handle responses for this command. It is typically called internally when a
+  # command is executed through a client instance.
+  #
+  # @attr_writer [ Ollama::Client ] the assigned client instance
   attr_writer :client

+  # The perform method executes a command request using the specified handler.
+  #
+  # This method initiates a request to the Ollama API endpoint associated with
+  # the command, utilizing the client instance to send the request and process
+  # responses through the provided handler. It handles both streaming and
+  # non-streaming scenarios based on the command's configuration.
+  #
+  # @param handler [ Ollama::Handler ] the handler object responsible for processing API responses
+  #
+  # @return [ self ] returns the current instance after initiating the request
   def perform(handler)
     @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
   end
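Finally, a sketch of the show command; `model_info` appears in the @example above, while `details` and its nested keys are assumptions mirroring the /api/show JSON:

require 'ollama'

ollama = Ollama::Client.new(base_url: 'http://localhost:11434')

show = ollama.show(model: 'llama3.1', verbose: true)
puts show.details.family # assumption: `details` mirrors the /api/show JSON
pp   show.model_info     # detailed model metadata, as documented above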