ollama-ruby 1.4.0 → 1.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (83)
  1. checksums.yaml +4 -4
  2. data/.contexts/code_comment.rb +25 -0
  3. data/.contexts/full.rb +43 -0
  4. data/.contexts/info.rb +17 -0
  5. data/.contexts/lib.rb +27 -0
  6. data/.contexts/yard.md +93 -0
  7. data/CHANGES.md +39 -0
  8. data/README.md +74 -23
  9. data/Rakefile +4 -3
  10. data/bin/ollama_cli +41 -9
  11. data/bin/ollama_console +18 -0
  12. data/lib/ollama/client/command.rb +29 -3
  13. data/lib/ollama/client/configuration/config.rb +114 -3
  14. data/lib/ollama/client/doc.rb +18 -0
  15. data/lib/ollama/client.rb +131 -2
  16. data/lib/ollama/commands/chat.rb +96 -1
  17. data/lib/ollama/commands/copy.rb +59 -1
  18. data/lib/ollama/commands/create.rb +112 -1
  19. data/lib/ollama/commands/delete.rb +53 -1
  20. data/lib/ollama/commands/embed.rb +82 -1
  21. data/lib/ollama/commands/embeddings.rb +72 -1
  22. data/lib/ollama/commands/generate.rb +118 -2
  23. data/lib/ollama/commands/ps.rb +55 -0
  24. data/lib/ollama/commands/pull.rb +72 -1
  25. data/lib/ollama/commands/push.rb +65 -1
  26. data/lib/ollama/commands/show.rb +64 -1
  27. data/lib/ollama/commands/tags.rb +50 -0
  28. data/lib/ollama/commands/version.rb +50 -1
  29. data/lib/ollama/dto.rb +98 -1
  30. data/lib/ollama/errors.rb +50 -0
  31. data/lib/ollama/handlers/collector.rb +34 -0
  32. data/lib/ollama/handlers/concern.rb +60 -2
  33. data/lib/ollama/handlers/dump_json.rb +20 -0
  34. data/lib/ollama/handlers/dump_yaml.rb +22 -0
  35. data/lib/ollama/handlers/markdown.rb +28 -0
  36. data/lib/ollama/handlers/nop.rb +20 -0
  37. data/lib/ollama/handlers/print.rb +27 -0
  38. data/lib/ollama/handlers/progress.rb +38 -0
  39. data/lib/ollama/handlers/say.rb +66 -0
  40. data/lib/ollama/handlers/single.rb +35 -0
  41. data/lib/ollama/handlers.rb +9 -0
  42. data/lib/ollama/image.rb +67 -0
  43. data/lib/ollama/json_loader.rb +17 -0
  44. data/lib/ollama/message.rb +46 -1
  45. data/lib/ollama/options.rb +27 -2
  46. data/lib/ollama/response.rb +17 -0
  47. data/lib/ollama/tool/function/parameters/property.rb +41 -1
  48. data/lib/ollama/tool/function/parameters.rb +40 -1
  49. data/lib/ollama/tool/function.rb +44 -1
  50. data/lib/ollama/tool.rb +37 -1
  51. data/lib/ollama/version.rb +1 -1
  52. data/lib/ollama.rb +26 -0
  53. data/ollama-ruby.gemspec +7 -6
  54. data/spec/ollama/client/doc_spec.rb +1 -1
  55. data/spec/ollama/client_spec.rb +19 -1
  56. data/spec/ollama/commands/chat_spec.rb +1 -1
  57. data/spec/ollama/commands/copy_spec.rb +1 -1
  58. data/spec/ollama/commands/create_spec.rb +1 -1
  59. data/spec/ollama/commands/delete_spec.rb +1 -1
  60. data/spec/ollama/commands/embed_spec.rb +1 -1
  61. data/spec/ollama/commands/embeddings_spec.rb +1 -1
  62. data/spec/ollama/commands/generate_spec.rb +1 -1
  63. data/spec/ollama/commands/ps_spec.rb +1 -1
  64. data/spec/ollama/commands/pull_spec.rb +1 -1
  65. data/spec/ollama/commands/push_spec.rb +1 -1
  66. data/spec/ollama/commands/show_spec.rb +1 -1
  67. data/spec/ollama/commands/tags_spec.rb +1 -1
  68. data/spec/ollama/commands/version_spec.rb +1 -1
  69. data/spec/ollama/handlers/collector_spec.rb +1 -1
  70. data/spec/ollama/handlers/dump_json_spec.rb +1 -1
  71. data/spec/ollama/handlers/dump_yaml_spec.rb +1 -1
  72. data/spec/ollama/handlers/markdown_spec.rb +1 -1
  73. data/spec/ollama/handlers/nop_spec.rb +2 -2
  74. data/spec/ollama/handlers/print_spec.rb +1 -1
  75. data/spec/ollama/handlers/progress_spec.rb +1 -1
  76. data/spec/ollama/handlers/say_spec.rb +1 -1
  77. data/spec/ollama/handlers/single_spec.rb +1 -1
  78. data/spec/ollama/image_spec.rb +1 -1
  79. data/spec/ollama/message_spec.rb +1 -1
  80. data/spec/ollama/options_spec.rb +1 -1
  81. data/spec/ollama/tool_spec.rb +1 -1
  82. data/spec/spec_helper.rb +2 -6
  83. metadata +25 -5
# A command class that represents the create API endpoint for Ollama.
#
# This class is used to interact with the Ollama API's create endpoint, which
# creates a new model based on a modelfile or an existing model. It provides
# the necessary functionality to execute model creation requests.
#
# @example Creating a new model from an existing model
#   create = ollama.create(model: 'llama3.1-wopr', from: 'llama3.1', system: 'You are WOPR from WarGames')
#
# @example Creating a model with files and parameters
#   create = ollama.create(
#     model: 'my-model',
#     from: 'llama3.1',
#     files: { 'modelfile' => 'FROM llama3.1\nPARAMETER temperature 0.7' },
#     parameters: Ollama::Options.new(temperature: 0.7, num_ctx: 8192)
#   )
class Ollama::Commands::Create
  include Ollama::DTO

  # The path method returns the API endpoint path for create requests.
  #
  # @return [ String ] the API endpoint path '/api/create' for create requests
  def self.path
    '/api/create'
  end

  # Sets up a new create command instance with streaming enabled by default.
  #
  # @param model [ String ] the name of the new model to be created
  # @param from [ String, nil ] the base model to create from (e.g. 'llama3.1')
  # @param files [ Hash, nil ] file contents for the modelfile and other files
  # @param adapters [ Hash, nil ] adapter files to use for quantization
  # @param template [ String, nil ] the template to use for the model
  # @param license [ String, Array<String>, nil ] the license(s) for the model
  # @param system [ String, nil ] the system prompt to use for the model
  # @param parameters [ Ollama::Options, nil ] configuration parameters for the model
  # @param messages [ Array<Ollama::Message>, nil ] initial conversation messages
  # @param stream [ TrueClass, FalseClass ] whether to enable streaming, defaults to true
  # @param quantize [ String, nil ] quantization method to use (e.g. 'Q4_0')
  def initialize(model:, from: nil, files: nil, adapters: nil, template: nil, license: nil, system: nil, parameters: nil, messages: nil, stream: true, quantize: nil)
    # Fix: the documented `template:` keyword was previously accepted but never
    # assigned to @template, so it was silently dropped from the serialized
    # request body (NOTE(review): DTO serialization appears driven by the
    # object's attributes — confirm against Ollama::DTO#as_json).
    @model, @from, @files, @adapters, @template, @license, @system, @parameters, @messages, @stream, @quantize =
      model, from, as_hash(files), as_hash(adapters), template, as_array(license), system,
      as_hash(parameters), as_array_of_hashes(messages), stream, quantize
  end

  # @return [ String ] the name of the new model to be created
  attr_reader :model

  # @return [ String, nil ] the base model to create from (e.g. 'llama3.1')
  attr_reader :from

  # @return [ Hash, nil ] file contents for the modelfile and other files
  attr_reader :files

  # @return [ Hash, nil ] adapter files to use for quantization
  attr_reader :adapters

  # @return [ String, nil ] the template to use for the model
  attr_reader :template

  # @return [ Array<String>, nil ] the license(s) for the model
  attr_reader :license

  # @return [ String, nil ] the system prompt to use for the model
  attr_reader :system

  # @return [ Hash, nil ] configuration parameters for the model
  attr_reader :parameters

  # @return [ Array<Hash>, nil ] initial conversation messages
  attr_reader :messages

  # @return [ TrueClass, FalseClass ] the streaming behavior flag
  #   (defaults to true for create commands)
  attr_reader :stream

  # @return [ String, nil ] quantization method to use (e.g. 'Q4_0')
  attr_reader :quantize

  # Assigns the client that will be used to perform requests and handle
  # responses for this command; typically called internally when the command
  # is executed through a client instance.
  #
  # @attr_writer [ Ollama::Client ] the assigned client instance
  attr_writer :client

  # Executes a POST request to the create endpoint through the assigned
  # client, processing responses with the given handler.
  #
  # @param handler [ Ollama::Handler ] the handler object responsible for
  #   processing API responses
  #
  # @return [ self ] returns the current instance after initiating the request
  def perform(handler)
    @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
  end
end
# A command class that represents the delete API endpoint for Ollama.
#
# This class is used to interact with the Ollama API's delete endpoint, which
# removes a specified model from the local system.
#
# @example Deleting a local model
#   delete = ollama.delete(model: 'user/llama3.1')
class Ollama::Commands::Delete
  include Ollama::DTO

  # The path method returns the API endpoint path for delete requests.
  #
  # @return [ String ] the API endpoint path '/api/delete' for delete requests
  def self.path
    '/api/delete'
  end

  # Sets up a new delete command for the given model, with streaming
  # explicitly disabled (delete operations are non-streaming).
  #
  # @param model [ String ] the name of the model to be deleted
  def initialize(model:)
    @model  = model
    @stream = false
  end

  # @return [ String ] the name of the model to be deleted
  attr_reader :model

  # @return [ FalseClass ] the streaming behavior flag
  #   (always false for delete commands)
  attr_reader :stream

  # Assigns the client that will be used to perform requests and handle
  # responses for this command; typically called internally when the command
  # is executed through a client instance.
  #
  # @attr_writer [ Ollama::Client ] the assigned client instance
  attr_writer :client

  # Executes a DELETE request to the delete endpoint through the assigned
  # client, processing responses with the given handler (non-streaming).
  #
  # @param handler [ Ollama::Handler ] the handler object responsible for
  #   processing API responses
  #
  # @return [ self ] returns the current instance after initiating the request
  def perform(handler)
    @client.request(method: :delete, path: self.class.path, body: to_json, stream:, handler:)
  end
end
# A command class that represents the embed API endpoint for Ollama.
#
# This class is used to interact with the Ollama API's embed endpoint, which
# generates embeddings (vector representations) for text input using a
# specified model.
#
# @example Generating embeddings for a single text
#   embed = ollama.embed(model: 'all-minilm', input: 'Why is the sky blue?')
#
# @example Generating embeddings for multiple texts
#   embed = ollama.embed(model: 'all-minilm', input: ['Why is the sky blue?', 'Why is the grass green?'])
class Ollama::Commands::Embed
  include Ollama::DTO

  # The path method returns the API endpoint path for embed requests.
  #
  # @return [ String ] the API endpoint path '/api/embed' for embed requests
  def self.path
    '/api/embed'
  end

  # Sets up a new embed command, with streaming explicitly disabled
  # (embedding operations are non-streaming).
  #
  # @param model [ String ] the name of the model to use for generating embeddings
  # @param input [ String, Array<String> ] the text input(s) to generate embeddings for
  # @param options [ Ollama::Options, nil ] optional configuration parameters for the model
  # @param truncate [ Boolean, nil ] whether to truncate input exceeding context length
  # @param keep_alive [ String, nil ] duration to keep the model loaded in memory
  def initialize(model:, input:, options: nil, truncate: nil, keep_alive: nil)
    @model      = model
    @input      = input
    @options    = options
    @truncate   = truncate
    @keep_alive = keep_alive
    @stream     = false
  end

  # @return [ String ] the name of the model used by the command instance
  attr_reader :model

  # @return [ String, Array<String> ] the text input(s) to generate embeddings for
  attr_reader :input

  # @return [ Ollama::Options, nil ] optional configuration parameters for the model
  attr_reader :options

  # @return [ Boolean, nil ] whether to truncate input exceeding context length
  attr_reader :truncate

  # @return [ String, nil ] duration to keep the model loaded in memory
  attr_reader :keep_alive

  # @return [ FalseClass ] the streaming behavior flag
  #   (always false for embed commands)
  attr_reader :stream

  # Assigns the client that will be used to perform requests and handle
  # responses for this command; typically called internally when the command
  # is executed through a client instance.
  #
  # @attr_writer [ Ollama::Client ] the assigned client instance
  attr_writer :client

  # Executes a POST request to the embed endpoint through the assigned
  # client, processing responses with the given handler (non-streaming).
  #
  # @param handler [ Ollama::Handler ] the handler object responsible for
  #   processing API responses
  #
  # @return [ self ] returns the current instance after initiating the request
  def perform(handler)
    @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
  end
end
# A command class that represents the embeddings API endpoint for Ollama.
#
# This class is used to interact with the Ollama API's embeddings endpoint,
# which generates embeddings (vector representations) for a text prompt using
# a specified model.
#
# @example Generating embeddings for a prompt
#   embeddings = ollama.embeddings(model: 'mxbai-embed-large', prompt: 'The sky is blue because of rayleigh scattering')
class Ollama::Commands::Embeddings
  include Ollama::DTO

  # The path method returns the API endpoint path for embeddings requests.
  #
  # @return [ String ] the API endpoint path '/api/embeddings' for embeddings requests
  def self.path
    '/api/embeddings'
  end

  # Sets up a new embeddings command, with streaming explicitly disabled
  # (embedding operations are non-streaming).
  #
  # @param model [ String ] the name of the model to use for generating embeddings
  # @param prompt [ String ] the text prompt to generate embeddings for
  # @param options [ Ollama::Options, nil ] optional configuration parameters for the model
  # @param keep_alive [ String, nil ] duration to keep the model loaded in memory
  def initialize(model:, prompt:, options: nil, keep_alive: nil)
    @model      = model
    @prompt     = prompt
    @options    = options
    @keep_alive = keep_alive
    @stream     = false
  end

  # @return [ String ] the name of the model used by the command instance
  attr_reader :model

  # @return [ String ] the text prompt to generate embeddings for
  attr_reader :prompt

  # @return [ Ollama::Options, nil ] optional configuration parameters for the model
  attr_reader :options

  # @return [ String, nil ] duration to keep the model loaded in memory
  attr_reader :keep_alive

  # @return [ FalseClass ] the streaming behavior flag
  #   (always false for embeddings commands)
  attr_reader :stream

  # Assigns the client that will be used to perform requests and handle
  # responses for this command; typically called internally when the command
  # is executed through a client instance.
  #
  # @attr_writer [ Ollama::Client ] the assigned client instance
  attr_writer :client

  # Executes a POST request to the embeddings endpoint through the assigned
  # client, processing responses with the given handler (non-streaming).
  #
  # @param handler [ Ollama::Handler ] the handler object responsible for
  #   processing API responses
  #
  # @return [ self ] returns the current instance after initiating the request
  def perform(handler)
    @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
  end
end
# A command class that represents the generate API endpoint for Ollama.
#
# This class is used to interact with the Ollama API's generate endpoint,
# which generates text completions using a specified model.
#
# @example Generating a text completion
#   generate = ollama.generate(model: 'llama3.1', prompt: 'Why is the sky blue?')
#
# @example Generating with streaming enabled
#   generate = ollama.generate(model: 'llama3.1', prompt: 'Why is the sky blue?', stream: true)
class Ollama::Commands::Generate
  include Ollama::DTO

  # The path method returns the API endpoint path for generate requests.
  #
  # @return [ String ] the API endpoint path '/api/generate' for generate requests
  def self.path
    '/api/generate'
  end

  # Sets up a new generate command with the parameters required for text
  # generation; a single image is normalized into an array.
  #
  # @param model [ String ] the name of the model to use for generation
  # @param prompt [ String ] the text prompt to generate completions for
  # @param suffix [ String, nil ] optional suffix to append to the generated text
  # @param images [ Ollama::Image, Array<Ollama::Image>, nil ] optional image(s) to include
  # @param format [ String, nil ] optional format specification for the response
  # @param options [ Ollama::Options, nil ] optional configuration parameters for the model
  # @param system [ String, nil ] optional system message to set context for generation
  # @param template [ String, nil ] optional template to use for formatting the prompt
  # @param context [ Array<Integer>, nil ] optional context vector for continuation
  # @param stream [ Boolean, nil ] whether to stream responses
  # @param raw [ Boolean, nil ] whether to return raw output without formatting
  # @param keep_alive [ String, nil ] duration to keep the model loaded in memory
  # @param think [ Boolean, nil ] whether to enable thinking mode for generation
  def initialize(model:, prompt:, suffix: nil, images: nil, format: nil, options: nil, system: nil, template: nil, context: nil, stream: nil, raw: nil, keep_alive: nil, think: nil)
    @model      = model
    @prompt     = prompt
    @suffix     = suffix
    @images     = (Array(images) if images)
    @format     = format
    @options    = options
    @system     = system
    @template   = template
    @context    = context
    @stream     = stream
    @raw        = raw
    @keep_alive = keep_alive
    @think      = think
  end

  # @return [ String ] the name of the model used for generation
  attr_reader :model

  # @return [ String ] the text prompt to generate completions for
  attr_reader :prompt

  # @return [ String, nil ] optional suffix to append to the generated text
  attr_reader :suffix

  # @return [ Array<Ollama::Image>, nil ] array of image objects, or nil if none provided
  attr_reader :images

  # @return [ String, nil ] optional format specification for the response
  attr_reader :format

  # @return [ Ollama::Options, nil ] optional configuration parameters for the model
  attr_reader :options

  # @return [ String, nil ] optional system message to set context for generation
  attr_reader :system

  # @return [ String, nil ] optional template to use for formatting the prompt
  attr_reader :template

  # @return [ Array<Integer>, nil ] optional context vector for continuation
  attr_reader :context

  # @return [ Boolean, nil ] whether responses will be streamed
  attr_reader :stream

  # @return [ Boolean, nil ] whether to return raw output without formatting
  attr_reader :raw

  # @return [ String, nil ] duration to keep the model loaded in memory
  attr_reader :keep_alive

  # @return [ Boolean, nil ] whether thinking mode is enabled for generation
  attr_reader :think

  # Assigns the client that will be used to perform requests and handle
  # responses for this command; typically called internally when the command
  # is executed through a client instance.
  #
  # @attr_writer [ Ollama::Client ] the assigned client instance
  attr_writer :client

  # Executes a POST request to the generate endpoint through the assigned
  # client, with the command parameters serialized as JSON.
  #
  # @param handler [ Ollama::Handler ] the handler to process responses from the API
  # @return [ void ]
  def perform(handler)
    @client.request(method: :post, path: self.class.path, body: to_json, stream:, handler:)
  end
end
# A command class that represents the ps API endpoint for Ollama.
#
# This class is used to interact with the Ollama API's ps endpoint, which
# retrieves information about running models.
#
# @example Retrieving information about running models
#   ps = ollama.ps
#   ps.models # => array of running model information
class Ollama::Commands::Ps
  # The path method returns the API endpoint path for ps requests.
  #
  # @return [ String ] the API endpoint path '/api/ps' for ps requests
  def self.path
    '/api/ps'
  end

  # Sets up a new ps command with streaming disabled; the ps endpoint takes
  # no parameters, so any keyword argument is rejected.
  #
  # @param parameters [ Hash ] a hash containing initialization parameters
  #   (must be empty for this command)
  #
  # @raise [ ArgumentError ] if any parameters are provided (ps endpoint
  #   does not accept parameters)
  def initialize(**parameters)
    unless parameters.empty?
      raise ArgumentError, "Invalid parameters: #{parameters.keys * ' '}"
    end
    @stream = false
  end

  # @return [ TrueClass, FalseClass ] the streaming behavior flag
  #   (always false for ps commands)
  attr_reader :stream

  # Assigns the client that will be used to perform requests and handle
  # responses for this command; typically called internally when the command
  # is executed through a client instance.
  #
  # @attr_writer [ Ollama::Client ] the assigned client instance
  attr_writer :client

  # Executes a GET request to the ps endpoint through the assigned client,
  # processing responses with the given handler (non-streaming).
  #
  # @param handler [ Ollama::Handler ] the handler object responsible for
  #   processing API responses
  #
  # @return [ self ] returns the current instance after initiating the request
  def perform(handler)
    @client.request(method: :get, path: self.class.path, stream:, handler:)
  end
end