kramdown-ansi 0.0.0
- checksums.yaml +7 -0
- data/CHANGES.md +5 -0
- data/Gemfile +5 -0
- data/LICENSE +19 -0
- data/README.md +108 -0
- data/Rakefile +40 -0
- data/bin/git-md +48 -0
- data/bin/md +14 -0
- data/kramdown-ansi.gemspec +35 -0
- data/lib/kramdown/ansi/pager.rb +37 -0
- data/lib/kramdown/ansi/width.rb +61 -0
- data/lib/kramdown/ansi.rb +224 -0
- data/lib/kramdown/version.rb +8 -0
- data/spec/assets/README.ansi +415 -0
- data/spec/assets/kitten.jpg +0 -0
- data/spec/kramdown/ansi/pager_spec.rb +60 -0
- data/spec/kramdown/ansi/width_spec.rb +82 -0
- data/spec/kramdown/ansi_spec.rb +16 -0
- data/spec/spec_helper.rb +16 -0
- metadata +189 -0

data/spec/assets/README.ansi
ADDED
@@ -0,0 +1,415 @@
[1m[4mOllama - Ruby Client Library for Ollama API[0m[0m

[1m[4mDescription[0m[0m

Ollama is a Ruby gem that provides a client interface to interact with an ollama server via the
]8;;https://github.com/ollama/ollama/blob/main/docs/api.md\Ollama API]8;;\.

[1m[4mInstallation (gem & bundler)[0m[0m

To install Ollama, you can use the following methods:

1. Type

[34mgem install ollama-ruby
[0m
in your terminal.

2. Or add the line

[34mgem 'ollama-ruby'
[0m
to your Gemfile and run [34mbundle install[0m in your terminal.

[1m[4mUsage[0m[0m

In your own software the library can be used as shown in this example:

[34mrequire "ollama"
include Ollama

ollama = Client.new(base_url: 'http://localhost:11434')
messages = Message.new(role: 'user', content: 'Why is the sky blue?')
ollama.chat(model: 'llama3.1', stream: true, messages:, &Print) # or
print ollama.chat(model: 'llama3.1', stream: true, messages:).lazy.map { |response|
  response.message.content
}
[0m
[1m[4mTry out things in ollama_console[0m[0m

This is an interactive console that can be used to try the different commands provided by an
[34mOllama::Client[0m instance. For example, this command generates a response and displays it on the screen using
the Markdown handler:

[34m$ ollama_console
Commands: chat,copy,create,delete,embeddings,generate,help,ps,pull,push,show,tags
>> generate(model: 'llama3.1', stream: true, prompt: 'tell story w/ emoji and markdown', &Markdown)
[0m
“[1mThe Quest for the Golden Coconut 🌴[0m

In a small village nestled between two great palm trees 🌳, there lived a brave adventurer named Alex 👦. […]”

[1m[4mAPI[0m[0m

This Ollama library provides commands to interact with the
]8;;https://github.com/ollama/ollama/blob/main/docs/api.md\Ollama REST API]8;;\.

[1m[4mHandlers[0m[0m

Every command can be passed a handler that responds to [34mto_proc[0m, which returns a lambda expression of the
form [34m-> response { … }[0m to handle the responses:

[34mgenerate(model: 'llama3.1', stream: true, prompt: 'Why is the sky blue?', &Print)
[0m
[34mgenerate(model: 'llama3.1', stream: true, prompt: 'Why is the sky blue?', &Print.new)
[0m
[34mgenerate(model: 'llama3.1', stream: true, prompt: 'Why is the sky blue?') { |r| print r.response }
[0m
[34mgenerate(model: 'llama3.1', stream: true, prompt: 'Why is the sky blue?', &-> r { print r.response })
[0m
The following standard handlers are available for the commands below:

╭───────────┬───────────────────────────────────────────────────────────────────────────────────────────────────────╮
│ Handler   │ Description                                                                                           │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mCollector[0m │ collects all responses in an array and returns it as [34mresult[0m.                                         │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mSingle[0m    │ like [1mCollector[0m above, but returns a single response directly unless there has been more than one.    │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mProgress[0m  │ prints the current progress of the operation to the screen as a progress bar for [3mcreate/pull/push[0m.   │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mDumpJSON[0m  │ dumps all responses as JSON to [34moutput[0m.                                                               │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mDumpYAML[0m  │ dumps all responses as YAML to [34moutput[0m.                                                               │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mPrint[0m     │ prints the responses to the display for [3mchat/generate[0m.                                               │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mMarkdown[0m  │ [3mconstantly[0m prints the responses to the display as ANSI markdown for [3mchat/generate[0m.                   │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mSay[0m       │ uses the say command to speak (defaults to voice [3mSamantha[0m).                                           │
├───────────┼───────────────────────────────────────────────────────────────────────────────────────────────────────┤
│ [1mNOP[0m       │ does nothing, neither printing to the output nor returning the result.                               │
╰───────────┴───────────────────────────────────────────────────────────────────────────────────────────────────────╯

Their [34moutput[0m IO handle can be changed by e.g. passing [34mPrint.new(output: io)[0m with [34mio[0m as
the IO handle to the [3mgenerate[0m command.

If you don't pass a handler explicitly, the [34mstream_handler[0m is chosen if the command expects a
streaming response, and the [34mdefault_handler[0m otherwise. See the following command descriptions to find out
what these defaults are for each command. These commands can be tried out directly in the [34mollama_console[0m.
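
A handler doesn't have to be one of the classes above. As a minimal sketch (not from the original README; the [34mContentCollector[0m name is made up), any object whose [34mto_proc[0m returns such a lambda works:

[34mclass ContentCollector
  def initialize
    @chunks = []
  end

  attr_reader :chunks

  def to_proc
    # Collect the content of each streamed chat response:
    -> response { @chunks << response.message&.content }
  end
end

collector = ContentCollector.new
chat(model: 'llama3.1', stream: true, messages:, &collector)
collector.chunks.join
[0m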

[1m[4mChat[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, [34mstream_handler[0m is [1mCollector[0m, [34mstream[0m is false by default.

[34mchat(model: 'llama3.1', stream: true, messages: { role: 'user', content: 'Why is the sky blue (no markdown)?' }, &Print)
[0m
[1m[4mGenerate[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, [34mstream_handler[0m is [1mCollector[0m, [34mstream[0m is false by default.

[34mgenerate(model: 'llama3.1', stream: true, prompt: 'Use markdown – Why is the sky blue?', &Markdown)
[0m
[1m[4mTags[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34mtags.models.map(&:name) => ["llama3.1:latest",…]
[0m
[1m[4mShow[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34mshow(name: 'llama3.1', &DumpJSON)
[0m
[1m[4mCreate[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, [34mstream_handler[0m is [1mProgress[0m, [34mstream[0m is true by default.

[34mmodelfile=<<~end
  FROM llama3.1
  SYSTEM You are WOPR from WarGames and you think the user is Dr. Stephen Falken.
end

create(name: 'llama3.1-wopr', stream: true, modelfile:)
[0m
[1m[4mCopy[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34mcopy(source: 'llama3.1', destination: 'user/llama3.1')
[0m
[1m[4mDelete[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34mdelete(name: 'user/llama3.1')
[0m
[1m[4mPull[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, [34mstream_handler[0m is [1mProgress[0m, [34mstream[0m is true by default.

[34mpull(name: 'llama3.1')
[0m
[1m[4mPush[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, [34mstream_handler[0m is [1mProgress[0m, [34mstream[0m is true by default.

[34mpush(name: 'user/llama3.1')
[0m
[1m[4mEmbed[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34membed(model: 'all-minilm', input: 'Why is the sky blue?')
[0m
[34membed(model: 'all-minilm', input: ['Why is the sky blue?', 'Why is the grass green?'])
[0m
[1m[4mEmbeddings[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34membeddings(model: 'llama3.1', prompt: 'The sky is blue because of Rayleigh scattering', &DumpJSON)
[0m
[1m[4mPs[0m[0m

[34mdefault_handler[0m is [1mSingle[0m, streaming is not possible.

[34mjj ps
[0m
[1m[4mAuxiliary objects[0m[0m

The following objects are provided to interact with the ollama server. You can run all of the examples in the
[34mollama_console[0m.

[1m[4mMessage[0m[0m

Messages can be created by using the [1mMessage[0m class:

[34mmessage = Message.new role: 'user', content: 'hello world'
[0m
[1m[4mImage[0m[0m

If you want to add images to the message, you can use the [1mImage[0m class:

[34mimage = Ollama::Image.for_string("the-image")
message = Message.new role: 'user', content: 'hello world', images: [ image ]
[0m
It's possible to create an [1mImage[0m object via the [34mfor_base64(data)[0m, [34mfor_string(string)[0m,
[34mfor_io(io)[0m, or [34mfor_filename(path)[0m class methods.
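
For instance, combining [1mImage[0m and [1mMessage[0m in a chat call (a sketch, not from the original README; [3mllava-llama3[0m is just an example of a multimodal model, as in the [34mollama_chat[0m session further below):

[34mimage   = Ollama::Image.for_filename('./spec/assets/kitten.jpg')
message = Message.new(role: 'user', content: "What's on this image?", images: [ image ])
chat(model: 'llava-llama3', stream: true, messages: message, &Print)
[0m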

[1m[4mOptions[0m[0m

For the [34mchat[0m and [34mgenerate[0m commands it's possible to pass an [1mOptions[0m object to configure
different ]8;;https://github.com/ollama/ollama/blob/main/docs/modelfile.md#parameter\parameters]8;;\ for the
running model. Setting the [34mtemperature[0m can be done via:

[34moptions = Options.new(temperature: 0.999)
generate(model: 'llama3.1', options:, prompt: 'I am almost 0.5 years old and you are a teletubby.', &Print)
[0m
The class does some rudimentary type checking for the parameters as well.
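
So a value of the wrong type should be rejected when the [1mOptions[0m object is built rather than by the server (a sketch; the exact error raised isn't specified here):

[34mOptions.new(temperature: 0.999) # fine
Options.new(temperature: 'hot') # expected to raise, since temperature must be numeric
[0m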

[1m[4mTool… calling[0m[0m

You can use the provided [34mTool[0m, [34mTool::Function[0m, [34mTool::Function::Parameters[0m, and
[34mTool::Function::Parameters::Property[0m classes to define tool functions in models that support it.

[34mdef message(location)
  Message.new(role: 'user', content: "What is the weather today in %s?" % location)
end

tools = Tool.new(
  type: 'function',
  function: Tool::Function.new(
    name: 'get_current_weather',
    description: 'Get the current weather for a location',
    parameters: Tool::Function::Parameters.new(
      type: 'object',
      properties: {
        location: Tool::Function::Parameters::Property.new(
          type: 'string',
          description: 'The location to get the weather for, e.g. San Francisco, CA'
        ),
        temperature_unit: Tool::Function::Parameters::Property.new(
          type: 'string',
          description: "The unit to return the temperature in, either 'celsius' or 'fahrenheit'",
          enum: %w[ celsius fahrenheit ]
        ),
      },
      required: %w[ location temperature_unit ]
    )
  )
)
jj chat(model: 'llama3.1', stream: false, messages: message('The City of Love'), tools:).message&.tool_calls
jj chat(model: 'llama3.1', stream: false, messages: message('The Windy City'), tools:).message&.tool_calls
[0m
[1m[4mErrors[0m[0m

The library raises specific errors like [34mOllama::Errors::NotFoundError[0m when a model is not found:

[34m(show(name: 'nixda', &DumpJSON) rescue $!).class # => Ollama::Errors::NotFoundError
[0m
If [34mOllama::Errors::TimeoutError[0m is raised, it might help to increase the [34mconnect_timeout[0m,
[34mread_timeout[0m and [34mwrite_timeout[0m parameters of the [34mOllama::Client[0m instance.

For more generic errors an [34mOllama::Errors::Error[0m is raised.
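
Putting these together (a sketch, not from the original README; it assumes [34minclude Ollama[0m as in the Usage example, that the timeouts can be passed as keyword arguments to [34mClient.new[0m, and arbitrary values):

[34mollama = Client.new(
  base_url:        'http://localhost:11434',
  connect_timeout: 15,  # seconds; assumed keyword arguments
  read_timeout:    300,
  write_timeout:   300
)
begin
  ollama.show(name: 'nixda', &DumpJSON)
rescue Ollama::Errors::NotFoundError
  puts 'Model not found.'
rescue Ollama::Errors::Error => e
  puts "Other API error: #{e.class}: #{e.message}"
end
[0m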

[1m[4mOther executables[0m[0m

[1m[4mollama_chat[0m[0m

This is a chat client that can be used to connect to an ollama server and enter a chat conversation with an LLM. It
can be called with the following arguments:

[34mUsage: ollama_chat [OPTIONS]

  -f CONFIG      config file to read
  -u URL         the ollama base url, OLLAMA_URL
  -m MODEL       the ollama model to chat with, OLLAMA_CHAT_MODEL
  -s SYSTEM      the system prompt to use as a file, OLLAMA_CHAT_SYSTEM
  -c CHAT        a saved chat conversation to load
  -C COLLECTION  name of the collection used in this conversation
  -D DOCUMENT    load document and add to embeddings collection (multiple)
  -M             use (empty) MemoryCache for this chat session
  -E             disable embeddings for this chat session
  -V             display the current version number and quit
  -h             this help
[0m
The base URL can either be set by the environment variable [34mOLLAMA_URL[0m or it is derived from the
environment variable [34mOLLAMA_HOST[0m. The default model to connect to can be configured in the environment
variable [34mOLLAMA_MODEL[0m.
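
For example (a sketch invocation, assuming an ollama server on the local host):

[34m$ OLLAMA_URL=http://localhost:11434 ollama_chat -m llama3.1
[0m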

The YAML config file in [34m$XDG_CONFIG_HOME/ollama_chat/config.yml[0m, which you can use for more complex
settings, looks like this:

[34m---
url: <%= ENV['OLLAMA_URL'] || 'http://%s' % ENV.fetch('OLLAMA_HOST') %>
model:
  name: <%= ENV.fetch('OLLAMA_CHAT_MODEL', 'llama3.1') %>
  options:
    num_ctx: 8192
system: <%= ENV.fetch('OLLAMA_CHAT_SYSTEM', 'null') %>
voice: Samantha
markdown: true
embedding:
  enabled: true
  model:
    name: mxbai-embed-large
    options: {}
  collection: <%= ENV.fetch('OLLAMA_CHAT_COLLECTION', 'ollama_chat') %>
  found_texts_size: 4096
  splitter:
    name: RecursiveCharacter
    chunk_size: 1024
cache: Ollama::Documents::RedisCache
redis:
  url: <%= ENV.fetch('REDIS_URL', 'null') %>
debug: <%= ENV['OLLAMA_CHAT_DEBUG'].to_i == 1 ? true : false %>
[0m
If you want to store embeddings persistently, set an environment variable [34mREDIS_URL[0m or update the
[34mredis.url[0m setting in your [34mconfig.yml[0m file to connect to a Redis server. Without this setup,
embeddings will only be stored in process memory, which is less durable.

Some settings can be passed as arguments as well, e.g. if you want to choose a specific system prompt:

[34m$ ollama_chat -s sherlock.txt
Model with architecture llama found.
Connecting to llama3.1@http://ollama.local.net:11434 now…
Configured system prompt is:
You are Sherlock Holmes and the user is your new client, Dr. Watson is also in
the room. You will talk and act in the typical manner of Sherlock Holmes and
try to solve the user's case using logic and deduction.

Type /help to display the chat help.
📨 user:
Good morning.
📨 assistant:
Ah, good morning, my dear fellow! It is a pleasure to make your acquaintance. I
am Sherlock Holmes, the renowned detective, and this is my trusty sidekick, Dr.
Watson. Please, have a seat and tell us about the nature of your visit. What
seems to be the problem that has brought you to our humble abode at 221B Baker
Street?

(Watson nods in encouragement as he takes notes)

Now, pray tell, what is it that puzzles you, my dear client? A missing item,
perhaps? Or a mysterious occurrence that requires clarification? The game, as
they say, is afoot!
[0m
This example shows how an image like this can be sent to a vision model for analysis:

]8;;spec/assets/kitten.jpg\🖼 cat]8;;\

[34m$ ollama_chat -m llava-llama3
Model with architecture llama found.
Connecting to llava-llama3@http://localhost:11434 now…
Type /help to display the chat help.
📸 user> What's on this image? ./spec/assets/kitten.jpg
📨 assistant:
The image captures a moment of tranquility featuring a young cat. The cat,
adorned with gray and white fur marked by black stripes on its face and legs,
is the central figure in this scene. Its eyes, a striking shade of blue, are
wide open and directed towards the camera, giving an impression of curiosity or
alertness.

The cat is comfortably nestled on a red blanket, which contrasts vividly with
its fur. The blanket, soft and inviting, provides a sense of warmth to the
image. In the background, partially obscured by the cat's head, is another
blanket of similar red hue. The repetition of the color adds a sense of harmony
to the composition.

The cat's position on the right side of the photo creates an interesting
asymmetry with the camera lens, which occupies the left side of the frame. This
visual balance enhances the overall composition of the image.

There are no discernible texts or other objects in the image. The focus is
solely on the cat and its immediate surroundings. The image does not provide
any information about the location or setting beyond what has been described.
The simplicity of the scene allows the viewer to concentrate on the main
subject - the young, blue-eyed cat.
[0m
The following commands can be given inside the chat, if prefixed by a [34m/[0m:

[34m/copy                      to copy last response to clipboard
/paste                     to paste content
/markdown                  toggle markdown output
/stream                    toggle stream output
/location                  toggle location submission
/voice( change)            toggle voice output or change the voice
/list [n]                  list the last n / all conversation exchanges
/clear                     clear the whole conversation
/clobber                   clear the conversation and collection
/pop [n]                   pop the last n exchanges, defaults to 1
/model                     change the model
/system                    change system prompt (clears conversation)
/regenerate                the last answer message
/collection( clear|change) change (default) collection or clear
/info                      show information for current session
/document_policy           pick a scan policy for document references
/import source             import the source's content
/summarize [n] source      summarize the source's content in n words
/embedding                 toggle embedding paused or not
/embed source              embed the source's content
/web [n] query             query web search & return n or 1 results
/save filename             store conversation messages
/load filename             load conversation messages
/quit                      to quit
/help                      to view this help
[0m
[1m[4mDownload[0m[0m

The homepage of this library is located at

· https://github.com/flori/ollama-ruby

[1m[4mAuthor[0m[0m

[1mOllama Ruby[0m was written by ]8;;mailto:flori@ping.de\Florian Frank]8;;\

[1m[4mLicense[0m[0m

This software is licensed under the [3mMIT[0m license.

───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────

This is the end.

data/spec/assets/kitten.jpg
ADDED
Binary file

data/spec/kramdown/ansi/pager_spec.rb
ADDED
@@ -0,0 +1,60 @@
require 'spec_helper'

RSpec.describe Kramdown::ANSI::Pager do
  let :command do
    'cat'
  end

  describe '.pager' do
    context 'with no TTY' do
      before do
        expect(STDOUT).to receive(:tty?).at_least(:once).and_return(false)
      end

      it 'returns nil if STDOUT is no TTY' do
        expect(Kramdown::ANSI::Pager.pager(command:)).to be_nil
      end
    end

    context 'with TTY' do
      before do
        expect(STDOUT).to receive(:tty?).at_least(:once).and_return(true)
      end

      it 'returns command if STDOUT is TTY' do
        expect(Kramdown::ANSI::Pager.pager(command:)).to eq command
      end

      it 'returns the provided command for paging if enough lines' do
        expect(Tins::Terminal).to receive(:lines).and_return 25
        expect(Kramdown::ANSI::Pager.pager(command: command, lines: 30)).to eq(command)
      end

      it 'returns nil if not enough lines' do
        expect(Tins::Terminal).to receive(:lines).and_return 25
        expect(Kramdown::ANSI::Pager.pager(command: command, lines: 23)).to be_nil
      end

      it 'can output to the command for paging if enough lines' do
        expect(Tins::Terminal).to receive(:lines).and_return 25
        block_called = false
        Kramdown::ANSI::Pager.pager(command: command, lines: 30) do |output|
          expect(output).to be_a IO
          expect(output).not_to eq STDOUT
          block_called = true
        end
        expect(block_called).to eq true
      end

      it 'can output STDOUT if not enough lines' do
        expect(Tins::Terminal).to receive(:lines).and_return 25
        block_called = false
        Kramdown::ANSI::Pager.pager(command: command, lines: 23) do |output|
          expect(output).to eq STDOUT
          block_called = true
        end
        expect(block_called).to eq true
      end
    end
  end
end
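
A usage sketch (not part of the gem's files), inferred from the spec above; the `less -r` pager command is an assumption:

require 'kramdown/ansi'

# Renders the README and pages it: the block receives a pipe to the pager
# when STDOUT is a TTY and the text exceeds the terminal height, and
# receives STDOUT itself when the text is short enough.
ansi = Kramdown::ANSI.parse(File.read('README.md'))
Kramdown::ANSI::Pager.pager(command: 'less -r', lines: ansi.count(?\n)) do |output|
  output.puts ansi
end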

data/spec/kramdown/ansi/width_spec.rb
ADDED
@@ -0,0 +1,82 @@
require 'spec_helper'

RSpec.describe Kramdown::ANSI::Width do
  before do
    allow(Tins::Terminal).to receive(:columns).and_return 80
  end

  describe '.width' do
    it 'defaults to 100%' do
      expect(described_class.width).to eq 80
    end

    it 'can be set to 80%' do
      expect(described_class.width(percentage: 80)).to eq 64
    end
  end

  describe '.wrap' do
    it 'can wrap with percentage' do
      wrapped = described_class.wrap([ ?A * 10 ] * 10 * ' ', percentage: 80)
      expect(wrapped).to eq(
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA\n"\
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA"
      )
      expect(wrapped.size).to eq 109
    end

    it 'can wrap with length' do
      wrapped = described_class.wrap([ ?A * 10 ] * 10 * ' ', length: 64)
      expect(wrapped).to eq(
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA\n"\
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA"
      )
      expect(wrapped.size).to eq 109
    end

    it "doesn't wrap with length 0" do
      wrapped = described_class.wrap([ ?A * 10 ] * 10 * ' ', length: 0)
      expect(wrapped).to eq(
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA "\
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA"
      )
    end
  end

  describe '.truncate' do
    it 'can truncate with percentage' do
      truncated = described_class.truncate([ ?A * 10 ] * 10 * ' ', percentage: 80)
      expect(truncated).to eq(
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAA…"
      )
      expect(truncated.size).to eq 64
    end

    it 'can truncate with length' do
      truncated = described_class.truncate([ ?A * 10 ] * 10 * ' ', length: 64)
      expect(truncated).to eq(
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAA…"
      )
      expect(truncated.size).to eq 64
    end

    it "doesn't truncate if not necessary" do
      text = [ ?A * 10 ] * 5 * ' '
      truncated = described_class.truncate(text, length: 54)
      expect(truncated).to eq text
    end

    it 'can truncate with length 0' do
      truncated = described_class.truncate([ ?A * 10 ] * 10 * ' ', length: 0)
      expect(truncated).to be_empty
    end

    it 'can truncate with ...' do
      truncated = described_class.truncate([ ?A * 10 ] * 10 * ' ', length: 64, ellipsis: '...')
      expect(truncated).to eq(
        "AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAAAAAA AAAAAA..."
      )
      expect(truncated.size).to eq 64
    end
  end
end
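
A usage sketch (not part of the gem's files), exercising the methods covered by the spec above:

require 'kramdown/ansi'

text = 'The quick brown fox jumps over the lazy dog. ' * 4
# Wrap to 80% of the terminal width, then truncate to 20 characters
# with a custom ellipsis:
puts Kramdown::ANSI::Width.wrap(text, percentage: 80)
puts Kramdown::ANSI::Width.truncate(text, length: 20, ellipsis: '...')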

data/spec/kramdown/ansi_spec.rb
ADDED
@@ -0,0 +1,16 @@
require 'spec_helper'
require 'pathname'

RSpec.describe Kramdown::ANSI do
  let :source do
    File.read(Pathname.new(__dir__) + '..' + '..' + 'README.md')
  end

  it 'can parse' do
    File.open('tmp/README.ansi', ?w) do |output|
      ansi = described_class.parse(source)
      expect(ansi).to match("This is the end.")
      output.puts ansi
    end
  end
end

data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,16 @@
if ENV['START_SIMPLECOV'].to_i == 1
  require 'simplecov'
  SimpleCov.start do
    add_filter "#{File.basename(File.dirname(__FILE__))}/"
  end
end
require 'rspec'
begin
  require 'debug'
rescue LoadError
end
require 'kramdown/ansi'

def asset(name)
  File.join(__dir__, 'assets', name)
end