pikuri 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. checksums.yaml +7 -0
  2. data/CHANGELOG.md +62 -0
  3. data/GETTING_STARTED.md +223 -0
  4. data/LICENSE +21 -0
  5. data/README.md +193 -0
  6. data/lib/pikuri/agent/chat_transport.rb +41 -0
  7. data/lib/pikuri/agent/context_window_detector.rb +101 -0
  8. data/lib/pikuri/agent/listener/in_memory_message_list.rb +33 -0
  9. data/lib/pikuri/agent/listener/message_listener.rb +93 -0
  10. data/lib/pikuri/agent/listener/step_limit.rb +97 -0
  11. data/lib/pikuri/agent/listener/terminal.rb +137 -0
  12. data/lib/pikuri/agent/listener/token_log.rb +166 -0
  13. data/lib/pikuri/agent/listener_list.rb +113 -0
  14. data/lib/pikuri/agent/message.rb +61 -0
  15. data/lib/pikuri/agent/synthesizer.rb +120 -0
  16. data/lib/pikuri/agent/tokens.rb +56 -0
  17. data/lib/pikuri/agent.rb +286 -0
  18. data/lib/pikuri/subprocess.rb +166 -0
  19. data/lib/pikuri/tool/bash.rb +272 -0
  20. data/lib/pikuri/tool/calculator.rb +82 -0
  21. data/lib/pikuri/tool/confirmer.rb +96 -0
  22. data/lib/pikuri/tool/edit.rb +196 -0
  23. data/lib/pikuri/tool/fetch.rb +167 -0
  24. data/lib/pikuri/tool/glob.rb +310 -0
  25. data/lib/pikuri/tool/grep.rb +338 -0
  26. data/lib/pikuri/tool/parameters.rb +314 -0
  27. data/lib/pikuri/tool/read.rb +254 -0
  28. data/lib/pikuri/tool/scraper/fetch_error.rb +16 -0
  29. data/lib/pikuri/tool/scraper/html.rb +285 -0
  30. data/lib/pikuri/tool/scraper/pdf.rb +54 -0
  31. data/lib/pikuri/tool/scraper/simple.rb +177 -0
  32. data/lib/pikuri/tool/search/brave.rb +184 -0
  33. data/lib/pikuri/tool/search/duckduckgo.rb +196 -0
  34. data/lib/pikuri/tool/search/engines.rb +154 -0
  35. data/lib/pikuri/tool/search/exa.rb +217 -0
  36. data/lib/pikuri/tool/search/rate_limiter.rb +92 -0
  37. data/lib/pikuri/tool/search/result.rb +29 -0
  38. data/lib/pikuri/tool/skill.rb +80 -0
  39. data/lib/pikuri/tool/skill_catalog.rb +376 -0
  40. data/lib/pikuri/tool/sub_agent.rb +102 -0
  41. data/lib/pikuri/tool/web_scrape.rb +117 -0
  42. data/lib/pikuri/tool/web_search.rb +38 -0
  43. data/lib/pikuri/tool/workspace.rb +150 -0
  44. data/lib/pikuri/tool/write.rb +170 -0
  45. data/lib/pikuri/tool.rb +118 -0
  46. data/lib/pikuri/url_cache.rb +106 -0
  47. data/lib/pikuri/version.rb +10 -0
  48. data/lib/pikuri.rb +165 -0
  49. data/prompts/coding-system-prompt.txt +28 -0
  50. data/prompts/pikuri-chat.txt +15 -0
  51. metadata +259 -0
@@ -0,0 +1,167 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Pikuri
4
+ class Tool
5
+ # Truncation policy and Tool spec for the +fetch+ tool. The HTTP work
6
+ # lives in {Tool::Scraper::Simple.fetch}; this module is a thin
7
+ # wrapper that accepts only textual content-types, applies a character
8
+ # cap so the LLM doesn't drown in long-form bodies, and exposes the
9
+ # result to the agent loop in OpenAI tool-call shape.
10
+ #
11
+ # Sister of {Tool::WebScrape}, but without HTML→Markdown or PDF→text
12
+ # extraction: bodies are returned verbatim. Useful for raw textual
13
+ # data — JSON APIs, CSV files, +robots.txt+, sitemaps, source files —
14
+ # where any rendering pass would corrupt the payload.
15
+ module Fetch
16
+ # @return [Integer] default character cap on the body returned by
17
+ # {.fetch}. Smaller than {Tool::WebScrape::DEFAULT_MAX_CHARS}
18
+ # because fetch's content profile is bimodal — most JSON/XML/CSV
19
+ # responses are tiny, and the long-tail (large data dumps) is
20
+ # better re-requested deliberately than padded into every default.
21
+ DEFAULT_MAX_CHARS = 5_000
22
+
23
+ # @return [Integer] hard ceiling on the +max_chars+ argument to
24
+ # {.fetch}. Matches {Tool::WebScrape::MAX_MAX_CHARS}.
25
+ MAX_MAX_CHARS = 100_000
26
+
27
+ # Application content-types that are textual in practice and so
28
+ # safe to return verbatim to the LLM, despite their +application/+
29
+ # prefix making them fail the +text/*+ check. Anything outside
30
+ # +text/*+ and this allowlist is refused.
31
+ # @return [Array<String>]
32
+ TEXTUAL_APPLICATION_TYPES = %w[
33
+ application/json
34
+ application/xml
35
+ application/javascript
36
+ application/xhtml+xml
37
+ application/rss+xml
38
+ application/atom+xml
39
+ ].freeze
40
+
41
+ # On-disk cache used by {.fetch} to memoize downloads. Defined as a
42
+ # method so specs can swap it for an isolated cache or
43
+ # {UrlCache::NULL} without touching the shared instance. Lives in
44
+ # its own subdir under {UrlCache::ROOT_DIR} so a +fetch+ on a URL
45
+ # and a +web_scrape+ on the same URL cannot collide on the same
46
+ # cache file (one returns the raw body, the other returns extracted
47
+ # Markdown).
48
+ #
49
+ # @return [UrlCache, #fetch]
50
+ CACHE = UrlCache.new(ttl: UrlCache::DEFAULT_TTL, dir: "#{UrlCache::ROOT_DIR}/fetch")
51
+ def self.cache
52
+ CACHE
53
+ end
54
+
55
+ # Download +url+ via {Tool::Scraper::Simple.fetch} and return the
56
+ # response body verbatim, provided the content-type is one we deem
57
+ # textual (any +text/*+, plus the formats listed in
58
+ # {TEXTUAL_APPLICATION_TYPES}). Anything else — PDFs, images, other
59
+ # binaries — produces an +"Error: ..."+ string in the calculator-
60
+ # style convention so the agent loop feeds the failure back to the
61
+ # model as the next observation.
62
+ #
63
+ # The body is cached on disk via {.cache}, keyed by URL, so repeat
64
+ # fetches within the cache TTL skip the network. +max_chars+ is not
65
+ # part of the cache key — different values for the same URL share
66
+ # one entry, and truncation runs after the cache lookup. The cache
67
+ # is only populated on success: {Scraper::FetchError} (HTTP non-2xx,
68
+ # network failure, redirect-loop exhaustion, refused content-type)
69
+ # is caught outside the +cache.fetch+ block, so failure strings are
70
+ # never persisted and a retry on the next call hits the network
71
+ # again. Other exceptions (parser bugs in our own code) bubble up
72
+ # unchanged.
73
+ #
74
+ # @param url [String] absolute HTTP(S) URL to download
75
+ # @param max_chars [Integer] character cap on the returned body.
76
+ # Clamped to +[1, {MAX_MAX_CHARS}]+; defaults to
77
+ # {DEFAULT_MAX_CHARS}. When the body exceeds the cap, output is
78
+ # cut and a marker noting the original length is appended.
79
+ # @return [String] response body, possibly truncated, or
80
+ # +"Error: ..."+ on a recoverable failure
81
+ def self.fetch(url, max_chars: DEFAULT_MAX_CHARS)
82
+ max_chars = max_chars.clamp(1, MAX_MAX_CHARS)
83
+ body = cache.fetch(url) { download(url) }
84
+ truncate(body, max_chars)
85
+ rescue Scraper::FetchError => e
86
+ "Error: #{e.message}"
87
+ end
88
+
89
+ # GET +url+ and verify the response's content-type is textual.
90
+ # Caller is responsible for caching and truncation; this method
91
+ # always hits the network.
92
+ #
93
+ # @param url [String]
94
+ # @return [String] response body
95
+ # @raise [Scraper::FetchError] on HTTP non-2xx, network failure,
96
+ # redirect-loop exhaustion, missing +Location+ on a 3xx, or a
97
+ # non-textual content-type
98
+ def self.download(url)
99
+ fetched = Scraper::Simple.fetch(url)
100
+ return fetched.body if textual?(fetched.content_type)
101
+
102
+ raise Scraper::FetchError,
103
+ "refused to fetch #{url}: content-type #{fetched.content_type.inspect} " \
104
+ 'is not textual (use web_scrape for PDFs or rendered pages)'
105
+ end
106
+
107
+ # @param content_type [String] normalized content-type (no +charset+
108
+ # parameter, lowercased) as produced by {Scraper::Simple.fetch}
109
+ # @return [Boolean] true when the content-type is +text/*+ or one
110
+ # of {TEXTUAL_APPLICATION_TYPES}
111
+ def self.textual?(content_type)
112
+ content_type.start_with?('text/') ||
113
+ TEXTUAL_APPLICATION_TYPES.include?(content_type)
114
+ end
115
+
116
+ # Cut +body+ to at most +max_chars+ characters, appending a marker
117
+ # describing the original length when truncation actually happens.
118
+ # Returns +body+ unchanged if it already fits. Same shape as
119
+ # {Tool::WebScrape.truncate} so the LLM sees a consistent
120
+ # truncation marker across both tools.
121
+ #
122
+ # @param body [String] full response body
123
+ # @param max_chars [Integer] character cap; assumed already clamped
124
+ # @return [String]
125
+ def self.truncate(body, max_chars)
126
+ return body if body.length <= max_chars
127
+
128
+ "#{body[0, max_chars]}\n\n" \
129
+ "... [truncated at #{max_chars} of #{body.length} chars; " \
130
+ 'call again with a larger `max_chars` to see more]'
131
+ end
132
+ end
133
+
134
+ # Verbatim URL download tool. Thin wrapper over {Tool::Fetch.fetch}
135
+ # that exposes it to the agent loop in OpenAI tool-call shape. Use for
136
+ # raw textual payloads (JSON APIs, CSV files, +robots.txt+, source
137
+ # files); use {Tool::WEB_SCRAPE} for rendered web pages or PDFs where
138
+ # readability extraction makes the result usable.
139
+ #
140
+ # @return [Tool]
141
+ FETCH = new(
142
+ name: 'fetch',
143
+ description: <<~DESC,
144
+ Downloads the given URL and returns its body verbatim.
145
+
146
+ Usage:
147
+ - Use for raw textual payloads: JSON APIs, CSV files, robots.txt, sitemaps, source files — anywhere a rendering pass would corrupt the data.
148
+ - For rendered HTML pages or PDFs, use web_scrape — it extracts readable content; fetch returns the raw HTML/PDF bytes unchanged.
149
+ - Accepts text/* and common textual application/* types (JSON, XML, JS, XHTML, RSS, Atom). Refuses PDFs, images, and other binaries.
150
+ DESC
151
+ parameters: Parameters.build { |p|
152
+ p.required_string :url,
153
+ 'Absolute URL to download, including the scheme, ' \
154
+ 'e.g. "https://example.com/data.json".'
155
+ p.optional_integer :max_chars,
156
+ 'Maximum number of characters of the body to ' \
157
+ 'return. Defaults to 5000; hard-capped at ' \
158
+ '100000. When the body is longer than this, ' \
159
+ 'output is cut and a marker reports the full ' \
160
+ 'length.'
161
+ },
162
+ execute: ->(url:, max_chars: Fetch::DEFAULT_MAX_CHARS) {
163
+ Fetch.fetch(url, max_chars: max_chars)
164
+ }
165
+ )
166
+ end
167
+ end
@@ -0,0 +1,310 @@
1
+ # frozen_string_literal: true
2
+
3
+ module Pikuri
4
+ class Tool
5
+ # The +glob+ tool — list files matching a glob pattern via
6
+ # +rg --files+, sorted by modification time (newest first).
7
+ # Instantiating +Tool::Glob.new(workspace: ws)+ produces a tool
8
+ # whose {Tool#to_ruby_llm_tool} wiring is identical to any bundled
9
+ # tool's. Same shape as {Tool::Grep} (workspace captured by the
10
+ # +execute+ closure, no confirmer — read-only).
11
+ #
12
+ # == Why a separate tool from Grep
13
+ #
14
+ # The unique capability is *mtime-descending sort* — "what's been
15
+ # touched recently" is a common navigation move and Grep can't
16
+ # express it. The rest (filter by name, default to listing all
17
+ # matching files) is theoretically reachable through Grep with
18
+ # +pattern="."+, but Glob avoids that hack and keeps Read / Grep /
19
+ # Glob as three clean roles: read one file, search content, list
20
+ # files by name.
21
+ #
22
+ # == ripgrep dependency
23
+ #
24
+ # Hard dependency: {.check_binaries!} runs in +initialize+ and
25
+ # raises if +rg+ isn't on +PATH+. Each tool owns its own probe so
26
+ # construction order doesn't matter — Glob doesn't lean on Grep's
27
+ # check.
28
+ #
29
+ # == Argv & filter pipeline
30
+ #
31
+ # rg --files --color=never --hidden --glob '!.git/*' \
32
+ # -- <relative-path-or-dot>
33
+ # # …then filter the result list in Ruby with File.fnmatch?
34
+ #
35
+ # Why not pass the user pattern as +--glob+ to rg? Because rg's
36
+ # +--glob+ documentation says *"This always overrides any other
37
+ # ignore logic"* — so +--glob '**/*.rb'+ would re-include
38
+ # +.gitignore+'d Ruby files, breaking our gitignore-respect
39
+ # promise. We let rg produce the full gitignore-respecting file
40
+ # list and filter to the user's pattern in Ruby with
41
+ # +File.fnmatch?(pattern, p, FNM_PATHNAME | FNM_EXTGLOB |
42
+ # FNM_DOTMATCH)+. The three flags together cover the common rg
43
+ # glob cases: +**+ recursion (+FNM_PATHNAME+), +{a,b}+ alternation
44
+ # (+FNM_EXTGLOB+), and dotfile inclusion (+FNM_DOTMATCH+, matching
45
+ # rg's +--hidden+ behavior). The +.git/+ exclusion stays on the rg
46
+ # side so its contents never even reach the Ruby filter.
47
+ #
48
+ # * +--hidden+ → search dotfiles (still respects +.gitignore+).
49
+ # * No +--sort+ flag: we re-sort by mtime in Ruby on the way out.
50
+ # * Output paths come back as +./...+ when the search path is +.+;
51
+ # the leading +./+ is stripped post-rg so the model sees clean
52
+ # workspace-relative paths.
53
+ #
54
+ # == Sort
55
+ #
56
+ # mtime-descending in Ruby after rg returns, with path-ascending
57
+ # as a tiebreaker for files with equal mtimes (the common case in
58
+ # fresh checkouts). Cost: one +stat+ per result. Broad patterns
59
+ # can make this expensive, but in practice rg's +.gitignore+ filter
60
+ # keeps result sets bounded; if real friction shows up later we can
61
+ # cap pre-sort.
62
+ #
63
+ # == Truncation
64
+ #
65
+ # Total output head-truncated to {MAX_BYTES} *after* mtime sort, so
66
+ # the kept rows are the newest. Matches {Tool::Grep}'s budget and
67
+ # head-bias.
68
+ #
69
+ # == Exit codes
70
+ #
71
+ # * +0+ → at least one file; format with footer.
72
+ # * +1+ → no files; return +"No files match pattern '...'"+.
73
+ # * +2+ → rg error (bad path, bad glob); return
74
+ # +"Error: ripgrep: ..."+.
75
+ #
76
+ # == Refusals
77
+ #
78
+ # All returned as +"Error: ..."+ observations:
79
+ #
80
+ # * Empty +pattern+ → fast reject.
81
+ # * +path+ is a regular file → fast reject pointing at the +read+
82
+ # tool.
83
+ # * +path+ not found → +"Error: path not found: <path>"+.
84
+ # * +path+ outside the workspace → caught from
85
+ # {Tool::Workspace::Error}.
86
+ class Glob < Tool
87
+ # @return [Integer] hard byte cap on combined rg output. Same
88
+ # value as {Tool::Grep::MAX_BYTES} so the two file-touching
89
+ # tools share a budget shape. Re-declared here rather than
90
+ # referenced cross-file because Zeitwerk's eager-load order
91
+ # isn't guaranteed between siblings.
92
+ MAX_BYTES = 50 * 1024
93
+
94
+ # @return [String] human-readable form of {MAX_BYTES} for the
95
+ # truncation marker.
96
+ MAX_BYTES_LABEL = "#{MAX_BYTES / 1024} KB"
97
+
98
+ # Description shown to the LLM. opencode-shape (summary +
99
+ # +Usage:+ bullets). Per-parameter constraints live in parameter
100
+ # descriptions.
101
+ #
102
+ # @return [String]
103
+ DESCRIPTION = <<~DESC
104
+ List files matching a glob pattern, sorted by modification time (newest first).
105
+
106
+ Usage:
107
+ - `.gitignore` is respected; for unfiltered listing use bash `rg --no-ignore --files -g <pattern>`.
108
+ - Glob syntax: `**` matches any number of directories, `*` matches any filename chars (not `/`), `{a,b}` is alternation.
109
+ - Default search root is the workspace root; pass `path` to narrow to a subdirectory.
110
+ - Use `glob` to find files by name; use `grep` to find files by content.
111
+ - Output is sorted by mtime descending — recently-touched files come first, so broad patterns still surface relevant files near the top.
112
+ - Output is truncated to #{MAX_BYTES_LABEL}; refine the pattern or narrow `path` if the response ends in a truncation marker.
113
+ DESC
114
+
115
+ # @param workspace [Tool::Workspace] captured for path resolution
116
+ # and as +chdir+ for rg. All path arguments route through
117
+ # +workspace.resolve_for_read+.
118
+ # @raise [RuntimeError] if +rg+ isn't on +PATH+; fail-loud at
119
+ # construction rather than the first tool call.
120
+ # @return [Glob]
121
+ def initialize(workspace:)
122
+ Glob.send(:check_binaries!)
123
+ super(
124
+ name: 'glob',
125
+ description: DESCRIPTION,
126
+ parameters: Parameters.build { |p|
127
+ p.required_string :pattern,
128
+ 'Glob pattern (** matches any number of ' \
129
+ 'directories; {a,b} alternation), e.g. ' \
130
+ '"**/*.rb" or "lib/**/*_spec.rb".'
131
+ p.optional_string :path,
132
+ 'Directory to search in. Relative paths resolve ' \
133
+ 'against the workspace root. Defaults to the ' \
134
+ 'workspace root, e.g. "lib/" or "spec/".'
135
+ },
136
+ execute: lambda { |pattern:, path: nil|
137
+ Glob.search(workspace: workspace, pattern: pattern, path: path)
138
+ }
139
+ )
140
+ end
141
+
142
+ # Validate inputs, resolve the path against the workspace, spawn
143
+ # rg, mtime-sort, head-truncate, render. Returns either the
144
+ # formatted listing, a "no files match" message, or
145
+ # +"Error: ..."+.
146
+ #
147
+ # @param workspace [Tool::Workspace]
148
+ # @param pattern [String]
149
+ # @param path [String, nil]
150
+ # @return [String]
151
+ def self.search(workspace:, pattern:, path:)
152
+ return 'Error: empty pattern.' if pattern.empty?
153
+
154
+ search_target = '.'
155
+ if path
156
+ resolved = workspace.resolve_for_read(path)
157
+ return "Error: path not found: #{path}" unless resolved.exist?
158
+ if resolved.file?
159
+ return "Error: #{path} is a file, not a directory; use the read tool to view it."
160
+ end
161
+
162
+ rel = resolved.relative_path_from(workspace.cwd).to_s
163
+ search_target = rel
164
+ end
165
+
166
+ argv = build_argv(path: search_target)
167
+ result = Pikuri::Subprocess.spawn(*argv, chdir: workspace.cwd.to_s).wait
168
+ exit_code = result.status.exitstatus
169
+
170
+ case exit_code
171
+ when 0
172
+ format_output(result.output, workspace: workspace,
173
+ pattern: pattern, path: path)
174
+ when 1
175
+ no_match_message(pattern: pattern, path: path)
176
+ else
177
+ stderr = result.output.strip
178
+ stderr = "exited #{exit_code}" if stderr.empty?
179
+ "Error: ripgrep: #{stderr}"
180
+ end
181
+ rescue Tool::Workspace::Error => e
182
+ "Error: #{e.message}"
183
+ end
184
+
185
+ # @return [Integer] flags for {File.fnmatch?}: +FNM_PATHNAME+ for
186
+ # +**+ recursion + path-aware +/+ matching, +FNM_EXTGLOB+ for
187
+ # +{a,b}+ alternation, +FNM_DOTMATCH+ to match dotfiles (rg
188
+ # does this when +--hidden+ is set).
189
+ FNMATCH_FLAGS = File::FNM_PATHNAME | File::FNM_EXTGLOB | File::FNM_DOTMATCH
190
+
191
+ # Build the +rg+ argv. User pattern is NOT passed to rg — see
192
+ # the class header for why (rg's +--glob+ overrides
193
+ # +.gitignore+).
194
+ #
195
+ # @return [Array<String>]
196
+ def self.build_argv(path:)
197
+ [
198
+ 'rg',
199
+ '--files',
200
+ '--color=never',
201
+ '--hidden',
202
+ '--glob', '!.git/*',
203
+ '--', path
204
+ ]
205
+ end
206
+ private_class_method :build_argv
207
+
208
+ # Strip the +./+ prefix rg adds when invoked with +.+ as the
209
+ # search path, filter to the user pattern with +fnmatch+,
210
+ # mtime-sort descending (path ascending as tiebreaker),
211
+ # head-truncate at {MAX_BYTES}, append a footer summarizing the
212
+ # count.
213
+ #
214
+ # @return [String]
215
+ def self.format_output(raw, workspace:, pattern:, path:)
216
+ all_paths = raw.split("\n").reject(&:empty?).map { |p| p.sub(%r{\A\./}, '') }
217
+ paths = all_paths.select { |p| File.fnmatch?(pattern, p, FNMATCH_FLAGS) }
218
+ return no_match_message(pattern: pattern, path: path) if paths.empty?
219
+
220
+ sorted = mtime_sort(paths, workspace.cwd)
221
+ joined = sorted.join("\n") + "\n"
222
+ content, truncation_marker = head_truncate(joined)
223
+ stripped = content.chomp
224
+ count = stripped.split("\n").size
225
+
226
+ footer = "Found #{pluralize(count, 'file', 'files')}."
227
+ [stripped, '', footer + truncation_marker].join("\n")
228
+ end
229
+ private_class_method :format_output
230
+
231
+ # mtime descending; path ascending for stable order on ties.
232
+ #
233
+ # @return [Array<String>]
234
+ def self.mtime_sort(paths, cwd)
235
+ paths
236
+ .map { |p| [p, mtime_of(cwd + p)] }
237
+ .sort_by { |(p, m)| [-m, p] }
238
+ .map(&:first)
239
+ end
240
+ private_class_method :mtime_sort
241
+
242
+ # @return [Float] epoch-seconds mtime; 0 for paths we can't stat
243
+ # (race between rg listing and our stat, deleted symlinks,
244
+ # etc.). The fallback puts unstattable entries at the bottom.
245
+ def self.mtime_of(absolute)
246
+ File.mtime(absolute).to_f
247
+ rescue Errno::ENOENT
248
+ 0.0
249
+ end
250
+ private_class_method :mtime_of
251
+
252
+ # Head-truncate +raw+ to {MAX_BYTES}, cutting at the last newline
253
+ # boundary so the final row is never partial. Returns the
254
+ # truncated content and a marker String (empty if no truncation).
255
+ #
256
+ # @return [Array(String, String)]
257
+ def self.head_truncate(raw)
258
+ total = raw.bytesize
259
+ return [raw, ''] if total <= MAX_BYTES
260
+
261
+ head = raw.byteslice(0, MAX_BYTES)
262
+ last_nl = head.rindex("\n")
263
+ head = head.byteslice(0, last_nl) if last_nl
264
+ omitted = total - head.bytesize
265
+ marker = "\n\n... [#{omitted} bytes omitted; total was #{total} bytes; " \
266
+ 'refine pattern or path] ...'
267
+ [head, marker]
268
+ end
269
+ private_class_method :head_truncate
270
+
271
+ # @return [String]
272
+ def self.no_match_message(pattern:, path:)
273
+ base = "No files match pattern '#{pattern}'"
274
+ base += " in #{path}" if path
275
+ "#{base}."
276
+ end
277
+ private_class_method :no_match_message
278
+
279
+ # @return [String] +"1 file"+ / +"2 files"+
280
+ def self.pluralize(n, sing, plural)
281
+ "#{n} #{n == 1 ? sing : plural}"
282
+ end
283
+ private_class_method :pluralize
284
+
285
+ # Verify +rg+ is reachable on +PATH+. Routed through
286
+ # {Pikuri::Subprocess.spawn} to honor the subprocess seam. rg
287
+ # missing surfaces as +Errno::ENOENT+; an installed rg returns
288
+ # exit 0 from +--version+.
289
+ #
290
+ # @return [void]
291
+ # @raise [RuntimeError] if rg is missing
292
+ def self.check_binaries!
293
+ result = Pikuri::Subprocess.spawn('rg', '--version', chdir: '/').wait
294
+ return if result.status.success?
295
+
296
+ raise install_hint
297
+ rescue Errno::ENOENT
298
+ raise install_hint
299
+ end
300
+ private_class_method :check_binaries!
301
+
302
+ # @return [String]
303
+ def self.install_hint
304
+ "Tool::Glob requires 'rg' (ripgrep) on PATH; install via your " \
305
+ "distro's package manager (e.g. 'apt install ripgrep')."
306
+ end
307
+ private_class_method :install_hint
308
+ end
309
+ end
310
+ end