scint 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/FEATURES.md +13 -0
- data/README.md +216 -0
- data/bin/bundler-vs-scint +233 -0
- data/bin/scint +35 -0
- data/bin/scint-io-summary +46 -0
- data/bin/scint-syscall-trace +41 -0
- data/lib/bundler/setup.rb +5 -0
- data/lib/bundler.rb +168 -0
- data/lib/scint/cache/layout.rb +131 -0
- data/lib/scint/cache/metadata_store.rb +75 -0
- data/lib/scint/cache/prewarm.rb +192 -0
- data/lib/scint/cli/add.rb +85 -0
- data/lib/scint/cli/cache.rb +316 -0
- data/lib/scint/cli/exec.rb +150 -0
- data/lib/scint/cli/install.rb +1047 -0
- data/lib/scint/cli/remove.rb +60 -0
- data/lib/scint/cli.rb +77 -0
- data/lib/scint/commands/exec.rb +17 -0
- data/lib/scint/commands/install.rb +17 -0
- data/lib/scint/credentials.rb +153 -0
- data/lib/scint/debug/io_trace.rb +218 -0
- data/lib/scint/debug/sampler.rb +138 -0
- data/lib/scint/downloader/fetcher.rb +113 -0
- data/lib/scint/downloader/pool.rb +112 -0
- data/lib/scint/errors.rb +63 -0
- data/lib/scint/fs.rb +119 -0
- data/lib/scint/gem/extractor.rb +86 -0
- data/lib/scint/gem/package.rb +62 -0
- data/lib/scint/gemfile/dependency.rb +30 -0
- data/lib/scint/gemfile/editor.rb +93 -0
- data/lib/scint/gemfile/parser.rb +275 -0
- data/lib/scint/index/cache.rb +166 -0
- data/lib/scint/index/client.rb +301 -0
- data/lib/scint/index/parser.rb +142 -0
- data/lib/scint/installer/extension_builder.rb +264 -0
- data/lib/scint/installer/linker.rb +226 -0
- data/lib/scint/installer/planner.rb +140 -0
- data/lib/scint/installer/preparer.rb +207 -0
- data/lib/scint/lockfile/parser.rb +251 -0
- data/lib/scint/lockfile/writer.rb +178 -0
- data/lib/scint/platform.rb +71 -0
- data/lib/scint/progress.rb +579 -0
- data/lib/scint/resolver/provider.rb +230 -0
- data/lib/scint/resolver/resolver.rb +249 -0
- data/lib/scint/runtime/exec.rb +141 -0
- data/lib/scint/runtime/setup.rb +45 -0
- data/lib/scint/scheduler.rb +392 -0
- data/lib/scint/source/base.rb +46 -0
- data/lib/scint/source/git.rb +92 -0
- data/lib/scint/source/path.rb +70 -0
- data/lib/scint/source/rubygems.rb +79 -0
- data/lib/scint/vendor/pub_grub/assignment.rb +20 -0
- data/lib/scint/vendor/pub_grub/basic_package_source.rb +169 -0
- data/lib/scint/vendor/pub_grub/failure_writer.rb +182 -0
- data/lib/scint/vendor/pub_grub/incompatibility.rb +150 -0
- data/lib/scint/vendor/pub_grub/package.rb +43 -0
- data/lib/scint/vendor/pub_grub/partial_solution.rb +121 -0
- data/lib/scint/vendor/pub_grub/rubygems.rb +45 -0
- data/lib/scint/vendor/pub_grub/solve_failure.rb +19 -0
- data/lib/scint/vendor/pub_grub/static_package_source.rb +61 -0
- data/lib/scint/vendor/pub_grub/strategy.rb +42 -0
- data/lib/scint/vendor/pub_grub/term.rb +105 -0
- data/lib/scint/vendor/pub_grub/version.rb +3 -0
- data/lib/scint/vendor/pub_grub/version_constraint.rb +129 -0
- data/lib/scint/vendor/pub_grub/version_range.rb +423 -0
- data/lib/scint/vendor/pub_grub/version_solver.rb +236 -0
- data/lib/scint/vendor/pub_grub/version_union.rb +178 -0
- data/lib/scint/vendor/pub_grub.rb +32 -0
- data/lib/scint/worker_pool.rb +114 -0
- data/lib/scint.rb +87 -0
- metadata +116 -0
|
@@ -0,0 +1,392 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "worker_pool"
|
|
4
|
+
require_relative "progress"
|
|
5
|
+
require_relative "platform"
|
|
6
|
+
|
|
7
|
+
module Scint
  # Priority-based job scheduler backed by a WorkerPool.
  #
  # Jobs are enqueued with a type (see PRIORITIES), optional dependencies on
  # other job ids, and an optional follow-up proc that runs after completion
  # and may itself enqueue more jobs. A dedicated dispatcher thread pulls
  # ready jobs (dependencies satisfied, per-type slot available) off the
  # pending queue in priority order and feeds them to the pool. All shared
  # state is guarded by a single mutex + condition variable.
  class Scheduler
    # Job types in priority order (lower index = higher priority)
    PRIORITIES = {
      fetch_index: 0,
      git_clone: 1,
      resolve: 2,
      download: 3,
      extract: 4,
      link: 5,
      build_ext: 6,
      binstub: 7,
    }.freeze

    # State machine: :pending -> :running -> :completed | :failed.
    # NOTE(review): :result and :enqueued_at are never assigned anywhere in
    # this class — presumably set by callers or reserved; confirm.
    Job = Struct.new(:id, :type, :name, :payload, :state, :result, :error,
                     :depends_on, :enqueued_at, keyword_init: true)

    attr_reader :errors, :progress

    # max_workers: hard ceiling (default cpu_count * 2, capped at 50)
    # initial_workers: how many threads to start with (default 1 — grow dynamically)
    # per_type_limits: optional hash { job_type => max_concurrent }
    # Example: { build_ext: 1, binstub: 1, link: 30 }
    # fail_fast: when true, the first job failure sets @aborted and halts
    # dispatch of remaining pending work.
    def initialize(max_workers: nil, initial_workers: 1, progress: nil, fail_fast: false, per_type_limits: {})
      @max_workers = [max_workers || Platform.cpu_count * 2, 50].min
      @initial_workers = [[initial_workers, 1].max, @max_workers].min
      @current_workers = @initial_workers
      @progress = progress || Progress.new
      @fail_fast = fail_fast
      @aborted = false

      # Single lock + condvar guards every collection below; @cv is broadcast
      # on any state change so waiters (wait_for, wait_all, dispatcher) recheck.
      @mutex = Thread::Mutex.new
      @cv = Thread::ConditionVariable.new

      @jobs = {} # id => Job
      @pending = [] # sorted by priority
      @running = {} # id => Job
      @completed = {} # id => Job
      @failed = {} # id => Job
      @running_by_type = Hash.new(0)
      @per_type_limits = normalize_per_type_limits(per_type_limits)

      @errors = [] # collected errors
      @next_id = 0
      @pool = nil
      @started = false
      @shutting_down = false
      @in_flight_follow_ups = 0 # track follow-ups being executed

      # Callbacks: type => [proc]
      @on_complete_callbacks = Hash.new { |h, k| h[k] = [] }

      # Waiters: type => [ConditionVariable]
      # NOTE(review): @type_waiters is initialized but never read or written
      # anywhere else in this class — appears unused; confirm before removing.
      @type_waiters = Hash.new { |h, k| h[k] = [] }
    end

    # Start the worker pool and the dispatcher thread. Idempotent.
    def start
      return if @started
      @started = true
      @progress.start

      @pool = WorkerPool.new(@max_workers, name: "scheduler")
      @pool.start(@initial_workers) do |job|
        execute_job(job)
      end

      # Dispatcher thread: pulls from priority queue and feeds the pool
      @dispatcher = Thread.new do
        Thread.current.name = "scheduler-dispatch"
        begin
          dispatch_loop
        rescue Exception => e
          # Deliberately rescues Exception: a silently-dead dispatcher would
          # hang every waiter, so crash loudly and re-raise.
          $stderr.puts "\n!!! DISPATCHER THREAD CRASHED !!!"
          $stderr.puts "Exception: #{e.class}: #{e.message}"
          $stderr.puts e.backtrace.first(10).map { |l| " #{l}" }.join("\n")
          raise
        end
      end
    end

    # Enqueue a job. Returns the job id.
    # depends_on: array of job ids that must complete before this runs.
    # follow_up: proc that receives (job) and can enqueue more jobs.
    # Returns nil (without enqueuing) once the scheduler has aborted.
    def enqueue(type, name, payload = nil, depends_on: [], follow_up: nil)
      raise "scheduler not started" unless @started
      raise "unknown job type: #{type}" unless PRIORITIES.key?(type)

      job = nil
      @mutex.synchronize do
        return nil if @aborted

        id = @next_id += 1
        job = Job.new(
          id: id,
          type: type,
          name: name,
          payload: { data: payload, follow_up: follow_up },
          state: :pending,
          depends_on: depends_on.dup,
        )
        @jobs[id] = job
        insert_pending(job)
        @cv.broadcast
      end

      # Progress callback runs outside the lock (convention throughout this class).
      @progress.on_enqueue(job.id, job.type, job.name)
      job.id
    end

    # Wait for all jobs of a specific type to complete.
    # Returns once no pending/running jobs of that type remain.
    # NOTE(review): does not wait on @in_flight_follow_ups, so a follow-up may
    # still enqueue more jobs of this type after wait_for returns — confirm
    # callers account for that (wait_all does cover follow-ups).
    def wait_for(type)
      @mutex.synchronize do
        loop do
          pending_of_type = @pending.any? { |j| j.type == type }
          running_of_type = @running.values.any? { |j| j.type == type }
          break if @aborted
          break unless pending_of_type || running_of_type
          @cv.wait(@mutex)
        end
      end
    end

    # Wait for a specific job to complete. Returns the Job.
    # Unknown ids return nil immediately rather than blocking forever.
    def wait_for_job(job_id)
      @mutex.synchronize do
        loop do
          job = @jobs[job_id]
          return job if job.nil? || job.state == :completed || job.state == :failed
          @cv.wait(@mutex)
        end
      end
    end

    # Wait for ALL jobs to finish (including any in-flight follow-ups).
    # When aborted, pending jobs are allowed to remain — only running work
    # and follow-ups must drain.
    def wait_all
      @mutex.synchronize do
        loop do
          break if @running.empty? && @in_flight_follow_ups == 0 && (@pending.empty? || @aborted)
          @cv.wait(@mutex)
        end
      end
    end

    # Register a callback for when jobs of a given type complete.
    # Callbacks are invoked outside the scheduler lock, only on success.
    def on_complete(type, &block)
      @mutex.synchronize do
        @on_complete_callbacks[type] << block
      end
    end

    # Scale to exactly n workers (clamped to [current, max_workers]).
    # Never shrinks — if n < current workers, this is a no-op.
    def scale_to(n)
      return unless @pool
      target = [[n, @current_workers].max, @max_workers].min
      return if target <= @current_workers

      @pool.grow_to(target)
      @current_workers = target
    end

    # Auto-scale based on pending queue depth.
    # Formula: target = clamp(queue_depth / 4, 1, max_workers)
    def adjust_workers(queue_depth = nil)
      depth = queue_depth || @mutex.synchronize { @pending.size }
      target = [[1, (depth / 4.0).ceil].max, @max_workers].min
      scale_to(target)
    end

    # Scale worker count based on workload hint (e.g. gem count, download count).
    # Convenience wrapper: after Gemfile parse pass gem_count,
    # after resolution pass download_count.
    # Formula: target = clamp(hint / 3, 1, max_workers)
    def scale_workers(hint)
      return unless @pool
      target = [[1, (hint / 3.0).ceil].max, @max_workers].min
      scale_to(target)
    end

    # Current number of live worker threads (grows only; see scale_to).
    def current_workers
      @current_workers
    end

    # Hard ceiling on worker threads, fixed at construction.
    def max_workers
      @max_workers
    end

    # Gracefully shut down: wait for all work, then stop threads.
    # Safe to call once; resets @started so the instance could be restarted.
    def shutdown
      return unless @started
      wait_all

      @mutex.synchronize { @shutting_down = true }
      @cv.broadcast
      @dispatcher&.join(5)
      @pool&.stop
      @progress.stop
      @started = false
    end

    # Snapshot of queue/worker counts, taken atomically under the lock.
    def stats
      @mutex.synchronize do
        {
          pending: @pending.size,
          running: @running.size,
          completed: @completed.size,
          failed: @failed.size,
          total: @jobs.size,
          workers: @current_workers,
          max_workers: @max_workers,
        }
      end
    end

    # True if any job failed or any error (including follow-up errors) was recorded.
    def failed?
      @mutex.synchronize { !@failed.empty? || !@errors.empty? }
    end

    # True once fail_fast tripped after a job failure.
    def aborted?
      @mutex.synchronize { @aborted }
    end

    private

    # Main loop of the dispatcher thread: under the lock, wait until a job is
    # ready (deps met, per-type slot free, worker capacity available), mark it
    # running, then hand it to the pool outside the lock.
    def dispatch_loop
      loop do
        job = nil

        @mutex.synchronize do
          loop do
            break if @shutting_down
            break if @aborted && @running.empty?
            if @aborted
              # Aborted but jobs still running: just wait for them to drain.
              @cv.wait(@mutex)
              next
            end

            # Backpressure: keep at most @current_workers in-flight so a
            # fail-fast error can still halt most pending work.
            if @running.size >= @current_workers
              @cv.wait(@mutex)
              next
            end

            job = pick_ready_job
            break if job

            # Nothing ready yet — wait for state change
            @cv.wait(@mutex)
          end

          if job
            job.state = :running
            @running[job.id] = job
            @running_by_type[job.type] += 1
          end
        end

        # Exit when shutting down or aborted with nothing picked.
        # NOTE(review): the inner `@aborted && job.nil?` is redundant with the
        # trailing `job.nil?` — condition reduces to
        # `job.nil? && (@shutting_down || @aborted)`.
        break if (@shutting_down || (@aborted && job.nil?)) && job.nil?
        next unless job

        @progress.on_start(job.id, job.type, job.name)

        # NOTE(review): pool_job appears to be a hash with :payload (the Job)
        # and :error — confirm against WorkerPool's completion contract.
        @pool.enqueue(job) do |pool_job|
          finished_job = pool_job[:payload]
          handle_completion(finished_job, pool_job[:error])
        end
      end
    end

    # Must be called inside @mutex
    # Returns (and removes from @pending) the highest-priority job whose
    # dependencies are all settled and whose type has a free slot; nil if none.
    # Note: a dependency that FAILED still counts as settled — dependents run.
    def pick_ready_job
      @pending.each_with_index do |job, idx|
        next unless type_slot_available?(job.type)

        # Check if dependencies are met
        deps_met = job.depends_on.all? do |dep_id|
          dep = @jobs[dep_id]
          dep && (dep.state == :completed || dep.state == :failed)
        end

        if deps_met
          @pending.delete_at(idx)
          return job
        end
      end
      nil
    end

    # Insert into pending list maintaining priority order
    # (stable for equal priorities: new jobs go after existing ones).
    # Must be called inside @mutex.
    def insert_pending(job)
      priority = PRIORITIES[job.type] || 99
      idx = @pending.bsearch_index { |j| (PRIORITIES[j.type] || 99) > priority }
      if idx
        @pending.insert(idx, job)
      else
        @pending.push(job)
      end
    end

    # Runs on a pool worker thread. The payload :data may be a callable, a
    # hash wrapping a :proc, or a plain value returned as-is.
    # NOTE(review): the return value is surfaced via the worker pool, not
    # stored on the Job here — confirm against WorkerPool.
    def execute_job(job)
      data = job.payload[:data]
      if data.respond_to?(:call)
        data.call
      elsif data.is_a?(Hash) && data[:proc]
        data[:proc].call
      else
        data
      end
    end

    # Pool completion hook: record success/failure under the lock, then run
    # progress callbacks, on_complete callbacks, and the follow-up OUTSIDE the
    # lock (the follow-up may call enqueue, which takes the lock).
    def handle_completion(job, error)
      follow_up = job.payload[:follow_up]
      callbacks = nil
      run_follow_up = false

      @mutex.synchronize do
        @running.delete(job.id)
        @running_by_type[job.type] -= 1 if @running_by_type[job.type] > 0

        if error
          job.state = :failed
          job.error = error
          @failed[job.id] = job
          @errors << { job_id: job.id, type: job.type, name: job.name, error: error }
          @aborted = true if @fail_fast
        else
          job.state = :completed
          @completed[job.id] = job

          # Snapshot under the lock; invoked after release.
          callbacks = @on_complete_callbacks[job.type].dup

          # Counted so wait_all blocks until the follow-up finishes.
          if follow_up && !@aborted
            run_follow_up = true
            @in_flight_follow_ups += 1
          end
        end

        @cv.broadcast
      end

      # Run progress callbacks outside mutex (they don't mutate scheduler state)
      if error
        @progress.on_fail(job.id, job.type, job.name, error)
      else
        @progress.on_complete(job.id, job.type, job.name)
        callbacks&.each { |cb| cb.call(job) }
      end

      # Run follow-up outside mutex so it can call enqueue without deadlock
      if run_follow_up
        begin
          follow_up.call(job)
        rescue => e
          # Follow-up errors are recorded but do not mark the job failed.
          @mutex.synchronize do
            @errors << { job_id: job.id, type: job.type, name: job.name, error: e, phase: :follow_up }
          end
        ensure
          @mutex.synchronize do
            @in_flight_follow_ups -= 1
            @cv.broadcast
          end
        end
      end
    end

    # Keep only known types with a positive integer limit; nil/zero/unknown
    # entries are dropped (meaning: unlimited).
    def normalize_per_type_limits(limits)
      out = {}
      limits.each do |type, limit|
        next unless PRIORITIES.key?(type)
        next if limit.nil?

        n = limit.to_i
        out[type] = n if n > 0
      end
      out
    end

    # Must be called inside @mutex. True when the type has no configured
    # limit or has not yet saturated it.
    def type_slot_available?(type)
      limit = @per_type_limits[type]
      return true unless limit

      @running_by_type[type] < limit
    end
  end
end
|
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
module Scint
  module Source
    # Abstract interface shared by every gem source (rubygems, git, path).
    #
    # Concrete subclasses are expected to provide: #name, #uri, #specs,
    # #fetch_spec, #cache_slug, #to_lock, #eql?, and #hash. The methods
    # below raise NotImplementedError so a missing override fails loudly.
    class Base
      # Human-readable identifier of the source.
      def name
        raise NotImplementedError
      end

      # Primary location (URL or filesystem path) of the source.
      def uri
        raise NotImplementedError
      end

      # Return an array of available specs from this source.
      def specs
        raise NotImplementedError
      end

      # Fetch a specific spec by name, version, and platform.
      def fetch_spec(name, version, platform = "ruby")
        raise NotImplementedError
      end

      # A unique slug used for cache directory naming.
      def cache_slug
        raise NotImplementedError
      end

      # Lockfile representation (the header section, e.g. "GEM\n  remote: ...\n  specs:\n")
      def to_lock
        raise NotImplementedError
      end

      # "Git: https://..." style summary built from the subclass name.
      def to_s
        "#{self.class.name.split("::").last}: #{uri}"
      end

      # Equality is delegated to #eql? so subclasses define it once.
      def ==(other)
        eql?(other)
      end
    end
  end
end
|
|
@@ -0,0 +1,92 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "base"
|
|
4
|
+
require "digest/sha2"
|
|
5
|
+
|
|
6
|
+
module Scint
  module Source
    # Gem source backed by a git repository (Gemfile `git:` entries).
    class Git < Base
      attr_reader :uri, :revision, :ref, :branch, :tag, :submodules, :glob

      # Pattern used to locate gemspecs inside a checkout.
      DEFAULT_GLOB = "{,*,*/*}.gemspec"

      # ref falls back to branch, then tag, when not given explicitly.
      def initialize(uri:, revision: nil, ref: nil, branch: nil, tag: nil,
                     submodules: nil, glob: nil, name: nil)
        @uri = uri.to_s
        @name = name
        @revision = revision
        @branch = branch
        @tag = tag
        @ref = ref || branch || tag
        @submodules = submodules
        @glob = glob || DEFAULT_GLOB
      end

      # Build a Git source from a parsed lockfile section. Note that the
      # "remote" key is removed from the options hash, matching prior behavior.
      def self.from_lock(options)
        remote = options.delete("remote")
        new(
          uri: remote,
          revision: options["revision"],
          ref: options["ref"],
          branch: options["branch"],
          tag: options["tag"],
          submodules: options["submodules"],
          glob: options["glob"],
          name: options["name"],
        )
      end

      # Explicit name if given, otherwise derived from the repo URL.
      def name
        @name || File.basename(uri, ".git")
      end

      def specs
        [] # Loaded from checked-out gemspec at resolution time
      end

      def fetch_spec(name, version, platform = "ruby")
        nil
      end

      # Cache directory slug: "<name>-<12-char sha256 of uri>".
      def cache_slug
        "#{name}-#{uri_hash}"
      end

      # Render the GIT section of a lockfile.
      def to_lock
        lines = ["GIT\n", "  remote: #{@uri}\n"]
        lines << "  revision: #{@revision}\n" if @revision
        lines << "  ref: #{@ref}\n" if @ref && @ref != @branch && @ref != @tag
        lines << "  branch: #{@branch}\n" if @branch
        lines << "  tag: #{@tag}\n" if @tag
        lines << "  submodules: #{@submodules}\n" if @submodules
        lines << "  glob: #{@glob}\n" unless @glob == DEFAULT_GLOB
        lines << "  specs:\n"
        lines.join
      end

      # Two Git sources are equal when they point at the same repo/refspec.
      def eql?(other)
        return false unless other.is_a?(Git)

        [uri, ref, branch, tag, submodules] ==
          [other.uri, other.ref, other.branch, other.tag, other.submodules]
      end

      def hash
        [self.class, uri, ref, branch, tag, submodules].hash
      end

      def to_s
        at = tag || branch || ref
        at ? "git: #{@uri} (#{at})" : "git: #{@uri}"
      end

      private

      # Short stable digest of the remote URL for cache naming.
      def uri_hash
        Digest::SHA256.hexdigest(@uri)[0, 12]
      end
    end
  end
end
|
|
@@ -0,0 +1,70 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "base"
|
|
4
|
+
|
|
5
|
+
module Scint
  module Source
    # Gem source backed by a local directory (Gemfile `path:` entries).
    class Path < Base
      attr_reader :path, :glob

      # Pattern used to locate gemspecs under the path.
      DEFAULT_GLOB = "{,*,*/*}.gemspec"

      def initialize(path:, glob: nil, name: nil, version: nil)
        @path = path.to_s
        @glob = glob || DEFAULT_GLOB
        @name = name
        @version = version
      end

      # Build a Path source from a parsed lockfile section. Note that the
      # "remote" key is removed from the options hash, matching prior behavior.
      def self.from_lock(options)
        remote = options.delete("remote")
        new(
          path: remote,
          glob: options["glob"],
          name: options["name"],
          version: options["version"],
        )
      end

      # Explicit name if given, otherwise the directory basename.
      def name
        @name || File.basename(@path)
      end

      # For a path source the URI is just the path itself.
      def uri
        @path
      end

      def specs
        [] # Loaded from gemspec on disk
      end

      def fetch_spec(name, version, platform = "ruby")
        nil
      end

      # Local paths are cached under their own name.
      def cache_slug
        name
      end

      # Render the PATH section of a lockfile.
      def to_lock
        lines = ["PATH\n", "  remote: #{@path}\n"]
        lines << "  glob: #{@glob}\n" if @glob != DEFAULT_GLOB
        lines << "  specs:\n"
        lines.join
      end

      # Two Path sources are equal when they resolve to the same directory.
      def eql?(other)
        return false unless other.is_a?(Path)

        File.expand_path(path) == File.expand_path(other.path)
      end

      def hash
        [self.class, File.expand_path(path)].hash
      end

      def to_s
        "path: #{@path}"
      end
    end
  end
end
|
|
@@ -0,0 +1,79 @@
|
|
|
1
|
+
# frozen_string_literal: true
|
|
2
|
+
|
|
3
|
+
require_relative "base"
|
|
4
|
+
require "uri"
|
|
5
|
+
|
|
6
|
+
module Scint
  module Source
    # Gem source backed by one or more rubygems-compatible remotes.
    class Rubygems < Base
      attr_reader :remotes

      # Remotes are normalized to always carry a trailing slash.
      def initialize(remotes: [])
        @remotes = Array(remotes).map { |remote| normalize_uri(remote) }
      end

      # Build from a parsed lockfile section; lockfiles list remotes in
      # reverse priority order, so flip them back.
      def self.from_lock(options)
        new(remotes: Array(options["remote"]).reverse)
      end

      def name
        "rubygems"
      end

      # Primary remote (highest priority).
      def uri
        @remotes.first
      end

      # Append a remote, skipping duplicates after normalization.
      def add_remote(remote)
        normalized = normalize_uri(remote)
        @remotes << normalized unless @remotes.include?(normalized)
      end

      def specs
        [] # Populated by compact index client at resolution time
      end

      def fetch_spec(name, version, platform = "ruby")
        nil # Delegated to compact index / API at resolution time
      end

      # Cache slug derived from the primary remote: host plus the path with
      # slashes turned into dashes (e.g. "rubygems.org" or "gems.example.com-private").
      def cache_slug
        parsed = URI.parse(@remotes.first.to_s)
        path_part = parsed.path.gsub("/", "-").sub(/^-/, "")
        if path_part.empty? || path_part == "-"
          parsed.host.to_s
        else
          parsed.host.to_s + path_part
        end
      end

      # Render the GEM section of a lockfile (remotes in reverse order).
      def to_lock
        lines = ["GEM\n"]
        @remotes.reverse_each { |remote| lines << "  remote: #{remote}\n" }
        lines << "  specs:\n"
        lines.join
      end

      # Equal when the (ordered, normalized) remote lists match.
      def eql?(other)
        other.is_a?(Rubygems) && @remotes == other.remotes
      end

      def hash
        [self.class, @remotes].hash
      end

      def to_s
        "rubygems (#{@remotes.join(", ")})"
      end

      private

      # Ensure a single trailing slash on the remote URL.
      def normalize_uri(uri)
        str = uri.to_s
        str.end_with?("/") ? str : "#{str}/"
      end
    end
  end
end
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
module Scint::PubGrub
  # One entry in the solver's partial solution: either a decision (a concrete
  # version pick) or a derivation caused by an incompatibility.
  class Assignment
    attr_reader :term, :cause, :decision_level, :index

    # cause is :decision for decisions, otherwise the Incompatibility that
    # derived this term.
    def initialize(term, cause, decision_level, index)
      @term = term
      @cause = cause
      @decision_level = decision_level
      @index = index
    end

    # Build a decision assignment pinning +package+ to exactly +version+.
    def self.decision(package, version, decision_level, index)
      constraint = VersionConstraint.exact(package, version)
      new(Term.new(constraint, true), :decision, decision_level, index)
    end

    # True when this assignment is a decision rather than a derivation.
    def decision?
      @cause == :decision
    end
  end
end
|