jit 0.0.0 → 1.0.0
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.
- checksums.yaml +5 -5
- data/LICENSE.txt +674 -0
- data/bin/jit +21 -0
- data/lib/color.rb +32 -0
- data/lib/command.rb +62 -0
- data/lib/command/add.rb +65 -0
- data/lib/command/base.rb +92 -0
- data/lib/command/branch.rb +199 -0
- data/lib/command/checkout.rb +104 -0
- data/lib/command/cherry_pick.rb +51 -0
- data/lib/command/commit.rb +86 -0
- data/lib/command/config.rb +126 -0
- data/lib/command/diff.rb +114 -0
- data/lib/command/fetch.rb +116 -0
- data/lib/command/init.rb +41 -0
- data/lib/command/log.rb +188 -0
- data/lib/command/merge.rb +148 -0
- data/lib/command/push.rb +172 -0
- data/lib/command/receive_pack.rb +92 -0
- data/lib/command/remote.rb +55 -0
- data/lib/command/reset.rb +64 -0
- data/lib/command/rev_list.rb +33 -0
- data/lib/command/revert.rb +69 -0
- data/lib/command/rm.rb +105 -0
- data/lib/command/shared/fast_forward.rb +19 -0
- data/lib/command/shared/print_diff.rb +116 -0
- data/lib/command/shared/receive_objects.rb +37 -0
- data/lib/command/shared/remote_agent.rb +44 -0
- data/lib/command/shared/remote_client.rb +82 -0
- data/lib/command/shared/send_objects.rb +24 -0
- data/lib/command/shared/sequencing.rb +146 -0
- data/lib/command/shared/write_commit.rb +167 -0
- data/lib/command/status.rb +210 -0
- data/lib/command/upload_pack.rb +54 -0
- data/lib/config.rb +240 -0
- data/lib/config/stack.rb +42 -0
- data/lib/database.rb +112 -0
- data/lib/database/author.rb +27 -0
- data/lib/database/backends.rb +57 -0
- data/lib/database/blob.rb +24 -0
- data/lib/database/commit.rb +70 -0
- data/lib/database/entry.rb +7 -0
- data/lib/database/loose.rb +70 -0
- data/lib/database/packed.rb +75 -0
- data/lib/database/tree.rb +77 -0
- data/lib/database/tree_diff.rb +88 -0
- data/lib/diff.rb +46 -0
- data/lib/diff/combined.rb +72 -0
- data/lib/diff/hunk.rb +64 -0
- data/lib/diff/myers.rb +90 -0
- data/lib/editor.rb +59 -0
- data/lib/index.rb +212 -0
- data/lib/index/checksum.rb +44 -0
- data/lib/index/entry.rb +91 -0
- data/lib/lockfile.rb +55 -0
- data/lib/merge/bases.rb +38 -0
- data/lib/merge/common_ancestors.rb +77 -0
- data/lib/merge/diff3.rb +156 -0
- data/lib/merge/inputs.rb +42 -0
- data/lib/merge/resolve.rb +178 -0
- data/lib/pack.rb +45 -0
- data/lib/pack/compressor.rb +83 -0
- data/lib/pack/delta.rb +58 -0
- data/lib/pack/entry.rb +54 -0
- data/lib/pack/expander.rb +54 -0
- data/lib/pack/index.rb +100 -0
- data/lib/pack/indexer.rb +200 -0
- data/lib/pack/numbers.rb +79 -0
- data/lib/pack/reader.rb +98 -0
- data/lib/pack/stream.rb +80 -0
- data/lib/pack/unpacker.rb +62 -0
- data/lib/pack/window.rb +47 -0
- data/lib/pack/writer.rb +92 -0
- data/lib/pack/xdelta.rb +118 -0
- data/lib/pager.rb +24 -0
- data/lib/progress.rb +78 -0
- data/lib/refs.rb +260 -0
- data/lib/remotes.rb +82 -0
- data/lib/remotes/protocol.rb +82 -0
- data/lib/remotes/refspec.rb +70 -0
- data/lib/remotes/remote.rb +57 -0
- data/lib/repository.rb +64 -0
- data/lib/repository/divergence.rb +21 -0
- data/lib/repository/hard_reset.rb +35 -0
- data/lib/repository/inspector.rb +49 -0
- data/lib/repository/migration.rb +168 -0
- data/lib/repository/pending_commit.rb +60 -0
- data/lib/repository/sequencer.rb +118 -0
- data/lib/repository/status.rb +98 -0
- data/lib/rev_list.rb +244 -0
- data/lib/revision.rb +155 -0
- data/lib/sorted_hash.rb +17 -0
- data/lib/temp_file.rb +34 -0
- data/lib/workspace.rb +107 -0
- metadata +103 -9
data/lib/index.rb
ADDED
@@ -0,0 +1,212 @@
require "set"

require_relative "./index/checksum"
require_relative "./index/entry"
require_relative "./lockfile"

class Index
  Invalid = Class.new(StandardError)

  HEADER_SIZE = 12
  HEADER_FORMAT = "a4N2"
  SIGNATURE = "DIRC"
  VERSION = 2

  def initialize(pathname)
    @pathname = pathname
    @lockfile = Lockfile.new(pathname)
    clear
  end

  def clear!
    clear
    @changed = true
  end

  def load_for_update
    @lockfile.hold_for_update
    load
  end

  def load
    clear
    file = open_index_file

    if file
      reader = Checksum.new(file)
      count = read_header(reader)
      read_entries(reader, count)
      reader.verify_checksum
    end
  ensure
    file&.close
  end

  def write_updates
    return @lockfile.rollback unless @changed

    writer = Checksum.new(@lockfile)

    header = [SIGNATURE, VERSION, @entries.size].pack(HEADER_FORMAT)
    writer.write(header)
    each_entry { |entry| writer.write(entry.to_s) }

    writer.write_checksum
    @lockfile.commit

    @changed = false
  end

  def release_lock
    @lockfile.rollback
  end

  def add(pathname, oid, stat)
    (1..3).each { |stage| remove_entry_with_stage(pathname, stage) }

    entry = Entry.create(pathname, oid, stat)
    discard_conflicts(entry)
    store_entry(entry)
    @changed = true
  end

  def add_from_db(pathname, item)
    store_entry(Entry.create_from_db(pathname, item, 0))
    @changed = true
  end

  def add_conflict_set(pathname, items)
    remove_entry_with_stage(pathname, 0)

    items.each_with_index do |item, n|
      next unless item
      entry = Entry.create_from_db(pathname, item, n + 1)
      store_entry(entry)
    end
    @changed = true
  end

  def update_entry_stat(entry, stat)
    entry.update_stat(stat)
    @changed = true
  end

  def remove(pathname)
    @parents[pathname.to_s].each { |child| remove_entry(child) }
    remove_entry(pathname)
    @changed = true
  end

  def each_entry
    if block_given?
      @keys.each { |key| yield @entries[key] }
    else
      enum_for(:each_entry)
    end
  end

  def conflict?
    @entries.any? { |key, entry| entry.stage > 0 }
  end

  def conflict_paths
    paths = Set.new
    each_entry { |entry| paths.add(entry.path) unless entry.stage == 0 }
    paths
  end

  def entry_for_path(path, stage = 0)
    @entries[[path.to_s, stage]]
  end

  def child_paths(path)
    @parents[path.to_s].to_a
  end

  def tracked_file?(path)
    (0..3).any? { |stage| @entries.has_key?([path.to_s, stage]) }
  end

  def tracked_directory?(path)
    @parents.has_key?(path.to_s)
  end

  def tracked?(path)
    tracked_file?(path) or tracked_directory?(path)
  end

  private

  def clear
    @entries = {}
    @keys = SortedSet.new
    @parents = Hash.new { |hash, key| hash[key] = Set.new }
    @changed = false
  end

  def discard_conflicts(entry)
    entry.parent_directories.each { |parent| remove_entry(parent) }

    set = @parents.fetch(entry.path, [])
    set.each { |child| remove_entry(child) }
  end

  def remove_entry(pathname)
    (0..3).each { |stage| remove_entry_with_stage(pathname, stage) }
  end

  def remove_entry_with_stage(pathname, stage)
    entry = @entries[[pathname.to_s, stage]]
    return unless entry

    @keys.delete(entry.key)
    @entries.delete(entry.key)

    entry.parent_directories.each do |dirname|
      dir = dirname.to_s
      @parents[dir].delete(entry.path)
      @parents.delete(dir) if @parents[dir].empty?
    end
  end

  def store_entry(entry)
    @keys.add(entry.key)
    @entries[entry.key] = entry

    entry.parent_directories.each do |dirname|
      @parents[dirname.to_s].add(entry.path)
    end
  end

  def open_index_file
    File.open(@pathname, File::RDONLY)
  rescue Errno::ENOENT
    nil
  end

  def read_header(reader)
    data = reader.read(HEADER_SIZE)
    signature, version, count = data.unpack(HEADER_FORMAT)

    unless signature == SIGNATURE
      raise Invalid, "Signature: expected '#{ SIGNATURE }' but found '#{ signature }'"
    end
    unless version == VERSION
      raise Invalid, "Version: expected '#{ VERSION }' but found '#{ version }'"
    end

    count
  end

  def read_entries(reader, count)
    count.times do
      entry = reader.read(ENTRY_MIN_SIZE)

      until entry.byteslice(-1) == "\0"
        entry.concat(reader.read(ENTRY_BLOCK))
      end

      store_entry(Entry.parse(entry))
    end
  end
end
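The class above is driven as a hold-lock / mutate / write cycle. A minimal usage sketch (not part of the gem), assuming its lib directory is on the load path, a Ruby where SortedSet is available (Index#clear uses it), and using a temporary directory plus a placeholder object id:

require "pathname"
require "tmpdir"
require_relative "lib/index"   # load path is an assumption for this sketch

Dir.mktmpdir do |tmp|
  dir  = Pathname.new(tmp)
  file = dir.join("hello.txt")
  file.write("hello\n")

  index = Index.new(dir.join("index"))
  index.load_for_update                 # takes index.lock and reads existing entries (none yet)

  oid = "a" * 40                        # placeholder 40-char hex object id; the index does not verify it
  index.add(file, oid, file.stat)
  index.write_updates                   # header + entries + SHA-1 trailer, then the lock is renamed into place

  index.tracked?(file)                  # => true
end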
data/lib/index/checksum.rb
ADDED
@@ -0,0 +1,44 @@
require "digest/sha1"

class Index
  class Checksum

    EndOfFile = Class.new(StandardError)

    CHECKSUM_SIZE = 20

    def initialize(file)
      @file = file
      @digest = Digest::SHA1.new
    end

    def write(data)
      @file.write(data)
      @digest.update(data)
    end

    def write_checksum
      @file.write(@digest.digest)
    end

    def read(size)
      data = @file.read(size)

      unless data.bytesize == size
        raise EndOfFile, "Unexpected end-of-file while reading index"
      end

      @digest.update(data)
      data
    end

    def verify_checksum
      sum = @file.read(CHECKSUM_SIZE)

      unless sum == @digest.digest
        raise Invalid, "Checksum does not match value stored on disk"
      end
    end

  end
end
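Checksum wraps an IO and feeds every byte read or written into a SHA-1 digest, so the index can end with a 20-byte trailer. A small self-contained sketch against a StringIO (an assumption for illustration; the gem wraps real index files and lockfiles):

require "stringio"
require_relative "lib/index/checksum"   # load path is an assumption for this sketch

io     = StringIO.new
writer = Index::Checksum.new(io)
writer.write("DIRC")            # every byte written is also fed to the digest
writer.write_checksum           # appends the 20-byte SHA-1 of everything written so far

io.rewind
reader = Index::Checksum.new(io)
reader.read(4)                  # => "DIRC"; a short read raises EndOfFile
reader.verify_checksum          # raises Index::Invalid if the trailer does not match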
data/lib/index/entry.rb
ADDED
@@ -0,0 +1,91 @@
require "pathname"

class Index
  ENTRY_FORMAT = "N10H40nZ*"
  ENTRY_BLOCK = 8
  ENTRY_MIN_SIZE = 64

  REGULAR_MODE = 0100644
  EXECUTABLE_MODE = 0100755
  MAX_PATH_SIZE = 0xfff

  entry_fields = [
    :ctime, :ctime_nsec,
    :mtime, :mtime_nsec,
    :dev, :ino, :mode, :uid, :gid, :size,
    :oid, :flags, :path
  ]

  Entry = Struct.new(*entry_fields) do
    def self.create(pathname, oid, stat)
      path = pathname.to_s
      mode = mode_for_stat(stat)
      flags = [path.bytesize, MAX_PATH_SIZE].min

      Entry.new(
        stat.ctime.to_i, stat.ctime.nsec,
        stat.mtime.to_i, stat.mtime.nsec,
        stat.dev, stat.ino, mode, stat.uid, stat.gid, stat.size,
        oid, flags, path)
    end

    def self.create_from_db(pathname, item, n)
      path = pathname.to_s
      flags = (n << 12) | [path.bytesize, MAX_PATH_SIZE].min

      Entry.new(0, 0, 0, 0, 0, 0, item.mode, 0, 0, 0, item.oid, flags, path)
    end

    def self.mode_for_stat(stat)
      stat.executable? ? EXECUTABLE_MODE : REGULAR_MODE
    end

    def self.parse(data)
      Entry.new(*data.unpack(ENTRY_FORMAT))
    end

    def key
      [path, stage]
    end

    def stage
      (flags >> 12) & 0x3
    end

    def parent_directories
      Pathname.new(path).descend.to_a[0..-2]
    end

    def basename
      Pathname.new(path).basename
    end

    def update_stat(stat)
      self.ctime = stat.ctime.to_i
      self.ctime_nsec = stat.ctime.nsec
      self.mtime = stat.mtime.to_i
      self.mtime_nsec = stat.mtime.nsec
      self.dev = stat.dev
      self.ino = stat.ino
      self.mode = Entry.mode_for_stat(stat)
      self.uid = stat.uid
      self.gid = stat.gid
      self.size = stat.size
    end

    def stat_match?(stat)
      mode == Entry.mode_for_stat(stat) and (size == 0 or size == stat.size)
    end

    def times_match?(stat)
      ctime == stat.ctime.to_i and ctime_nsec == stat.ctime.nsec and
        mtime == stat.mtime.to_i and mtime_nsec == stat.mtime.nsec
    end

    def to_s
      string = to_a.pack(ENTRY_FORMAT)
      string.concat("\0") until string.bytesize % ENTRY_BLOCK == 0
      string
    end
  end
end
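Each entry serialises with pack("N10H40nZ*") and is NUL-padded to an 8-byte boundary, so Entry.parse can read it straight back. A round-trip sketch, assuming index/entry.rb is loadable and using an arbitrary file and a placeholder object id:

require "pathname"
require_relative "lib/index/entry"   # load path is an assumption for this sketch

stat  = File.stat(__FILE__)          # any regular file will do
entry = Index::Entry.create(Pathname.new("docs/readme.md"), "b" * 40, stat)

data = entry.to_s                    # packed fields, NUL-padded to an 8-byte boundary
data.bytesize % Index::ENTRY_BLOCK   # => 0
Index::Entry.parse(data).path        # => "docs/readme.md"
Index::Entry.parse(data).stage       # => 0 (stages 1-3 are reserved for conflict entries)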
data/lib/lockfile.rb
ADDED
@@ -0,0 +1,55 @@
class Lockfile
  LockDenied = Class.new(StandardError)
  MissingParent = Class.new(StandardError)
  NoPermission = Class.new(StandardError)
  StaleLock = Class.new(StandardError)

  def initialize(path)
    @file_path = path
    @lock_path = path.sub_ext(".lock")

    @lock = nil
  end

  def hold_for_update
    unless @lock
      flags = File::RDWR | File::CREAT | File::EXCL
      @lock = File.open(@lock_path, flags)
    end
  rescue Errno::EEXIST
    raise LockDenied, "Unable to create '#{ @lock_path }': File exists."
  rescue Errno::ENOENT => error
    raise MissingParent, error.message
  rescue Errno::EACCES => error
    raise NoPermission, error.message
  end

  def write(string)
    raise_on_stale_lock
    @lock.write(string)
  end

  def commit
    raise_on_stale_lock

    @lock.close
    File.rename(@lock_path, @file_path)
    @lock = nil
  end

  def rollback
    raise_on_stale_lock

    @lock.close
    File.unlink(@lock_path)
    @lock = nil
  end

  private

  def raise_on_stale_lock
    unless @lock
      raise StaleLock, "Not holding lock on file: #{ @lock_path }"
    end
  end
end
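The lock protocol is: create path.lock with O_CREAT|O_EXCL, write the new content there, then rename it over the target so readers never see a partial file. A sketch against a temporary directory, assuming lockfile.rb is on the load path (the file name and ref content are arbitrary):

require "pathname"
require "tmpdir"
require_relative "lib/lockfile"   # load path is an assumption for this sketch

Dir.mktmpdir do |dir|
  target = Pathname.new(dir).join("HEAD")

  lockfile = Lockfile.new(target)
  lockfile.hold_for_update               # creates HEAD.lock; raises LockDenied if it already exists
  lockfile.write("ref: refs/heads/main\n")
  lockfile.commit                        # atomically renames HEAD.lock over HEAD

  target.read                            # => "ref: refs/heads/main\n"
end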
data/lib/merge/bases.rb
ADDED
@@ -0,0 +1,38 @@
require "set"
require_relative "./common_ancestors"

module Merge
  class Bases

    def initialize(database, one, two)
      @database = database
      @common = CommonAncestors.new(@database, one, [two])
    end

    def find
      @commits = @common.find
      return @commits if @commits.size <= 1

      @redundant = Set.new
      @commits.each { |commit| filter_commit(commit) }
      @commits - @redundant.to_a
    end

    private

    def filter_commit(commit)
      return if @redundant.include?(commit)

      others = @commits - [commit, *@redundant]
      common = CommonAncestors.new(@database, commit, others)

      common.find

      @redundant.add(commit) if common.marked?(commit, :parent2)

      others.select! { |oid| common.marked?(oid, :parent1) }
      @redundant.merge(others)
    end

  end
end
data/lib/merge/common_ancestors.rb
ADDED
@@ -0,0 +1,77 @@
require "set"

module Merge
  class CommonAncestors

    BOTH_PARENTS = Set.new([:parent1, :parent2])

    def initialize(database, one, twos)
      @database = database
      @flags = Hash.new { |hash, oid| hash[oid] = Set.new }
      @queue = []
      @results = []

      insert_by_date(@queue, @database.load(one))
      @flags[one].add(:parent1)

      twos.each do |two|
        insert_by_date(@queue, @database.load(two))
        @flags[two].add(:parent2)
      end
    end

    def find
      process_queue until all_stale?
      @results.map(&:oid).reject { |oid| marked?(oid, :stale) }
    end

    def marked?(oid, flag)
      @flags[oid].include?(flag)
    end

    def counts
      ones, twos = 0, 0

      @flags.each do |oid, flags|
        next unless flags.size == 1
        ones += 1 if flags.include?(:parent1)
        twos += 1 if flags.include?(:parent2)
      end

      [ones, twos]
    end

    private

    def all_stale?
      @queue.all? { |commit| marked?(commit.oid, :stale) }
    end

    def process_queue
      commit = @queue.shift
      flags = @flags[commit.oid]

      if flags == BOTH_PARENTS
        flags.add(:result)
        insert_by_date(@results, commit)
        add_parents(commit, flags + [:stale])
      else
        add_parents(commit, flags)
      end
    end

    def add_parents(commit, flags)
      commit.parents.each do |parent|
        next if @flags[parent].superset?(flags)

        @flags[parent].merge(flags)
        insert_by_date(@queue, @database.load(parent))
      end
    end

    def insert_by_date(list, commit)
      index = list.find_index { |c| c.date < commit.date }
      list.insert(index || list.size, commit)
    end
  end
end
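CommonAncestors walks both histories newest-first, marking each commit with the side or sides it is reachable from, and Bases filters its output down to the best common ancestors. A self-contained sketch using a stub in-memory database (an assumption standing in for the gem's Database in data/lib/database.rb; the traversal only needs load to return objects with oid, parents and date):

require_relative "lib/merge/bases"   # load path is an assumption; this also pulls in common_ancestors

# Stub commit and database types used only for this sketch.
StubCommit   = Struct.new(:oid, :parents, :date)
StubDatabase = Struct.new(:commits) do
  def load(oid)
    commits.fetch(oid)
  end
end

base    = Time.now
history = {
  "root" => StubCommit.new("root", [],       base),
  "a"    => StubCommit.new("a",    ["root"], base + 60),    # tip of one branch
  "b"    => StubCommit.new("b",    ["root"], base + 120)    # tip of the other branch
}
db = StubDatabase.new(history)

ancestors = Merge::CommonAncestors.new(db, "a", ["b"])
ancestors.find                        # => ["root"]
ancestors.counts                      # => [1, 1]  commits reachable only from "a" / only from "b"
Merge::Bases.new(db, "a", "b").find   # => ["root"]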