jit 0.0.0 → 1.0.0
- checksums.yaml +5 -5
- data/LICENSE.txt +674 -0
- data/bin/jit +21 -0
- data/lib/color.rb +32 -0
- data/lib/command.rb +62 -0
- data/lib/command/add.rb +65 -0
- data/lib/command/base.rb +92 -0
- data/lib/command/branch.rb +199 -0
- data/lib/command/checkout.rb +104 -0
- data/lib/command/cherry_pick.rb +51 -0
- data/lib/command/commit.rb +86 -0
- data/lib/command/config.rb +126 -0
- data/lib/command/diff.rb +114 -0
- data/lib/command/fetch.rb +116 -0
- data/lib/command/init.rb +41 -0
- data/lib/command/log.rb +188 -0
- data/lib/command/merge.rb +148 -0
- data/lib/command/push.rb +172 -0
- data/lib/command/receive_pack.rb +92 -0
- data/lib/command/remote.rb +55 -0
- data/lib/command/reset.rb +64 -0
- data/lib/command/rev_list.rb +33 -0
- data/lib/command/revert.rb +69 -0
- data/lib/command/rm.rb +105 -0
- data/lib/command/shared/fast_forward.rb +19 -0
- data/lib/command/shared/print_diff.rb +116 -0
- data/lib/command/shared/receive_objects.rb +37 -0
- data/lib/command/shared/remote_agent.rb +44 -0
- data/lib/command/shared/remote_client.rb +82 -0
- data/lib/command/shared/send_objects.rb +24 -0
- data/lib/command/shared/sequencing.rb +146 -0
- data/lib/command/shared/write_commit.rb +167 -0
- data/lib/command/status.rb +210 -0
- data/lib/command/upload_pack.rb +54 -0
- data/lib/config.rb +240 -0
- data/lib/config/stack.rb +42 -0
- data/lib/database.rb +112 -0
- data/lib/database/author.rb +27 -0
- data/lib/database/backends.rb +57 -0
- data/lib/database/blob.rb +24 -0
- data/lib/database/commit.rb +70 -0
- data/lib/database/entry.rb +7 -0
- data/lib/database/loose.rb +70 -0
- data/lib/database/packed.rb +75 -0
- data/lib/database/tree.rb +77 -0
- data/lib/database/tree_diff.rb +88 -0
- data/lib/diff.rb +46 -0
- data/lib/diff/combined.rb +72 -0
- data/lib/diff/hunk.rb +64 -0
- data/lib/diff/myers.rb +90 -0
- data/lib/editor.rb +59 -0
- data/lib/index.rb +212 -0
- data/lib/index/checksum.rb +44 -0
- data/lib/index/entry.rb +91 -0
- data/lib/lockfile.rb +55 -0
- data/lib/merge/bases.rb +38 -0
- data/lib/merge/common_ancestors.rb +77 -0
- data/lib/merge/diff3.rb +156 -0
- data/lib/merge/inputs.rb +42 -0
- data/lib/merge/resolve.rb +178 -0
- data/lib/pack.rb +45 -0
- data/lib/pack/compressor.rb +83 -0
- data/lib/pack/delta.rb +58 -0
- data/lib/pack/entry.rb +54 -0
- data/lib/pack/expander.rb +54 -0
- data/lib/pack/index.rb +100 -0
- data/lib/pack/indexer.rb +200 -0
- data/lib/pack/numbers.rb +79 -0
- data/lib/pack/reader.rb +98 -0
- data/lib/pack/stream.rb +80 -0
- data/lib/pack/unpacker.rb +62 -0
- data/lib/pack/window.rb +47 -0
- data/lib/pack/writer.rb +92 -0
- data/lib/pack/xdelta.rb +118 -0
- data/lib/pager.rb +24 -0
- data/lib/progress.rb +78 -0
- data/lib/refs.rb +260 -0
- data/lib/remotes.rb +82 -0
- data/lib/remotes/protocol.rb +82 -0
- data/lib/remotes/refspec.rb +70 -0
- data/lib/remotes/remote.rb +57 -0
- data/lib/repository.rb +64 -0
- data/lib/repository/divergence.rb +21 -0
- data/lib/repository/hard_reset.rb +35 -0
- data/lib/repository/inspector.rb +49 -0
- data/lib/repository/migration.rb +168 -0
- data/lib/repository/pending_commit.rb +60 -0
- data/lib/repository/sequencer.rb +118 -0
- data/lib/repository/status.rb +98 -0
- data/lib/rev_list.rb +244 -0
- data/lib/revision.rb +155 -0
- data/lib/sorted_hash.rb +17 -0
- data/lib/temp_file.rb +34 -0
- data/lib/workspace.rb +107 -0
- metadata +103 -9
data/lib/pack/delta.rb
ADDED
@@ -0,0 +1,58 @@
require "forwardable"

require_relative "./numbers"
require_relative "./xdelta"

module Pack
  class Delta

    Copy = Struct.new(:offset, :size) do
      def self.parse(input, byte)
        value  = Numbers::PackedInt56LE.read(input, byte)
        offset = value & 0xffffffff
        size   = value >> 32

        Copy.new(offset, size)
      end

      def to_s
        bytes = Numbers::PackedInt56LE.write((size << 32) | offset)
        bytes[0] |= 0x80
        bytes.pack("C*")
      end
    end

    Insert = Struct.new(:data) do
      def self.parse(input, byte)
        Insert.new(input.read(byte))
      end

      def to_s
        [data.bytesize, data].pack("Ca*")
      end
    end

    extend Forwardable
    def_delegator :@data, :bytesize, :size

    attr_reader :base, :data

    def initialize(source, target)
      @base = source.entry
      @data = sizeof(source) + sizeof(target)

      source.delta_index ||= XDelta.create_index(source.data)

      delta = source.delta_index.compress(target.data)
      delta.each { |op| @data.concat(op.to_s) }
    end

    private

    def sizeof(entry)
      bytes = Numbers::VarIntLE.write(entry.size, 7)
      bytes.pack("C*")
    end

  end
end
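For reference, a minimal standalone sketch (not part of the gem) of the value layout Copy relies on above: a copy instruction carries a single integer whose low 32 bits are the offset into the base object and whose upper bits are the number of bytes to copy, mirroring Copy.parse and Copy#to_s.

    # Standalone sketch: offset in the low 32 bits, size above them.
    def pack_copy(offset, size)
      (size << 32) | offset
    end

    def unpack_copy(value)
      [value & 0xffffffff, value >> 32]
    end

    pack_copy(1234, 56)         # => 240518169810
    unpack_copy(240518169810)   # => [1234, 56]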
data/lib/pack/entry.rb
ADDED
@@ -0,0 +1,54 @@
require "forwardable"
require_relative "./numbers"

module Pack
  class Entry

    extend Forwardable
    def_delegators :@info, :type, :size

    attr_accessor :offset
    attr_reader :oid, :delta, :depth

    def initialize(oid, info, path, ofs = false)
      @oid   = oid
      @info  = info
      @path  = path
      @ofs   = ofs
      @delta = nil
      @depth = 0
    end

    def sort_key
      [packed_type, @path&.basename, @path&.dirname, @info.size]
    end

    def assign_delta(delta)
      @delta = delta
      @depth = delta.base.depth + 1
    end

    def packed_type
      if @delta
        @ofs ? OFS_DELTA : REF_DELTA
      else
        TYPE_CODES.fetch(@info.type)
      end
    end

    def packed_size
      @delta ? @delta.size : @info.size
    end

    def delta_prefix
      return "" unless @delta

      if @ofs
        Numbers::VarIntBE.write(offset - @delta.base.offset)
      else
        [@delta.base.oid].pack("H40")
      end
    end

  end
end
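A short standalone illustration (hypothetical values, not the gem's API) of the two ways delta_prefix above can name a delta's base: a REF_DELTA stores the base's 20-byte raw object ID, while an OFS_DELTA stores how far back in the pack the base entry sits.

    base_oid = "a94a8fe5ccb19ba61c4c0873d391e987982fbbd3"

    ref_prefix = [base_oid].pack("H40")   # 20 raw bytes naming the base object
    ref_prefix.bytesize                   # => 20

    ofs_distance = 1500 - 320             # this entry's offset minus the base's offset
    ofs_distance                          # => 1180 (varint-encoded in the real pack)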
data/lib/pack/expander.rb
ADDED
@@ -0,0 +1,54 @@
require "stringio"

require_relative "./delta"
require_relative "./numbers"

module Pack
  class Expander

    attr_reader :source_size, :target_size

    def self.expand(source, delta)
      new(delta).expand(source)
    end

    def initialize(delta)
      @delta = StringIO.new(delta)

      @source_size = read_size
      @target_size = read_size
    end

    def expand(source)
      check_size(source, @source_size)
      target = ""

      until @delta.eof?
        byte = @delta.readbyte

        if byte < 0x80
          insert = Delta::Insert.parse(@delta, byte)
          target.concat(insert.data)
        else
          copy = Delta::Copy.parse(@delta, byte)
          size = (copy.size == 0) ? GIT_MAX_COPY : copy.size
          target.concat(source.byteslice(copy.offset, size))
        end
      end

      check_size(target, @target_size)
      target
    end

    private

    def read_size
      Numbers::VarIntLE.read(@delta, 7)[1]
    end

    def check_size(buffer, size)
      raise "failed to apply delta" unless buffer.bytesize == size
    end

  end
end
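The loop in Expander#expand above interleaves two kinds of instructions; the following standalone sketch (simplified, not the gem's API) shows the effect of applying such a stream to a base string.

    def apply_delta_sketch(source, ops)
      target = +""
      ops.each do |kind, a, b|
        case kind
        when :copy   then target << source.byteslice(a, b)  # reuse bytes from the base
        when :insert then target << a                        # literal bytes from the delta
        end
      end
      target
    end

    source = "the quick brown fox"
    ops    = [[:copy, 0, 10], [:insert, "red "], [:copy, 16, 3]]
    apply_delta_sketch(source, ops)   # => "the quick red fox"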
data/lib/pack/index.rb
ADDED
@@ -0,0 +1,100 @@
module Pack
  class Index

    HEADER_SIZE = 8
    FANOUT_SIZE = 1024

    OID_LAYER = 2
    CRC_LAYER = 3
    OFS_LAYER = 4
    EXT_LAYER = 5

    SIZES = {
      OID_LAYER => 20,
      CRC_LAYER => 4,
      OFS_LAYER => 4,
      EXT_LAYER => 8
    }

    def initialize(input)
      @input = input
      load_fanout_table
    end

    def oid_offset(oid)
      pos = oid_position(oid)
      return nil if pos < 0

      offset = read_int32(OFS_LAYER, pos)

      return offset if offset < IDX_MAX_OFFSET

      pos = offset & (IDX_MAX_OFFSET - 1)
      @input.seek(offset_for(EXT_LAYER, pos))
      @input.read(8).unpack("Q>").first
    end

    def prefix_match(name)
      pos = oid_position(name)
      return [name] unless pos < 0

      @input.seek(offset_for(OID_LAYER, -1 - pos))
      oids = []

      loop do
        oid = @input.read(20).unpack("H40").first
        return oids unless oid.start_with?(name)
        oids << oid
      end
    end

    private

    def load_fanout_table
      @input.seek(HEADER_SIZE)
      @fanout = @input.read(FANOUT_SIZE).unpack("N256")
    end

    def oid_position(oid)
      prefix = oid[0..1].to_i(16)
      packed = [oid].pack("H40")

      low  = (prefix == 0) ? 0 : @fanout[prefix - 1]
      high = @fanout[prefix] - 1

      binary_search(packed, low, high)
    end

    def read_int32(layer, pos)
      @input.seek(offset_for(layer, pos))
      @input.read(4).unpack("N").first
    end

    def offset_for(layer, pos)
      offset = HEADER_SIZE + FANOUT_SIZE
      count  = @fanout.last

      SIZES.each { |n, size| offset += size * count if n < layer }

      offset + pos * SIZES[layer]
    end

    def binary_search(target, low, high)
      while low <= high
        mid = (low + high) / 2

        @input.seek(offset_for(OID_LAYER, mid))
        oid = @input.read(20)

        case oid <=> target
        when -1 then low  = mid + 1
        when  0 then return mid
        when  1 then high = mid - 1
        end
      end

      -1 - low
    end

  end
end
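A standalone sketch (made-up object IDs, not the gem's API) of the fanout lookup oid_position relies on above: fanout[i] counts how many object IDs have a first byte less than or equal to i, so the IDs sharing the query's first byte occupy a contiguous slice of the sorted ID table, which bounds the binary search.

    sorted_oids = %w[00aa 01bb 01cc 7f00 ff42].map { |s| s.ljust(40, "0") }

    fanout = Array.new(256, 0)
    sorted_oids.each { |oid| fanout[oid[0..1].to_i(16)] += 1 }
    (1..255).each { |i| fanout[i] += fanout[i - 1] }   # make the counts cumulative

    prefix = 0x01
    low    = prefix.zero? ? 0 : fanout[prefix - 1]
    high   = fanout[prefix] - 1
    sorted_oids[low..high]   # => the two IDs beginning with "01"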
data/lib/pack/indexer.rb
ADDED
@@ -0,0 +1,200 @@
require "digest/sha1"
require "zlib"

require_relative "./expander"
require_relative "./reader"
require_relative "../temp_file"

module Pack
  class Indexer

    class PackFile
      attr_reader :digest

      def initialize(pack_dir, name)
        @file   = TempFile.new(pack_dir, name)
        @digest = Digest::SHA1.new
      end

      def write(data)
        @file.write(data)
        @digest.update(data)
      end

      def move(name)
        @file.write(@digest.digest)
        @file.move(name)
      end
    end

    def initialize(database, reader, stream, progress)
      @database = database
      @reader   = reader
      @stream   = stream
      @progress = progress

      @index   = {}
      @pending = Hash.new { |hash, oid| hash[oid] = [] }

      @pack_file  = PackFile.new(@database.pack_path, "tmp_pack")
      @index_file = PackFile.new(@database.pack_path, "tmp_idx")
    end

    def process_pack
      write_header
      write_objects
      write_checksum

      resolve_deltas
      write_index
    end

    private

    def write_header
      header = [SIGNATURE, VERSION, @reader.count].pack(HEADER_FORMAT)
      @pack_file.write(header)
    end

    def write_objects
      @progress&.start("Receiving objects", @reader.count)

      @reader.count.times do
        index_object
        @progress&.tick(@stream.offset)
      end
      @progress&.stop
    end

    def index_object
      offset = @stream.offset
      record, data = @stream.capture { @reader.read_record }
      crc32 = Zlib.crc32(data)

      @pack_file.write(data)

      case record
      when Record
        oid = @database.hash_object(record)
        @index[oid] = [offset, crc32]
      when OfsDelta
        @pending[offset - record.base_ofs].push([offset, crc32])
      when RefDelta
        @pending[record.base_oid].push([offset, crc32])
      end
    end

    def write_checksum
      @stream.verify_checksum

      filename = "pack-#{ @pack_file.digest.hexdigest }.pack"
      @pack_file.move(filename)

      path = @database.pack_path.join(filename)
      @pack = File.open(path, File::RDONLY)
      @reader = Reader.new(@pack)
    end

    def read_record_at(offset)
      @pack.seek(offset)
      @reader.read_record
    end

    def resolve_deltas
      deltas = @pending.reduce(0) { |n, (_, list)| n + list.size }
      @progress&.start("Resolving deltas", deltas)

      @index.to_a.each do |oid, (offset, _)|
        record = read_record_at(offset)

        resolve_delta_base(record, offset)
        resolve_delta_base(record, oid)
      end
      @progress&.stop
    end

    def resolve_delta_base(record, key)
      pending = @pending.delete(key)
      return unless pending

      pending.each do |offset, crc32|
        resolve_pending(record, offset, crc32)
      end
    end

    def resolve_pending(record, offset, crc32)
      delta  = read_record_at(offset)
      data   = Expander.expand(record.data, delta.delta_data)
      object = Record.new(record.type, data)
      oid    = @database.hash_object(object)

      @index[oid] = [offset, crc32]
      @progress&.tick

      resolve_delta_base(object, offset)
      resolve_delta_base(object, oid)
    end

    def write_index
      @object_ids = @index.keys.sort

      write_object_table
      write_crc32
      write_offsets
      write_index_checksum
    end

    def write_object_table
      header = [IDX_SIGNATURE, VERSION].pack("N2")
      @index_file.write(header)

      counts = Array.new(256, 0)
      total  = 0

      @object_ids.each { |oid| counts[oid[0..1].to_i(16)] += 1 }

      counts.each do |count|
        total += count
        @index_file.write([total].pack("N"))
      end

      @object_ids.each do |oid|
        @index_file.write([oid].pack("H40"))
      end
    end

    def write_crc32
      @object_ids.each do |oid|
        crc32 = @index[oid].last
        @index_file.write([crc32].pack("N"))
      end
    end

    def write_offsets
      large_offsets = []

      @object_ids.each do |oid|
        offset = @index[oid].first

        unless offset < IDX_MAX_OFFSET
          large_offsets.push(offset)
          offset = IDX_MAX_OFFSET | (large_offsets.size - 1)
        end
        @index_file.write([offset].pack("N"))
      end

      large_offsets.each do |offset|
        @index_file.write([offset].pack("Q>"))
      end
    end

    def write_index_checksum
      pack_digest = @pack_file.digest
      @index_file.write(pack_digest.digest)

      filename = "pack-#{ pack_digest.hexdigest }.idx"
      @index_file.move(filename)
    end

  end
end
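A standalone sketch of the large-offset scheme write_offsets uses above, assuming the threshold is 2**31 as in Git's v2 index format (the constant is renamed here so it does not shadow the gem's Pack::IDX_MAX_OFFSET): offsets that fit in 31 bits are written directly into the 32-bit table, larger ones store a flagged index into an extra 64-bit table, which is what oid_offset in index.rb unwinds.

    MAX_OFFSET_SKETCH = 0x80000000

    def encode_offset(offset, large_offsets)
      return offset if offset < MAX_OFFSET_SKETCH
      large_offsets.push(offset)
      MAX_OFFSET_SKETCH | (large_offsets.size - 1)   # flag bit + index into the 64-bit table
    end

    large = []
    encode_offset(4096, large)       # => 4096 (stored as-is)
    encode_offset(3 * 2**31, large)  # => 2147483648 (0x80000000, points at large[0])
    large                            # => [6442450944]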