amp 0.5.2 → 0.5.3
- data/.gitignore +12 -0
- data/.hgignore +3 -0
- data/AUTHORS +1 -1
- data/Manifest.txt +99 -38
- data/README.md +3 -3
- data/Rakefile +53 -18
- data/SCHEDULE.markdown +5 -1
- data/TODO.markdown +120 -149
- data/ampfile.rb +3 -1
- data/bin/amp +4 -1
- data/ext/amp/bz2/extconf.rb +1 -1
- data/ext/amp/mercurial_patch/extconf.rb +1 -1
- data/ext/amp/mercurial_patch/mpatch.c +4 -3
- data/ext/amp/priority_queue/extconf.rb +1 -1
- data/ext/amp/support/extconf.rb +1 -1
- data/ext/amp/support/support.c +1 -1
- data/lib/amp.rb +125 -67
- data/lib/amp/commands/command.rb +12 -10
- data/lib/amp/commands/command_support.rb +8 -1
- data/lib/amp/commands/commands/help.rb +2 -20
- data/lib/amp/commands/commands/init.rb +14 -2
- data/lib/amp/commands/commands/templates.rb +6 -4
- data/lib/amp/commands/commands/version.rb +15 -1
- data/lib/amp/commands/commands/workflow.rb +3 -3
- data/lib/amp/commands/commands/workflows/git/add.rb +3 -3
- data/lib/amp/commands/commands/workflows/git/copy.rb +1 -1
- data/lib/amp/commands/commands/workflows/git/rm.rb +4 -2
- data/lib/amp/commands/commands/workflows/hg/add.rb +1 -1
- data/lib/amp/commands/commands/workflows/hg/addremove.rb +2 -2
- data/lib/amp/commands/commands/workflows/hg/annotate.rb +8 -2
- data/lib/amp/commands/commands/workflows/hg/bisect.rb +253 -0
- data/lib/amp/commands/commands/workflows/hg/branch.rb +1 -1
- data/lib/amp/commands/commands/workflows/hg/branches.rb +3 -3
- data/lib/amp/commands/commands/workflows/hg/bundle.rb +3 -3
- data/lib/amp/commands/commands/workflows/hg/clone.rb +4 -5
- data/lib/amp/commands/commands/workflows/hg/commit.rb +37 -1
- data/lib/amp/commands/commands/workflows/hg/copy.rb +2 -1
- data/lib/amp/commands/commands/workflows/hg/debug/index.rb +1 -1
- data/lib/amp/commands/commands/workflows/hg/diff.rb +3 -8
- data/lib/amp/commands/commands/workflows/hg/forget.rb +5 -4
- data/lib/amp/commands/commands/workflows/hg/identify.rb +6 -6
- data/lib/amp/commands/commands/workflows/hg/import.rb +1 -1
- data/lib/amp/commands/commands/workflows/hg/incoming.rb +2 -2
- data/lib/amp/commands/commands/workflows/hg/log.rb +5 -4
- data/lib/amp/commands/commands/workflows/hg/merge.rb +1 -1
- data/lib/amp/commands/commands/workflows/hg/move.rb +5 -3
- data/lib/amp/commands/commands/workflows/hg/outgoing.rb +1 -1
- data/lib/amp/commands/commands/workflows/hg/push.rb +6 -7
- data/lib/amp/commands/commands/workflows/hg/remove.rb +2 -2
- data/lib/amp/commands/commands/workflows/hg/resolve.rb +6 -23
- data/lib/amp/commands/commands/workflows/hg/root.rb +1 -2
- data/lib/amp/commands/commands/workflows/hg/status.rb +21 -12
- data/lib/amp/commands/commands/workflows/hg/tag.rb +2 -2
- data/lib/amp/commands/commands/workflows/hg/untrack.rb +12 -0
- data/lib/amp/commands/commands/workflows/hg/verify.rb +13 -3
- data/lib/amp/commands/commands/workflows/hg/what_changed.rb +18 -0
- data/lib/amp/commands/dispatch.rb +12 -13
- data/lib/amp/dependencies/amp_support.rb +1 -1
- data/lib/amp/dependencies/amp_support/ruby_amp_support.rb +1 -0
- data/lib/amp/dependencies/maruku.rb +136 -0
- data/lib/amp/dependencies/maruku/attributes.rb +227 -0
- data/lib/amp/dependencies/maruku/defaults.rb +71 -0
- data/lib/amp/dependencies/maruku/errors_management.rb +92 -0
- data/lib/amp/dependencies/maruku/helpers.rb +260 -0
- data/lib/amp/dependencies/maruku/input/charsource.rb +326 -0
- data/lib/amp/dependencies/maruku/input/extensions.rb +69 -0
- data/lib/amp/dependencies/maruku/input/html_helper.rb +189 -0
- data/lib/amp/dependencies/maruku/input/linesource.rb +111 -0
- data/lib/amp/dependencies/maruku/input/parse_block.rb +615 -0
- data/lib/amp/dependencies/maruku/input/parse_doc.rb +234 -0
- data/lib/amp/dependencies/maruku/input/parse_span_better.rb +746 -0
- data/lib/amp/dependencies/maruku/input/rubypants.rb +225 -0
- data/lib/amp/dependencies/maruku/input/type_detection.rb +147 -0
- data/lib/amp/dependencies/maruku/input_textile2/t2_parser.rb +163 -0
- data/lib/amp/dependencies/maruku/maruku.rb +33 -0
- data/lib/amp/dependencies/maruku/output/to_ansi.rb +223 -0
- data/lib/amp/dependencies/maruku/output/to_html.rb +991 -0
- data/lib/amp/dependencies/maruku/output/to_markdown.rb +164 -0
- data/lib/amp/dependencies/maruku/output/to_s.rb +56 -0
- data/lib/amp/dependencies/maruku/string_utils.rb +191 -0
- data/lib/amp/dependencies/maruku/structures.rb +167 -0
- data/lib/amp/dependencies/maruku/structures_inspect.rb +87 -0
- data/lib/amp/dependencies/maruku/structures_iterators.rb +61 -0
- data/lib/amp/dependencies/maruku/textile2.rb +1 -0
- data/lib/amp/dependencies/maruku/toc.rb +199 -0
- data/lib/amp/dependencies/maruku/usage/example1.rb +33 -0
- data/lib/amp/dependencies/maruku/version.rb +40 -0
- data/lib/amp/dependencies/priority_queue.rb +2 -1
- data/lib/amp/dependencies/python_config.rb +2 -1
- data/lib/amp/graphs/ancestor.rb +2 -1
- data/lib/amp/graphs/copies.rb +236 -233
- data/lib/amp/help/entries/__default__.erb +31 -0
- data/lib/amp/help/entries/commands.erb +6 -0
- data/lib/amp/help/entries/mdtest.md +35 -0
- data/lib/amp/help/entries/silly +3 -0
- data/lib/amp/help/help.rb +288 -0
- data/lib/amp/profiling_hacks.rb +5 -3
- data/lib/amp/repository/abstract/abstract_changeset.rb +97 -0
- data/lib/amp/repository/abstract/abstract_local_repo.rb +181 -0
- data/lib/amp/repository/abstract/abstract_staging_area.rb +180 -0
- data/lib/amp/repository/abstract/abstract_versioned_file.rb +100 -0
- data/lib/amp/repository/abstract/common_methods/changeset.rb +75 -0
- data/lib/amp/repository/abstract/common_methods/local_repo.rb +277 -0
- data/lib/amp/repository/abstract/common_methods/staging_area.rb +233 -0
- data/lib/amp/repository/abstract/common_methods/versioned_file.rb +71 -0
- data/lib/amp/repository/generic_repo_picker.rb +78 -0
- data/lib/amp/repository/git/repo_format/changeset.rb +336 -0
- data/lib/amp/repository/git/repo_format/staging_area.rb +192 -0
- data/lib/amp/repository/git/repo_format/versioned_file.rb +119 -0
- data/lib/amp/repository/git/repositories/local_repository.rb +164 -0
- data/lib/amp/repository/git/repository.rb +41 -0
- data/lib/amp/repository/mercurial/encoding/mercurial_diff.rb +382 -0
- data/lib/amp/repository/mercurial/encoding/mercurial_patch.rb +1 -0
- data/lib/amp/repository/mercurial/encoding/patch.rb +294 -0
- data/lib/amp/repository/mercurial/encoding/pure_ruby/ruby_mercurial_patch.rb +124 -0
- data/lib/amp/repository/mercurial/merging/merge_ui.rb +327 -0
- data/lib/amp/repository/mercurial/merging/simple_merge.rb +452 -0
- data/lib/amp/repository/mercurial/repo_format/branch_manager.rb +266 -0
- data/lib/amp/repository/mercurial/repo_format/changeset.rb +768 -0
- data/lib/amp/repository/mercurial/repo_format/dir_state.rb +716 -0
- data/lib/amp/repository/mercurial/repo_format/journal.rb +218 -0
- data/lib/amp/repository/mercurial/repo_format/lock.rb +210 -0
- data/lib/amp/repository/mercurial/repo_format/merge_state.rb +228 -0
- data/lib/amp/repository/mercurial/repo_format/staging_area.rb +367 -0
- data/lib/amp/repository/mercurial/repo_format/store.rb +487 -0
- data/lib/amp/repository/mercurial/repo_format/tag_manager.rb +322 -0
- data/lib/amp/repository/mercurial/repo_format/updatable.rb +543 -0
- data/lib/amp/repository/mercurial/repo_format/updater.rb +848 -0
- data/lib/amp/repository/mercurial/repo_format/verification.rb +433 -0
- data/lib/amp/repository/mercurial/repositories/bundle_repository.rb +216 -0
- data/lib/amp/repository/mercurial/repositories/http_repository.rb +386 -0
- data/lib/amp/repository/mercurial/repositories/local_repository.rb +2034 -0
- data/lib/amp/repository/mercurial/repository.rb +119 -0
- data/lib/amp/repository/mercurial/revlogs/bundle_revlogs.rb +249 -0
- data/lib/amp/repository/mercurial/revlogs/changegroup.rb +217 -0
- data/lib/amp/repository/mercurial/revlogs/changelog.rb +339 -0
- data/lib/amp/repository/mercurial/revlogs/file_log.rb +152 -0
- data/lib/amp/repository/mercurial/revlogs/index.rb +500 -0
- data/lib/amp/repository/mercurial/revlogs/manifest.rb +201 -0
- data/lib/amp/repository/mercurial/revlogs/node.rb +20 -0
- data/lib/amp/repository/mercurial/revlogs/revlog.rb +1026 -0
- data/lib/amp/repository/mercurial/revlogs/revlog_support.rb +129 -0
- data/lib/amp/repository/mercurial/revlogs/versioned_file.rb +597 -0
- data/lib/amp/repository/repository.rb +11 -88
- data/lib/amp/server/extension/amp_extension.rb +3 -3
- data/lib/amp/server/fancy_http_server.rb +1 -1
- data/lib/amp/server/fancy_views/_browser.haml +1 -1
- data/lib/amp/server/fancy_views/_diff_file.haml +1 -8
- data/lib/amp/server/fancy_views/changeset.haml +2 -2
- data/lib/amp/server/fancy_views/file.haml +1 -1
- data/lib/amp/server/fancy_views/file_diff.haml +1 -1
- data/lib/amp/support/amp_ui.rb +13 -29
- data/lib/amp/support/generator.rb +1 -1
- data/lib/amp/support/loaders.rb +1 -2
- data/lib/amp/support/logger.rb +10 -16
- data/lib/amp/support/match.rb +18 -4
- data/lib/amp/support/mercurial/ignore.rb +151 -0
- data/lib/amp/support/openers.rb +8 -3
- data/lib/amp/support/support.rb +91 -46
- data/lib/amp/templates/{blank.commit.erb → mercurial/blank.commit.erb} +0 -0
- data/lib/amp/templates/{blank.log.erb → mercurial/blank.log.erb} +0 -0
- data/lib/amp/templates/{default.commit.erb → mercurial/default.commit.erb} +0 -0
- data/lib/amp/templates/{default.log.erb → mercurial/default.log.erb} +0 -0
- data/lib/amp/templates/template.rb +18 -18
- data/man/amp.1 +51 -0
- data/site/src/about/commands.haml +1 -1
- data/site/src/css/amp.css +1 -1
- data/site/src/index.haml +3 -3
- data/tasks/man.rake +39 -0
- data/tasks/stats.rake +1 -10
- data/tasks/yard.rake +1 -50
- data/test/dirstate_tests/test_dir_state.rb +10 -8
- data/test/functional_tests/annotate.out +31 -0
- data/test/functional_tests/test_functional.rb +155 -63
- data/test/localrepo_tests/ampfile.rb +12 -0
- data/test/localrepo_tests/test_local_repo.rb +56 -57
- data/test/manifest_tests/test_manifest.rb +3 -5
- data/test/merge_tests/test_merge.rb +3 -3
- data/test/revlog_tests/test_revlog.rb +14 -6
- data/test/store_tests/test_fncache_store.rb +19 -19
- data/test/test_19_compatibility.rb +46 -0
- data/test/test_base85.rb +2 -2
- data/test/test_bdiff.rb +2 -2
- data/test/test_changegroup.rb +59 -0
- data/test/test_commands.rb +2 -2
- data/test/test_difflib.rb +2 -2
- data/test/test_generator.rb +34 -0
- data/test/test_ignore.rb +203 -0
- data/test/test_journal.rb +18 -13
- data/test/test_match.rb +2 -2
- data/test/test_mdiff.rb +3 -3
- data/test/test_mpatch.rb +3 -3
- data/test/test_multi_io.rb +40 -0
- data/test/test_support.rb +18 -2
- data/test/test_templates.rb +38 -0
- data/test/test_ui.rb +79 -0
- data/test/testutilities.rb +56 -0
- metadata +168 -49
- data/ext/amp/bz2/mkmf.log +0 -38
- data/lib/amp/encoding/mercurial_diff.rb +0 -378
- data/lib/amp/encoding/mercurial_patch.rb +0 -1
- data/lib/amp/encoding/patch.rb +0 -292
- data/lib/amp/encoding/pure_ruby/ruby_mercurial_patch.rb +0 -123
- data/lib/amp/merges/merge_state.rb +0 -164
- data/lib/amp/merges/merge_ui.rb +0 -322
- data/lib/amp/merges/simple_merge.rb +0 -450
- data/lib/amp/repository/branch_manager.rb +0 -234
- data/lib/amp/repository/dir_state.rb +0 -950
- data/lib/amp/repository/journal.rb +0 -203
- data/lib/amp/repository/lock.rb +0 -207
- data/lib/amp/repository/repositories/bundle_repository.rb +0 -214
- data/lib/amp/repository/repositories/http_repository.rb +0 -377
- data/lib/amp/repository/repositories/local_repository.rb +0 -2661
- data/lib/amp/repository/store.rb +0 -485
- data/lib/amp/repository/tag_manager.rb +0 -319
- data/lib/amp/repository/updatable.rb +0 -532
- data/lib/amp/repository/verification.rb +0 -431
- data/lib/amp/repository/versioned_file.rb +0 -475
- data/lib/amp/revlogs/bundle_revlogs.rb +0 -246
- data/lib/amp/revlogs/changegroup.rb +0 -217
- data/lib/amp/revlogs/changelog.rb +0 -338
- data/lib/amp/revlogs/changeset.rb +0 -521
- data/lib/amp/revlogs/file_log.rb +0 -165
- data/lib/amp/revlogs/index.rb +0 -493
- data/lib/amp/revlogs/manifest.rb +0 -195
- data/lib/amp/revlogs/node.rb +0 -18
- data/lib/amp/revlogs/revlog.rb +0 -1045
- data/lib/amp/revlogs/revlog_support.rb +0 -126
- data/lib/amp/support/ignore.rb +0 -144
- data/site/Rakefile +0 -38
- data/test/test_amp.rb +0 -9
- data/test/test_helper.rb +0 -15
data/lib/amp/revlogs/manifest.rb
DELETED
@@ -1,195 +0,0 @@
-module Amp
-
-  class ManifestEntry < DelegateClass(Hash)
-
-    ##
-    # Initializes the dictionary. It can be empty, by initializing with no
-    # arguments, or with more data by assigning them.
-    #
-    # It is a hash of Filename => node_id
-    #
-    # @param [Hash] mapping the initial settings of the dictionary
-    # @param [Hash] flags the flag settings of the dictionary
-    def initialize(mapping=nil, flags=nil)
-      @source_hash = mapping || {}
-      super(@source_hash || {})
-      @flags = flags || {}
-    end
-
-    def inspect
-      "#<ManifestEntry " + @source_hash.inspect + "\n" +
-      "  " + @flags.inspect + ">"
-    end
-
-    def flags(file=nil)
-      file ? @flags[file] : @flags
-    end
-
-    def files; keys; end
-
-    def delete(*args)
-      super(*args)
-      flags.delete(*args)
-    end
-
-    ##
-    # Clones the dictionary
-    def clone
-      self.class.new @source_hash.dup, @flags.dup
-    end
-
-    # @see clone
-    alias_method :dup, :clone
-
-    ##
-    # Mark a file to be checked later on
-    #
-    # @param [String] file the file to be marked for later checking
-    # @param []
-    def mark_for_later(file, node)
-      self[file] = nil # notice how we DIDN'T use `self.delete file`
-      flags[file] = node.flags file
-    end
-
-  end
-
-
-  ##
-  # = Manifest
-  # A Manifest is a special type of revision log. It stores lists of files
-  # that are being tracked, with some flags associated with each one. The
-  # manifest is where you can go to find what files a revision changed,
-  # and any extra information about the file via its flags.
-  class Manifest < Revlog
-
-    attr_accessor :manifest_list
-
-    ##
-    # Parses a bunch of text and interprets it as a manifest entry.
-    # It then maps them onto a ManifestEntry that stores the real
-    # info.
-    #
-    # @param [String] lines the string that contains the information
-    #   we need to parse.
-    def self.parse(lines)
-      mf_dict = ManifestEntry.new
-
-      lines.split("\n").each do |line|
-        f, n = line.split("\0")
-        if n.size > 40
-          mf_dict.flags[f] = n[40..-1]
-          mf_dict[f] = n[0..39].unhexlify
-        else
-          mf_dict[f] = n.unhexlify
-        end
-      end
-
-      mf_dict
-    end
-
-    def initialize(opener)
-      @map_cache = nil
-      @list_cache = nil
-      super(opener, "00manifest.i")
-    end
-
-    ##
-    # Reads the difference between the given node and the revision
-    # before that.
-    #
-    # @param [String] node the node_id of the revision to diff
-    # @return [ManifestEntry] the dictionary with the info between
-    #   the given revision and the one before that
-    def read_delta(node)
-      r = self.revision_index_for_node node
-      return self.class.parse(Diffs::MercurialDiff.patch_text(self.revision_diff(r-1, r)))
-    end
-
-    ##
-    # Parses the manifest's data at a given revision's node_id
-    #
-    # @param [String, Symbol] node the node_id of the revision. If a symbol,
-    #   it better be :tip or else shit will go down.
-    # @return [ManifestEntry] the dictionary mapping the
-    #   flags, filenames, digests, etc from the parsed data
-    def read(node)
-      node = tip if node == :tip
-
-      return ManifestEntry.new if node == NULL_ID
-      return @map_cache[1] if @map_cache && @map_cache[0] == node
-
-      text = decompress_revision node
-
-      @list_cache = text
-      mapping = self.class.parse(text)
-      @map_cache = [node, mapping]
-      mapping
-    end
-
-    ##
-    # Digs up the information about how a file changed in the revision
-    # specified by the provided node_id.
-    #
-    # @param [String] nodes the node_id of the revision we're interested in
-    # @param [String] f the path to the file we're interested in
-    # @return [[String, String], [nil, nil]] The data stored in the manifest about the
-    #   file. The first String is a digest, the second String is the extra
-    #   info stored alongside the file. Returns [nil, nil] if the node is not there
-    def find(node, f)
-      if @map_cache && node == @map_cache[0]
-        return [@map_cache[1][f], @map_cache[1].flags[f]]
-      end
-      mapping = read(node)
-      return [mapping[f], (mapping.flags[f] || "")]
-    end
-    ##
-    # Checks the list for files invalid characters that aren't allowed in
-    # filenames.
-    #
-    # @raise [RevlogSupport::RevlogError] if the path contains an invalid
-    #   character, raise.
-    def check_forbidden(list)
-      list.each do |f|
-        if f =~ /\n/ || f =~ /\r/
-          raise RevlogSupport::RevlogError.new("\\r and \\n are disallowed in "+
-                                               "filenames")
-        end
-      end
-    end
-
-    def encode_file(file, manifest)
-      "#{file}\000#{manifest[file].hexlify}#{manifest.flags[file]}\n"
-    end
-
-
-    def add(map, journal, link, p1=nil, p2=nil, changed=nil)
-      if changed || changed.empty? || @list_cache ||
-         @list_cache.empty? || p1.nil? || @map_cache[0] != p1
-        check_forbidden map
-        @list_cache = map.map {|f,n| f}.sort.map {|f| encode_file f, map }.join
-
-        n = add_revision(@list_cache, journal, link, p1, p2)
-        @map_cache = [n, map]
-
-        return n
-      end
-
-      check_forbidden changed[0] # added files, check if they're forbidden
-
-      mapping = Manifest.parse(@list_cache)
-
-      changed[0].each do |x|
-        mapping[x] = map[x].hexlify
-        mapping.flags[x] = map.flags[x]
-      end
-
-      changed[1].each {|x| mapping.delete x }
-      @list_cache = mapping.map {|k, v| k}.sort.map {|fn| encode_file(fn, mapping)}.join
-
-      n = add_revision(@list_cache, journal, link, p1, p2)
-      @map_cache = [n, map]
-
-      n
-    end
-  end
-end
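For readers skimming the diff, the record format that the removed `Manifest.parse` and `encode_file` handle is one `filename\0<40-char hex node id><flags>` entry per line. Below is a minimal standalone sketch of that round trip; the helper names are illustrative only and are not part of the gem.

```ruby
# Sketch of the manifest line format described above (hypothetical helpers,
# not the gem's API). Each line is "filename\0<40 hex chars><flags>\n".
def encode_manifest_line(file, hex_node_id, flags = "")
  "#{file}\0#{hex_node_id}#{flags}\n"
end

def parse_manifest_line(line)
  file, rest = line.chomp.split("\0")
  [file, rest[0, 40], rest[40..-1] || ""]   # filename, hex node id, flags
end

line = encode_manifest_line("README", "ab" * 20, "x")
p parse_manifest_line(line)  # => ["README", "ababab...", "x"]
```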
data/lib/amp/revlogs/node.rb
DELETED
@@ -1,18 +0,0 @@
-module Amp
-  module RevlogSupport
-    module Node
-      # the null node ID - just 20 null bytes
-      NULL_ID = "\0" * 20
-      # -1 is the null revision (the last one in the index)
-      NULL_REV = -1
-
-      ##
-      # Returns the node in a short hexadecimal format - only 6 bytes => 12 hex bytes
-      #
-      # @return [String] the node, in hex, and chopped a bit
-      def short(node)
-        node.short_hex
-      end
-    end
-  end
-end
data/lib/amp/revlogs/revlog.rb
DELETED
@@ -1,1045 +0,0 @@
-require 'set'
-
-module Amp
-
-  ##
-  # = Revlog
-  # A revlog is a generic file that represents a revision history. This
-  # class, while generic, is extremely importantly and highly functional.
-  # While the {Amp::Manifest} and {Amp::ChangeLog} classes inherit
-  # from Revlog, one can open either file using the base Revlog class.
-  #
-  # A Revision log is based on two things: an index, which stores some
-  # meta-data about each revision in the repository's history, and
-  # some data associated with each revision. The data is stored as
-  # a (possibly zlib-compressed) diff.
-  #
-  # There are two versions of revision logs - version 0 and version NG.
-  # This information is handled by the {Amp::RevlogSupport:Index} classes.
-  #
-  # Sometimes the data is stored in a separate file from the index. This
-  # is up to the system to decide.
-  #
-  class Revlog
-    include Enumerable
-    include RevlogSupport::Node
-
-    # the file paths to the index and data files
-    attr_reader :index_file, :data_file
-    # The actual {Index} object.
-    attr_reader :index
-
-    ##
-    # Initializes the revision log with an opener object (which handles how
-    # the interface to opening the files) and the path to the index itself.
-    #
-    # @param [Amp::Opener] opener an object that will handle opening the file
-    # @param [String] indexfile the path to the index file
-    def initialize(opener, indexfile)
-      @opener = opener
-      @index_file = indexfile
-      @data_file = indexfile[0..-3] + ".d"
-      @chunk_cache = nil
-      @index = RevlogSupport::Index.parse(opener, indexfile)
-
-      # add the null, terminating index entry if it isn't already there
-      if @index.index.empty? || @index.is_a?(RevlogSupport::LazyIndex) ||
-         @index.index[-1].node_id.not_null?
-        # the use of @index.index is deliberate!
-        @index.index << RevlogSupport::IndexEntry.new(0,0,0,-1,-1,-1,-1,NULL_ID)
-      end
-
-    end
-    alias_method :revlog_initialize, :initialize
-
-    ##
-    # Actually opens the file.
-    def open(path, mode="r")
-      @opener.open(path, mode)
-    end
-
-    ##
-    # Returns the requested node as an IndexEntry. Takes either a string or
-    # a fixnum index value.
-    #
-    # @param [String, Fixnum] the index or node ID to look up in the revlog
-    # @return [IndexEntry] the requested index entry.
-    def [](idx)
-      if idx.is_a? String
-        return @index[@index.node_map[idx]]
-      elsif idx.is_a? Array
-        STDERR.puts idx.inspect # KILLME
-        idx
-      else
-        return @index[idx]
-      end
-    end
-
-    ##
-    # Returns the unique node_id (a string) for a given revision at _index_.
-    #
-    # @param [Fixnum] index the index into the list, from 0-(num_revisions - 1).
-    # @return [String] the node's ID
-    def node_id_for_index(index)
-      unless @index[index]
-        raise RevlogSupport::LookupError.new("Couldn't find node for id #{index.inspect}")
-      end
-      @index[index].node_id
-    end
-
-    # @see node_id_for_index
-    alias_method :node, :node_id_for_index
-
-    ##
-    # Returns the index number for the given node ID.
-    #
-    # @param [String] id the node_id to lookup
-    # @return [Integer] the index into the revision index where you can find
-    #   the requested node.
-    def revision_index_for_node(id)
-      unless @index.node_map[id]
-        raise StandardError.new("Couldn't find node for id #{id.inspect}")
-      end
-      @index.node_map[id]
-    end
-
-    ##
-    # @see revision_index_for_node
-    alias_method :rev, :revision_index_for_node
-
-    ##
-    # Returns the "link revision" index for the given revision index
-    def link_revision_for_index(index)
-      self[index].link_rev
-    end
-
-    ##
-    # Returns the node_id's of the parents (1 or 2) of the given node ID.
-    def parents_for_node(id)
-      #index = revision_index_for_node id
-      entry = self[id]
-      [ @index[entry.parent_one_rev].node_id ,
-        @index[entry.parent_two_rev].node_id ]
-    end
-    alias_method :parents, :parents_for_node
-
-    ##
-    # Returns the indicies of the parents (1 or 2) of the node at _index_
-    def parent_indices_for_index(index)
-      [ self[index].parent_one_rev ,
-        self[index].parent_two_rev ]
-    end
-
-    ##
-    # Returns the size of the data for the revision at _index_.
-    def data_size_for_index(index)
-      self[index].compressed_len
-    end
-
-    ##
-    # Returns the uncompressed size of the data for the revision at _index_.
-    def uncompressed_size_for_index(index)
-      len = self[index].uncompressed_len
-      return len if len >= 0
-
-      text = decompress_revision node_id_for_index(index)
-      return text.size
-    end
-
-    ##
-    # Returns the offset where the data begins for the revision at _index_.
-    def data_start_for_index(index)
-      RevlogSupport::Support.get_offset self[index].offset_flags
-    end
-
-    ##
-    # Returns the offset where the data ends for the revision at _index_.
-    def data_end_for_index(index)
-      data_start_for_index(index) + self[index].compressed_len
-    end
-
-    ##
-    # Returns the "base revision" index for the revision at _index_.
-    def base_revision_for_index(index)
-      self[index].base_rev
-    end
-
-    ##
-    # Returns the node ID for the index's tip-most revision
-    def tip
-      node_id_for_index(@index.size - 2)
-    end
-
-    ##
-    # Returns the number of entries in this revision log.
-    def size
-      @index.size - 1
-    end
-    alias_method :index_size, :size
-
-    ##
-    # Returns true if size is 0
-    def empty?
-      index_size.zero?
-    end
-
-    ##
-    # Returns each revision as a {Amp::RevlogSupport::IndexEntry}.
-    # Don't iterate over the extra revision -1!
-    def each(&b); @index[0..-2].each(&b); end
-
-    ##
-    # Returns all of the indices for all revisions.
-    #
-    # @return [Array] all indicies
-    def all_indices
-      (0..size).to_a
-    end
-
-    ##
-    # Returns a hash of all _ancestral_ nodes that can be reached from
-    # the given node ID. Just do [node_id] on the result to check if it's
-    # reachable.
-    def reachable_nodes_for_node(node, stop=nil)
-      reachable = {}
-      to_visit = [node]
-      reachable[node] = true
-      stop_idx = stop ? revision_index_for_node(stop) : 0
-
-      until to_visit.empty?
-        node = to_visit.shift
-        next if node == stop || node.null?
-        parents_for_node(node).each do |parent|
-          next if revision_index_for_node(parent) < stop_idx
-          unless reachable[parent]
-            reachable[parent] = true
-            to_visit << parent
-          end
-        end
-      end
-
-      reachable
-    end
-
-    ##
-    # Allows the user to operate on all the ancestors of the given revisions.
-    # One can pass a block, or just call it and get a Set.
-    def ancestors(revisions)
-      revisions = [revisions] unless revisions.kind_of? Array
-      to_visit = revisions.dup
-      seen = Set.new([NULL_REV])
-      until to_visit.empty?
-        parent_indices_for_index(to_visit.shift).each do |parent|
-          unless seen.include? parent
-            to_visit << parent
-            seen << parent
-            yield parent if block_given?
-          end
-        end
-      end
-      seen.delete NULL_REV
-      seen
-    end
-
-    ##
-    # Allows the user to operate on all the descendants of the given revisions.
-    # One can pass a block, or just call it and get a Set. Revisions are passed
-    # as indices.
-    def descendants(revisions)
-      seen = Set.new revisions
-      start = revisions.min + 1
-      start.upto self.size do |i|
-        parent_indices_for_index(i).each do |x|
-          if x != NULL_REV && seen.include?(x)
-            seen << i
-            yield i if block_given?
-            break 1
-          end
-        end
-      end
-      seen - revisions
-    end
-
-    ##
-    # Returns the topologically sorted list of nodes from the set:
-    # missing = (ancestors(heads) \ ancestors(common))
-    def find_missing(common=[NULL_ID], heads=self.heads)
-      common.map! {|r| revision_index_for_node r}
-      heads.map!  {|r| revision_index_for_node r}
-
-      has = {}
-      ancestors(common) {|a| has[a] = true}
-      has[NULL_REV] = true
-      common.each {|r| has[r] = true}
-
-      missing = {}
-      to_visit = heads.reject {|r| has[r]}
-      until to_visit.empty?
-        r = to_visit.shift
-        next if missing.include? r
-        missing[r] = true
-        parent_indices_for_index(r).each do |p|
-          to_visit << p unless has[p]
-        end
-      end
-
-      missing.keys.sort.map {|rev| node_id_for_index rev}
-    end
-
-    ##
-    # Return a tuple containing three elements. Elements 1 and 2 contain
-    # a final list bases and heads after all the unreachable ones have been
-    # pruned. Element 0 contains a topologically sorted list of all
-    #
-    # nodes that satisfy these constraints:
-    # 1. All nodes must be descended from a node in roots (the nodes on
-    #    roots are considered descended from themselves).
-    # 2. All nodes must also be ancestors of a node in heads (the nodes in
-    #    heads are considered to be their own ancestors).
-    #
-    # If roots is unspecified, nullid is assumed as the only root.
-    # If heads is unspecified, it is taken to be the output of the
-    # heads method (i.e. a list of all nodes in the repository that
-    # have no children).
-    #
-    # @param [Array<String>] roots
-    # @param [Array<String>] heads
-    # @return [{:heads => Array<String>, :roots => Array<String>, :between => Array<String>}]
-    def nodes_between(roots=nil, heads=nil)
-      no_nodes = {:roots => [], :heads => [], :between => []}
-      return no_nodes if roots != nil && roots.empty?
-      return no_nodes if heads != nil && heads.empty?
-
-      if roots.nil?
-        roots = [NULL_ID] # Everybody's a descendent of nullid
-        lowest_rev = NULL_REV
-      else
-        roots = roots.dup
-        lowest_rev = roots.map {|r| revision_index_for_node r}.min
-      end
-
-      if lowest_rev == NULL_REV && heads.nil?
-        # We want _all_ the nodes!
-        return {:between => all_indices.map {|i| node_id_for_index i },
-                :roots => [NULL_ID], :heads => self.heads}
-      end
-
-      if heads.nil?
-        # All nodes are ancestors, so the latest ancestor is the last
-        # node.
-        highest_rev = self.size - 1
-        # Set ancestors to None to signal that every node is an ancestor.
-        ancestors = nil
-        # Set heads to an empty dictionary for later discovery of heads
-        heads = {}
-      else
-        heads = heads.dup
-        ancestors = {}
-
-        # Turn heads into a dictionary so we can remove 'fake' heads.
-        # Also, later we will be using it to filter out the heads we can't
-        # find from roots.
-        heads = Hash.with_keys heads, false
-
-        # Start at the top and keep marking parents until we're done.
-        nodes_to_tag = heads.keys
-        highest_rev = nodes_to_tag.map {|r| revision_index_for_node r }.max
-
-        until nodes_to_tag.empty?
-          # grab a node to tag
-          node = nodes_to_tag.pop
-          # Never tag nullid
-          next if node.null?
-
-          # A node's revision number represents its place in a
-          # topologically sorted list of nodes.
-          r = revision_index_for_node node
-          if r >= lowest_rev
-            if !ancestors.include?(node)
-              # If we are possibly a descendent of one of the roots
-              # and we haven't already been marked as an ancestor
-              ancestors[node] = true # mark as ancestor
-              # Add non-nullid parents to list of nodes to tag.
-              nodes_to_tag += parents_for_node(node).reject {|p| p.null? }
-            elsif heads.include? node # We've seen it before, is it a fake head?
-              # So it is, real heads should not be the ancestors of
-              # any other heads.
-              heads.delete_at node
-            end
-          end
-        end
-
-        return no_nodes if ancestors.empty?
-
-        # Now that we have our set of ancestors, we want to remove any
-        # roots that are not ancestors.
-
-        # If one of the roots was nullid, everything is included anyway.
-        if lowest_rev > NULL_REV
-          # But, since we weren't, let's recompute the lowest rev to not
-          # include roots that aren't ancestors.
-
-          # Filter out roots that aren't ancestors of heads
-          roots = roots.select {|rev| ancestors.include? rev}
-
-          return no_nodes if roots.empty? # No more roots?  Return empty list
-
-          # Recompute the lowest revision
-          lowest_rev = roots.map {|rev| revision_index_for_node rev}.min
-        else
-          lowest_rev = NULL_REV
-          roots = [NULL_ID]
-        end
-      end
-
-      # Transform our roots list into a 'set' (i.e. a dictionary where the
-      # values don't matter.
-      descendents = Hash.with_keys roots
-
-      # Also, keep the original roots so we can filter out roots that aren't
-      # 'real' roots (i.e. are descended from other roots).
-      roots = descendents.dup
-
-      # Our topologically sorted list of output nodes.
-      ordered_output = []
-
-      # Don't start at nullid since we don't want nullid in our output list,
-      # and if nullid shows up in descedents, empty parents will look like
-      # they're descendents.
-      [lowest_rev, 0].max.upto(highest_rev) do |rev|
-        node = node_id_for_index rev
-        is_descendent = false
-
-        if lowest_rev == NULL_REV # Everybody is a descendent of nullid
-          is_descendent = true
-        elsif descendents.include? node
-          # n is already a descendent
-          is_descendent = true
-
-          # This check only needs to be done here because all the roots
-          # will start being marked is descendents before the loop.
-          if roots.include? node
-            # If n was a root, check if it's a 'real' root.
-            par = parents_for_node node
-            # If any of its parents are descendents, it's not a root.
-            if descendents.include?(par[0]) || descendents.include?(par[1])
-              roots.delete_at node
-            end
-          end
-        else
-          # A node is a descendent if either of its parents are
-          # descendents. (We seeded the dependents list with the roots
-          # up there, remember?)
-          par = parents_for_node node
-          if descendents.include?(par[0]) || descendents.include?(par[1])
-            descendents[node] = true
-            is_descendent = true
-          end
-        end
-
-        if is_descendent && (ancestors.nil? || ancestors.include?(node))
-          # Only include nodes that are both descendents and ancestors.
-          ordered_output << node
-          if !ancestors.nil? && heads.include?(node)
-            # We're trying to figure out which heads are reachable
-            # from roots.
-            # Mark this head as having been reached
-            heads[node] = true
-          elsif ancestors.nil?
-            # Otherwise, we're trying to discover the heads.
-            # Assume this is a head because if it isn't, the next step
-            # will eventually remove it.
-            heads[node] = true
-
-            # But, obviously its parents aren't.
-            parents_for_node(node).each {|parent| heads.delete parent }
-          end
-        end
-      end
-
-      heads = heads.keys.select {|k| heads[k] }
-      roots = roots.keys
-      {:heads => heads, :roots => roots, :between => ordered_output}
-    end
-
-    ##
-    # Return the list of all nodes that have no children.
-    #
-    # if start is specified, only heads that are descendants of
-    # start will be returned
-    # if stop is specified, it will consider all the revs from stop
-    # as if they had no children
-    def heads(start=nil, stop=nil)
-      if start.nil? && stop.nil?
-        count = self.size
-        return [NULL_ID] if count == 0
-        is_head = [true] * (count + 1)
-        count.times do |r|
-          e = @index[r]
-          is_head[e.parent_one_rev] = is_head[e.parent_two_rev] = false
-        end
-        return (0..(count-1)).to_a.select {|r| is_head[r]}.map {|r| node_id_for_index r}
-      end
-      start = NULL_ID if start.nil?
-      stop  = [] if stop.nil?
-      stop_revs = {}
-      stop.each {|r| stop_revs[revision_index_for_node(r)] = true }
-      start_rev = revision_index_for_node start
-      reachable = {start_rev => 1}
-      heads = {start_rev => 1}
-      (start_rev + 1).upto(self.size - 1) do |r|
-        parent_indices_for_index(r).each do |p|
-          if reachable[p]
-            reachable[r] = 1 unless stop_revs[r]
-            heads[r] = 1
-          end
-          heads.delete p if heads[p] && stop_revs[p].nil?
-        end
-      end
-
-      heads.map {|k,v| node_id_for_index k}
-    end
-
-    ##
-    # Returns the children of the node with ID _node_.
-    def children(node)
-      c = []
-      p = revision_index_for_node node
-      (p+1).upto(self.size - 1) do |r|
-        prevs = parent_indices_for_index(r).select {|pr| pr != NULL_REV}
-        prevs.each {|pr| c << node_id_for_index(r) if pr == p} if prevs.any?
-        c << node_id_for_index(r) if p == NULL_REV
-      end
-      c
-    end
-
-    ##
-    # Tries to find an exact match for a node with ID _id_. If no match is,
-    # found, then the id is treated as an index number - if that doesn't work,
-    # the revlog will try treating the ID supplied as node_id in hex form.
-    def id_match(id)
-      return node_id_for_index(id) if id.is_a? Integer
-      return id if id.size == 20 && revision_index_for_node(id)
-      rev = id.to_i
-      rev = self.size + rev if rev < 0
-      if id.size == 40
-        node = id.unhexlify
-        r = revision_index_for_node node
-        return node if r
-      end
-      nil
-    end
-
-    ##
-    # Tries to find a partial match for a node_id in hex form.
-    def partial_id_match(id)
-      return nil if id.size >= 40
-      l = id.size / 2
-      bin_id = id[0..(l*2 - 1)].unhexlify
-      nl = @index.node_map.keys.select {|k| k[0..(l-1)] == bin_id}
-      nl = nl.select {|n| n.hexlify =~ /^#{id}/}
-      return nl.first if nl.size == 1
-      raise RevlogSupport::LookupError.new("ambiguous ID #{id}") if nl.size > 1
-      nil
-    end
-
-    ##
-    # This method will, given an id (or an index) or an ID in hex form,
-    # try to find the given node in the index.
-    def lookup_id(id)
-      n = id_match id
-      return n unless n.nil?
-      n = partial_id_match id
-      return n unless n.nil?
-      raise RevlogSupport::LookupError.new("no match found #{id}")
-    end
-
-    ##
-    # Compares a node with the provided text, as a consistency check. Works
-    # using <=> semantics.
-    def cmp(node, text)
-
-      p1, p2 = parents_for_node node
-      return RevlogSupport::Support.history_hash(text, p1, p2) != node
-    end
-
-    ##
-    # Loads a block of data into the cache.
-    def load_cache(data_file, start, cache_length)
-
-      if data_file.nil?
-        data_file = open(@index_file) if @index.inline?
-        data_file = open(@data_file)  unless @index.inline?
-      end
-
-      # data_file.seek(start, IO::SEEK_SET)
-      # sz = data_file.read.length
-      # data_file.seek(0, IO::SEEK_SET)
-      # $zs = data_file.read.length
-      # puts(@index.inline? ? "------- INLINE" : "-------NOT INLINE") #killme
-      # puts "------- CACHE_LENGTH = #{cache_length}" # KILLME
-      # puts "===" # KILLME
-      # puts "We are going to read #{cache_length} bytes starting at #{start}" # KILLME
-      # puts "Wait a minute... on Ari's machine, there's only #{sz} bytes to read..." # KILLME
-      # puts "Filesize: #{$zs}" # KILLME
-      # puts "===" # KILLME
-
-      data_file.seek(start, IO::SEEK_SET)
-      @chunk_cache = [start, data_file.read(cache_length)]
-      data_file
-    end
-
-    ##
-    # Gets a chunk of data from the datafile (or, if inline, from the index
-    # file). Just give it a revision index and which data file to use
-    #
-    # @param [Fixnum] rev the revision index to extract
-    # @param [IO] data_file The IO file descriptor for loading data
-    # @return [String] the raw data from the index (posssibly compressed)
-    def get_chunk(rev, data_file = nil)
-      begin
-        start, length = self.data_start_for_index(rev), self[rev].compressed_len
-      rescue
-        Amp::UI.debug "Failed get_chunk: #{@index_file}:#{rev}"
-        raise
-      end
-
-      #puts "The starting point for the data is: #{data_start_for_index(rev)}" # KILLME
-      #puts "We're reading #{length} bytes. Look at data_start_for_index" # KILLME
-
-      start += ((rev + 1) * @index.entry_size) if @index.inline?
-
-      endpt = start + length
-      offset = 0
-      if @chunk_cache.nil?
-        cache_length = [65536, length].max
-        data_file = load_cache data_file, start, cache_length
-      else
-        cache_start = @chunk_cache[0]
-        cache_length = @chunk_cache[1].size
-        cache_end = cache_start + cache_length
-        if start >= cache_start && endpt <= cache_end
-          offset = start - cache_start
-        else
-          cache_length = [65536, length].max
-          data_file = load_cache data_file, start, cache_length
-        end
-      end
-
-      c = @chunk_cache[1]
-      return "" if c.nil? || c.empty? || length == 0
-      c = c[offset..(offset + length - 1)] if cache_length != length
-
-      RevlogSupport::Support.decompress c
-    end
-
-    ##
-    # Unified diffs 2 revisions, based on their indices. They are returned in a sexified
-    # unified diff format.
-    def unified_revision_diff(rev1, rev2)
-      Diffs::MercurialDiff.unified_diff( decompress_revision(self.node_id_for_index(rev1)),
-                                         decompress_revision(self.node_id_for_index(rev2)))
-    end
-
-    ##
-    # Diffs 2 revisions, based on their indices. They are returned in
-    # BinaryDiff format.
-    #
-    # @param [Fixnum] rev1 the index of the source revision
-    # @param [Fixnum] rev2 the index of the destination revision
-    # @return [String] The diff of the 2 revisions.
-    def revision_diff(rev1, rev2)
-      return get_chunk(rev2) if (rev1 + 1 == rev2) &&
-                                self[rev1].base_rev == self[rev2].base_rev
-      Diffs::MercurialDiff.text_diff( decompress_revision(node_id_for_index(rev1)),
-                                      decompress_revision(node_id_for_index(rev2)))
-    end
-
-    ##
-    # Given a node ID, extracts that revision and decompresses it. What you get
-    # back will the pristine revision data!
-    #
-    # @param [String] node the Node ID of the revision to extract.
-    # @return [String] the pristine revision data.
-    def decompress_revision(node)
-      return "" if node.nil? || node.null?
-      return @index.cache[2] if @index.cache && @index.cache[0] == node
-
-
-      text = nil
-      rev = revision_index_for_node node
-      base = @index[rev].base_rev
-
-      if @index[rev].offset_flags & 0xFFFF > 0
-        raise RevlogSupport::RevlogError.new("incompatible revision flag %x" %
-                                             (self.index[rev].offset_flags & 0xFFFF))
-      end
-      data_file = nil
-
-      if @index.cache && @index.cache[1].is_a?(Numeric) && @index.cache[1] >= base && @index.cache[1] < rev
-        base = @index.cache[1]
-        text = @index.cache[2]
-        # load the index if we're lazy (base, rev + 1)
-      end
-      data_file = open(@data_file) if !(@index.inline?) && rev > base + 1
-      text = get_chunk(base, data_file) if text.nil?
-      bins = ((base + 1)..rev).map {|r| get_chunk(r, data_file)}
-      text = Diffs::MercurialPatch.apply_patches(text, bins)
-
-      p1, p2 = parents_for_node node
-      if node != RevlogSupport::Support.history_hash(text, p1, p2)
-        raise RevlogSupport::RevlogError.new("integrity check failed on %s:%d, data:%s" %
-                                             [(@index.inline? ? @index_file : @data_file), rev, text.inspect])
-      end
-      @index.cache = [node, rev, text]
-      text
-    end
-
-    ############ TODO
-    # @todo FINISH THIS METHOD
-    # @todo FIXME
-    # FINISH THIS METHOD
-    # TODO
-    # FIXME
-    def check_inline_size(tr, fp=nil)
-      return unless @index.inline?
-      if fp.nil?
-        fp = open(@index_file, "r")
-        fp.seek(0, IO::SEEK_END)
-      end
-      size = fp.tell
-      return if size < 131072
-
-      trinfo = tr.find(@index_file)
-      if trinfo.nil?
-        raise RevlogSupport::RevlogError.new("#{@index_file} not found in the"+
-                                             "transaction")
-      end
-      trindex = trinfo[:data]
-      data_offset = data_start_for_index trindex
-      tr.add @data_file, data_offset
-      df = open(@data_file, 'w')
-
-      begin
-        calc = @index.entry_size
-        self.size.times do |r|
-          start = data_start_for_index(r) + (r + 1) * calc
-          length = self[r].compressed_len
-          fp.seek(start)
-          d = fp.read length
-          df.write d
-        end
-      ensure
-        df.close
-      end
-
-      fp.close
-
-      open(@index_file, 'w') do |fp| # automatically atomic
-        @version &= ~ RevlogSupport::Support::REVLOG_NG_INLINE_DATA
-        @inline = false
-        each do |i|
-          # THE FOLLOWING LINE IS NOT CORRECT
-          # IT IS DIRECTLY TRANSLATED PYTHON CODE
-          # I HAVE NO IDEA HOW WE DID THIS BEFORE
-          e = @io.pack_entry @index[i], @node, @version, i
-          fp.write e
-        end
-      end
-
-      tr.replace @index_file, trindex * calc
-      @chunk_cache = nil # reset the cache
-    end
-
-    ##
-    # add a revision to the log
-    #
-    # @param [String] text the revision data to add
-    # @param transaction the transaction object used for rollback
-    # @param link the linkrev data to add
-    # @param [String] p1 the parent nodeids of the revision
-    # @param [String] p2 the parent nodeids of the revision
-    # @param d an optional precomputed delta
-    # @return [String] the digest ID referring to the node in the log
-    def add_revision(text, transaction, link, p1, p2, d=nil, index_file_handle=nil)
-      node = RevlogSupport::Support.history_hash(text, p1, p2)
-      return node if @index.node_map[node]
-      curr = index_size
-      prev = curr - 1
-      base = self[prev].base_rev
-      offset = data_end_for_index prev
-
-      if curr > 0
-        if d.nil? || d.empty?
-          ptext = decompress_revision node_id_for_index(prev)
-          d = Diffs::MercurialDiff.text_diff(ptext, text)
-        end
-        data = RevlogSupport::Support.compress d
-        len = data[:compression].size + data[:text].size
-        dist = len + offset - data_start_for_index(base)
-      end
-
-      # Compressed diff > size of actual file
-      if curr == 0 || dist > text.size * 2
-        data = RevlogSupport::Support.compress text
-        len = data[:compression].size + data[:text].size
-        base = curr
-      end
-
-      entry = RevlogSupport::IndexEntry.new(RevlogSupport::Support.offset_version(offset, 0),
-                                            len, text.size, base, link, rev(p1), rev(p2), node)
-      @index << entry
-      @index.node_map[node] = curr
-      @index.write_entry(@index_file, entry, transaction, data, index_file_handle)
-      @index.cache = [node, curr, text]
-      node
-    end
-
-    ##
-    # Finds the most-recent common ancestor for the two nodes.
-    def ancestor(a, b)
-      parent_func = proc do |rev|
-        self.parent_indices_for_index(rev).select {|i| i != NULL_REV }
-      end
-      c = Graphs::AncestorCalculator.ancestors(revision_index_for_node(a),
-                                               revision_index_for_node(b),
-                                               parent_func)
-      return NULL_ID if c.nil?
-      node_id_for_index c
-    end
-
-    ##
-    # Yields chunks of change-group data for writing to disk, given
-    # a nodelist, a method to lookup stuff. Given a list of changset
-    # revs, return a set of deltas and metadata corresponding to nodes.
-    # the first delta is parent(nodes[0]) -> nodes[0] the receiver is
-    # guaranteed to have this parent as it has all history before these
-    # changesets. parent is parent[0]
-    #
-    # FIXME -- could be the cause of our failures with #pre_push!
-    # @param [[String]] nodelist
-    # @param [Proc, #[], #call] lookup
-    # @param [Proc, #[], #call] info_collect can be left nil
-    def group(nodelist, lookup, info_collect=nil)
-      revs = nodelist.map {|n| rev n }
-
-      # if we don't have any revisions touched by these changesets, bail
-      if revs.empty?
-        yield RevlogSupport::ChangeGroup.closing_chunk
-        return
-      end
-
-      # add the parent of the first rev
-      parent1 = parents_for_node(node(revs[0]))[0]
-      revs.unshift rev(parent1)
-
-      # build deltas
-      0.upto(revs.size - 2) do |d|
-        a, b = revs[d], revs[d + 1]
-        nb = node b
-
-        info_collect[nb] if info_collect
-
-        p = parents(nb)
-        meta = nb + p[0] + p[1] + lookup[nb]
-
-        if a == -1
-          data = decompress_revision nb
-          meta += Diffs::MercurialDiff.trivial_diff_header(d.size)
-        else
-
-          data = revision_diff(a, b)
-        end
-
-        yield RevlogSupport::ChangeGroup.chunk_header(meta.size + data.size)
-        yield meta
-        if data.size > 1048576
-          pos = 0
-          while pos < data.size
-            pos2 = pos + 262144
-            yield data[pos..(pos2-1)]
-            pos = pos2
-          end
-        else
-          yield data
-        end
-      end
-      yield RevlogSupport::ChangeGroup.closing_chunk
-    end
-
-    # Adds a changelog to the index
-    #
-    # @param [StringIO, #string] revisions something we can iterate over (Usually a StringIO)
-    # @param [Proc, #call, #[]] link_mapper
-    # @param [Amp::Journal] journal to start a transaction
-    def add_group(revisions, link_mapper, journal)
-      r = index_size
-      t = r - 1
-      node = nil
-
-      base = prev = RevlogSupport::Node::NULL_REV
-      start = endpt = text_len = 0
-      endpt = data_end_for_index t if r != 0
-
-      index_file_handle = open(@index_file, "a+")
-      index_size = r * @index.entry_size
-      if @index.inline?
-        journal << [@index_file, endpt + index_size, r]
-        data_file_handle = nil
-      else
-        journal << [@index_file, index_size, r]
-        journal << [@data_file, endpt]
-        data_file_handle = open(@data_file, "a")
-      end
-
-      begin #errors abound here i guess
-        chain = nil
-
-        Amp::RevlogSupport::ChangeGroup.each_chunk(revisions) do |chunk|
-          node, parent1, parent2, cs = chunk[0..79].unpack("a20a20a20a20")
-          link = link_mapper.call(cs)
-
-          if @index.node_map[node]
-            chain = node
-            next
-          end
-          delta = chunk[80..-1]
-          [parent1, parent2].each do |parent|
-            unless @index.node_map[parent]
-              raise RevlogSupport::LookupError.new("unknown parent #{parent}"+
-                                                   " in #{@index_file}")
-            end
-          end
-
-          unless chain
-            chain = parent1
-            unless @index.node_map[chain]
-              raise RevlogSupport::LookupError.new("unknown parent #{chain}"+
-                                                   " from #{chain} in #{@index_file}")
-            end
-          end
-
-          if chain == prev
-            cdelta = RevlogSupport::Support.compress delta
-            cdeltalen = cdelta[:compression].size + cdelta[:text].size
-            text_len = Diffs::MercurialPatch.patched_size text_len, delta
-          end
-
-          if chain != prev || (endpt - start + cdeltalen) > text_len * 2
-            #flush our writes here so we can read it in revision
-            data_file_handle.flush if data_file_handle
-            index_file_handle.flush
-            text = decompress_revision(chain)
-            if text.size == 0
-              text = delta[12..-1]
-            else
-              text = Diffs::MercurialPatch.apply_patches(text, [delta])
-            end
-            chk = add_revision(text, journal, link, parent1, parent2,
-                               nil, index_file_handle)
-
-            if chk != node
-              raise RevlogSupport::RevlogError.new("consistency error "+
-                                                   "adding group")
-            end
-            text_len = text.size
-          else
-            entry = RevlogSupport::IndexEntry.new(RevlogSupport::Support.offset_version(endpt, 0),
-                      cdeltalen,text_len, base, link, rev(parent1), rev(parent2), node)
-            @index << entry
-            @index.node_map[node] = r
-            @index.write_entry(@index_file, entry, journal, cdelta, index_file_handle)
-          end
-
-
-          t, r, chain, prev = r, r + 1, node, node
-          base = self[t].base_rev
-          start = data_start_for_index base
-          endpt = data_end_for_index t
-        end
-      rescue Exception => e
-        puts e
-        puts e.backtrace
-      ensure
-        if data_file_handle && !(data_file_handle.closed?)
-          data_file_handle.close
-        end
-        index_file_handle.close
-      end
-      node
-    end
-
-    ##
-    # Strips all revisions after (and including) a given link_index
-    def strip(min_link)
-      return if size == 0
-
-      load_index_map if @index.is_a? RevlogSupport::LazyIndex
-
-      rev = 0
-      all_indices.each {|_rev| rev = _rev; break if @index[rev].link_rev >= min_link }
-      return if rev > all_indices.max
-
-      endpt = data_start_for_index rev
-      unless @index.inline?
-        df = File.open(@data_file, "a")
-        df.truncate(endpt)
-        endpt = rev * @index.entry_size
-      else
-        endpt += rev * @index.entry_size
-      end
-
-      indexf = File.open(@index_file, "a")
-      indexf.truncate(endpt)
-
-      @cache = @index.cache = nil
-      @chunk_cache = nil
-      rev.upto(self.size-1) {|x| @index.node_map.delete(self.node(x)) }
-      @index.index = @index.index[0..rev-1]
-    end
-
-    ##
-    # Checks to make sure our data and index files are the right size.
-    # Returns the differences between expected and actual sizes.
-    def checksize
-      expected = 0
-      expected = [0, data_end_for_index(self.index_size - 1)].max if self.index_size > 0
-
-
-
-
-      f = open(@index_file)
-      f.seek(0, IO::SEEK_END)
-      actual = f.tell
-      s = @index.entry_size
-      i = [0, actual / s].max
-      di = actual - (i * s)
-
-      if @index.inline?
-        databytes = 0
-        self.index_size.times do |r|
-          databytes += [0, self[r].compressed_len].max
-        end
-        dd = 0
-        di = actual - (self.index_size * s) - databytes
-      else
-        f = open(@data_file)
-        f.seek(0, IO::SEEK_END)
-        actual = f.tell
-        dd = actual - expected
-        f.close
-      end
-
-      return {:data_diff => dd, :index_diff => di}
-    end
-
-    ##
-    # Returns all the files this object is concerned with.
-    def files
-      res = [ @index_file ]
-      res << @data_file unless @index.inline?
-      res
-    end
-
-  end
-end
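The ancestor walk that the removed `Revlog#ancestors` performs is an ordinary breadth-first traversal over parent indices. A self-contained sketch of that idea follows; it uses a stand-in parent table rather than a real revlog, and the helper name is hypothetical.

```ruby
require 'set'

# Breadth-first ancestor walk in the spirit of Revlog#ancestors above.
# `parents` is a stand-in Hash of rev index => [parent indices]; -1 plays
# the role of NULL_REV from the deleted node.rb.
def ancestor_indices(start_revs, parents)
  seen     = Set.new([-1])
  to_visit = start_revs.dup
  until to_visit.empty?
    parents.fetch(to_visit.shift, []).each do |p|
      next if seen.include?(p)
      seen << p
      to_visit << p
    end
  end
  seen.delete(-1)
  seen
end

parents = { 0 => [-1, -1], 1 => [0, -1], 2 => [1, 0], 3 => [2, -1] }
p ancestor_indices([3], parents).sort  # => [0, 1, 2]
```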