build-graph 2.1.1 → 2.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,278 +0,0 @@
1
- //
2
- // ParallelMergeSort.h
3
- // DictionarySort
4
- //
5
- // Created by Samuel Williams on 2/11/11.
6
- // Copyright, 2014, by Samuel G. D. Williams. <http://www.codeotaku.com>
7
- //
8
-
9
- #pragma once
10
-
11
- #include <thread>
12
- #include "Benchmark.h"
13
-
14
- // A parallel merge sort algorithm template implemented using C++11 threads.
15
- namespace ParallelMergeSort
16
- {
17
- /*
18
- # Parallel Merge Algorithm
19
-
20
- This parallel merge algorithm uses two threads and requires no synchronisation (e.g. lock free).
21
-
22
- Given two sorted sequences i and j, such that |i| == |j| or |i| == |j|-1, we can merge these together by taking the |i| smallest items and |j| biggest items independently. The final sorted list q which consists of all items from i and j in order, has a basic property such that the lower |i| items in q and the upper |j| items in q are mutually exclusive. Therefore, we can select each half of the list independently:
23
-
24
- ij = [1, 3, 5, 2, 4, 6]
25
- i = [1, 3, 5]
26
- j = [2, 4, 6]
27
-
28
- q = [1, 2, 3, 4, 5, 6]
29
-
30
- In this case, we can see that q[0,3] can be formed by merging the first 3 smallest items, and q[3,6] can be formed by merging the largest 3 items. Because these are mutually exclusive, this process can be done on two threads.
31
-
32
- Other merging algorithms exist, but may require locking. Another approach worth exploring would be to form in parallel n heaps, where all items heap[k] < heap[k+1]. If the heaps can be constructed in sorted order, the destination array will naturally contain the final sorted list.
33
- */
34
-
35
- // This implementation assumes that if there are |i| items on the left side, there must be at least |i| items on the right side.
36
- template <typename ArrayT, typename ComparatorT>
37
- struct ParallelLeftMerge {
38
- ArrayT & source, & destination;
39
- const ComparatorT & comparator;
40
- std::size_t lower_bound, middle_bound;
41
-
42
- void operator()() {
43
- std::size_t left = lower_bound;
44
- std::size_t right = middle_bound;
45
- std::size_t offset = lower_bound;
46
-
47
- while (offset < middle_bound) {
48
- if (comparator(source[left], source[right])) {
49
- destination[offset++] = source[left++];
50
- } else {
51
- destination[offset++] = source[right++];
52
- }
53
- }
54
- }
55
- };
56
-
57
- // This implementation assumes that if there are |j| items on the right side, there are at least |j| - 1 items on the left side.
58
- template <typename ArrayT, typename ComparatorT>
59
- struct ParallelRightMerge {
60
- ArrayT & source, & destination;
61
- const ComparatorT & comparator;
62
- std::size_t lower_bound, middle_bound, upper_bound;
63
-
64
- void operator()() {
65
- std::size_t left = middle_bound-1;
66
- std::size_t right = upper_bound-1;
67
- std::size_t offset = upper_bound-1;
68
-
69
- while (offset >= middle_bound) {
70
- if (comparator(source[left], source[right])) {
71
- destination[offset--] = source[right--];
72
- } else {
73
- destination[offset--] = source[left--];
74
- if (left == lower_bound) {
75
- // There are no more items on left hand side - in this case, there is at most one more item on right side to copy.
76
- if (offset >= middle_bound) {
77
- destination[offset] = source[right];
78
- }
79
-
80
- break;
81
- }
82
- }
83
- }
84
- }
85
- };
86
-
87
- // Merge two sorted sub-sequences sequentially (from left to right).
88
- // Is it possible to merge without copying from source to destination, and what are the performance implications?
89
- template <typename ArrayT, typename ComparatorT>
90
- void merge (ArrayT & source, ArrayT & destination, const ComparatorT & comparator, std::size_t lower_bound, std::size_t middle_bound, std::size_t upper_bound) {
91
- std::size_t left = lower_bound;
92
- std::size_t right = middle_bound;
93
- std::size_t offset = lower_bound;
94
-
95
- // We merge both sub-sequences, defined as [lower_bound, middle_bound] and [middle_bound, upper_bound].
96
- while (true) {
97
- if (comparator(source[left], source[right])) {
98
- destination[offset++] = source[left++];
99
-
100
- // If we have adjusted left, we may have exhausted left side:
101
- if (left == middle_bound) {
102
- // We have no more elements in lower half.
103
- std::copy(source.begin() + right, source.begin() + upper_bound, destination.begin() + offset);
104
- break;
105
- }
106
- } else {
107
- destination[offset++] = source[right++];
108
-
109
- // As above, we may have exhausted right side:
110
- if (right == upper_bound) {
111
- // We have no more elements in upper half.
112
- std::copy(source.begin() + left, source.begin() + middle_bound, destination.begin() + offset);
113
- break;
114
- }
115
- }
116
- }
117
- }
118
-
119
- template <typename ArrayT, typename ComparatorT>
120
- void partition(ArrayT & array, ArrayT & temporary, const ComparatorT & comparator, std::size_t lower_bound, std::size_t upper_bound, std::size_t threaded);
121
-
122
- // This functor is used for parallelizing the top level partition function.
123
- template <typename ArrayT, typename ComparatorT>
124
- struct ParallelPartition {
125
- ArrayT & array, & temporary;
126
- const ComparatorT & comparator;
127
- std::size_t lower_bound, upper_bound, threaded;
128
-
129
- void operator()() {
130
- partition(array, temporary, comparator, lower_bound, upper_bound, threaded);
131
- }
132
- };
133
-
134
- /** Recursive Partition Algorithm.
135
-
136
- This algorithm uses O(2n) memory to reduce the amount of copies that occurs. It does this by using a parity such that at each point in the partition tree we provide a source and destination. Given the functions P (partition) and M (merge), we have the following theorem:
137
-
138
- P(A=source, B=destination) sorts source into destination. A=[...] means that we are considering only a subset of A. Subscript is not given, but should be intuitive given the definition of merge sort. (x) on the left gives the order of each step as performed sequentially.
139
-
140
- == [ PARTITION ] == == [ MERGE ] ==
141
-
142
- (1) P(A=[1,3,4,2], B=[1,3,2,4]) (14) M(A=[1,3,2,4], B): B = [1,2,3,4]
143
- |
144
- (2) |---P(B=[1,3], A=[1,3]) (7) M(B=[1,3], A): A=[1,3]
145
- | |
146
- (3) | |---P(A=[1], B=[1]) (4) M(A=[1], B): B=[1]
147
- (5) | \---P(A=[3], B=[3]) (6) M(A=[3], B): B=[3]
148
- |
149
- (8) \---P(B=[4,2], A=[4,2]) (13) M(B=[4,2], A): A = [2,4]
150
- |
151
- (9) |---P(A=[4],B=[4]) (10) M(A=[4], B): B=[4]
152
- (11) \---P(A=[2],B=[2]) (12) M(A=[2], B): B=[2]
153
-
154
- During merge, we fold back up, and alternate between A and B for the current storage. This avoids the need to dynamically allocate memory during sort and avoids unnecessary copies.
155
-
156
- */
157
-
158
- // Sequential partition algorithm. Provide an array, and an upper and lower bound to sort.
159
- template <typename ArrayT, typename ComparatorT>
160
- void partition(ArrayT & source, ArrayT & destination, const ComparatorT & comparator, const std::size_t & lower_bound, const std::size_t & upper_bound) {
161
- std::size_t count = upper_bound - lower_bound;
162
-
163
- // In the case where count == 1, we are at the very bottom of the tree and both source and destination will be the same.
164
- // The same applies when count == 2, but we might need to swap the items around if they are not in the right order.
165
- if (count == 2) {
166
- if (!comparator(destination[lower_bound], destination[lower_bound+1])) {
167
- std::swap(destination[lower_bound], destination[lower_bound+1]);
168
- }
169
- // After this point, where count > 2, source and destination are different.
170
- } else if (count > 2) {
171
- std::size_t middle_bound = (lower_bound + upper_bound) / 2;
172
-
173
- // While it is possible to simply call partition, we try to avoid recursion by folding up the bottom two cases:
174
- // (count == 1), do nothing
175
- // (count == 2), swap if order is not correct
176
- // (count > 2), partition
177
- // After profiling, I found that the benefit of unrolling (count == 2) was minimal - there was about a 2-3% improvement.
178
-
179
- std::size_t lower_count = middle_bound - lower_bound;
180
- if (lower_count > 1)
181
- partition(destination, source, comparator, lower_bound, middle_bound);
182
-
183
- std::size_t upper_count = upper_bound - middle_bound;
184
- if (upper_count > 1)
185
- partition(destination, source, comparator, middle_bound, upper_bound);
186
-
187
- merge(source, destination, comparator, lower_bound, middle_bound, upper_bound);
188
- }
189
- }
190
-
191
- /** Parallel Partition Algorithm
192
-
193
- This parallel partition algorithm which controls the downward descent of the merge sort algorithm is designed for large datasets. Because merge sort follows a binary tree structure, the work is essentially split between two threads at each node in the tree. Firstly, we must recursively call partition on two separate threads. Once this is done, we have two ascending sequences, and we merge these together, again using two threads, one for left sequence and one for right sequence.
194
-
195
- Because higher level threads will be waiting on lower level threads, the value of threaded should be equal to 2^threaded == processors for best performance.
196
-
197
- */
198
-
199
- // Use this to control whether parallel partition is used.
200
- // For large data sets > 500_000 items, you will see an improvement of about ~50% per thread.
201
- const bool PARALLEL_PARTITION = true;
202
-
203
- // Use this to control whether parallel merge is used.
204
- // For large data sets > 1_000_000 items, you will see an improvement of about 15%.
205
- const bool PARALLEL_MERGE = true;
206
-
207
- // If you make this number too small, e.g. <= 2, you may cause synchronisation issues, because you will force parallelisation
208
- // for base cases which actually need to be sequential to ensure that comparison cache is generated correctly.
209
- const std::size_t PARALLEL_MERGE_MINIMUM_COUNT = 128;
210
-
211
- // Provide an array, and an upper and lower bound, along with the number of threads to use.
212
- template <typename ArrayT, typename ComparatorT>
213
- void partition(ArrayT & source, ArrayT & destination, const ComparatorT & comparator, std::size_t lower_bound, std::size_t upper_bound, std::size_t threaded) {
214
- std::size_t count = upper_bound - lower_bound;
215
-
216
- if (count > 1) {
217
- std::size_t middle_bound = (lower_bound + upper_bound) / 2;
218
-
219
- //Benchmark::WallTime tp;
220
- if (PARALLEL_PARTITION && threaded > 0) {
221
- // We could check whether there is any work to do before creating threads, but we assume
222
- // that threads will only be created high up in the tree by default, so there *should*
223
- // be a significant work available per-thread.
224
- ParallelPartition<ArrayT, ComparatorT>
225
- lower_partition = {destination, source, comparator, lower_bound, middle_bound, threaded - 1},
226
- upper_partition = {destination, source, comparator, middle_bound, upper_bound, threaded - 1};
227
-
228
- std::thread
229
- lower_thread(lower_partition),
230
- upper_thread(upper_partition);
231
-
232
- upper_thread.join();
233
- lower_thread.join();
234
- } else {
235
- // We have hit the bottom of our thread limit - could you use std::sort here for improved performance?
236
- partition(destination, source, comparator, lower_bound, middle_bound);
237
- partition(destination, source, comparator, middle_bound, upper_bound);
238
- }
239
- //std::cerr << "Partition Time: " << tp.total() << " [" << lower_bound << " -> " << upper_bound << " : " << threaded << " ]" << std::endl;
240
-
241
- //Benchmark::WallTime tm;
242
- if (PARALLEL_MERGE && threaded > 0 && count > PARALLEL_MERGE_MINIMUM_COUNT) {
243
- // By the time we get here, we are sure that both left and right partitions have been merged, e.g. we have two ordered sequences [lower_bound, middle_bound] and [middle_bound, upper_bound]. Now, we need to join them together:
244
- ParallelLeftMerge<ArrayT, ComparatorT> left_merge = {source, destination, comparator, lower_bound, middle_bound};
245
- ParallelRightMerge<ArrayT, ComparatorT> right_merge = {source, destination, comparator, lower_bound, middle_bound, upper_bound};
246
-
247
- std::thread
248
- left_thread(left_merge),
249
- right_thread(right_merge);
250
-
251
- left_thread.join();
252
- right_thread.join();
253
- } else {
254
- // We have hit the bottom of our thread limit, or the merge minimum count.
255
- merge(source, destination, comparator, lower_bound, middle_bound, upper_bound);
256
- }
257
- //std::cerr << "Merge Time: " << tm.total() << " [" << lower_bound << " -> " << upper_bound << " : " << threaded << " ]" << std::endl;
258
- }
259
- }
260
-
261
- /** Parallel Merge Sort, main entry point.
262
-
263
- Given an array of items, a comparator functor, use at most 2^threaded threads to sort the items.
264
-
265
- */
266
- template <typename ArrayT, typename ComparatorT>
267
- void sort(ArrayT & array, const ComparatorT & comparator, std::size_t threaded = 2) {
268
- // Is all this swapping around really necessary?
269
- ArrayT temporary(array.begin(), array.end());
270
-
271
- //Benchmark::WallTime ts;
272
- if (threaded == 0)
273
- partition(temporary, array, comparator, 0, array.size());
274
- else
275
- partition(temporary, array, comparator, 0, array.size(), threaded);
276
- //std::cerr << "Total sort time: " << ts.total() << std::endl;
277
- }
278
- }
@@ -1,131 +0,0 @@
1
- //
2
- // main.cpp
3
- // DictionarySort
4
- //
5
- // Created by Samuel Williams on 31/10/11.
6
- // Copyright, 2014, by Samuel G. D. Williams. <http://www.codeotaku.com>
7
- //
8
-
9
- #include <iostream>
10
- #include <algorithm>
11
-
12
- #include "Benchmark.h"
13
- #include "DictionarySort.h"
14
-
15
- // Print out vectors using a simple [item0, item1, ... itemn] format.
16
- template <typename AnyT>
17
- std::ostream& operator<< (std::ostream &o, const std::vector<AnyT> & v)
18
- {
19
- bool first = true;
20
-
21
- o << "[";
22
- for (typename std::vector<AnyT>::const_iterator i = v.begin(); i != v.end(); ++i) {
23
- if (first)
24
- first = false;
25
- else
26
- o << ", ";
27
-
28
- o << *i;
29
- }
30
- o << "]";
31
-
32
- return o;
33
- }
34
-
35
- static void test_parallel_merge ()
36
- {
37
- typedef std::vector<long long> ArrayT;
38
- typedef std::less<long long> ComparatorT;
39
- ComparatorT comparator;
40
-
41
- const long long data[] = {
42
- 2, 4, 6, 8, 12,
43
- 1, 3, 5, 10, 11
44
- };
45
-
46
- ArrayT a(data, data+(sizeof(data)/sizeof(*data)));
47
- ArrayT b(a.size());
48
-
49
- ParallelMergeSort::ParallelLeftMerge<ArrayT, ComparatorT> left_merge = {a, b, comparator, 0, a.size() / 2};
50
- left_merge();
51
-
52
- std::cout << "After Left: " << b << std::endl;
53
-
54
- ParallelMergeSort::ParallelRightMerge<ArrayT, ComparatorT> right_merge = {a, b, comparator, 0, a.size() / 2, a.size()};
55
- right_merge();
56
-
57
- std::cout << "After Right: " << b << std::endl;
58
- }
59
-
60
- static void test_sort ()
61
- {
62
- typedef std::vector<long long> ArrayT;
63
- typedef std::less<long long> ComparatorT;
64
- ComparatorT comparator;
65
-
66
- const long long data[] = {
67
- 11, 2, 4, 6, 8, 10, 12, 1, 3, 5, 7, 9, 13
68
- };
69
-
70
- std::vector<long long> v(data, data+(sizeof(data)/sizeof(*data)));
71
-
72
- std::cerr << "Sorting " << v << std::endl;
73
-
74
- ParallelMergeSort::sort(v, comparator, 0);
75
-
76
- std::cerr << "Sorted " << v << std::endl;
77
- }
78
-
79
- static void test_dictionary ()
80
- {
81
- // This defines a dictionary based on ASCII characters.
82
- typedef DictionarySort::Dictionary<char, DictionarySort::IndexT[256]> ASCIIDictionaryT;
83
-
84
- // For unicode characters, you could use something like this:
85
- // typedef DictionarySort::Dictionary<uint32_t, std::map<uint32_t, DictionarySort::IndexT>> UCS32DictionaryT;
86
- // Be aware that
87
-
88
- std::string s = "AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz";
89
- ASCIIDictionaryT::WordT alphabet(s.begin(), s.end());
90
- ASCIIDictionaryT dictionary(alphabet);
91
-
92
- ASCIIDictionaryT::WordsT words, sorted_words;
93
- const std::size_t MAX_LENGTH = 25;
94
- const std::size_t MAX_COUNT = 2500000;
95
- for (std::size_t i = 0; i < MAX_COUNT; i += 1) {
96
- ASCIIDictionaryT::WordT word;
97
- for (std::size_t j = i; (j-i) <= (i ^ (i * 21)) % MAX_LENGTH; j += 1) {
98
- word.push_back(alphabet[(j ^ (j << (i % 4))) % alphabet.size()]);
99
- }
100
- words.push_back(word);
101
- }
102
-
103
- std::cerr << "Sorting " << words.size() << " words..." << std::endl;
104
- std::cerr << "Sort mode = " << DictionarySort::SORT_MODE << std::endl;
105
-
106
- if (DictionarySort::SORT_MODE > 0)
107
- std::cerr << "Parallel merge thread count: " << (1 << (DictionarySort::SORT_MODE+1)) - 2 << std::endl;
108
-
109
- const int K = 4;
110
- Benchmark::WallTime t;
111
- Benchmark::ProcessorTime processor_time;
112
-
113
- uint64_t checksum;
114
- for (std::size_t i = 0; i < K; i += 1) {
115
- checksum = dictionary.sort(words, sorted_words);
116
- }
117
- Benchmark::TimeT elapsed_time = t.total() / K;
118
-
119
- std::cerr << "Checksum: " << checksum << " ? " << (checksum == 479465310674138860) << std::endl;
120
- std::cerr << "Total Time: " << elapsed_time << std::endl;
121
- }
122
-
123
- int main (int argc, const char * argv[])
124
- {
125
- //test_parallel_merge();
126
- //test_sort();
127
- test_dictionary();
128
-
129
- return 0;
130
- }
131
-
@@ -1 +0,0 @@
1
- main.cpp.o: main.cpp Benchmark.h DictionarySort.h ParallelMergeSort.h
Binary file
@@ -1,69 +0,0 @@
1
- #!/usr/bin/env rspec
2
- # Copyright, 2012, by Samuel G. D. Williams. <http://www.codeotaku.com>
3
- #
4
- # Permission is hereby granted, free of charge, to any person obtaining a copy
5
- # of this software and associated documentation files (the "Software"), to deal
6
- # in the Software without restriction, including without limitation the rights
7
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
- # copies of the Software, and to permit persons to whom the Software is
9
- # furnished to do so, subject to the following conditions:
10
- #
11
- # The above copyright notice and this permission notice shall be included in
12
- # all copies or substantial portions of the Software.
13
- #
14
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20
- # THE SOFTWARE.
21
-
22
- require 'build/graph/node'
23
- require 'build/graph/walker'
24
- require 'build/graph/task'
25
- require 'build/files/glob'
26
-
27
- require_relative 'process_graph'
28
-
29
- RSpec.describe Build::Graph::Task do
30
- it "should wait for children" do
31
- node_a = Build::Graph::Node.new(Build::Files::Paths::NONE, Build::Files::Paths::NONE)
32
- node_b = Build::Graph::Node.new(Build::Files::Paths::NONE, :inherit)
33
-
34
- nodes = Set.new([node_a])
35
-
36
- sequence = []
37
-
38
- # A walker runs repeatedly, updating tasks which have been marked as dirty.
39
- walker = Build::Graph::Walker.new do |walker, node|
40
- task = Build::Graph::Task.new(walker, node)
41
-
42
- task.visit do
43
- sequence << [:entered, node]
44
-
45
- if node == node_a
46
- # This will invoke node_b concurrently, but as it is a child, task.visit won't finish until node_b is done.
47
- task.invoke(node_b)
48
- end
49
- end
50
-
51
- sequence << [:exited, node]
52
- end
53
-
54
- walker.update(nodes)
55
-
56
- expect(walker.tasks.count).to be == 2
57
- expect(walker.failed_tasks.count).to be == 0
58
-
59
- task_b = walker.tasks[node_b]
60
- expect(walker.tasks[node_a].children).to be == [task_b]
61
-
62
- expect(sequence).to be == [
63
- [:entered, node_a],
64
- [:entered, node_b],
65
- [:exited, node_b],
66
- [:exited, node_a]
67
- ]
68
- end
69
- end
@@ -1,125 +0,0 @@
1
- #!/usr/bin/env rspec
2
- # Copyright, 2012, by Samuel G. D. Williams. <http://www.codeotaku.com>
3
- #
4
- # Permission is hereby granted, free of charge, to any person obtaining a copy
5
- # of this software and associated documentation files (the "Software"), to deal
6
- # in the Software without restriction, including without limitation the rights
7
- # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8
- # copies of the Software, and to permit persons to whom the Software is
9
- # furnished to do so, subject to the following conditions:
10
- #
11
- # The above copyright notice and this permission notice shall be included in
12
- # all copies or substantial portions of the Software.
13
- #
14
- # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15
- # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16
- # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17
- # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18
- # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19
- # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
20
- # THE SOFTWARE.
21
-
22
- require 'build/graph/node'
23
- require 'build/graph/walker'
24
- require 'build/graph/task'
25
- require 'build/files/glob'
26
-
27
- RSpec.describe Build::Graph::Walker do
28
- it "can generate the same output from multiple tasks" do
29
- test_glob = Build::Files::Glob.new(__dir__, "*.rb")
30
- listing_output = Build::Files::Paths.directory(__dir__, ["listing.txt"])
31
-
32
- node_a = Build::Graph::Node.new(test_glob, listing_output)
33
- node_b = Build::Graph::Node.new(Build::Files::Paths::NONE, listing_output)
34
-
35
- sequence = []
36
-
37
- # A walker runs repeatedly, updating tasks which have been marked as dirty.
38
- walker = Build::Graph::Walker.new do |walker, node|
39
- task = Build::Graph::Task.new(walker, node)
40
-
41
- task.visit do
42
- if node == node_a
43
- task.invoke(node_b)
44
- end
45
-
46
- node.outputs.each do |output|
47
- output.touch
48
- end
49
-
50
- sequence << node
51
- end
52
- end
53
-
54
- edge = double()
55
- walker.outputs[listing_output.first.to_s] ||= [edge]
56
- expect(edge).to receive(:traverse)
57
-
58
- walker.update([node_a, node_a])
59
-
60
- expect(walker.tasks.count).to be == 2
61
- expect(walker.failed_tasks.count).to be == 0
62
- expect(sequence).to be == [node_b, node_a]
63
- end
64
-
65
- it "should be unique" do
66
- test_glob = Build::Files::Glob.new(__dir__, "*.rb")
67
- listing_output = Build::Files::Paths.directory(__dir__, ["listing.txt"])
68
-
69
- node_a = Build::Graph::Node.new(test_glob, listing_output)
70
- node_b = Build::Graph::Node.new(listing_output, Build::Files::Paths::NONE)
71
-
72
- sequence = []
73
-
74
- # A walker runs repeatedly, updating tasks which have been marked as dirty.
75
- walker = Build::Graph::Walker.new do |walker, node|
76
- task = Build::Graph::Task.new(walker, node)
77
-
78
- task.visit do
79
- node.outputs.each do |output|
80
- output.touch
81
- end
82
-
83
- sequence << node
84
- end
85
- end
86
-
87
- walker.update([node_a, node_b])
88
-
89
- expect(walker.tasks.count).to be == 2
90
- expect(walker.failed_tasks.count).to be == 0
91
- expect(sequence).to be == [node_a, node_b]
92
- end
93
-
94
- it "should cascade failure" do
95
- test_glob = Build::Files::Glob.new(__dir__, "*.rb")
96
- listing_output = Build::Files::Paths.directory(__dir__, ["listing.txt"])
97
- summary_output = Build::Files::Paths.directory(__dir__, ["summary.txt"])
98
-
99
- node_a = Build::Graph::Node.new(test_glob, listing_output)
100
- node_b = Build::Graph::Node.new(listing_output, summary_output)
101
-
102
- # A walker runs repeatedly, updating tasks which have been marked as dirty.
103
- walker = Build::Graph::Walker.new do |walker, node|
104
- task = Build::Graph::Task.new(walker, node)
105
-
106
- task.visit do
107
- if node == node_a
108
- raise Build::Graph::TransientError.new('Test Failure')
109
- end
110
- end
111
- end
112
-
113
- walker.update([node_a, node_b])
114
-
115
- expect(walker.tasks.count).to be == 2
116
- expect(walker.failed_tasks.count).to be == 2
117
- expect(listing_output).to be_intersect walker.failed_outputs
118
- expect(summary_output).to be_intersect walker.failed_outputs
119
-
120
- walker.clear_failed
121
-
122
- expect(walker.tasks.count).to be == 0
123
- expect(walker.failed_tasks.count).to be == 0
124
- end
125
- end
data/spec/spec_helper.rb DELETED
@@ -1,13 +0,0 @@
1
-
2
- require 'covered/rspec'
3
-
4
- RSpec.configure do |config|
5
- config.disable_monkey_patching!
6
-
7
- # Enable flags like --only-failures and --next-failure
8
- config.example_status_persistence_file_path = ".rspec_status"
9
-
10
- config.expect_with :rspec do |c|
11
- c.syntax = :expect
12
- end
13
- end