chef 11.14.0.alpha.2-x86-mingw32 → 11.14.0.alpha.3-x86-mingw32

This diff shows the changes between two publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (80)
  1. checksums.yaml +4 -4
  2. data/bin/chef-service-manager +1 -1
  3. data/lib/chef/application.rb +8 -2
  4. data/lib/chef/chef_fs/command_line.rb +4 -4
  5. data/lib/chef/chef_fs/file_system.rb +3 -3
  6. data/lib/chef/chef_fs/parallelizer.rb +66 -90
  7. data/lib/chef/chef_fs/parallelizer/flatten_enumerable.rb +35 -0
  8. data/lib/chef/chef_fs/parallelizer/parallel_enumerable.rb +279 -0
  9. data/lib/chef/config.rb +36 -2
  10. data/lib/chef/cookbook/cookbook_version_loader.rb +0 -1
  11. data/lib/chef/cookbook/synchronizer.rb +64 -42
  12. data/lib/chef/cookbook_uploader.rb +4 -25
  13. data/lib/chef/cookbook_version.rb +12 -11
  14. data/lib/chef/formatters/error_inspectors/api_error_formatting.rb +18 -1
  15. data/lib/chef/formatters/error_inspectors/cookbook_sync_error_inspector.rb +1 -3
  16. data/lib/chef/knife/bootstrap.rb +23 -1
  17. data/lib/chef/knife/bootstrap/chef-aix.erb +58 -0
  18. data/lib/chef/knife/bootstrap/chef-full.erb +16 -13
  19. data/lib/chef/knife/core/bootstrap_context.rb +25 -1
  20. data/lib/chef/knife/list.rb +9 -8
  21. data/lib/chef/knife/serve.rb +44 -0
  22. data/lib/chef/knife/show.rb +2 -3
  23. data/lib/chef/knife/ssh.rb +1 -0
  24. data/lib/chef/mixin/create_path.rb +20 -4
  25. data/lib/chef/node.rb +19 -3
  26. data/lib/chef/platform/provider_mapping.rb +0 -1
  27. data/lib/chef/platform/query_helpers.rb +4 -3
  28. data/lib/chef/provider/env/windows.rb +10 -3
  29. data/lib/chef/provider/file.rb +1 -1
  30. data/lib/chef/provider/mount.rb +84 -42
  31. data/lib/chef/provider/package/freebsd/base.rb +92 -0
  32. data/lib/chef/provider/package/freebsd/pkg.rb +113 -0
  33. data/lib/chef/provider/package/freebsd/pkgng.rb +80 -0
  34. data/lib/chef/provider/package/freebsd/port.rb +70 -0
  35. data/lib/chef/providers.rb +3 -1
  36. data/lib/chef/resource/chef_gem.rb +2 -1
  37. data/lib/chef/resource/freebsd_package.rb +39 -3
  38. data/lib/chef/resource/lwrp_base.rb +2 -2
  39. data/lib/chef/resource/mount.rb +9 -9
  40. data/lib/chef/util/threaded_job_queue.rb +61 -0
  41. data/lib/chef/version.rb +1 -1
  42. data/lib/chef/version/platform.rb +2 -0
  43. data/lib/chef/whitelist.rb +82 -0
  44. data/lib/chef/win32/registry.rb +0 -1
  45. data/lib/chef/win32/version.rb +4 -3
  46. data/spec/functional/win32/versions_spec.rb +4 -4
  47. data/spec/integration/client/ipv6_spec.rb +1 -1
  48. data/spec/integration/knife/chef_fs_data_store_spec.rb +1 -1
  49. data/spec/integration/knife/chef_repo_path_spec.rb +4 -1
  50. data/spec/integration/knife/common_options_spec.rb +9 -9
  51. data/spec/integration/knife/cookbook_api_ipv6_spec.rb +2 -2
  52. data/spec/integration/knife/deps_spec.rb +3 -0
  53. data/spec/integration/knife/list_spec.rb +3 -0
  54. data/spec/integration/knife/raw_spec.rb +5 -2
  55. data/spec/integration/knife/redirection_spec.rb +4 -1
  56. data/spec/integration/knife/serve_spec.rb +57 -0
  57. data/spec/integration/knife/show_spec.rb +3 -0
  58. data/spec/support/pedant/run_pedant.rb +1 -0
  59. data/spec/support/platform_helpers.rb +7 -5
  60. data/spec/support/shared/context/config.rb +21 -0
  61. data/spec/support/shared/functional/file_resource.rb +52 -0
  62. data/spec/unit/chef_fs/parallelizer.rb +482 -0
  63. data/spec/unit/client_spec.rb +4 -2
  64. data/spec/unit/config_spec.rb +66 -12
  65. data/spec/unit/knife/bootstrap_spec.rb +6 -0
  66. data/spec/unit/knife/core/bootstrap_context_spec.rb +31 -1
  67. data/spec/unit/node_spec.rb +73 -3
  68. data/spec/unit/provider/mount_spec.rb +102 -79
  69. data/spec/unit/provider/package/{freebsd_spec.rb → freebsd/pkg_spec.rb} +19 -32
  70. data/spec/unit/provider/package/freebsd/pkgng_spec.rb +155 -0
  71. data/spec/unit/provider/package/freebsd/port_spec.rb +160 -0
  72. data/spec/unit/resource/chef_gem_spec.rb +5 -0
  73. data/spec/unit/resource/freebsd_package_spec.rb +63 -11
  74. data/spec/unit/resource/mount_spec.rb +11 -0
  75. data/spec/unit/role_spec.rb +5 -1
  76. data/spec/unit/run_lock_spec.rb +2 -0
  77. data/spec/unit/util/threaded_job_queue_spec.rb +51 -0
  78. data/spec/unit/version/platform_spec.rb +1 -1
  79. metadata +176 -161
  80. data/lib/chef/provider/package/freebsd.rb +0 -149
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 2ced39de899d69f11a2f1747224152816cf59df3
- data.tar.gz: 11a9790eff2bcf5847c5394681a5f05298646b61
+ metadata.gz: ca57eea9ba09666366f61e98b0dab8d63d623c06
+ data.tar.gz: 5d11001dc04d0880f93d415ac7db02f732b41057
  SHA512:
- metadata.gz: 68df6f434ed71a0b92f59e1e206bd151508c76a35738cd2f3b4f67244b7cfb70f4f35dd650ff25ef87863bd06c215722d4d10a198d1c5793b0fe2ae7ddf640ae
- data.tar.gz: 6dbc426e742eaa3987bd782d76b222975f719e509ed31951254fa48441ca4272f15c1128c5678d3fe746e081d12ad68a6668e20103ab039c769843045e83d0f5
+ metadata.gz: a9e09e827e4c901e8fb290a7318a4f14219cf703bcaf6eff670c1a17578ea9d52524a7330177bc5ab616ad39340f40968fa84f5b9f523f4c1f2a03cad3824831
+ data.tar.gz: 790b43ac6ddc09eb7a82521cad5cd0feb8e25d234e8f8febdd1f7cb9913d5f15458ddf858f2b7bc0751d251c19815e335e532e31bd56aa9b45dad2154f9936cc

data/bin/chef-service-manager CHANGED
@@ -27,7 +27,7 @@ if Chef::Platform.windows?
  chef_client_service = {
  :service_name => "chef-client",
  :service_display_name => "Chef Client Service",
- :service_description => "Runs Opscode Chef Client on regular, configurable intervals.",
+ :service_description => "Runs Chef Client on regular, configurable intervals.",
  :service_file_path => File.expand_path(File.join(File.dirname(__FILE__), '../lib/chef/application/windows_service.rb'))
  }
  Chef::Application::WindowsServiceManager.new(chef_client_service).run

data/lib/chef/application.rb CHANGED
@@ -185,17 +185,23 @@ class Chef::Application

  chef_fs = Chef::ChefFS::Config.new.local_fs
  chef_fs.write_pretty_json = true
+ data_store = Chef::ChefFS::ChefFSDataStore.new(chef_fs)
  server_options = {}
- server_options[:data_store] = Chef::ChefFS::ChefFSDataStore.new(chef_fs)
+ server_options[:data_store] = data_store
  server_options[:log_level] = Chef::Log.level
  server_options[:port] = Chef::Config.chef_zero.port
- Chef::Log.info("Starting chef-zero on port #{Chef::Config.chef_zero.port} with repository at #{server_options[:data_store].chef_fs.fs_description}")
+ server_options[:host] = Chef::Config.chef_zero.host
+ Chef::Log.info("Starting chef-zero on port #{Chef::Config.chef_zero.port} with repository at #{chef_fs.fs_description}")
  @chef_zero_server = ChefZero::Server.new(server_options)
  @chef_zero_server.start_background
  Chef::Config.chef_server_url = @chef_zero_server.url
  end
  end

+ def self.chef_zero_server
+ @chef_zero_server
+ end
+
  def self.destroy_server_connectivity
  if @chef_zero_server
  @chef_zero_server.stop
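
For context, the hunk above adds a configurable bind address for the embedded chef-zero server alongside the existing port option. A minimal sketch of setting both from Ruby, based only on the names read in the diff (the host and port values are illustrative, not defaults):

    # Sketch only: chef_zero.host is the option the new server_options[:host]
    # line reads; chef_zero.port already existed. Values are illustrative.
    require 'chef/config'

    Chef::Config.chef_zero.host = '127.0.0.1'
    Chef::Config.chef_zero.port = 8889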

data/lib/chef/chef_fs/command_line.rb CHANGED
@@ -129,9 +129,9 @@ class Chef
  end

  def self.diff(pattern, old_root, new_root, recurse_depth, get_content)
- Chef::ChefFS::Parallelizer.parallelize(Chef::ChefFS::FileSystem.list_pairs(pattern, old_root, new_root), :flatten => true) do |old_entry, new_entry|
+ Chef::ChefFS::Parallelizer.parallelize(Chef::ChefFS::FileSystem.list_pairs(pattern, old_root, new_root)) do |old_entry, new_entry|
  diff_entries(old_entry, new_entry, recurse_depth, get_content)
- end
+ end.flatten(1)
  end

  # Diff two known entries (could be files or dirs)
@@ -142,9 +142,9 @@
  if recurse_depth == 0
  return [ [ :common_subdirectories, old_entry, new_entry ] ]
  else
- return Chef::ChefFS::Parallelizer.parallelize(Chef::ChefFS::FileSystem.child_pairs(old_entry, new_entry), :flatten => true) do |old_child, new_child|
+ return Chef::ChefFS::Parallelizer.parallelize(Chef::ChefFS::FileSystem.child_pairs(old_entry, new_entry)) do |old_child, new_child|
  Chef::ChefFS::CommandLine.diff_entries(old_child, new_child, recurse_depth ? recurse_depth - 1 : nil, get_content)
- end
+ end.flatten(1)
  end

  # If old is a directory and new is a file

data/lib/chef/chef_fs/file_system.rb CHANGED
@@ -72,8 +72,8 @@ class Chef

  # Otherwise, go through all children and find any matches
  elsif entry.dir?
- results = Parallelizer::parallelize(entry.children, :flatten => true) { |child| Chef::ChefFS::FileSystem.list(child, pattern) }
- results.each(&block)
+ results = Parallelizer::parallelize(entry.children) { |child| Chef::ChefFS::FileSystem.list(child, pattern) }
+ results.flatten(1).each(&block)
  end
  end
  end
@@ -419,7 +419,7 @@ class Chef
  end

  def self.parallel_do(enum, options = {}, &block)
- Chef::ChefFS::Parallelizer.parallelize(enum, options, &block).to_a
+ Chef::ChefFS::Parallelizer.parallel_do(enum, options, &block)
  end
  end
  end
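
The change in the two files above follows the same pattern: the old :flatten => true option to Parallelizer.parallelize is gone, and callers now flatten the returned enumerable themselves. A minimal before/after sketch, with made-up inputs (paths, children_of):

    # alpha.2 style: flattening requested via an option
    #   Chef::ChefFS::Parallelizer.parallelize(paths, :flatten => true) { |p| children_of(p) }
    # alpha.3 style: parallelize returns an enumerable; flatten one level explicitly
    results = Chef::ChefFS::Parallelizer.parallelize(paths) { |p| children_of(p) }.flatten(1)
    results.each { |child| puts child }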

data/lib/chef/chef_fs/parallelizer.rb CHANGED
@@ -1,127 +1,103 @@
+ require 'thread'
+ require 'chef/chef_fs/parallelizer/parallel_enumerable'
+
  class Chef
  module ChefFS
+ # Tries to balance several guarantees, in order of priority:
+ # - don't get deadlocked
+ # - provide results in desired order
+ # - provide results as soon as they are available
+ # - process input as soon as possible
  class Parallelizer
  @@parallelizer = nil
  @@threads = 0

  def self.threads=(value)
- if @@threads != value
- @@threads = value
- @@parallelizer = nil
- end
+ @@threads = value
+ @@parallelizer.resize(value) if @@parallelizer
  end

- def self.parallelize(enumerator, options = {}, &block)
+ def self.parallelizer
  @@parallelizer ||= Parallelizer.new(@@threads)
- @@parallelizer.parallelize(enumerator, options, &block)
  end

- def initialize(threads)
- @tasks_mutex = Mutex.new
- @tasks = []
- @threads = []
- 1.upto(threads) do
- @threads << Thread.new { worker_loop }
- end
+ def self.parallelize(enumerable, options = {}, &block)
+ parallelizer.parallelize(enumerable, options, &block)
  end

- def parallelize(enumerator, options = {}, &block)
- task = ParallelizedResults.new(enumerator, options, &block)
- @tasks_mutex.synchronize do
- @tasks << task
- end
- task
+ def self.parallel_do(enumerable, options = {}, &block)
+ parallelizer.parallel_do(enumerable, options, &block)
  end

- class ParallelizedResults
- include Enumerable
+ def initialize(num_threads)
+ @tasks = Queue.new
+ @threads = []
+ @stop_thread = {}
+ resize(num_threads)
+ end

- def initialize(enumerator, options, &block)
- @inputs = enumerator.to_a
- @options = options
- @block = block
+ def num_threads
+ @threads.size
+ end

- @mutex = Mutex.new
- @outputs = []
- @status = []
- end
+ def parallelize(enumerable, options = {}, &block)
+ ParallelEnumerable.new(@tasks, enumerable, options, &block)
+ end

- def each
- next_index = 0
- while true
- # Report any results that already exist
- while @status.length > next_index && ([:finished, :exception].include?(@status[next_index]))
- if @status[next_index] == :finished
- if @options[:flatten]
- @outputs[next_index].each do |entry|
- yield entry
- end
- else
- yield @outputs[next_index]
- end
- else
- raise @outputs[next_index]
- end
- next_index = next_index + 1
- end
+ def parallel_do(enumerable, options = {}, &block)
+ ParallelEnumerable.new(@tasks, enumerable, options.merge(:ordered => false), &block).wait
+ end

- # Pick up a result and process it, if there is one. This ensures we
- # move forward even if there are *zero* worker threads available.
- if !process_input
- # Exit if we're done.
- if next_index >= @status.length
- break
- else
- # Ruby 1.8 threading sucks. Wait till we process more things.
- sleep(0.05)
- end
- end
+ def stop(wait = true, timeout = nil)
+ resize(0, wait, timeout)
+ end
+
+ def resize(to_threads, wait = true, timeout = nil)
+ if to_threads < num_threads
+ threads_to_stop = @threads[to_threads..num_threads-1]
+ @threads = @threads.slice(0, to_threads)
+ threads_to_stop.each do |thread|
+ @stop_thread[thread] = true
  end
- end

- def process_input
- # Grab the next one to process
- index, input = @mutex.synchronize do
- index = @status.length
- if index >= @inputs.length
- return nil
+ if wait
+ start_time = Time.now
+ threads_to_stop.each do |thread|
+ thread_timeout = timeout ? timeout - (Time.now - start_time) : nil
+ thread.join(thread_timeout)
  end
- input = @inputs[index]
- @status[index] = :started
- [ index, input ]
  end

- begin
- @outputs[index] = @block.call(input)
- @status[index] = :finished
- rescue Exception
- @outputs[index] = $!
- @status[index] = :exception
+ else
+ num_threads.upto(to_threads - 1) do |i|
+ @threads[i] = Thread.new(&method(:worker_loop))
  end
- index
  end
  end

+ def kill
+ @threads.each do |thread|
+ Thread.kill(thread)
+ @stop_thread.delete(thread)
+ end
+ @threads = []
+ end
+
  private

  def worker_loop
- while true
- begin
- task = @tasks[0]
- if task
- if !task.process_input
- @tasks_mutex.synchronize do
- @tasks.delete(task)
- end
- end
- else
- # Ruby 1.8 threading sucks. Wait a bit to see if another task comes in.
- sleep(0.05)
+ begin
+ while !@stop_thread[Thread.current]
+ begin
+ task = @tasks.pop
+ task.call
+ rescue
+ puts "ERROR #{$!}"
+ puts $!.backtrace
  end
- rescue
- puts "ERROR #{$!}"
- puts $!.backtrace
  end
+ ensure
+ @stop_thread.delete(Thread.current)
  end
  end
  end
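
Taken together, the rewrite above leaves Parallelizer with a small public surface: a shared pool sized via threads=, parallelize for collecting results, parallel_do for unordered side effects, and resize/stop/kill for pool management. A minimal usage sketch against that API (node_names and the blocks are placeholders):

    # Sketch only; inputs and blocks are illustrative.
    Chef::ChefFS::Parallelizer.threads = 10

    # parallelize returns a ParallelEnumerable; results come back in input order by default.
    lengths = Chef::ChefFS::Parallelizer.parallelize(node_names) { |name| name.length }.to_a

    # parallel_do runs the block for side effects, unordered, and waits for completion.
    Chef::ChefFS::Parallelizer.parallel_do(node_names) { |name| puts name }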

data/lib/chef/chef_fs/parallelizer/flatten_enumerable.rb ADDED
@@ -0,0 +1,35 @@
+ class Chef
+ module ChefFS
+ class Parallelizer
+ class FlattenEnumerable
+ include Enumerable
+
+ def initialize(enum, levels = nil)
+ @enum = enum
+ @levels = levels
+ end
+
+ attr_reader :enum
+ attr_reader :levels
+
+ def each(&block)
+ enum.each do |value|
+ flatten(value, levels, &block)
+ end
+ end
+
+ private
+
+ def flatten(value, levels, &block)
+ if levels != 0 && value.respond_to?(:each) && !value.is_a?(String)
+ value.each do |child|
+ flatten(child, levels.nil? ? levels : levels-1, &block)
+ end
+ else
+ block.call(value)
+ end
+ end
+ end
+ end
+ end
+ end
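
FlattenEnumerable is what backs the flatten(levels) call used earlier: a levels of nil flattens recursively, a number limits the depth, and Strings are always treated as leaves. A small illustrative sketch (the input array and block are made up):

    # Illustrative only: a parallel map whose block returns nested arrays.
    nested = Chef::ChefFS::Parallelizer.parallelize([1, 2]) { |i| [[i], [i * 10]] }
    nested.flatten(1).to_a   # => [[1], [10], [2], [20]]  (one level removed)
    # nested.flatten.to_a would yield [1, 10, 2, 20] (levels = nil, fully flattened)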

data/lib/chef/chef_fs/parallelizer/parallel_enumerable.rb ADDED
@@ -0,0 +1,279 @@
+ require 'chef/chef_fs/parallelizer/flatten_enumerable'
+
+ class Chef
+ module ChefFS
+ class Parallelizer
+ class ParallelEnumerable
+ include Enumerable
+
+ # options:
+ # :ordered [true|false] - whether the output should stay in the same order
+ # as the input (even though it may not actually be processed in that
+ # order). Default: true
+ # :stop_on_exception [true|false] - if true, when an exception occurs in either
+ # input or output, we wait for any outstanding processing to complete,
+ # but will not process any new inputs. Default: false
+ # :main_thread_processing [true|false] - whether the main thread pulling
+ # on each() is allowed to process inputs. Default: true
+ # NOTE: If you set this to false, parallelizer.kill will stop each()
+ # in its tracks, so you need to know for sure that won't happen.
+ def initialize(parent_task_queue, input_enumerable, options = {}, &block)
+ @parent_task_queue = parent_task_queue
+ @input_enumerable = input_enumerable
+ @options = options
+ @block = block
+
+ @unconsumed_input = Queue.new
+ @in_process = {}
+ @unconsumed_output = Queue.new
+ end
+
+ attr_reader :parent_task_queue
+ attr_reader :input_enumerable
+ attr_reader :options
+ attr_reader :block
+
+ def each
+ each_with_input do |output, index, input, type|
+ yield output
+ end
+ end
+
+ def each_with_index
+ each_with_input do |output, index, input|
+ yield output, index
+ end
+ end
+
+ def each_with_input
+ exception = nil
+ each_with_exceptions do |output, index, input, type|
+ if type == :exception
+ if @options[:ordered] == false
+ exception ||= output
+ else
+ raise output
+ end
+ else
+ yield output, index, input
+ end
+ end
+ raise exception if exception
+ end
+
+ def each_with_exceptions(&block)
+ if @options[:ordered] == false
+ each_with_exceptions_unordered(&block)
+ else
+ each_with_exceptions_ordered(&block)
+ end
+ end
+
+ def wait
+ exception = nil
+ each_with_exceptions_unordered do |output, index, input, type|
+ exception ||= output if type == :exception
+ end
+ raise exception if exception
+ end
+
+ # Enumerable methods
+ def restricted_copy(enumerable)
+ ParallelEnumerable.new(@parent_task_queue, enumerable, @options, &@block)
+ end
+
+ alias :original_count :count
+
+ def count(*args, &block)
+ if args.size == 0 && block.nil?
+ @input_enumerable.count
+ else
+ original_count(*args, &block)
+ end
+ end
+
+ def first(n=nil)
+ if n
+ restricted_copy(@input_enumerable.first(n)).to_a
+ else
+ first(1)[0]
+ end
+ end
+
+ def drop(n)
+ restricted_copy(@input_enumerable.drop(n)).to_a
+ end
+
+ def flatten(levels = nil)
+ FlattenEnumerable.new(self, levels)
+ end
+
+ def take(n)
+ restricted_copy(@input_enumerable.take(n)).to_a
+ end
+
+ if Enumerable.method_defined?(:lazy)
+ class RestrictedLazy
+ def initialize(parallel_enumerable, actual_lazy)
+ @parallel_enumerable = parallel_enumerable
+ @actual_lazy = actual_lazy
+ end
+
+ def drop(*args, &block)
+ input = @parallel_enumerable.input_enumerable.lazy.drop(*args, &block)
+ @parallel_enumerable.restricted_copy(input)
+ end
+
+ def take(*args, &block)
+ input = @parallel_enumerable.input_enumerable.lazy.take(*args, &block)
+ @parallel_enumerable.restricted_copy(input)
+ end
+
+ def method_missing(method, *args, &block)
+ @actual_lazy.send(:method, *args, &block)
+ end
+ end
+
+ alias :original_lazy :lazy
+
+ def lazy
+ RestrictedLazy.new(self, original_lazy)
+ end
+ end
+
+ private
+
+ def each_with_exceptions_unordered
+ if @each_running
+ raise "each() called on parallel enumerable twice simultaneously! Bad mojo"
+ end
+ @each_running = true
+ begin
+ # Grab all the inputs, yielding any responses during enumeration
+ # in case the enumeration itself takes time
+ begin
+ @input_enumerable.each_with_index do |input, index|
+ @unconsumed_input.push([ input, index ])
+ @parent_task_queue.push(method(:process_one))
+
+ stop_processing_input = false
+ while !@unconsumed_output.empty?
+ output, index, input, type = @unconsumed_output.pop
+ yield output, index, input, type
+ if type == :exception && @options[:stop_on_exception]
+ stop_processing_input = true
+ break
+ end
+ end
+
+ if stop_processing_input
+ break
+ end
+ end
+ rescue
+ # We still want to wait for the rest of the outputs to process
+ @unconsumed_output.push([$!, nil, nil, :exception])
+ if @options[:stop_on_exception]
+ @unconsumed_input.clear
+ end
+ end
+
+ while !finished?
+ # yield thread to others (for 1.8.7)
+ if @unconsumed_output.empty?
+ sleep(0.01)
+ end
+
+ while !@unconsumed_output.empty?
+ yield @unconsumed_output.pop
+ end
+
+ # If no one is working on our tasks and we're allowed to
+ # work on them in the main thread, process an input to
+ # move things forward.
+ if @in_process.size == 0 && !(@options[:main_thread_processing] == false)
+ process_one
+ end
+ end
+ ensure
+ # If someone called "first" or something that exits the enumerator
+ # early, we want to make sure and throw away any extra results
+ # (gracefully) so that the next enumerator can start over.
+ if !finished?
+ stop
+ end
+ @each_running = false
+ end
+ end
+
+ def each_with_exceptions_ordered
+ next_to_yield = 0
+ unconsumed = {}
+ each_with_exceptions_unordered do |output, index, input, type|
+ unconsumed[index] = [ output, input, type ]
+ while unconsumed[next_to_yield]
+ input_output = unconsumed.delete(next_to_yield)
+ yield input_output[0], next_to_yield, input_output[1], input_output[2]
+ next_to_yield += 1
+ end
+ end
+ input_exception = unconsumed.delete(nil)
+ if input_exception
+ yield input_exception[0], next_to_yield, input_exception[1], input_exception[2]
+ end
+ end
+
+ def stop
+ @unconsumed_input.clear
+ while @in_process.size > 0
+ sleep(0.05)
+ end
+ @unconsumed_output.clear
+ end
+
+ #
+ # This is thread safe only if called from the main thread pulling on each().
+ # The order of these checks is important, as well, to be thread safe.
+ # 1. If @unconsumed_input.empty? is true, then we will never have any more
+ # work legitimately picked up.
+ # 2. If @in_process == 0, then there is no work in process, and because unconsumed_input is empty, it will never go back up, because
+ # this is called after the input enumerator is finished. Note that switching #2 and #1
+ # could cause a race, because in_process is incremented *before* consuming input.
+ # 3. If @unconsumed_output.empty? is true, then we are done with outputs.
+ # Thus, 1+2 means no more output will ever show up, and 3 means we've passed all
+ # existing outputs to the user.
+ #
+ def finished?
+ @unconsumed_input.empty? && @in_process.size == 0 && @unconsumed_output.empty?
+ end
+
+ def process_one
+ @in_process[Thread.current] = true
+ begin
+ begin
+ input, index = @unconsumed_input.pop(true)
+ process_input(input, index)
+ rescue ThreadError
+ end
+ ensure
+ @in_process.delete(Thread.current)
+ end
+ end
+
+ def process_input(input, index)
+ begin
+ output = @block.call(input)
+ @unconsumed_output.push([ output, index, input, :result ])
+ rescue
+ if @options[:stop_on_exception]
+ @unconsumed_input.clear
+ end
+ @unconsumed_output.push([ $!, index, input, :exception ])
+ end
+
+ index
+ end
+ end
+ end
+ end
+ end
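
The options documented at the top of this class are normally reached through Chef::ChefFS::Parallelizer.parallelize rather than by constructing ParallelEnumerable directly. A hedged sketch of the two most common switches (items and do_work are placeholders, not part of the API):

    # Sketch only; 'items' and 'do_work' are illustrative.
    results = Chef::ChefFS::Parallelizer.parallelize(
      items,
      :ordered => false,           # yield outputs as they finish instead of in input order
      :stop_on_exception => true   # stop pulling new inputs after the first error
    ) { |item| do_work(item) }

    # Enumerating drains the pool; with :ordered => false the first exception, if any,
    # is re-raised only after the remaining in-flight work completes.
    results.each { |output| puts output }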