packet 0.1.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,13 +1,26 @@
  module Packet
  module NbioHelper
- # nonblocking method of reading data
- # when method returns nil it probably means that client diconnected
+ def packet_classify(original_string)
+ word_parts = original_string.split('_')
+ return word_parts.map { |x| x.capitalize}.join
+ end
+
+ def gen_worker_key(worker_name,job_key = nil)
+ return worker_name if job_key.nil?
+ return "#{worker_name}_#{job_key}".to_sym
+ end
+
  def read_data(t_sock)
  sock_data = ""
  begin
- while(sock_data << t_sock.read_nonblock(1023)); end
+ while(t_data = t_sock.recv_nonblock(1023))
+ raise DisconnectError.new(t_sock) if t_data.empty?
+ sock_data << t_data
+ end
  rescue Errno::EAGAIN
  return sock_data
+ rescue Errno::EWOULDBLOCK
+ return sock_data
  rescue
  raise DisconnectError.new(t_sock)
  end
@@ -25,19 +38,9 @@ module Packet
  p_sock.write_nonblock(t_data)
  rescue Errno::EAGAIN
  return
+ rescue Errno::EPIPE
+ raise DisconnectError.new(p_sock)
  end
-
- # loop do
- # begin
- # written_length = p_sock.write_nonblock(t_data)
- # rescue Errno::EAGAIN
- # break
- # end
- # break if written_length >= t_length
- # t_data = t_data[written_length..-1]
- # break if t_data.empty?
- # t_length = t_data.length
- # end
  end

  # method writes data to socket in a non blocking manner, but doesn't care if there is a error writing data
@@ -47,6 +50,8 @@ module Packet
  p_sock.write_nonblock(t_data)
  rescue Errno::EAGAIN
  return
+ rescue Errno::EPIPE
+ raise DisconnectError.new(p_sock)
  end
  end

@@ -56,26 +61,18 @@ module Packet
  dump_length = object_dump.length.to_s
  length_str = dump_length.rjust(9,'0')
  final_data = length_str + object_dump
-
- # total_length = final_data.length
- # loop do
- # begin
- # written_length = p_sock.write_nonblock(final_data)
- # rescue Errno::EAGAIN
- # break
- # end
- # break if written_length >= total_length
- # final_data = final_data[written_length..-1]
- # break if final_data.empty?
- # total_length = final_data.length
- # end
-
  begin
  p_sock.write_nonblock(final_data)
  rescue Errno::EAGAIN
+ puts "EAGAIN Error while writing socket"
  return
+ rescue Errno::EINTR
+ puts "Interrupt error"
+ return
+ rescue Errno::EPIPE
+ puts "Pipe error"
+ raise DisconnectError.new(p_sock)
  end
  end
-
  end
  end
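
The two helpers added at the top of NbioHelper carry most of this release: worker lookups are now keyed by gen_worker_key, and packet_classify replaces the String#classify calls that 0.1.0 used (see the reactor diff further down). A minimal standalone sketch of their behaviour, mirroring the code above (the demo class is hypothetical):

class HelperDemo
  # same logic as the helpers added in the hunk above
  def packet_classify(original_string)
    original_string.split('_').map { |x| x.capitalize }.join
  end

  def gen_worker_key(worker_name, job_key = nil)
    return worker_name if job_key.nil?
    "#{worker_name}_#{job_key}".to_sym
  end
end

demo = HelperDemo.new
demo.packet_classify("foo_worker")      # => "FooWorker"
demo.gen_worker_key("foo_worker")       # => "foo_worker"
demo.gen_worker_key("foo_worker", 42)   # => :foo_worker_42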
@@ -1,15 +1,15 @@
  require "socket"
  require "yaml"
  require "forwardable"
- require "attribute_accessors"
- require "buftok"
+ require "ostruct"
+ require "thread"
+
  require "bin_parser"

- require "ostruct"
- require "socket"

  require "packet_guid"
- require "ruby_hacks"
+ require "class_helpers"
+ require "thread_pool"
  require "double_keyed_hash"
  require "event"

@@ -33,5 +33,5 @@ require "worker"
  PACKET_APP = File.expand_path'../' unless defined?(PACKET_APP)

  module Packet
- VERSION='0.1.0'
+ VERSION='0.1.1'
  end
@@ -1,64 +1,58 @@
- # FIXME: Some code is duplicated between worker class and this Reactor class, that can be fixed
- # with help of creation of Connection class and enabling automatic inheritance of that class and
- # mixing in of methods from that class.
  module Packet
  class Reactor
  include Core
+ #set_thread_pool_size(20)
  attr_accessor :fd_writers, :msg_writers,:msg_reader
+ attr_accessor :result_hash
+
  attr_accessor :live_workers
  after_connection :provide_workers

+ def self.server_logger= (log_file_name)
+ @@server_logger = log_file_name
+ end
+
  def self.run
  master_reactor_instance = new
+ # master_reactor_instance.result_hash = {}
  master_reactor_instance.live_workers = DoubleKeyedHash.new
  yield(master_reactor_instance)
  master_reactor_instance.load_workers
  master_reactor_instance.start_reactor
  end # end of run method

+ def set_result_hash(hash)
+ @result_hash = hash
+ end
+
+ def update_result(worker_key,result)
+ @result_hash ||= {}
+ @result_hash[worker_key.to_sym] = result
+ end
+
  def provide_workers(handler_instance,t_sock)
  class << handler_instance
  extend Forwardable
  attr_accessor :workers,:connection,:reactor, :initialized,:signature
+ attr_accessor :thread_pool
  include NbioHelper
- def send_data p_data
- begin
- write_data(p_data,connection)
- rescue Errno::EPIPE
- # probably a callback, when there is a error in writing to the socket
- end
- end
- def invoke_init
- @initialized = true
- post_init
- end
-
- def close_connection
- unbind
- reactor.remove_connection(connection)
- end
-
- def close_connection_after_writing
- connection.flush
- unbind
- reactor.remove_connection(connection)
- end
-
+ include Connection
  def ask_worker(*args)
  worker_name = args.shift
  data_options = *args
+ worker_name_key = gen_worker_key(worker_name,data_options[:job_key])
  data_options[:client_signature] = connection.fileno
- workers[worker_name].send_request(data_options)
+ reactor.live_workers[worker_name_key].send_request(data_options)
  end

- def send_object p_object
- dump_object(p_object,connection)
- end
- def_delegators :@reactor, :start_server, :connect, :add_periodic_timer, :add_timer, :cancel_timer,:reconnect, :start_worker
+ def_delegators(:@reactor, :start_server, :connect, :add_periodic_timer, \
+ :add_timer, :cancel_timer,:reconnect, :start_worker,:delete_worker)
+
  end
  handler_instance.workers = @live_workers
  handler_instance.connection = t_sock
  handler_instance.reactor = self
+ handler_instance.thread_pool = @thread_pool
  end

  # FIXME: right now, each worker is tied to its connection and this can be problematic
@@ -75,41 +69,42 @@ module Packet
  end
  end

- # method loads workers in new processes
- # FIXME: this method can be fixed, so as worker code can be actually, required
- # only in forked process and hence saving upon the memory involved
- # where worker is actually required in master as well as in worker.
- def load_workers
+ def delete_worker(worker_options = {})
+ worker_name = worker_options[:worker]
+ worker_name_key = gen_worker_key(worker_name,worker_options[:job_key])
+ worker_options[:method] = :exit
+ @live_workers[worker_name_key].send_request(worker_options)
+ end

+ def load_workers
  if defined?(WORKER_ROOT)
  worker_root = WORKER_ROOT
  else
  worker_root = "#{PACKET_APP}/worker"
  end
  t_workers = Dir["#{worker_root}/**/*.rb"]
- return if t_workers.blank?
+ return if t_workers.empty?
  t_workers.each do |b_worker|
  worker_name = File.basename(b_worker,".rb")
  require worker_name
- worker_klass = Object.const_get(worker_name.classify)
+ worker_klass = Object.const_get(packet_classify(worker_name))
  next if worker_klass.no_auto_load
  fork_and_load(worker_klass)
  end
-
- # FIXME: easiest and yet perhaps a bit ugly, its just to make sure that from each
- # worker proxy one can access other workers
- @live_workers.each do |key,worker_instance|
- worker_instance.workers = @live_workers
- end
  end

- def start_worker(worker_name,options = {})
- require worker_name.to_s
- worker_klass = Object.const_get(worker_name.classify)
- fork_and_load(worker_klass,options)
+ def start_worker(worker_options = { })
+ worker_name = worker_options[:worker].to_s
+ worker_name_key = gen_worker_key(worker_name,worker_options[:job_key])
+ return if @live_workers[worker_name_key]
+ worker_options.delete(:worker)
+ require worker_name
+ worker_klass = Object.const_get(packet_classify(worker_name))
+ fork_and_load(worker_klass,worker_options)
  end

  # method forks given worker file in a new process
+ # method should use job_key if provided in options hash.
  def fork_and_load(worker_klass,worker_options = { })
  t_worker_name = worker_klass.worker_name
  worker_pimp = worker_klass.worker_proxy.to_s
@@ -122,20 +117,23 @@ module Packet

  if((pid = fork()).nil?)
  $0 = "ruby #{worker_klass.worker_name}"
- master_write_end.close
- master_read_end.close
- master_write_fd.close
- # master_write_end.close if master_write_end
- worker_klass.start_worker(:write_end => worker_write_end,:read_end => worker_read_end,:read_fd => worker_read_fd,:options => worker_options)
+ [master_write_end,master_read_end,master_write_fd].each { |x| x.close }
+
+ worker_klass.start_worker(:write_end => worker_write_end,:read_end => worker_read_end,\
+ :read_fd => worker_read_fd,:options => worker_options)
  end
  Process.detach(pid)

- unless worker_pimp.blank?
+ worker_name_key = gen_worker_key(t_worker_name,worker_options[:job_key])
+
+ if worker_pimp && !worker_pimp.empty?
  require worker_pimp
- pimp_klass = Object.const_get(worker_pimp.classify)
- @live_workers[t_worker_name,master_read_end.fileno] = pimp_klass.new(master_write_end,pid,self)
+ pimp_klass = Object.const_get(packet_classify(worker_pimp))
+ @live_workers[worker_name_key,master_read_end.fileno] = pimp_klass.new(master_write_end,pid,self)
  else
- @live_workers[t_worker_name,master_read_end.fileno] = Packet::MetaPimp.new(master_write_end,pid,self)
+ t_pimp = Packet::MetaPimp.new(master_write_end,pid,self)
+ t_pimp.worker_key = worker_name_key
+ @live_workers[worker_name_key,master_read_end.fileno] = t_pimp
  end

  worker_read_end.close
@@ -143,6 +141,5 @@ module Packet
  worker_read_fd.close
  read_ios << master_read_end
  end # end of fork_and_load method
-
  end # end of Reactor class
  end # end of Packet module
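
Taken together, the reactor now addresses every worker through the key built by gen_worker_key, and start_worker/delete_worker accept an options hash instead of positional arguments. A hedged sketch of how a connection handler might drive them (:foo_worker and :job_1 are placeholder names, not part of the gem):

# Sketch only; assumes it runs inside a handler set up by provide_workers.
# start_worker forks foo_worker and registers it under gen_worker_key("foo_worker", :job_1)
start_worker(:worker => :foo_worker, :job_key => :job_1)

# ask_worker routes the request via reactor.live_workers[:foo_worker_job_1]
ask_worker(:foo_worker, :data => "payload", :job_key => :job_1)

# delete_worker sends the same worker a request with :method => :exit
delete_worker(:worker => :foo_worker, :job_key => :job_1)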
@@ -1,6 +1,7 @@
  module Packet
  class Pimp
  include NbioHelper
+ extend ClassHelpers
  extend Forwardable
  iattr_accessor :pimp_name
  attr_accessor :lifeline, :pid, :signature
@@ -0,0 +1,54 @@
+ module Packet
+ class WorkData
+ attr_accessor :data,:block
+ def initialize(args,&block)
+ @data = args
+ @block = block
+ end
+ end
+
+ class ThreadPool
+ attr_accessor :size
+ attr_accessor :threads
+ attr_accessor :work_queue
+ def initialize(size)
+ @size = size
+ @threads = []
+ @work_queue = Queue.new
+ @running_tasks = Queue.new
+ @size.times { add_thread }
+ end
+ def defer(*args,&block)
+ @work_queue << WorkData.new(args,&block)
+ end
+
+ def add_thread
+ @threads << Thread.new do
+ while true
+ task = @work_queue.pop
+ @running_tasks << task
+ block_arity = task.block.arity
+ begin
+ block_arity == 0 ? task.block.call : task.block.call(*(task.data))
+ rescue
+ puts $!
+ puts $!.backtrace
+ end
+ @running_tasks.pop
+ end
+ end
+ end
+
+ # method ensures exclusive run of deferred tasks for 0.5 seconds, so as they do get a chance to run.
+ def exclusive_run
+ if @running_tasks.empty? && @work_queue.empty?
+ return
+ else
+ sleep(0.005)
+ return
+ end
+ end
+ end # end of ThreadPool class
+
+ end # end of Packet module
+
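
Packet::ThreadPool is a plain queue-backed pool: defer pushes a WorkData record onto @work_queue, and each of the pool's threads pops work off the queue and calls the stored block with the deferred arguments. A hedged usage sketch (the pool size and the work done in the blocks are placeholders):

require "thread"

pool = Packet::ThreadPool.new(5)   # spawns 5 threads, all popping @work_queue

# a block with parameters receives the deferred arguments
pool.defer("job-42") do |job_id|
  puts "processing #{job_id}"      # placeholder work
end

# a block without parameters is simply called as-is
pool.defer { puts "housekeeping task" }

sleep(0.1)                         # give the pool threads a chance to run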
@@ -0,0 +1,63 @@
+ =begin
+ There are many ordered hash implementation of ordered hashes, but this one is for packet.
+ Nothing more, nothing less.
+ =end
+
+ module Packet
+ class TimerStore
+ attr_accessor :order
+ def initialize
+ @order = []
+ @container = { }
+ end
+
+ def store(timer)
+ int_time = timer.scheduled_time.to_i
+ @container[int_time] ||= []
+ @container[int_time] << timer
+
+ if @container.empty?
+ @order << int_time
+ return
+ end
+ if @order.last <= key
+ @order << int_time
+ else
+ index = bin_search_for_key(o,@order.length - 1,int_time)
+ @order.insert(index,int_time)
+ end
+ end
+
+ def bin_search_for_key(lower_index,upper_index,key)
+ return upper_index if(upper_index - lower_index <= 1)
+ pivot = (lower_index + upper_index)/2
+ if @order[pivot] == key
+ return pivot
+ elsif @order[pivot] < key
+ bin_search_for_key(pivot,upper_index,key)
+ else
+ bin_search_for_key(lower_index,pivot,key)
+ end
+ end
+ end
+
+ def each
+ @order.each_with_index do |x,i|
+ if x <= Time.now.to_i
+ @container[x].each { |timer| yield x }
+ @container.delete(x)
+ @order.delete_at(i)
+ end
+ end
+ end
+
+ def delete(timer)
+ int_time = timer.scheduled_time
+ @container[int_time] && @container[int_time].delete(timer)
+
+ if(!@container[int_time] || @container[int_time].empty?)
+ @order.delete(timer)
+ end
+ end
+ end
+
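
TimerStore buckets timers in @container by their integer scheduled time and tries to keep @order sorted by inserting each new timestamp at a binary-searched position (note that store, as released, still references the undefined locals key and o). For reference, a standalone sketch of the sorted-insertion strategy the class is aiming for; this is not the gem's code:

# Illustration only: binary search for the insertion point in a sorted array.
def insertion_index(order, key)
  return 0 if order.empty? || key <= order.first
  return order.length if key >= order.last
  lower, upper = 0, order.length - 1
  while upper - lower > 1
    pivot = (lower + upper) / 2
    order[pivot] <= key ? lower = pivot : upper = pivot
  end
  upper
end

order = [10, 20, 40]
order.insert(insertion_index(order, 30), 30)   # order is now [10, 20, 30, 40]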
@@ -5,6 +5,7 @@ module Packet
  iattr_accessor :fd_reader,:msg_writer,:msg_reader,:worker_name
  iattr_accessor :worker_proxy
  iattr_accessor :no_auto_load
+
  attr_accessor :worker_started, :worker_options
  after_connection :provide_workers

@@ -60,40 +61,15 @@ module Packet
  class << handler_instance
  extend Forwardable
  attr_accessor :worker, :connection, :reactor, :initialized, :signature
+ attr_accessor :thread_pool
  include NbioHelper
- def send_data p_data
- begin
- write_data(p_data,connection)
- rescue Errno::EPIPE
- # probably a callback
- end
- end
-
- def invoke_init
- @initialized = true
- post_init
- end
-
- def close_connection
- unbind
- reactor.remove_connection(connection)
- end
-
- def close_connection_after_writing
- connection.flush
- unbind
- reactor.remove_connection(connection)
- end
-
- def send_object p_object
- dump_object(p_object,connection)
- end
-
+ include Connection
  def_delegators :@reactor, :start_server, :connect, :add_periodic_timer, :add_timer, :cancel_timer,:reconnect
  end
  handler_instance.connection = connection
  handler_instance.worker = self
  handler_instance.reactor = self
+ handler_instance.thread_pool = @thread_pool
  end

  def log log_data