smartfox_jruby 0.1 → 0.2.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/README.md +78 -0
- data/lib/smartfox_jruby.rb +6 -5
- data/lib/smartfox_jruby/common.rb +25 -22
- data/lib/smartfox_jruby/sfs_adapter.rb +137 -135
- data/lib/smartfox_jruby/sfs_runner.rb +91 -88
- data/lib/smartfox_jruby/sfs_worker.rb +199 -197
- metadata +52 -110
- data/Gemfile +0 -9
- data/Jarfile +0 -6
- data/smartfox_jruby.gemspec +0 -25
data/lib/smartfox_jruby/sfs_runner.rb
@@ -3,120 +3,123 @@ require File.expand_path(File.dirname(__FILE__) + '/common')

Content of the changed region in 0.2.3:

```ruby
java_import java.lang.Runtime
java_import java.lang.System

module SmartfoxJruby
  class SfsRunner

    def initialize(home_dir, opts = {})
      @home_dir = home_dir
      @opts = opts
      @launched = false
      @fault = false
      @pid = nil
    end

    def run!
      Thread.new do
        begin
          info "Changing dir to #{work_dir}"
          Dir.chdir(work_dir) do
            info "Running cmd #{cmd}"
            IO.popen(cmd) do |output|
              @pid = output.pid
              info "Running child with pid=#{output.pid}..."
              output.each do |line|
                debug(line.gsub(/\n/, ""))
                @launched = true if line =~ /SmartFoxServer 2X \(.+\) READY!/
              end
            end
            @fault = true
          end
        rescue Exception => e
          error "#{e}"
        end
      end
    end

    def kill!
      info "Checking running processes: #{pids.join(",")}"
      pids.each { |pid|
        pid = pid.try(:strip).try(:to_i)
        info "Killing the process with pid=#{pid}..."
        Process.kill("KILL", pid) if Process.alive?(pid)
        wait_with_timeout(5) { !Process.alive?(pid) } rescue ""
      }
    end

    def run_and_wait!(opts = {})
      run!
      wait_until_launched_or_fault(opts[:timeout])
    end

    def kill_and_wait!(opts = {})
      kill!
      wait_until_terminated(opts[:timeout])
    end

    def running?
      !pids.blank? && pids.map { |pid| Process.alive?(pid.to_i) }.include?(true)
    end

    def launched?
      @launched
    end

    def fault?
      @fault
    end

    def wait_until_terminated(timeout = nil)
      wait_with_timeout(timeout) { !running? }
    end

    def wait_until_launched_or_fault(timeout = nil)
      wait_with_timeout(timeout) { launched? or fault? }
    end

    private

    def pids
      pids_out = `pgrep -f "#{work_dir}"`
      (pids_out.try(:split, "\n")) || []
    end

    def error(msg)
      @opts[:logger].error(msg) unless @opts[:logger].blank?
    end

    def debug(msg)
      @opts[:logger].debug(msg) unless @opts[:logger].blank?
    end

    def info(msg)
      @opts[:logger].info(msg) unless @opts[:logger].blank?
    end

    def work_dir
      Pathname.new(@home_dir).join("SFS2X").to_s
    end

    def env
      %W[].to_java :string
    end

    def cmd
      %Q[#{java_bin} -cp #{classpath} #{java_opts} -Dsmartfox.work_dir="#{work_dir}" com.smartfoxserver.v2.Main]
    end

    def java_bin
      Pathname.new(System.getProperty("java.home")).join("bin").join("java")
    end

    def java_opts
      %Q[-XX:MaxPermSize=512m -Xms128m -Xms1024m]
    end

    def classpath
      %Q[aimy:./:lib/*:lib/Jetty/*:extensions/__lib__/*]
    end

  end

end
```
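For orientation, here is a minimal usage sketch of the SfsRunner API shown above. It assumes a local SmartFoxServer 2X installation directory and relies on the helpers (`try`, `blank?`, `wait_with_timeout`, `Process.alive?`) that the gem loads from `common.rb`; the directory path, logger, and timeout values are illustrative, not documented defaults of the gem.

```ruby
require 'smartfox_jruby'
require 'logger'

# Illustrative values; point home_dir at your SmartFoxServer 2X installation.
runner = SmartfoxJruby::SfsRunner.new("/opt/SmartFoxServer_2X",
                                      :logger => Logger.new(STDOUT))

# Boot the server in a background thread and block until the
# "SmartFoxServer 2X (...) READY!" banner is seen (or a fault occurs).
runner.run_and_wait!(:timeout => 60)

if runner.launched?
  # ... exercise the running server here ...
end

# Kill the java process (located via pgrep on the work dir) and wait for it to exit.
runner.kill_and_wait!(:timeout => 15)
```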
data/lib/smartfox_jruby/sfs_worker.rb
@@ -1,256 +1,258 @@

Content of the changed region in 0.2.3:

```ruby
require 'thread'
require File.expand_path(File.dirname(__FILE__) + '/common')

module SmartfoxJruby
  module SfsWorker
    class Processor
      attr_accessor :opts
      attr_accessor :name
      attr_accessor :chained
      attr_accessor :current
      attr_accessor :blocks

      def initialize(name, opts = {}, &block)
        @blocks = [block]
        @name = name
        @current = self
        @chained = nil
        @opts = opts
      end

      def chain(name, &block)
        link.chained = Processor.new(name, @opts, &block)
        self
      end

      def link
        p = current
        p = p.chained while (!p.try(:chained).blank?)
        p
      end

      def append(&block)
        unless link.blank?
          link.blocks << block
        else
          current.blocks << block
        end
        self
      end

      def completed?
        current.blank?
      end

      def mandatory?
        @opts[:mandatory].nil? || @opts[:mandatory]
      end

      def event(name, data = {})
        if @current.name.to_s == name.to_s
          @current.blocks.each { |b| b.call(data) }
          @current = (@current.chained.blank?) ? nil : @current.chained
          return true
        end
        false
      end

      def to_s
        "Proc[#{current.try(:name)}] --> #{current.try(:chained)}"
      end
    end

    class Request
      attr_reader :name
      attr_reader :data

      def initialize(name, data = {})
        @name = name
        @data = data
      end

      def to_extension_request
        ExtensionRequest.new(@name.to_s, @data)
      end

      def to_s
        "Req[#{@name}]#{data.to_json}"
      end
    end

    class Response < Request
      def to_s
        "Resp[#{@name}]#{data.to_json}"
      end
    end

    class ContextWorker

      def request(name, data = {}, opts = {})
        @worker.request(name, data, opts.merge(:context => @context))
      end

      def append_processor(opts = {}, &block)
        @worker.append_processor(opts.merge(:context => @context), &block)
      end

      def expect(name, opts={}, &block)
        @worker.expect(name, opts.merge(:context => @context), &block)
      end

      def initialize(context, worker)
        @context = context
        @worker = worker
      end
    end

    class Worker
      attr_reader :send_queue
      attr_reader :events_queue
      attr_reader :processors
      attr_reader :smart_fox
      attr_reader :opts

      def initialize(smart_fox, opts = {})
        @send_queue = []
        @processors = []
        @events_queue = []
        @opts = opts
        @opts[:timeout] ||= 20
        @smart_fox = smart_fox
        @mutex = Mutex.new
        @send_qm = Mutex.new
      end

      def append_processor(opts = {}, &block)
        debug "appending processor with context #{opts[:context]}"
        if opts[:context]
          context = opts[:context].append(&block)
        else
          context = processors.last.append(&block)
        end
        ContextWorker.new(context, self)
      end

      def request(name, data = {}, opts = {})
        debug "create request #{name} with context #{opts[:context]} and opts #{opts[:serialize_opts]}"
        data = data.to_sfsobject(opts[:serialize_opts]) if data.is_a?(Hash)
        req = Request.new(name, data)
        if !opts[:context].blank? && opts[:context].is_a?(Processor)
          debug "appending #{req} to processor #{opts[:context]}"
          context = opts[:context].append {
            @send_qm.synchronize { send_queue << req }
          }
        else
          debug "adding #{req} to send_queue \n #{dump_state}"
          context = req
          @send_qm.synchronize { send_queue << req }
        end
        ContextWorker.new(context, self)
      end

      def expect(name, opts = {}, &block)
        debug "Expecting to get response #{name} with context #{opts[:context]}"
        unless opts[:context].blank?
          if opts[:context].is_a?(Processor)
            context = opts[:context].chain(name, &block)
          elsif opts[:context].is_a?(Request)
            context = Processor.new(name, :context => opts[:context], &block)
            @mutex.synchronize { processors << context }
          end
        else
          context = Processor.new(name, &block)
          @mutex.synchronize { processors << context }
        end
        ContextWorker.new(context, self)
      end

      def response(name, data = {})
        info "Got response #{name} (#{data.to_json})..."
        @mutex.synchronize { events_queue << Response.new(name, data) }
      end

      def perform!
        while !all_events_caught?
          while !send_queue.blank?
            req = nil
            @send_qm.synchronize { req = send_queue.shift }
            debug "sending request #{req.name}..."
            smart_fox.send(req.to_extension_request) unless smart_fox.blank?
            @last_act = Time.now.to_i
          end
          process_events
          check_timeouts
        end
      end

      def all_events_caught?
        processors.blank? || processors.collect { |p| p.mandatory? }.blank?
      end

      def wait_all_events_caught
        debug "Waiting all events being caught..."
        begin
          wait_with_timeout { all_events_caught? }
        rescue WaitTimeoutException => e
          raise "Failed to catch all the events:" + dump_state
        end
      end

      private

      def dump_state
        "events_queue = \n\t-#{events_queue.join("\n\t-")}" +
            " \n processors = \n\t-#{processors.join("\n\t-")}" +
            " \n send_queue = \n\t-#{send_queue.join("\n\t-")}"
      end

      def check_timeouts
        if !@last_act.blank? && Time.now.to_i > @last_act + opts[:timeout]
          raise "Worker timeout! Latest interaction was #{Time.now.to_i - @last_act} sec ago!\n #{dump_state}"
        end
      end

      def process_events
        wait_with_timeout { !events_queue.blank? } rescue nil
        ei = 0
        debug "Processing events..."
        while ei < events_queue.size
          event = events_queue[ei]
          pi = 0
          debug "Processing event #{event.name}..."
          while pi < processors.size
            processor = processors[pi]
            debug "Trying processor #{processor.name}..."
            if processor.event(event.name, event.data)
              @last_act = Time.now.to_i
              debug "Processor found #{processor}"
              @mutex.synchronize { events_queue.delete_at(ei) }
              ei -= 1
              if processor.completed?
                debug "Processor completed #{processor.name}."
                @mutex.synchronize { processors.delete_at(pi) }
                pi -= 1
              end
            end
            pi += 1
          end
          ei += 1
        end
        debug "Events processing finished. #{dump_state}"
      end

      def debug(msg)
        puts "[#{time}] DEBUG #{msg}" if opts[:debug]
      end

      def info(msg)
        puts "[#{time}] INFO #{msg}"
      end

      def time
        Time.now.strftime("%Y-%m-%d %H:%M:%S")
      end
    end
  end

end
```
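A rough sketch of how the Worker above is driven, based only on its public methods. The request and response names and payloads are hypothetical, and `smart_fox` stands in for whatever send target the adapter supplies; in practice the SfsAdapter (changed in this release but not shown in this hunk) constructs the Worker, forwards extension responses into `Worker#response`, and serializes hashes via the `to_sfsobject` helper from `common.rb`.

```ruby
# Hypothetical wiring: Worker only calls smart_fox.send(extension_request).
worker = SmartfoxJruby::SfsWorker::Worker.new(smart_fox, :timeout => 20, :debug => true)

# Queue a request, then chain the responses expected for it.
ctx = worker.request(:login, { "user" => "test", "pass" => "secret" })
ctx = ctx.expect(:login_ok)     { |data| puts "logged in: #{data.inspect}" }
ctx.expect(:lobby_joined)       { |data| puts "in lobby" }

# Extension responses arrive via the adapter callback, e.g.:
#   worker.response(:login_ok, {...})
#   worker.response(:lobby_joined, {...})

# Drain the send queue and match incoming responses against the processor
# chain, raising if opts[:timeout] seconds pass without progress.
worker.perform!
```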