palava_machine 1.0.0
- checksums.yaml +15 -0
- data/ChangeLog.md +2 -0
- data/Gemfile +4 -0
- data/ProtocolChangeLog.md +2 -0
- data/Rakefile +163 -0
- data/ReadMe.md +56 -0
- data/bin/palava-machine +4 -0
- data/bin/palava-machine-daemon +7 -0
- data/lib/palava_machine.rb +13 -0
- data/lib/palava_machine/client_message.rb +46 -0
- data/lib/palava_machine/jobs.rb +3 -0
- data/lib/palava_machine/jobs/export_stats_job.rb +59 -0
- data/lib/palava_machine/manager.rb +311 -0
- data/lib/palava_machine/runner.rb +66 -0
- data/lib/palava_machine/server.rb +64 -0
- data/lib/palava_machine/server/core_events.rb +49 -0
- data/lib/palava_machine/server/core_support.rb +50 -0
- data/lib/palava_machine/server/server_info.rb +20 -0
- data/lib/palava_machine/server/verify_handshake.rb +23 -0
- data/lib/palava_machine/socket_store.rb +46 -0
- data/lib/palava_machine/version.rb +8 -0
- data/palava_machine.gemspec +40 -0
- metadata +282 -0
checksums.yaml
ADDED
@@ -0,0 +1,15 @@
---
!binary "U0hBMQ==":
  metadata.gz: !binary |-
    OGNiZjliNTM4ZmUyY2E0MGQwNjRjMGQ3OGQ3MDliZWRjMmI0MTQ3OQ==
  data.tar.gz: !binary |-
    MDkzM2Q5MWZiZWU3YWIzZjM1NjcwNDVmYzc2M2QxNzM0ZmMxZWEzYQ==
!binary "U0hBNTEy":
  metadata.gz: !binary |-
    YTVkMmM3ZDUzN2UyMGQ0ZTg0NDg4YzBmNjY3ZjFmNmE1OTRkOWJjYjAzZjhh
    YzE1ZDBhNzgwNGM3ZTRjYmNiZDgzMTA5M2NiZTc4YWUyNDU5ODc2OGEzYTg2
    ZDJlNzM1ZDBlNjZlMDNmYTlkYjQ3NjZjNjYxYmJjYjc4MDY4YjA=
  data.tar.gz: !binary |-
    NjBlY2JlZDI5N2ZlYTg0NjFjNTZiNjAzZTY5YmEwMzM5YmYyZGEyZGM3MzQ5
    MzZkZGRmMjQ5MGYyNGNlZjMyM2IwZGU3ODE0MjdhYzcwZDI4MmYxZTRkMjY3
    MGEyNmEzMDE1MmFiNTJkMjNiOGY4ZTg0YjlhOTA4M2M1ZmJiMzU=
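The `!binary` keys and values above are base64: the keys decode to `SHA1` and `SHA512`, and the values decode to plain hex digests of the two archives inside the `.gem`. A minimal verification sketch in Ruby, assuming `metadata.gz` and `data.tar.gz` from an unpacked gem sit next to `checksums.yaml`:

    require 'yaml'
    require 'digest'

    # Psych decodes the !binary keys ("U0hBMQ==" => "SHA1") and the !binary
    # block scalars (base64 => hex digest string) while loading.
    checksums = YAML.load_file('checksums.yaml')
    %w[metadata.gz data.tar.gz].each do |file|
      expected = checksums['SHA1'][file]
      actual   = Digest::SHA1.file(file).hexdigest
      puts "#{file}: #{expected == actual ? 'ok' : 'MISMATCH'}"
    end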
data/ChangeLog.md
ADDED
data/Gemfile
ADDED
data/Rakefile
ADDED
@@ -0,0 +1,163 @@
require 'fileutils'
require 'bundler/setup'
require 'yaml'
require 'resque/tasks'
require 'resque_scheduler/tasks'

ROOT_PATH = File.dirname(__FILE__) + '/../'

# Start a worker with proper env vars and output redirection
def run_worker(queue, count = 1)
  puts "Starting #{count} worker(s) with QUEUE: #{queue}"
  ops = {:pgroup => true, :err => [(ROOT_PATH + "log/resque.workers.error.log").to_s, "a"],
         :out => [(ROOT_PATH + "log/resque.workers.log").to_s, "a"]}
  env_vars = {"QUEUE" => queue.to_s}
  count.times {
    ## Using Kernel.spawn and Process.detach because regular system() call would
    ## cause the processes to quit when capistrano finishes
    pid = spawn(env_vars, "rake resque:work", ops)
    Process.detach(pid)
  }
end

# Start a scheduler, requires resque_scheduler >= 2.0.0.f
def run_scheduler
  puts "Starting resque scheduler"
  env_vars = {
    "BACKGROUND" => "1",
    "PIDFILE" => (ROOT_PATH + "/pid/resque-scheduler.pid").to_s,
    "VERBOSE" => "1"
  }
  ops = {:pgroup => true, :err => [(ROOT_PATH + "log/resque.scheduler.error.log").to_s, "a"],
         :out => [(ROOT_PATH + "log/resque.scheduler.log").to_s, "a"]}
  pid = spawn(env_vars, "rake resque:scheduler", ops)
  Process.detach(pid)
end


namespace :deploy do
  # desc 'whiskey_disk deploy hook'
  # task :post_deploy => %w[
  #   resque:restart_workers
  #   resque:restart_scheduler
  # ]
end


namespace :resque do
  task :environment do
    require 'resque'
    require 'resque_scheduler'
    require 'resque/scheduler'

    Resque.redis = 'localhost:6379'
    Resque.schedule = YAML.load_file('config/schedule.yml')
    require_relative 'jobs'
  end

  task :setup => :environment

  desc "Restart running workers"
  task :restart_workers => :environment do
    Rake::Task['resque:stop_workers'].invoke
    Rake::Task['resque:start_workers'].invoke
  end

  desc "Quit running workers"
  task :stop_workers => :environment do
    pids = Array.new
    Resque.workers.each do |worker|
      pids.concat(worker.worker_pids)
    end
    if pids.empty?
      puts "No workers to kill"
    else
      syscmd = "kill -s QUIT #{pids.join(' ')}"
      puts "Running syscmd: #{syscmd}"
      system(syscmd)
    end
  end

  desc "Start workers"
  task :start_workers => :environment do
    run_worker("*", 2)
    run_worker("high", 1)
  end

  desc "Restart scheduler"
  task :restart_scheduler => :environment do
    Rake::Task['resque:stop_scheduler'].invoke
    Rake::Task['resque:start_scheduler'].invoke
  end

  desc "Quit scheduler"
  task :stop_scheduler => :environment do
    pidfile = ROOT_PATH + "pid/resque-scheduler.pid"
    if !File.exists?(pidfile)
      puts "Scheduler not running"
    else
      pid = File.read(pidfile).to_i
      syscmd = "kill -s QUIT #{pid}"
      puts "Running syscmd: #{syscmd}"
      system(syscmd)
      FileUtils.rm_f(pidfile)
    end
  end

  desc "Start scheduler"
  task :start_scheduler => :environment do
    run_scheduler
  end

  desc "Reload schedule"
  task :reload_schedule => :environment do
    pidfile = ROOT_PATH + "pid/resque-scheduler.pid"

    if !File.exists?(pidfile)
      puts "Scheduler not running"
    else
      pid = File.read(pidfile).to_i
      syscmd = "kill -s USR2 #{pid}"
      puts "Running syscmd: #{syscmd}"
      system(syscmd)
    end
  end
end

# # #

def gemspec
  name = Dir['*.gemspec'].first
  @gemspec ||= eval(File.read(name), binding, name)
end

desc "Build the gem"
task :gem => :gemspec do
  sh "gem build #{gemspec.name}.gemspec"
  FileUtils.mkdir_p 'pkg'
  FileUtils.mv "#{gemspec.name}-#{gemspec.version}.gem", 'pkg'
end

desc "Install the gem locally"
task :install => :gem do
  sh %{gem install pkg/#{gemspec.name}-#{gemspec.version}.gem --no-rdoc --no-ri}
end

desc "Generate the gemspec"
task :generate do
  puts gemspec.to_ruby
end

desc "Validate the gemspec"
task :gemspec do
  gemspec.validate
end

desc 'rspec specs'
task :spec do
  sh %[rspec spec]
end

task :default => :spec
task :test => :spec
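The helpers at the top of this Rakefile lean on `Kernel.spawn` plus `Process.detach` instead of `system`, precisely so the children outlive the deploy process. The pattern in isolation (log paths here are made up):

    # Start a detached child in its own process group with redirected output.
    env  = { "QUEUE" => "high" }
    opts = { :pgroup => true,
             :out => ["log/worker.log", "a"],        # hypothetical path
             :err => ["log/worker.error.log", "a"] } # hypothetical path
    pid = spawn(env, "rake resque:work", opts)
    Process.detach(pid)  # reap the child in a background thread; don't block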
data/ReadMe.md
ADDED
@@ -0,0 +1,56 @@
# PalavaMachine

## Description

PalavaMachine is a WebRTC signaling server. Signaling describes the process of finding other peers and exchanging information about how to establish a media connection.

The server is implemented in [EventMachine](http://rubyeventmachine.com/) and [Redis PubSub](http://redis.io/topics/pubsub), and communication with the clients is done via WebSockets. See it in action at [palava.tv](https://palava.tv).

## What can I do with it?

*This is a pre-release for interested Ruby/JS/WebRTC developers.* If you are unsure what to use this gem for, you'll just need to wait. We'll soon put more detailed instructions on our [blog](https://blog.palava.tv).

## Installation & Usage

Make sure you have [redis](http://redis.io/download) installed, then clone this repository and run

    $ bundle install

Start the server with

    $ bin/palava-machine

Alternatively, install the [palava_machine gem](http://rubygems.org/gems/palava_machine) from rubygems.org:

    $ gem install palava_machine

And run:

    $ palava-machine

### Daemonized Version

The PalavaMachine can be started as a daemon process for production usage:

    $ palava-machine-daemon start

Stop it with

    $ palava-machine-daemon stop

### Specs

To run the test suite, use

    $ rspec

## Credits

Open Source License information following soon!

(c) 2013 Jan Lelis, jan@signaling.io
(c) 2013 Marius Melzer, marius@signaling.io
(c) 2013 Stephan Thamm, thammi@chaossource.net
(c) 2013 Kilian Ulbrich, kilian@innovailable.eu

Part of the [palava project](https://blog.palava.tv)
data/bin/palava-machine
ADDED
data/lib/palava_machine.rb
ADDED
@@ -0,0 +1,13 @@
require_relative 'palava_machine/version' unless defined? PalavaMachine::VERSION

module PalavaMachine
  class MessageParsingError < StandardError; end

  class MessageError < StandardError
    attr_reader :ws

    def initialize(ws)
      @ws = ws
    end
  end
end
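`MessageError` carries the WebSocket it belongs to, which is what makes `Manager#return_error` (further below) work: `raise MessageError.new(ws), message` clones the exception with the new message while keeping `@ws`, so a handler can reply on the right connection. A sketch with a stand-in socket (the `error` event name is an assumption, not part of this gem):

    require 'json'
    require 'palava_machine'

    # Stand-in for an open WebSocket connection, for illustration only.
    FakeSocket = Struct.new(:peer) do
      def send_text(payload); puts payload; end
    end

    ws = FakeSocket.new('demo')
    begin
      raise PalavaMachine::MessageError.new(ws), 'room is full'
    rescue PalavaMachine::MessageError => e
      e.ws.send_text({ event: 'error', message: e.message }.to_json)
    end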
data/lib/palava_machine/client_message.rb
ADDED
@@ -0,0 +1,46 @@
require_relative 'version'

require 'json'

module PalavaMachine
  class ClientMessage
    RULES = {
      info: [],
      join_room: [:room_id, :status],
      leave_room: [],
      send_to_peer: [:peer_id, :data],
      update_status: [:status],
    }

    def initialize(message, connection_id = nil)
      begin
        @_data = JSON.parse(message)
      rescue # TODO find exact json error to catch
        raise MessageParsingError, 'invalid message'
      end

      raise MessageParsingError, 'invalid message: not a hash' unless @_data.instance_of?(Hash)
      @connection_id = connection_id
    end

    def [](w)
      @_data[w]
    end

    def valid?
      RULES.keys.include?(name) or raise MessageParsingError, 'unknown event'
    end

    def name
      @name ||= @_data['event'] && @_data['event'].to_sym or raise(MessageParsingError, 'no event given')
    end

    def connection_id
      @connection_id or raise MessageParsingError, 'connection id used but not set'
    end

    def arguments
      valid? && RULES[name].map{ |data_key| @_data[data_key.to_s] }
    end
  end
end
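`RULES` doubles as the wire protocol: every client frame is a JSON hash with an `event` key plus the listed argument keys, and `arguments` returns them in positional order. A quick parse of a hypothetical `join_room` frame:

    require 'json'
    require 'palava_machine'                 # defines MessageParsingError
    require 'palava_machine/client_message'

    raw = { event: 'join_room', room_id: 'my-room',
            status: { name: 'alice', user_agent: 'chrome' } }.to_json

    msg = PalavaMachine::ClientMessage.new(raw, 42)
    msg.name       # => :join_room
    msg.arguments  # => ["my-room", {"name"=>"alice", "user_agent"=>"chrome"}]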
data/lib/palava_machine/jobs/export_stats_job.rb
ADDED
@@ -0,0 +1,59 @@
require 'set'
require 'redis'
require 'mongo'

class ExportStatsJob
  class StatsExporter
    STATS_NAMESPACE = "store:stats"

    def initialize(redis_address, mongo_address)
      @redis = Redis.new(host: 'localhost', port: 6379)
      @mongo = Mongo::MongoClient.new#(mongo_address)
      @times = Set.new
    end

    def import_timestamps!(ns)
      redis_pattern = "#{STATS_NAMESPACE}:#{ns}:*"
      offset = redis_pattern.size - 1
      @times.merge @redis.keys(redis_pattern).map{ |key|
        key[offset..-1].to_i
      }
    end

    # remove timestamps which are not closed (+ grace time)
    def prune_timestamps!
      limit = Time.now.utc.to_i - 3660
      @times.select!{ |time| time < limit }
      puts "Transfering #{@times.length} timespans"
    end

    def store_in_mongo!
      collection = @mongo.db("plv_stats").collection("rtc")

      @times.each { |time|
        collection.insert(
          "c_at" => time,
          "connection_time" => get_and_delete_from_redis("connection_time", time),
          "room_peaks" => get_and_delete_from_redis("room_peaks", time),
        )
      }
    end

    def get_and_delete_from_redis(ns, time)
      key = "#{STATS_NAMESPACE}:#{ns}:#{time}"
      data = @redis.hgetall(key) || {}
      @redis.del(key)
      data
    end
  end

  class << self
    def perform(redis_address = 'localhost:6379', mongo_address = 'localhost:27017')
      se = StatsExporter.new(redis_address, mongo_address)
      se.import_timestamps! "room_peaks"
      se.import_timestamps! "connection_time"
      se.prune_timestamps!
      se.store_in_mongo!
    end
  end
end
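The job follows the plain Resque convention of a class-level `perform`, but it defines no `@queue`, so it cannot be enqueued without naming one; in this gem it is presumably fired periodically by resque-scheduler via the `config/schedule.yml` the Rakefile loads. A sketch of invoking it, with a hypothetical queue name:

    require 'resque'
    require 'palava_machine/jobs/export_stats_job'

    # Direct invocation, no worker involved:
    ExportStatsJob.perform

    # Through a worker; the queue must be given explicitly since the
    # class sets no @queue (queue name is hypothetical):
    Resque.enqueue_to(:stats, ExportStatsJob)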
data/lib/palava_machine/manager.rb
ADDED
@@ -0,0 +1,311 @@
require_relative 'version'
require_relative 'socket_store'

require 'em-hiredis'
require 'json'
require 'digest/sha2'
require 'time'
require 'logger'
require 'logger/colors'
require 'forwardable'


module PalavaMachine
  class Manager
    extend Forwardable


    attr_reader :connections


    def_delegators :@log, :debug, :info, :warn, :error, :fatal


    PAYLOAD_NEW_PEER = lambda { |connection_id, status = nil|
      payload = { event: 'new_peer', peer_id: connection_id }
      payload[:status] = status if status
      payload.to_json
    }

    PAYLOAD_PEER_LEFT = lambda { |connection_id| {
      event: 'peer_left',
      sender_id: connection_id,
    }.to_json }


    SCRIPT_JOIN_ROOM = <<-LUA
      local members = redis.call('smembers', KEYS[1])
      local count = 0
      for _, peer_id in pairs(members) do
        redis.call('publish', "ps:connection:" .. peer_id, ARGV[2])
        count = count + 1
      end
      redis.call('sadd', KEYS[1], ARGV[1])
      if count == 0 or tonumber(redis.call('get', KEYS[2])) <= count then
        redis.call('set', KEYS[2], count + 1)
      end
      redis.call('set', KEYS[3], ARGV[3])
      redis.call('set', KEYS[4], ARGV[4])
      return members
    LUA

    SCRIPT_LEAVE_ROOM = <<-LUA
      redis.call('hincrby', KEYS[7], (ARGV[3] - tonumber(redis.call('get', KEYS[3]))) / 60, 1) --stats
      redis.call('srem', KEYS[1], ARGV[1])
      redis.call('del', KEYS[3])
      redis.call('del', KEYS[4])
      redis.call('del', KEYS[5])

      if redis.call('scard', KEYS[1]) == 0 then -- also delete room if it is empty
        redis.call('hincrby', KEYS[6], redis.call('get', KEYS[2]), 1) --stats
        redis.call('del', KEYS[1])
        redis.call('del', KEYS[2])
      else -- tell others in room
        for _, peer_id in pairs(redis.call('smembers', KEYS[1])) do
          redis.call('publish', "ps:connection:" .. peer_id, ARGV[2])
        end
      end
    LUA


    def initialize(options = {})
      @redis_address = 'localhost:6379'
      @redis_db = options[:db] || 0
      @connections = SocketStore.new
      @log = Logger.new(STDOUT)
      @log.level = Logger::DEBUG
      @log.formatter = proc{ |level, datetime, _, msg|
        "#{datetime.strftime '%F %T'} | #{msg}\n"
      }
    end

    def initialize_in_em
      @redis = EM::Hiredis.connect "redis://#{@redis_address}/#{@redis_db}"
      @publisher = @redis.pubsub
      @subscriber = EM::Hiredis.connect("redis://#{@redis_address}/#{@redis_db}").pubsub # You need an extra connection for subs
      @redis.on :failed do
        @log.error 'Could not connect to Redis server'
      end
    end

    def announce_connection(ws)
      connection_id = @connections.register_connection(ws)
      info "#{connection_id} <open>"

      @subscriber.subscribe "ps:connection:#{connection_id}" do |payload|
        # debug "SUB payload #{payload} for <#{connection_id}>"
        ws.send_text(payload)
      end
    end

    def return_error(connection_id, message)
      raise MessageError.new(@connections[connection_id]), message
    end

    def unannounce_connection(ws, close_ws = false)
      if connection_id = @connections.unregister_connection(ws)
        info "#{connection_id} <close>"
        leave_room(connection_id)
        @subscriber.unsubscribe "ps:connection:#{connection_id}"
        if close_ws && ws.state != :closed # currently not used FIXME
          ws.close
        end
      end
    end

    def join_room(connection_id, room_id, status)
      return_error connection_id, 'no room id given' if !room_id || room_id.empty?
      return_error connection_id, 'room id too long' if room_id.size > 50

      @redis.get "store:connection:room:#{connection_id}" do |res|
        return_error connection_id, 'already joined another room' if res
        room_id = Digest::SHA512.hexdigest(room_id)
        info "#{connection_id} joins ##{room_id[0..10]}... #{status}"

        script_join_room(connection_id, room_id, status){ |members|
          return_error connection_id, 'room is full' unless members

          update_status_without_notifying_peers(connection_id, status){
            if members.empty?
              send_joined_room(connection_id, [])
            else
              get_statuses_for_members(members) do |members_with_statuses|
                send_joined_room(connection_id, members_with_statuses)
              end
            end
          }
        }
      end
    end

    def script_join_room(connection_id, room_id, status, &block)
      @redis.eval \
        SCRIPT_JOIN_ROOM,
        4,
        "store:room:members:#{room_id}",
        "store:room:peak_members:#{room_id}",
        "store:connection:joined:#{connection_id}",
        "store:connection:room:#{connection_id}",
        connection_id,
        PAYLOAD_NEW_PEER[connection_id, status],
        Time.now.getutc.to_i,
        room_id,
        &block
    end
    private :script_join_room

    def get_statuses_for_members(members)
      member_count = members.size
      members_with_statuses = []
      members.each { |peer_id|
        @redis.hgetall("store:connection:status:#{peer_id}") do |status_array|
          members_with_statuses << { peer_id: peer_id, status: Hash[status_array.each_slice(2).to_a] }
          yield members_with_statuses if members_with_statuses.size == member_count
        end
      }
    end
    private :get_statuses_for_members

    def send_joined_room(connection_id, members_with_statuses)
      @connections[connection_id].send_text({
        event: 'joined_room',
        own_id: connection_id,
        peers: members_with_statuses,
      }.to_json)
    end
    private :send_joined_room

    def leave_room(connection_id)
      @redis.get("store:connection:room:#{connection_id}") do |room_id|
        next unless room_id # return_error connection_id, 'currently not in any room'

        info "#{connection_id} leaves ##{room_id[0..10]}..."
        script_leave_room(connection_id, room_id)
      end
    end

    def script_leave_room(connection_id, room_id, &block)
      now = Time.now.getutc.to_i
      hour = now - now % (60 * 60)

      @redis.eval \
        SCRIPT_LEAVE_ROOM,
        7,
        "store:room:members:#{room_id}",
        "store:room:peak_members:#{room_id}",
        "store:connection:joined:#{connection_id}",
        "store:connection:room:#{connection_id}",
        "store:connection:status:#{connection_id}",
        "store:stats:room_peaks:#{hour}",
        "store:stats:connection_time:#{hour}",
        connection_id,
        PAYLOAD_PEER_LEFT[connection_id],
        now,
        &block
    end
    private :script_leave_room

    def update_status(connection_id, input_status)
      @redis.get("store:connection:room:#{connection_id}") do |room_id|
        return_error connection_id, 'currently not in any room' unless room_id

        update_status_without_notifying_peers(connection_id, input_status){
          @redis.smembers("store:room:members:#{room_id}") do |members|
            members.each { |peer_id|
              @publisher.publish "ps:connection:#{peer_id}", {
                event: 'peer_updated_status',
                status: input_status,
                sender_id: connection_id,
              }.to_json
            }
          end
        }
      end
    end

    def send_to_peer(connection_id, peer_id, data)
      unless data.instance_of? Hash
        return_error connection_id, "cannot send raw data"
      end

      @redis.get("store:connection:room:#{connection_id}") do |room_id|
        return_error connection_id, 'currently not in any room' unless room_id

        @redis.sismember("store:room:members:#{room_id}", peer_id) do |is_member|
          return_error connection_id, 'unknown peer' if is_member.nil? || is_member.zero?

          unless %w[offer answer ice_candidate].include? data['event']
            return_error connection_id, 'event not allowed'
          end

          @publisher.publish "ps:connection:#{peer_id}", (data || {}).merge("sender_id" => connection_id).to_json
        end
      end
    end

    def announce_shutdown(seconds = 0)
      warn "Announcing shutdown in #{seconds} seconds"
      @connections.sockets.each { |ws|
        ws.send_text({
          event: 'shutdown',
          seconds: seconds,
        }.to_json)
      }
    end

    def shutdown!(seconds = 0)
      sleep(seconds)
      @connections.dup.sockets.each{ |ws| ws.close(4200) } # TODO double check this one
    end


    private


    # TODO shorten
    def update_status_without_notifying_peers(connection_id, input_status, &block)
      if !input_status
        block.call
        return false
      end

      status = {}

      if input_status['name']
        if !input_status['name'] || input_status['name'] =~ /\A\s*\z/
          return_error connection_id, 'blank name not allowed'
        end

        if input_status['name'].size > 50
          return_error connection_id, 'name too long'
        end

        begin
          valid_encoding = input_status['name'] =~ /\A\p{ASCII}+\z/
        rescue Encoding::CompatibilityError
          valid_encoding = false
        end

        if !valid_encoding
          input_status['name'] = '*' * input_status['name'].size
        end

        status['name'] = input_status['name']
      end

      if input_status['user_agent']
        unless %w[firefox chrome unknown].include? input_status['user_agent']
          return_error connection_id, 'unknown user agent'
        end

        status['user_agent'] = input_status['user_agent']
      end

      unless status.empty?
        @redis.hmset "store:connection:status:#{connection_id}", *status.to_a.flatten, &block
        true
      end
    end

  end
end
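`Manager` splits its setup in two on purpose: plain construction can happen anywhere, but `initialize_in_em` must run inside an EventMachine reactor, since em-hiredis needs one. A minimal, hedged bootstrap using only the methods defined above (the actual WebSocket wiring lives in `lib/palava_machine/server.rb`, before which this listing is truncated):

    require 'eventmachine'
    require 'palava_machine/manager'

    EM.run do
      manager = PalavaMachine::Manager.new(db: 0)
      manager.initialize_in_em  # connect the Redis pub/sub pair inside the reactor

      # A WebSocket server would then call, per connection:
      #   manager.announce_connection(ws)    on open
      #   manager.unannounce_connection(ws)  on close
    end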