seapig-server 0.1.3 → 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/MIT-LICENSE +1 -1
- data/README.rdoc +19 -24
- data/bin/seapig-server +24 -20
- data/bin/seapig-server-intro +757 -0
- data/lib/seapig/version.rb +1 -1
- metadata +3 -2
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
|
|
1
1
|
---
|
2
2
|
SHA1:
|
3
|
-
metadata.gz:
|
4
|
-
data.tar.gz:
|
3
|
+
metadata.gz: 02ea92f8574a95efe7cbb84a4273a3f333fa0104
|
4
|
+
data.tar.gz: d7d0b375e2b6d801387c14420eebc938f15ad2ab
|
5
5
|
SHA512:
|
6
|
-
metadata.gz:
|
7
|
-
data.tar.gz:
|
6
|
+
metadata.gz: ff15f55ebfcce98672f6a820a54c254c1f4d6b5c3055fb4fd50975bec7021291d4fde0535edd1d6b84ee154725c3d315261243a543f9cc91b4c98ce4c118ff72
|
7
|
+
data.tar.gz: 17650ede8b9698155b93a1699ee61b9f597d805602c77eda65af2c0428d0e7eace0803c02b41e6af4f08292e92f3588fb9883093a1cfafa5a62c49eefe226dc9
|
data/MIT-LICENSE
CHANGED
data/README.rdoc
CHANGED
@@ -1,36 +1,31 @@
|
|
1
1
|
= Seapig
|
2
2
|
|
3
|
-
|
3
|
+
Seapig is a websocket-based master-slave json object replication/synchronisation system.
|
4
|
+
|
5
|
+
No docs exist yet.
|
4
6
|
|
5
7
|
To cover:
|
6
8
|
|
7
|
-
* what are seapigs and why they exist? (why not just having a
|
9
|
+
* what are seapigs and why they exist? (why not just having a stream?)
|
8
10
|
* link to https://www.youtube.com/watch?v=_y4DbZivHCY
|
9
|
-
* properties of seapig system
|
10
|
-
*
|
11
|
-
* only diffs are sent
|
12
|
-
*
|
13
|
-
*
|
11
|
+
* properties of a seapig system
|
12
|
+
* objects are cached in memory
|
13
|
+
* only diffs are sent over websocket when objects change
|
14
|
+
* seapig handles re-synchronization of objects on link loss
|
15
|
+
* objects are generated in separate processes
|
16
|
+
* objects are regenerated on dependency change (e.g. immediately on db change)
|
14
17
|
* describe current limits
|
15
|
-
*
|
16
|
-
*
|
17
|
-
*
|
18
|
-
*
|
19
|
-
* postgres only (but is that really a problem? ;))
|
18
|
+
* there is no rate-limiting
|
19
|
+
* object is dropped from cache when last client un-listens
|
20
|
+
* seapig-client: no timeout detection
|
21
|
+
* seapig-rails: postgres only (but is that really a problem? ;))
|
20
22
|
* graph of server/client side seapig object states
|
21
|
-
* disclaimer on non-efficient code. it's all a draft, a test of idea
|
23
|
+
* disclaimer on non-efficient code. it's all a draft, a test of an idea
|
22
24
|
* rails not needed
|
23
25
|
* stuff will change
|
24
|
-
* seapig
|
26
|
+
* seapig == firebase for apps that need real db
|
25
27
|
* works nicely with mithril
|
26
28
|
* how to use this shit
|
27
|
-
*
|
28
|
-
*
|
29
|
-
|
30
|
-
* bundle exec ruby ../../bin/seapig-worker.rb ws://127.0.0.1:3001/seapig
|
31
|
-
* bundle exec ruby ../../bin/seapig-notifier.rb ws://127.0.0.1:3001/seapig
|
32
|
-
* application.js needs: require seapig/seapig
|
33
|
-
* application.js needs: require json-patch
|
34
|
-
* ActiveRecord models that are used for triggering regeneration of data need:
|
35
|
-
* acts_as_seapig_dependency
|
36
|
-
* and seapig_dependency_changed after commits
|
29
|
+
* write some docs
|
30
|
+
* link to example projects
|
31
|
+
* link to seapig-rails, seapig-client-ruby, seapig-router, etc.
|
data/bin/seapig-server
CHANGED
@@ -112,23 +112,23 @@ module SeapigObjectStore
|
|
112
112
|
|
113
113
|
|
114
114
|
def self.consumer_unregister(pattern_or_id, client)
|
115
|
-
raise "Unregister without register" if not @@consumers[pattern_or_id].include?(client)
|
115
|
+
raise "Unregister without register" if (not @@consumers[pattern_or_id]) or (not @@consumers[pattern_or_id].include?(client))
|
116
116
|
@@consumers[pattern_or_id].delete(client)
|
117
117
|
@@consumers.delete(pattern_or_id) if @@consumers[pattern_or_id].size == 0
|
118
118
|
self.matching(pattern_or_id,@@producers.merge(@@objects_by_id)).each { |matching_id|
|
119
119
|
@@objects_by_id[matching_id].consumer_unregister(pattern_or_id, client) if @@objects_by_id[matching_id]
|
120
|
-
self.despawn(@@objects_by_id[matching_id]) if @@objects_by_id[matching_id] and (not @@objects_by_id[matching_id].alive?) and (not @@dependents[
|
120
|
+
self.despawn(@@objects_by_id[matching_id]) if @@objects_by_id[matching_id] and (not @@objects_by_id[matching_id].alive?) and (not @@dependents[matching_id])
|
121
121
|
}
|
122
122
|
end
|
123
123
|
|
124
124
|
|
125
125
|
def self.producer_unregister(pattern_or_id,client)
|
126
|
-
raise "Unregister without register" if not @@producers[pattern_or_id].include?(client)
|
126
|
+
raise "Unregister without register" if (not @@producers[pattern_or_id]) or (not @@producers[pattern_or_id].include?(client))
|
127
127
|
@@producers[pattern_or_id].delete(client)
|
128
128
|
@@producers.delete(pattern_or_id) if @@producers[pattern_or_id].size == 0
|
129
129
|
self.matching(pattern_or_id,@@consumers.merge(@@dependents)).each { |matching_id|
|
130
130
|
@@objects_by_id[matching_id].producer_unregister(pattern_or_id, client) if @@objects_by_id[matching_id]
|
131
|
-
self.despawn(@@objects_by_id[matching_id]) if @@objects_by_id[matching_id] and (not @@objects_by_id[matching_id].alive?) and (not @@dependents[
|
131
|
+
self.despawn(@@objects_by_id[matching_id]) if @@objects_by_id[matching_id] and (not @@objects_by_id[matching_id].alive?) and (not @@dependents[matching_id])
|
132
132
|
}
|
133
133
|
end
|
134
134
|
|
@@ -481,6 +481,11 @@ class Client
|
|
481
481
|
end
|
482
482
|
|
483
483
|
|
484
|
+
def inspect
|
485
|
+
id
|
486
|
+
end
|
487
|
+
|
488
|
+
|
484
489
|
def destroy
|
485
490
|
puts "Client disconnected:\n "+@index.to_s if DEBUG
|
486
491
|
@@clients_by_socket.delete(@socket)
|
@@ -637,22 +642,6 @@ class Client
|
|
637
642
|
end
|
638
643
|
|
639
644
|
|
640
|
-
class InternalClient
|
641
|
-
|
642
|
-
def self.produce
|
643
|
-
end
|
644
|
-
|
645
|
-
def initialize
|
646
|
-
SeapigObjectStore.producer_register("SeapigServer::Objects", self)
|
647
|
-
end
|
648
|
-
|
649
|
-
def object_produce(object_id, object_version)
|
650
|
-
objects =
|
651
|
-
SeapigObjectStore.version_set(object_id,new_version,objects,object_version)
|
652
|
-
end
|
653
|
-
|
654
|
-
end
|
655
|
-
|
656
645
|
|
657
646
|
#TODO:
|
658
647
|
# * change protocol to use "pattern" instead of "id"
|
@@ -734,4 +723,19 @@ EM.run {
|
|
734
723
|
}
|
735
724
|
|
736
725
|
|
726
|
+
close_reader, close_writer = IO.pipe
|
727
|
+
|
728
|
+
EM.watch(close_reader) { |connection|
|
729
|
+
connection.notify_readable = true
|
730
|
+
connection.define_singleton_method(:notify_readable) do
|
731
|
+
puts "Shutting down" if INFO or DEBUG
|
732
|
+
exit
|
733
|
+
end
|
734
|
+
}
|
735
|
+
|
736
|
+
Signal.trap("INT") {
|
737
|
+
puts "SIGINT received, scheduling exit." if DEBUG
|
738
|
+
close_writer.write('.')
|
739
|
+
}
|
740
|
+
|
737
741
|
}
|
@@ -0,0 +1,757 @@
|
|
1
|
+
#!/bin/env ruby
|
2
|
+
# coding: utf-8
|
3
|
+
require 'websocket-eventmachine-server'
|
4
|
+
require 'narray'
|
5
|
+
require 'oj'
|
6
|
+
require 'jsondiff'
|
7
|
+
require 'hana'
|
8
|
+
require 'set'
|
9
|
+
|
10
|
+
|
11
|
+
DEBUG = (ARGV[0] == "debug")
|
12
|
+
INFO = (DEBUG or ARGV[0] == "info")
|
13
|
+
HOST = (ARGV[1] or "127.0.0.1").split(":")[0]
|
14
|
+
PORT = ((ARGV[1] or '').split(':')[1] or "3001").to_i
|
15
|
+
|
16
|
+
OBJECT_CACHE_SIZE = 1
|
17
|
+
|
18
|
+
$stdout.sync = true
|
19
|
+
|
20
|
+
Oj.default_options = { mode: :compat }
|
21
|
+
|
22
|
+
|
23
|
+
module WebSocket
|
24
|
+
module Frame
|
25
|
+
class Data < String
|
26
|
+
def getbytes(start_index, count)
|
27
|
+
data = self[start_index, count]
|
28
|
+
if @masking_key
|
29
|
+
payload_na = NArray.to_na(data,"byte")
|
30
|
+
mask_na = NArray.to_na((@masking_key.pack("C*")*((data.size/4) + 1))[0...data.size],"byte")
|
31
|
+
data = (mask_na ^ payload_na).to_s
|
32
|
+
end
|
33
|
+
data
|
34
|
+
end
|
35
|
+
end
|
36
|
+
end
|
37
|
+
end
|
38
|
+
|
39
|
+
|
40
|
+
|
41
|
+
class String
|
42
|
+
|
43
|
+
def starexp
|
44
|
+
Regexp.new(Regexp.escape(self).gsub('\*','.*?'))
|
45
|
+
end
|
46
|
+
|
47
|
+
|
48
|
+
def starexp?
|
49
|
+
self.include?('*')
|
50
|
+
end
|
51
|
+
|
52
|
+
end
|
53
|
+
|
54
|
+
|
55
|
+
Signal.trap("USR1") {
|
56
|
+
t1 = Time.new; GC.start ;d = Time.new - t1
|
57
|
+
puts "Long GC run:\n %.3fs"%(d) if DEBUG and d > 0.05
|
58
|
+
}
|
59
|
+
|
60
|
+
|
61
|
+
#
|
62
|
+
# Code is layered, with each layer only communicating with neighbouring layers (e.g. object store never directly talks to em or sockets).
|
63
|
+
#
|
64
|
+
# Object Store is a singleton responsible for:
|
65
|
+
# * managing objects' lifetime
|
66
|
+
# * dependency tracking and triggering object rebuilds (aka. production)
|
67
|
+
# * tracking of available producers and consumers
|
68
|
+
#
|
69
|
+
# Client class is responsible for:
|
70
|
+
# * keeping track of clients and their state
|
71
|
+
# * keeping network communication efficient (diffing)
|
72
|
+
#
|
73
|
+
# Eventmachine main loop is a:
|
74
|
+
# * router between physical world and Client class / instances
|
75
|
+
#
|
76
|
+
|
77
|
+
|
78
|
+
module SeapigObjectStore
|
79
|
+
|
80
|
+
@@objects_by_id = {} # {id => object}; stores all existing SeapigObjects
|
81
|
+
|
82
|
+
@@producers = {} # {pattern_or_id => {client}}; for assessing spawning possibility
|
83
|
+
@@consumers = {} # {pattern_or_id => {client}}; for assessing spawning need, for assessing holding need
|
84
|
+
|
85
|
+
@@dependents = {} # {id_depended_on => {id_depending}}; for assessing spawning need, for assessing holding need, for assessing reproduction need
|
86
|
+
@@dependencies = {} # {id_depending => {id_depended_on}}; for updating dependents
|
87
|
+
|
88
|
+
@@queue = [] # [object]; objects in need of production
|
89
|
+
@@producing = {} # {client => object}; for assessing client busy status
|
90
|
+
@@produced = {} # {id_being_produced => {version}}; for assessing enqueuing/dequeuing need
|
91
|
+
|
92
|
+
|
93
|
+
def self.consumer_register(pattern_or_id, client)
|
94
|
+
@@consumers[pattern_or_id] = Set.new if not @@consumers[pattern_or_id]
|
95
|
+
@@consumers[pattern_or_id].add(client)
|
96
|
+
self.matching(pattern_or_id, @@producers.merge(@@objects_by_id)).each { |matching_id|
|
97
|
+
@@objects_by_id[matching_id].consumer_register(pattern_or_id, client) if @@objects_by_id[matching_id]
|
98
|
+
self.spawn(matching_id) if not @@objects_by_id[matching_id]
|
99
|
+
}
|
100
|
+
end
|
101
|
+
|
102
|
+
|
103
|
+
def self.producer_register(pattern_or_id, client)
|
104
|
+
@@producers[pattern_or_id] = Set.new if not @@producers[pattern_or_id]
|
105
|
+
@@producers[pattern_or_id].add(client)
|
106
|
+
self.matching(pattern_or_id, @@consumers.merge(@@dependents)).each { |matching_id|
|
107
|
+
@@objects_by_id[matching_id].producer_register(pattern_or_id, client) if @@objects_by_id[matching_id]
|
108
|
+
self.spawn(matching_id) if not @@objects_by_id[matching_id]
|
109
|
+
}
|
110
|
+
self.dequeue(client,nil) if not @@producing[client]
|
111
|
+
end
|
112
|
+
|
113
|
+
|
114
|
+
def self.consumer_unregister(pattern_or_id, client)
|
115
|
+
raise "Unregister without register" if (not @@consumers[pattern_or_id]) or (not @@consumers[pattern_or_id].include?(client))
|
116
|
+
@@consumers[pattern_or_id].delete(client)
|
117
|
+
@@consumers.delete(pattern_or_id) if @@consumers[pattern_or_id].size == 0
|
118
|
+
self.matching(pattern_or_id,@@producers.merge(@@objects_by_id)).each { |matching_id|
|
119
|
+
@@objects_by_id[matching_id].consumer_unregister(pattern_or_id, client) if @@objects_by_id[matching_id]
|
120
|
+
self.despawn(@@objects_by_id[matching_id]) if @@objects_by_id[matching_id] and (not @@objects_by_id[matching_id].alive?) and (not @@dependents[matching_id])
|
121
|
+
}
|
122
|
+
end
|
123
|
+
|
124
|
+
|
125
|
+
def self.producer_unregister(pattern_or_id,client)
|
126
|
+
raise "Unregister without register" if (not @@producers[pattern_or_id]) or (not @@producers[pattern_or_id].include?(client))
|
127
|
+
@@producers[pattern_or_id].delete(client)
|
128
|
+
@@producers.delete(pattern_or_id) if @@producers[pattern_or_id].size == 0
|
129
|
+
self.matching(pattern_or_id,@@consumers.merge(@@dependents)).each { |matching_id|
|
130
|
+
@@objects_by_id[matching_id].producer_unregister(pattern_or_id, client) if @@objects_by_id[matching_id]
|
131
|
+
self.despawn(@@objects_by_id[matching_id]) if @@objects_by_id[matching_id] and (not @@objects_by_id[matching_id].alive?) and (not @@dependents[matching_id])
|
132
|
+
}
|
133
|
+
end
|
134
|
+
|
135
|
+
|
136
|
+
def self.version_get(client,id,version)
|
137
|
+
raise "version_get called on starexp, that doesn't make sense" if id.starexp?
|
138
|
+
return [0,{}] if not @@objects_by_id.has_key?(id)
|
139
|
+
@@objects_by_id[id].version_get(version)
|
140
|
+
end
|
141
|
+
|
142
|
+
|
143
|
+
# data can be one of:
|
144
|
+
# - Hash => given version corresponds to given data
|
145
|
+
# - false => given version has no data (aka. stall)
|
146
|
+
# - true => given version exists (data unknown)
|
147
|
+
# - nil => given version could not be generated (data unknown)
|
148
|
+
def self.version_set(client,id,version,data,requested_version)
|
149
|
+
raise "Update of pattern doesn't make sense" if id.starexp?
|
150
|
+
|
151
|
+
if requested_version != false
|
152
|
+
raise "client not in @@producing" if not @@producing[client]
|
153
|
+
raise "requested_version (%s) not in @@produced[id] (%s)"%[requested_version.inspect,@@produced[id].inspect] if not @@produced[id].include?(requested_version)
|
154
|
+
@@producing.delete(client)
|
155
|
+
@@produced[id].delete(requested_version) # also on disconnection / unproducer / test
|
156
|
+
@@produced.delete(id) if @@produced[id].size == 0
|
157
|
+
end
|
158
|
+
|
159
|
+
if @@objects_by_id.has_key?(id) or @@dependents[id] or @@consumers.keys.find { |pattern| id =~ pattern.starexp }
|
160
|
+
object = (@@objects_by_id[id] or self.spawn(id))
|
161
|
+
accepted = object.version_set(data, version, requested_version)
|
162
|
+
if accepted
|
163
|
+
puts "Version accepted" if DEBUG
|
164
|
+
(@@dependents[id] or Set.new).each { |dependent_id|
|
165
|
+
raise if not @@objects_by_id.has_key?(dependent_id)
|
166
|
+
next if not (dependent = @@objects_by_id[dependent_id])
|
167
|
+
if dependent.version_needed and dependent.version_needed[id] and version.kind_of?(Integer) and dependent.version_needed[id].kind_of?(Integer) and dependent.version_needed[id] < version
|
168
|
+
dependent.version_needed[id] = version
|
169
|
+
enqueue(dependent)
|
170
|
+
end
|
171
|
+
}
|
172
|
+
if version.kind_of? Hash
|
173
|
+
object.version_needed = {} if (not object.version_needed) or object.version_needed.kind_of?(Integer)
|
174
|
+
old_dependencies = (@@dependencies[id] or Set.new)
|
175
|
+
new_dependencies = (@@dependencies[id] = Set.new(version.keys))
|
176
|
+
(new_dependencies - old_dependencies).each { |added_dependency|
|
177
|
+
object.version_needed[added_dependency] = SeapigObject.version_newer((@@objects_by_id[added_dependency] ? @@objects_by_id[added_dependency].version_latest : 0), (version[added_dependency] or 0))
|
178
|
+
dependent_add(added_dependency, object.id)
|
179
|
+
}
|
180
|
+
(old_dependencies & new_dependencies).each { |kept_dependency|
|
181
|
+
object.version_needed[kept_dependency] = SeapigObject.version_newer((@@objects_by_id[kept_dependency] ? @@objects_by_id[kept_dependency].version_latest : 0), (version[kept_dependency] or 0))
|
182
|
+
}
|
183
|
+
(old_dependencies - new_dependencies).each { |removed_dependency|
|
184
|
+
object.version_needed.delete(removed_dependency)
|
185
|
+
dependent_remove(removed_dependency, object.id)
|
186
|
+
}
|
187
|
+
else
|
188
|
+
object.version_needed = version
|
189
|
+
end
|
190
|
+
end
|
191
|
+
enqueue(object)
|
192
|
+
end
|
193
|
+
|
194
|
+
dequeue(client,nil) if requested_version != false and not @@producing[client]
|
195
|
+
end
|
196
|
+
|
197
|
+
|
198
|
+
def self.cache_get(object_id, key)
|
199
|
+
return nil if not @@objects_by_id.has_key?(object_id)
|
200
|
+
@@objects_by_id[object_id].cache_get(key)
|
201
|
+
end
|
202
|
+
|
203
|
+
|
204
|
+
def self.cache_set(object_id, key, value)
|
205
|
+
return value if not @@objects_by_id.has_key?(object_id)
|
206
|
+
@@objects_by_id[object_id].cache_set(key, value)
|
207
|
+
value
|
208
|
+
end
|
209
|
+
|
210
|
+
|
211
|
+
private
|
212
|
+
|
213
|
+
|
214
|
+
class SeapigObject
|
215
|
+
|
216
|
+
attr_reader :id, :versions, :direct_producers, :wildcard_producers
|
217
|
+
attr_accessor :version_needed
|
218
|
+
|
219
|
+
def initialize(id)
|
220
|
+
@id = id
|
221
|
+
@versions = [ [0, {}] ]
|
222
|
+
@direct_consumers = Set.new
|
223
|
+
@wildcard_consumers = {}
|
224
|
+
@direct_producers = Set.new
|
225
|
+
@wildcard_producers = {}
|
226
|
+
@version_needed = nil
|
227
|
+
@cache = []
|
228
|
+
end
|
229
|
+
|
230
|
+
|
231
|
+
def destroy
|
232
|
+
@wildcard_consumers.keys.each { |client|
|
233
|
+
client.object_destroy(@id)
|
234
|
+
}
|
235
|
+
end
|
236
|
+
|
237
|
+
|
238
|
+
def version_get(object_version)
|
239
|
+
@versions.assoc(object_version) or [0,{}]
|
240
|
+
end
|
241
|
+
|
242
|
+
|
243
|
+
def version_set(data,version,requested_version)
|
244
|
+
return false if data == nil
|
245
|
+
return false if not SeapigObject.version_newer?(version_latest, version)
|
246
|
+
@version_needed = version if data == true and ((not @version_needed) or SeapigObject.version_newer?(@version_needed, version))
|
247
|
+
return false if data == true
|
248
|
+
@versions << [version,data]
|
249
|
+
(Set.new(@wildcard_consumers.keys)+@direct_consumers).each { |client| client.object_update(@id, version, data) } if data
|
250
|
+
versions_with_valid_data = 0
|
251
|
+
discard_below = @versions.size - 1
|
252
|
+
while discard_below > 0 and versions_with_valid_data < 1
|
253
|
+
versions_with_valid_data += 1 if @versions[discard_below][1]
|
254
|
+
discard_below -= 1
|
255
|
+
end
|
256
|
+
discard_below.times { @versions.shift }
|
257
|
+
true
|
258
|
+
end
|
259
|
+
|
260
|
+
|
261
|
+
def version_latest
|
262
|
+
return nil if not @versions[-1]
|
263
|
+
@versions[-1][0]
|
264
|
+
end
|
265
|
+
|
266
|
+
|
267
|
+
def self.version_newer?(latest,vb)
|
268
|
+
# return true if latest.nil? and (not vb.nil?)
|
269
|
+
# return false if (not latest.nil?) and vb.nil?
|
270
|
+
return latest < vb if (not latest.kind_of?(Hash)) and (not vb.kind_of?(Hash))
|
271
|
+
return true if (not latest.kind_of?(Hash)) and ( vb.kind_of?(Hash))
|
272
|
+
return false if ( latest.kind_of?(Hash)) and (not vb.kind_of?(Hash))
|
273
|
+
(latest.keys & vb.keys).each { |key|
|
274
|
+
return true if version_newer?(latest[key], vb[key])
|
275
|
+
}
|
276
|
+
return vb.size < latest.size #THINK: is this the right way to go...
|
277
|
+
end
|
278
|
+
|
279
|
+
|
280
|
+
def self.version_newer(va,vb)
|
281
|
+
version_newer?(va,vb) ? vb : va
|
282
|
+
end
|
283
|
+
|
284
|
+
|
285
|
+
def consumer_register(pattern,client)
|
286
|
+
return false if ((not pattern.starexp?) and @direct_consumers.include?(client)) or (pattern.starexp? and @wildcard_consumers[client] and @wildcard_consumers[client].include?(pattern))
|
287
|
+
if pattern.starexp?
|
288
|
+
(@wildcard_consumers[client] ||= Set.new).add(pattern)
|
289
|
+
else
|
290
|
+
@direct_consumers.add(client)
|
291
|
+
end
|
292
|
+
latest_known_version, latest_known_data = @versions.reverse.find { |version,data| data }
|
293
|
+
(Set.new(@wildcard_consumers.keys)+@direct_consumers).each { |client| client.object_update(@id, latest_known_version, latest_known_data) }
|
294
|
+
end
|
295
|
+
|
296
|
+
|
297
|
+
def producer_register(pattern,client)
|
298
|
+
return false if ((not pattern.starexp?) and @direct_producers.include?(client)) or (pattern.starexp? and @wildcard_producers[client] and @wildcard_producers[client].include?(pattern))
|
299
|
+
if pattern.starexp?
|
300
|
+
(@wildcard_producers[client] ||= Set.new).add(pattern)
|
301
|
+
else
|
302
|
+
@direct_producers.add(client)
|
303
|
+
end
|
304
|
+
end
|
305
|
+
|
306
|
+
|
307
|
+
def consumer_unregister(pattern,client)
|
308
|
+
raise "Unregister without register" if (not @direct_consumers.include?(client)) and ((not @wildcard_consumers.has_key?(client)) or (not @wildcard_consumers[client].include?(pattern)))
|
309
|
+
if pattern.starexp?
|
310
|
+
@wildcard_consumers[client].delete(pattern)
|
311
|
+
@wildcard_consumers.delete(client) if @wildcard_consumers[client].size == 0
|
312
|
+
else
|
313
|
+
@direct_consumers.delete(client)
|
314
|
+
end
|
315
|
+
end
|
316
|
+
|
317
|
+
|
318
|
+
def producer_unregister(pattern,client)
|
319
|
+
raise "Unregister without register" if (not @direct_producers.include?(client)) and ((not @wildcard_producers.has_key?(client)) or (not @wildcard_producers[client].include?(pattern)))
|
320
|
+
if pattern.starexp?
|
321
|
+
@wildcard_producers[client].delete(pattern)
|
322
|
+
@wildcard_producers.delete(client) if @wildcard_producers[client].size == 0
|
323
|
+
else
|
324
|
+
@direct_producers.delete(client)
|
325
|
+
end
|
326
|
+
end
|
327
|
+
|
328
|
+
|
329
|
+
def cache_get(key)
|
330
|
+
ret = @cache.assoc(key)
|
331
|
+
puts "Cache "+(ret ? "hit" : "miss") if DEBUG
|
332
|
+
ret and ret[1]
|
333
|
+
end
|
334
|
+
|
335
|
+
|
336
|
+
def cache_set(key, value)
|
337
|
+
@cache.delete(old_entry) if old_entry = @cache.assoc(key)
|
338
|
+
@cache << [key,value] if OBJECT_CACHE_SIZE > 0
|
339
|
+
@cache = @cache[-OBJECT_CACHE_SIZE..-1] if @cache.size > OBJECT_CACHE_SIZE
|
340
|
+
end
|
341
|
+
|
342
|
+
|
343
|
+
def alive?
|
344
|
+
(@direct_consumers.size > 0 or (@wildcard_consumers.size > 0 and @direct_producers.size > 0))
|
345
|
+
end
|
346
|
+
|
347
|
+
|
348
|
+
def inspect
|
349
|
+
'<SO:%s:%s:%s:%s:%s:%s:%s>'%[@id, @versions.map { |v| v[0] }.inspect,@direct_producers.map(&:id).inspect,@wildcard_producers.keys.map(&:id).inspect,@direct_consumers.map(&:id).inspect,@wildcard_consumers.keys.map(&:id).inspect,@version_needed.inspect]
|
350
|
+
end
|
351
|
+
|
352
|
+
end
|
353
|
+
|
354
|
+
|
355
|
+
def self.matching(pattern,check_against)
|
356
|
+
if pattern.starexp?
|
357
|
+
check_against.each_key.map { |id|
|
358
|
+
id if (not id.starexp?) and (id =~ pattern.starexp)
|
359
|
+
}.compact
|
360
|
+
else
|
361
|
+
(check_against.each_key.find { |id|
|
362
|
+
(id.starexp? and pattern =~ id.starexp) or ((not id.starexp?) and pattern == id)
|
363
|
+
}) ? [pattern] : []
|
364
|
+
end
|
365
|
+
end
|
366
|
+
|
367
|
+
|
368
|
+
def self.spawn(id)
|
369
|
+
puts "Creating:\n "+id if DEBUG
|
370
|
+
@@objects_by_id[id] = object = SeapigObject.new(id)
|
371
|
+
@@producers.each_pair.map { |pattern,clients| clients.each { |client| object.producer_register(pattern,client) if pattern.starexp? and (id =~ pattern.starexp) or (id == pattern) } }
|
372
|
+
@@consumers.each_pair.map { |pattern,clients| clients.each { |client| object.consumer_register(pattern,client) if pattern.starexp? and (id =~ pattern.starexp) or (id == pattern) } }
|
373
|
+
enqueue(object)
|
374
|
+
object
|
375
|
+
end
|
376
|
+
|
377
|
+
|
378
|
+
def self.despawn(object)
|
379
|
+
puts "Deleting:\n "+object.id if DEBUG
|
380
|
+
raise "Despawning object that should stay alive" if object.alive? or @@dependents[object.id]
|
381
|
+
object.destroy
|
382
|
+
(@@dependencies.delete(object.id) or []).each { |dependency_id|
|
383
|
+
dependent_remove(dependency_id, object.id)
|
384
|
+
}
|
385
|
+
@@objects_by_id.delete(object.id)
|
386
|
+
end
|
387
|
+
|
388
|
+
|
389
|
+
def self.enqueue(object)
|
390
|
+
if object.version_needed and object.version_latest == object.version_needed
|
391
|
+
@@queue.delete(object)
|
392
|
+
else
|
393
|
+
return if @@queue.include?(object) or (@@produced[object.id] and @@produced[object.id].include?(object.version_needed))
|
394
|
+
@@queue << object
|
395
|
+
(Set.new(object.direct_producers) + object.wildcard_producers.keys).find { |client|
|
396
|
+
dequeue(client, object) if not @@producing[client]
|
397
|
+
}
|
398
|
+
end
|
399
|
+
end
|
400
|
+
|
401
|
+
|
402
|
+
def self.dequeue(client,object)
|
403
|
+
object = @@queue.find { |candidate_object| candidate_object.direct_producers.include?(client) or candidate_object.wildcard_producers.has_key?(client) } if not object
|
404
|
+
return false if not @@queue.include?(object)
|
405
|
+
version_snapshot = (object.version_needed == nil ? nil : (object.version_needed.kind_of?(Fixnum) ? object.version_needed : object.version_needed.clone))
|
406
|
+
client.object_produce(object.id, version_snapshot)
|
407
|
+
@@queue.delete(object)
|
408
|
+
@@producing[client] = object
|
409
|
+
(@@produced[object.id] ||= Set.new) << version_snapshot
|
410
|
+
end
|
411
|
+
|
412
|
+
|
413
|
+
def self.dependent_add(id, dependent)
|
414
|
+
@@dependents[id] = Set.new if not @@dependents[id]
|
415
|
+
@@dependents[id] << dependent
|
416
|
+
self.matching(id, @@producers).each { |matching_id|
|
417
|
+
self.spawn(matching_id) if not @@objects_by_id[matching_id]
|
418
|
+
}
|
419
|
+
end
|
420
|
+
|
421
|
+
|
422
|
+
def self.dependent_remove(id, dependent)
|
423
|
+
@@dependents[id].delete(dependent)
|
424
|
+
@@dependents.delete(id) if @@dependents[id].size == 0
|
425
|
+
self.despawn(@@objects_by_id[id]) if @@objects_by_id.include?(id) and (not @@objects_by_id[id].alive?) and (not @@dependents[id])
|
426
|
+
end
|
427
|
+
|
428
|
+
|
429
|
+
def self.pp
|
430
|
+
[
|
431
|
+
"Objects:", @@objects_by_id.values.map { |object| " %s"%[object.inspect] }.join("\n"),
|
432
|
+
"Queue:", @@queue.map { |object| " %s"%[object.inspect] }.join("\n"),
|
433
|
+
"Producing:", @@producing.map { |client,object| " %s - %s"%[client.id,object.id] }.join("\n"),
|
434
|
+
"Produced:", @@produced.map { |object,versions| " %s - %s"%[object,versions.inspect] }.join("\n")
|
435
|
+
].select { |str| str.size > 0 }.join("\n")+"\n"
|
436
|
+
end
|
437
|
+
|
438
|
+
|
439
|
+
|
440
|
+
|
441
|
+
|
442
|
+
|
443
|
+
end
|
444
|
+
|
445
|
+
|
446
|
+
|
447
|
+
#TODO:
|
448
|
+
# * Refactor to have ClientSpace class/module with Clients inside
|
449
|
+
|
450
|
+
|
451
|
+
class Client
|
452
|
+
|
453
|
+
attr_reader :produces, :consumes, :socket, :producing, :index, :pong_time
|
454
|
+
attr_accessor :options
|
455
|
+
|
456
|
+
@@clients_by_socket = {}
|
457
|
+
@@count = 0
|
458
|
+
|
459
|
+
|
460
|
+
def self.[](socket)
|
461
|
+
@@clients_by_socket[socket]
|
462
|
+
end
|
463
|
+
|
464
|
+
|
465
|
+
def initialize(socket)
|
466
|
+
@index = @@count += 1
|
467
|
+
puts "Client connected:\n "+@index.to_s if DEBUG
|
468
|
+
@socket = socket
|
469
|
+
@options = {}
|
470
|
+
@produces = Set.new
|
471
|
+
@consumes = Set.new
|
472
|
+
@versions = {}
|
473
|
+
@producing = nil
|
474
|
+
@@clients_by_socket[socket] = self
|
475
|
+
self.pong
|
476
|
+
end
|
477
|
+
|
478
|
+
|
479
|
+
def id
|
480
|
+
(@options['name'] or "") + ':' + @index.to_s
|
481
|
+
end
|
482
|
+
|
483
|
+
|
484
|
+
def inspect
|
485
|
+
id
|
486
|
+
end
|
487
|
+
|
488
|
+
|
489
|
+
def destroy
|
490
|
+
puts "Client disconnected:\n "+@index.to_s if DEBUG
|
491
|
+
@@clients_by_socket.delete(@socket)
|
492
|
+
@produces.each { |pattern| SeapigObjectStore.producer_unregister(pattern,self) }
|
493
|
+
@consumes.each { |pattern| SeapigObjectStore.consumer_unregister(pattern,self) }
|
494
|
+
producing = @producing
|
495
|
+
@producing = nil
|
496
|
+
SeapigObjectStore.version_set(self,producing[0],nil,nil,producing[1]) if producing
|
497
|
+
end
|
498
|
+
|
499
|
+
|
500
|
+
def producer_register(pattern, known_version)
|
501
|
+
@produces.add(pattern)
|
502
|
+
SeapigObjectStore.producer_register(pattern, self)
|
503
|
+
SeapigObjectStore.version_set(self, pattern, known_version, true, false) if known_version and not pattern.starexp?
|
504
|
+
end
|
505
|
+
|
506
|
+
|
507
|
+
def producer_unregister(pattern)
|
508
|
+
@produces.delete(pattern)
|
509
|
+
SeapigObjectStore.producer_unregister(pattern, self)
|
510
|
+
if @producing and (pattern.starexp? ? (@producing[0] =~ pattern.starexp) : (@producing[0] == pattern)) #NOTE: overlaping production patterns are not supported
|
511
|
+
producing = @producing
|
512
|
+
@producing = nil
|
513
|
+
SeapigObjectStore.version_set(self,producing[0],nil,nil,producing[1])
|
514
|
+
end
|
515
|
+
end
|
516
|
+
|
517
|
+
|
518
|
+
def consumer_register(pattern, known_version)
|
519
|
+
@consumes.add(pattern)
|
520
|
+
@versions[pattern] = known_version if not pattern.starexp?
|
521
|
+
SeapigObjectStore.consumer_register(pattern, self)
|
522
|
+
gc_versions
|
523
|
+
end
|
524
|
+
|
525
|
+
|
526
|
+
def consumer_unregister(pattern)
|
527
|
+
@consumes.delete(pattern)
|
528
|
+
SeapigObjectStore.consumer_unregister(pattern, self)
|
529
|
+
gc_versions
|
530
|
+
end
|
531
|
+
|
532
|
+
|
533
|
+
def gc_versions
|
534
|
+
@versions.keys.each { |object_id|
|
535
|
+
@versions.delete(object_id) if not (@consumes).find { |pattern|
|
536
|
+
pattern.starexp? and (object_id =~ pattern.starexp) or (pattern == object_id)
|
537
|
+
}
|
538
|
+
}
|
539
|
+
end
|
540
|
+
|
541
|
+
|
542
|
+
def object_update(object_id, object_version, object_data)
|
543
|
+
#THINK: should we propagate stalls to clients?
|
544
|
+
return if object_version == 0 or object_version == @versions[object_id]
|
545
|
+
old_version, old_data = SeapigObjectStore.version_get(self,object_id,(@versions[object_id] or 0))
|
546
|
+
data = if old_version == 0
|
547
|
+
{ "value" => object_data }
|
548
|
+
else
|
549
|
+
diff = SeapigObjectStore.cache_get(object_id,[:diff,old_version,object_version])
|
550
|
+
diff = SeapigObjectStore.cache_set(object_id,[:diff,old_version,object_version],JsonDiff.generate(old_data, object_data)) if not diff
|
551
|
+
{ "patch" => diff }
|
552
|
+
end
|
553
|
+
|
554
|
+
json = Oj.dump({
|
555
|
+
"action" => 'object-update',
|
556
|
+
"id" => object_id,
|
557
|
+
"old_version" => old_version,
|
558
|
+
"new_version" => object_version,
|
559
|
+
}.merge(data))
|
560
|
+
puts "Sending:\n %8iB %s to %s"%[json.size, object_id, id] if DEBUG
|
561
|
+
@versions[object_id] = object_version
|
562
|
+
@socket.send json
|
563
|
+
end
|
564
|
+
|
565
|
+
|
566
|
+
def object_destroy(object_id)
|
567
|
+
@socket.send Oj.dump("action" => 'object-destroy', "id" => object_id)
|
568
|
+
end
|
569
|
+
|
570
|
+
|
571
|
+
def object_patch(object_id, patch, value, from_version, to_version)
|
572
|
+
raise "patching wildcard object. no." if object_id.starexp?
|
573
|
+
requested_object_id, requested_version = @producing
|
574
|
+
if requested_object_id == object_id
|
575
|
+
@producing = nil
|
576
|
+
else
|
577
|
+
requested_version = false
|
578
|
+
end
|
579
|
+
new_version = to_version
|
580
|
+
|
581
|
+
new_data = if patch
|
582
|
+
object_version, object_data = SeapigObjectStore.version_get(self,object_id,from_version)
|
583
|
+
print "Patching:\n version: "+object_version.inspect+"\n from_version: "+from_version.inspect+"\n to_version: "+to_version.inspect+"\n patch_size: "+(patch and patch.size.to_s or "nil")+"\n --> " if DEBUG
|
584
|
+
if from_version == object_version
|
585
|
+
puts 'clean' if DEBUG
|
586
|
+
new_data = Oj.load(Oj.dump(object_data))
|
587
|
+
begin
|
588
|
+
Hana::Patch.new(patch).apply(new_data) if patch
|
589
|
+
rescue Exception => e
|
590
|
+
puts "Patching failed!\n Old object: "+object_data.inspect+"\n Patch: "+patch.inspect if DEBUG
|
591
|
+
raise e
|
592
|
+
end
|
593
|
+
new_data
|
594
|
+
else
|
595
|
+
puts "can't update object, couldn't find base version" if DEBUG
|
596
|
+
nil
|
597
|
+
end
|
598
|
+
elsif value != nil
|
599
|
+
print "Setting:\n version: "+object_version.inspect+"\n from_version: "+from_version.inspect+"\n to_version: "+to_version.inspect+"\n value_size: "+(value.inspect.size.to_s)+"\n" if DEBUG
|
600
|
+
value
|
601
|
+
else
|
602
|
+
nil
|
603
|
+
end
|
604
|
+
|
605
|
+
SeapigObjectStore.version_set(self,object_id,new_version,new_data,requested_version)
|
606
|
+
end
|
607
|
+
|
608
|
+
|
609
|
+
# Assigns production of (object_id, object_version) to this client and
# notifies its peer over the websocket. A client may be producing only
# one object at a time, and only objects matching one of its registered
# producer patterns.
def object_produce(object_id, object_version)
	raise "Can't produce a wildcard object" if object_id.starexp?
	raise "Client already producing something (producing: %s, trying to assign: %s)"%[@producing.inspect, [object_id,object_version].inspect] if @producing
	matches_pattern = @produces.any? { |pattern| object_id =~ pattern.starexp }
	raise "Can't produce that pattern: "+@produces.inspect+" "+object_id.inspect unless matches_pattern
	puts "Assigning:\n "+object_id+':'+object_version.inspect+' to: '+self.id if DEBUG
	@socket.send Oj.dump("action" => 'object-produce', "id" => object_id, "version"=>object_version)
	@producing = [object_id, object_version]
end
|
617
|
+
|
618
|
+
|
619
|
+
# Records when this client's socket last answered a ping; consulted by
# Client.check_ping_timeouts to drop unresponsive connections.
def pong
	@pong_time = Time.now
end
|
622
|
+
|
623
|
+
|
624
|
+
# Pings every connected client socket; pong replies feed Client#pong.
def self.send_pings
	@@clients_by_socket.each_key(&:ping)
end
|
627
|
+
|
628
|
+
|
629
|
+
# Sends a heartbeat message to every client that opted in via the
# 'heartbeat' client option.
def self.send_heartbeats
	@@clients_by_socket.each_pair do |socket, client|
		next unless client.options['heartbeat']
		socket.send Oj.dump('action' => 'heartbeat')
	end
end
|
632
|
+
|
633
|
+
|
634
|
+
# Closes the socket of any client that has not answered a ping for more
# than 60 seconds.
def self.check_ping_timeouts
	@@clients_by_socket.each_pair do |socket, client|
		socket.close if Time.new - client.pong_time > 60
	end
end
|
637
|
+
|
638
|
+
|
639
|
+
# Human-readable summary of all connected clients, used for DEBUG output.
def self.pp
	lines = @@clients_by_socket.values.map do |client|
		" %-20s produces:%s consumes:%s"%[client.id, client.produces.to_a, client.consumes.to_a]
	end
	"Clients:\n" + lines.join("\n") + "\n"
end
|
642
|
+
end
|
643
|
+
|
644
|
+
|
645
|
+
# In-process producer, registered for the "SeapigServer::Objects" object —
# presumably meant to publish server introspection data without a websocket
# round-trip. NOTE(review): this class looks unfinished; see object_produce.
class InternalClient

	# Empty placeholder; no visible callers or body — TODO confirm intent.
	def self.produce
	end

	# Registers this instance as the producer of "SeapigServer::Objects"
	# in the in-memory object store.
	def initialize
		SeapigObjectStore.producer_register("SeapigServer::Objects", self)
	end

	# NOTE(review): incomplete implementation. `objects =` continues onto the
	# next line, so `objects` is assigned the result of version_set while also
	# being passed into it (as nil), and `new_version` is an undefined local
	# that would raise NameError if this were ever called. Client#object_patch
	# calls SeapigObjectStore.version_set with five arguments (self first);
	# this call passes only four — verify against the store's signature.
	def object_produce(object_id, object_version)
		objects =
		SeapigObjectStore.version_set(object_id,new_version,objects,object_version)
	end

end
|
660
|
+
|
661
|
+
|
662
|
+
#TODO:
|
663
|
+
# * change protocol to use "pattern" instead of "id"
|
664
|
+
# * change "object-patch" to something nicer
|
665
|
+
|
666
|
+
# Per-message processing durations (seconds) and their running sum; used for
# the DEBUG/INFO timing reports and the once-a-second CPU-usage estimate.
processing_times = []
processing_times_sum = 0

# Main reactor loop: accepts websocket clients and dispatches their messages.
EM.run {

	WebSocket::EventMachine::Server.start(host: HOST, port: PORT) { |client_socket|

		client_socket.onmessage { |message|
			begin
				started_at = Time.new
				client = Client[client_socket]
				message = Oj.load message
				puts "-"*80 + ' ' + Time.new.to_s if DEBUG
				print "Message:\n from: %-20s\n action: %-30s\n param: %-50s "%[client.id, message['action'], Oj.dump(message.select { |k,v| ['pattern','id','options'].include?(k) })] if DEBUG
				puts if DEBUG
				object_id = message['id'] if message['id']
				# Dispatch by protocol action; each branch checks the field it needs
				# and fails fast on malformed messages.
				case message['action']
				when 'object-producer-register'
					fail unless message['pattern']
					client.producer_register(message['pattern'],message['known-version'])
				when 'object-producer-unregister'
					fail unless message['pattern']
					client.producer_unregister(message['pattern'])
				when 'object-patch'
					fail unless message['id']
					client.object_patch(object_id,message['patch'], message['value'], message['old_version'], message['new_version'])
				when 'object-consumer-register'
					fail unless message['id']
					client.consumer_register(object_id,message['known-version'])
				when 'object-consumer-unregister'
					fail unless message['id']
					client.consumer_unregister(object_id)
				when 'client-options-set'
					fail unless message['options']
					client.options = message['options']
				else
					raise 'WTF, got message with action: ' + message['action'].inspect
				end
				# Record how long this message took for the timing reports below.
				processing_times << (Time.new.to_f - started_at.to_f)
				processing_times_sum += processing_times[-1]
				if DEBUG
					puts Client.pp
					puts SeapigObjectStore.pp
					puts "Processing:\n time: %.3fs\n count: %i\n average: %.3fs\n total: %.3fs"%[processing_times[-1], processing_times.size, processing_times_sum / processing_times.size, processing_times_sum]
				end
				puts "message:%3i t:%.3fs Σt:%.3fs t̅:%.3fs"%[processing_times.size, processing_times[-1], processing_times_sum, processing_times_sum / processing_times.size,] if INFO and not DEBUG
			rescue => e
				# Log with backtrace, then re-raise: a failed message brings the
				# server down rather than being silently dropped.
				puts "Message processing error:\n "
				p e
				e.backtrace.each { |line| puts line }
				raise
			end
		}

		client_socket.onopen { Client.new(client_socket) }
		client_socket.onclose { Client[client_socket].destroy if Client[client_socket] }
		client_socket.onpong { Client[client_socket].pong }
	}

	puts "Listening on %s:%s"%[HOST,PORT] if INFO or DEBUG
	# systemd readiness notification (sd_notify protocol) when run under systemd.
	Socket.open(:UNIX, :DGRAM) { |s| s.connect(Socket.pack_sockaddr_un(ENV['NOTIFY_SOCKET'])); s.sendmsg "READY=1" } if ENV['NOTIFY_SOCKET']

	# Keepalive machinery: ping everyone, heartbeat opted-in clients, and drop
	# connections that haven't ponged in over a minute (see Client class methods).
	EM.add_periodic_timer(10) { Client.send_pings }
	EM.add_periodic_timer(10) { Client.send_heartbeats }
	EM.add_periodic_timer(10) { Client.check_ping_timeouts }

	# Once a second (in DEBUG), report the share of wall-clock time spent
	# processing messages since the previous tick.
	EM.add_periodic_timer(1) {
		now = Time.new
		puts "CPU time used: %7.3f%%"%[(processing_times_sum-$last_processing_times_sum)*100.0/(now - $last_cpu_time)] if $last_cpu_time and DEBUG
		$last_cpu_time = now
		$last_processing_times_sum = processing_times_sum
	}

	# Self-pipe trick: the SIGINT handler only writes a byte; the actual exit
	# runs on the reactor when the pipe becomes readable (safe inside a trap).
	close_reader, close_writer = IO.pipe

	EM.watch(close_reader) { |connection|
		connection.notify_readable = true
		connection.define_singleton_method(:notify_readable) do
			puts "Shutting down" if INFO or DEBUG
			exit
		end
	}

	Signal.trap("INT") {
		puts "SIGINT received, scheduling exit." if DEBUG
		close_writer.write('.')
	}
}
|
data/lib/seapig/version.rb
CHANGED
metadata
CHANGED
@@ -1,14 +1,14 @@
|
|
1
1
|
--- !ruby/object:Gem::Specification
|
2
2
|
name: seapig-server
|
3
3
|
version: !ruby/object:Gem::Version
|
4
|
-
version: 0.1.3
|
4
|
+
version: 0.1.4
|
5
5
|
platform: ruby
|
6
6
|
authors:
|
7
7
|
- yunta
|
8
8
|
autorequire:
|
9
9
|
bindir: bin
|
10
10
|
cert_chain: []
|
11
|
-
date:
|
11
|
+
date: 2017-01-15 00:00:00.000000000 Z
|
12
12
|
dependencies:
|
13
13
|
- !ruby/object:Gem::Dependency
|
14
14
|
name: websocket-eventmachine-server
|
@@ -92,6 +92,7 @@ files:
|
|
92
92
|
- README.rdoc
|
93
93
|
- Rakefile
|
94
94
|
- bin/seapig-server
|
95
|
+
- bin/seapig-server-intro
|
95
96
|
- lib/seapig-server.rb
|
96
97
|
- lib/seapig/version.rb
|
97
98
|
- test/dummy/log/development.log
|