cs_fix 0.1.1-x86-mingw32

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
bin/backup_server ADDED
@@ -0,0 +1,27 @@
+ #!/usr/bin/env ruby
+
+ # Runs the backup server. This server monitors a set of directories (blobs/patterns).
+ # The files in those directories are indexed (their SHA1 is calculated).
+ # A remote server copies new/changed files to the backup server. The backup server
+ # indexes those new files too and sends the content data (their SHA1) to the original server.
+
+ # NOTE: this file is mostly a copy of bin/content_server.
+
+ begin
+   require 'yaml'
+   require 'params'
+   require 'run_in_background'
+   require 'content_server'
+ rescue LoadError
+   require 'rubygems'
+   require 'yaml'
+   require 'params'
+   require 'run_in_background'
+   require 'content_server'
+ end
+ include BBFS
+
+ Params.init ARGV
+ Log.init
+
+ RunInBackground.run { ContentServer.run_backup_server }
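For illustration, both executables read their configuration through the params gem, so the defaults declared in lib/content_server.rb below can be overridden at launch. A hypothetical invocation (the exact flag syntax depends on the params gem; host and folder are example values, not from the package):

    content_server --remote_server=backup.example.com --remote_content_port=3333
    backup_server --remote_server=backup.example.com --backup_destination_folder=/mnt/hd1/bbbackup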
bin/content_server ADDED
@@ -0,0 +1,24 @@
+ #!/usr/bin/env ruby
+
+ # Runs the content server. This server monitors a set of directories (blobs/patterns).
+ # The files in those directories are indexed (their SHA1 is calculated).
+ # Each unique content is backed up to the remote (backup) server.
+
+ begin
+   require 'yaml'
+   require 'params'
+   require 'run_in_background'
+   require 'content_server'
+ rescue LoadError
+   require 'rubygems'
+   require 'yaml'
+   require 'params'
+   require 'run_in_background'
+   require 'content_server'
+ end
+ include BBFS
+
+ Params.init ARGV
+ Log.init
+
+ RunInBackground.run { ContentServer.run }
lib/content_server.rb ADDED
@@ -0,0 +1,189 @@
+ require 'fileutils'
+ require 'set'
+ require 'thread'
+
+ require 'content_data'
+ require 'content_server/content_receiver'
+ require 'content_server/queue_indexer'
+ require 'content_server/queue_copy'
+ require 'content_server/remote_content'
+ require 'file_indexing'
+ require 'file_monitoring'
+ require 'log'
+ require 'networking/tcp'
+ require 'params'
+
+ # Content server. Monitors files, indexes local files, listens for backup server content,
+ # and copies changes and new files to the backup server.
+ module BBFS
+   module ContentServer
+     Params.string('remote_server', 'localhost', 'IP or DNS of backup server.')
+     Params.string('backup_username', nil, 'Backup server username.')
+     Params.string('backup_password', nil, 'Backup server password.')
+     Params.integer('backup_file_listening_port', 4444, 'Listening port in backup server for files.')
+     Params.string('content_data_path', File.expand_path('~/.bbfs/var/content.data'),
+                   'ContentData file path.')
+     Params.string('monitoring_config_path', File.expand_path('~/.bbfs/etc/file_monitoring.yml'),
+                   'Configuration file for monitoring.')
+     Params.integer('remote_content_port', 3333, 'Default port for remote content copy.')
+     Params.integer('backup_check_delay', 5, 'Time between two content vs backup checks.')
+
+     def run
+       all_threads = []
+
+       # # # # # # # # # # # #
+       # Initialize/Start monitoring
+       monitoring_events = Queue.new
+       fm = FileMonitoring::FileMonitoring.new
+       fm.set_config_path(Params['monitoring_config_path'])
+       fm.set_event_queue(monitoring_events)
+       # Start monitoring and writing changes to the queue.
+       all_threads << Thread.new do
+         fm.monitor_files
+       end
+
+       # # # # # # # # # # # # # # # # # # # # # # # # #
+       # Initialize/Start backup server content data listener
+       #backup_server_content_data = nil
+       #backup_server_content_data_queue = Queue.new
+       #content_data_receiver = ContentDataReceiver.new(
+       #  backup_server_content_data_queue,
+       #  Params['remote_listening_port'])
+       # Start listening to backup server
+       #all_threads << Thread.new do
+       #  content_data_receiver.run
+       #end
+
+       # # # # # # # # # # # # # #
+       # Initialize/Start local indexer
+       local_server_content_data_queue = Queue.new
+       queue_indexer = QueueIndexer.new(monitoring_events,
+                                        local_server_content_data_queue,
+                                        Params['content_data_path'])
+       # Start indexing on demand and write changes to the queue.
+       all_threads << queue_indexer.run
+
+       # # # # # # # # # # # # # # # # # # # # # # #
+       # Initialize/Start content data comparator
+       copy_files_events = Queue.new
+       local_dynamic_content_data = ContentData::DynamicContentData.new
+       all_threads << Thread.new do
+         # backup_server_content_data = ContentData::ContentData.new
+         # local_server_content_data = nil
+         while true do
+
+           # Note: this thread should be the only consumer of local_server_content_data_queue.
+           Log.info 'Waiting on local server content data.'
+           local_server_content_data = local_server_content_data_queue.pop
+           local_dynamic_content_data.update(local_server_content_data)
+           #
+           # # Note: this thread should be the only consumer of backup_server_content_data_queue.
+           # # Note: the server will wait on the first pop until the backup sends its content data.
+           # while backup_server_content_data_queue.size > 0
+           #   Log.info 'Waiting on backup server content data.'
+           #   backup_server_content_data = backup_server_content_data_queue.pop
+           # end
+
+           # Log.info 'Updating file copy queue.'
+           # Log.debug1 "local_server_content_data #{local_server_content_data}."
+           # Log.debug1 "backup_server_content_data #{backup_server_content_data}."
+           # # Remove backup content data from local server.
+           # content_to_copy = ContentData::ContentData.remove(backup_server_content_data, local_server_content_data)
+           # content_to_copy = local_server_content_data
+           # # Add a copy instruction in case content is not empty.
+           # Log.debug1 "Content to copy: #{content_to_copy}"
+           # copy_files_events.push([:COPY_MESSAGE, content_to_copy]) unless content_to_copy.empty?
+         end
+       end
+
+       remote_content_client = RemoteContentClient.new(local_dynamic_content_data,
+                                                       Params['remote_content_port'])
+       all_threads << remote_content_client.tcp_thread
+
+       # # # # # # # # # # # # # # # #
+       # Start copying files on demand
+       copy_server = FileCopyServer.new(copy_files_events, Params['backup_file_listening_port'])
+       all_threads.concat(copy_server.run())
+
+       # Finalize server threads.
+       all_threads.each { |t| t.abort_on_exception = true }
+       all_threads.each { |t| t.join }
+       # Should never reach this line.
+     end
+     module_function :run
+
+     def run_backup_server
+       all_threads = []
+
+       # # # # # # # # # # # #
+       # Initialize/Start monitoring
+       monitoring_events = Queue.new
+       fm = FileMonitoring::FileMonitoring.new
+       fm.set_config_path(Params['monitoring_config_path'])
+       fm.set_event_queue(monitoring_events)
+       # Start monitoring and writing changes to the queue.
+       all_threads << Thread.new do
+         fm.monitor_files
+       end
+
+       # # # # # # # # # # # # # #
+       # Initialize/Start local indexer
+       local_server_content_data_queue = Queue.new
+       queue_indexer = QueueIndexer.new(monitoring_events,
+                                        local_server_content_data_queue,
+                                        Params['content_data_path'])
+       # Start indexing on demand and write changes to the queue.
+       all_threads << queue_indexer.run
+
+       # # # # # # # # # # # # # # # # # # # # # # # # # # #
+       # Initialize/Start backup server content data sender
+       dynamic_content_data = ContentData::DynamicContentData.new
+       #content_data_sender = ContentDataSender.new(
+       #  Params['remote_server'],
+       #  Params['remote_listening_port'])
+       # Start sending to backup server
+       all_threads << Thread.new do
+         while true do
+           Log.info 'Waiting on local server content data queue.'
+           cd = local_server_content_data_queue.pop
+           # content_data_sender.send_content_data(cd)
+           dynamic_content_data.update(cd)
+         end
+       end
+
+       content_server_dynamic_content_data = ContentData::DynamicContentData.new
+       remote_content = ContentServer::RemoteContent.new(content_server_dynamic_content_data,
+                                                         Params['remote_server'],
+                                                         Params['remote_content_port'],
+                                                         Params['backup_destination_folder'])
+       all_threads.concat(remote_content.run())
+
+       file_copy_client = FileCopyClient.new(Params['remote_server'],
+                                             Params['backup_file_listening_port'],
+                                             dynamic_content_data)
+       all_threads.concat(file_copy_client.threads)
+
+       # Periodically compare local and remote content and request a copy of the diff.
+       all_threads << Thread.new do
+         loop do
+           sleep(Params['backup_check_delay'])
+           local_cd = dynamic_content_data.last_content_data()
+           remote_cd = content_server_dynamic_content_data.last_content_data()
+           diff = ContentData::ContentData.remove(local_cd, remote_cd)
+           Log.debug2("Files to send? #{!diff.empty?}")
+           file_copy_client.request_copy(diff) unless diff.empty?
+         end
+       end
+
+
+       all_threads.each { |t| t.abort_on_exception = true }
+       all_threads.each { |t| t.join }
+       # Should never reach this line.
+     end
+     module_function :run_backup_server
+
+   end # module ContentServer
+ end # module BBFS
+
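The 'monitoring_config_path' param above points at a YAML file consumed by the file_monitoring gem. A hypothetical sketch of what such a config might contain; the actual schema is defined by the file_monitoring gem and may differ (all keys and values below are assumptions, not from this package):

    paths:
      backup_data:
        path: /home/user/data    # directory to monitor (example value)
        scan_period: 60          # seconds between scans (assumed key)
        stable_state: 5          # scans before a file counts as stable (assumed key)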
lib/content_server/content_receiver.rb ADDED
@@ -0,0 +1,62 @@
+ require 'log'
+ require 'params'
+ require 'socket'
+
+ module BBFS
+   module ContentServer
+
+     class ContentDataReceiver
+       def initialize(queue, port)
+         @queue = queue
+         @port = port
+       end
+
+       def run
+         Socket.tcp_server_loop(@port) do |sock, client_addrinfo|
+           while size_of_data = sock.read(4)
+             size_of_data = size_of_data.unpack('l')[0]
+             Log.debug3 "Size of data: #{size_of_data}"
+             data = sock.read(size_of_data)
+             #Log.debug3 "Data received: #{data}"
+             unmarshaled_data = Marshal.load(data)
+             #Log.debug3 "Unmarshaled data: #{unmarshaled_data}"
+             @queue.push unmarshaled_data
+             Log.debug3 "Socket closed? #{sock.closed?}."
+             break if sock.closed?
+             Log.debug1 'Waiting on sock.read'
+           end
+           Log.debug1 'Exited, socket closed or read returned nil.'
+         end
+       end
+     end
+
+     class ContentDataSender
+
+       def initialize(host, port)
+         @host = host
+         @port = port
+         open_socket
+       end
+
+       def open_socket
+         Log.debug1 "Connecting to content server #{@host}:#{@port}."
+         @tcp_socket = TCPSocket.new(@host, @port)
+       end
+
+       def send_content_data(content_data)
+         open_socket if @tcp_socket.closed?
+         #Log.debug3 "Data to send: #{content_data}"
+         marshal_data = Marshal.dump(content_data)
+         Log.debug3 "Marshaled size: #{marshal_data.length}."
+         data_size = [marshal_data.length].pack('l')
+         #Log.debug3 "Marshaled data: #{marshal_data}."
+         if data_size.nil? || marshal_data.nil?
+           Log.debug3 'Send data is nil!'
+         end
+         @tcp_socket.write data_size
+         @tcp_socket.write marshal_data
+       end
+     end
+
+   end
+ end
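The sender and receiver above share a simple framing scheme: a 4-byte length header produced by Array#pack('l'), followed by the Marshal-dumped object. A minimal self-contained sketch of the sending side (host, port and payload are placeholders):

    require 'socket'

    # Frame and send one Ruby object the way ContentDataSender does:
    # a 4-byte native-endian length, then the Marshal-dumped bytes.
    payload = Marshal.dump({ 'example' => 'content data' })
    sock = TCPSocket.new('localhost', 3333)
    sock.write([payload.bytesize].pack('l'))
    sock.write(payload)
    sock.close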
lib/content_server/file_streamer.rb ADDED
@@ -0,0 +1,282 @@
+ require 'tempfile'
+ require 'thread'
+
+ require 'file_indexing/index_agent'
+ require 'log'
+
+ module BBFS
+   module ContentServer
+
+     Params.integer('streaming_chunk_size', 2*1024*1024,
+                    'Max number of content bytes to send in one chunk.')
+     Params.integer('file_streaming_timeout', 5*60,
+                    'If no action is taken on a file streamer, abort copy.')
+     Params.string('backup_destination_folder', '',
+                   'Backup server destination folder, default is the relative local folder.')
+
+     class Stream
+       attr_reader :checksum, :path, :tmp_path, :file, :size
+       def initialize(checksum, path, file, size)
+         @checksum = checksum
+         @path = path
+         @file = file
+         @size = size
+       end
+
+       def self.close_delete_stream(checksum, streams_hash)
+         if streams_hash.key?(checksum)
+           Log.info("close_delete_stream #{streams_hash[checksum].file}")
+           begin
+             streams_hash[checksum].file.close()
+           rescue IOError => e
+             Log.warning("While closing stream, could not close file #{streams_hash[checksum].path}." \
+                         " #{e.to_s}")
+           end
+           streams_hash.delete(checksum)
+         end
+       end
+
+     end
+
+     class FileStreamer
+       attr_reader :thread
+
+       # Stream message types (documentation only).
+       :NEW_STREAM
+       :ABORT_STREAM
+       :RESET_STREAM
+       :COPY_CHUNK
+
+       def initialize(send_chunk_clb, abort_streaming_clb=nil)
+         @send_chunk_clb = send_chunk_clb
+         @abort_streaming_clb = abort_streaming_clb
+         @stream_queue = Queue.new
+
+         # Used from internal thread only.
+         @streams = {}
+         @thread = run
+       end
+
+       def copy_another_chuck(checksum)
+         @stream_queue << [:COPY_CHUNK, checksum]
+       end
+
+       def start_streaming(checksum, path)
+         @stream_queue << [:NEW_STREAM, [checksum, path]]
+       end
+
+       def abort_streaming(checksum)
+         @stream_queue << [:ABORT_STREAM, checksum]
+       end
+
+       def reset_streaming(checksum, new_offset)
+         @stream_queue << [:RESET_STREAM, [checksum, new_offset]]
+       end
+
+       def run
+         return Thread.new do
+           loop {
+             checksum = handle(@stream_queue.pop)
+           }
+         end
+       end
+
+       def handle(message)
+         type, content = message
+         if type == :NEW_STREAM
+           checksum, path = content
+           reset_stream(checksum, path, 0)
+           @stream_queue << [:COPY_CHUNK, checksum] if @streams.key?(checksum)
+         elsif type == :ABORT_STREAM
+           checksum = content
+           Stream.close_delete_stream(checksum, @streams)
+         elsif type == :RESET_STREAM
+           checksum, new_offset = content
+           reset_stream(checksum, nil, new_offset)
+           @stream_queue << [:COPY_CHUNK, checksum] if @streams.key?(checksum)
+         elsif type == :COPY_CHUNK
+           checksum = content
+           if @streams.key?(checksum)
+             offset = @streams[checksum].file.pos
+             Log.debug1("Sending chunk for #{checksum}, offset #{offset}.")
+             chunk = @streams[checksum].file.read(Params['streaming_chunk_size'])
+             if chunk.nil?
+               # No more to read, send end of file.
+               @send_chunk_clb.call(checksum, offset, @streams[checksum].size, nil, nil)
+               Stream.close_delete_stream(checksum, @streams)
+             else
+               chunk_checksum = FileIndexing::IndexAgent.get_content_checksum(chunk)
+               @send_chunk_clb.call(checksum, offset, @streams[checksum].size, chunk, chunk_checksum)
+             end
+           else
+             Log.info("No checksum found to copy chunk. #{checksum}.")
+           end
+         end
+       end
+
+       def reset_stream(checksum, path, offset)
+         if !@streams.key? checksum
+           begin
+             file = File.new(path, 'rb')
+             file.seek(offset) if offset > 0
+             Log.info("File streamer: #{file.to_s}.")
+             # Register the stream only if the file opened successfully.
+             @streams[checksum] = Stream.new(checksum, path, file, file.size)
+           rescue IOError, SystemCallError => e
+             Log.warning("Could not stream local file #{path}. #{e.to_s}")
+           end
+         else
+           @streams[checksum].file.seek(offset)
+         end
+       end
+     end
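A minimal usage sketch of FileStreamer with a stub callback, mirroring the flow exercised by file_streamer_spec.rb below (the checksum and path are placeholders, and the gem's lib directory is assumed to be on the load path):

    require 'content_server/file_streamer'

    streamer = nil
    send_chunk = lambda do |checksum, offset, size, chunk, chunk_checksum|
      # A nil chunk marks end of file; otherwise ask for the next chunk.
      puts "chunk at offset #{offset}/#{size} for #{checksum}"
      streamer.copy_another_chuck(checksum) unless chunk.nil?
    end
    streamer = BBFS::ContentServer::FileStreamer.new(send_chunk)
    streamer.start_streaming('d0be2dc421be4fcd0172e5afceea3970e2f3d940', '/path/to/file')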
+
+     # Start implemented as a dummy, with no thread of its own, for now.
+     # Later, when it needs to respond and send aborts, timeouts, etc., it will
+     # need its own thread.
+     class FileReceiver
+
+       def initialize(file_done_clb=nil, file_abort_clb=nil, reset_copy=nil)
+         @file_done_clb = file_done_clb
+         @file_abort_clb = file_abort_clb
+         @reset_copy = reset_copy
+         @streams = {}
+       end
+
+       def receive_chunk(file_checksum, offset, file_size, content, content_checksum)
+         # If standard chunk copy.
+         if !content.nil? && !content_checksum.nil?
+           received_content_checksum = FileIndexing::IndexAgent.get_content_checksum(content)
+           comment = "Calculated received chunk with content checksum #{received_content_checksum}" \
+                     " vs message content checksum #{content_checksum}, " \
+                     "file checksum #{file_checksum}"
+           Log.debug1(comment) if content_checksum == received_content_checksum
+           # TODO should there be a kind of abort here?
+           if content_checksum != received_content_checksum
+             Log.warning(comment)
+             new_offset = 0
+             if @streams.key?(file_checksum)
+               new_offset = @streams[file_checksum].file.pos
+             end
+             @reset_copy.call(file_checksum, new_offset) unless @reset_copy.nil?
+             return false
+           end
+
+           if !@streams.key?(file_checksum)
+             handle_new_stream(file_checksum, file_size)
+           end
+           # We still check @streams has the key, because the file can fail to open.
+           if @streams.key?(file_checksum)
+             return handle_new_chunk(file_checksum, offset, content)
+           else
+             Log.warning('Cannot handle chunk, stream does not exist, sending abort.')
+             @file_abort_clb.call(file_checksum) unless @file_abort_clb.nil?
+             return false
+           end
+         # If last chunk copy.
+         elsif content.nil? && content_checksum.nil?
+           handle_last_chunk(file_checksum)
+           return false
+         else
+           Log.warning("Unexpected receive chunk message. file_checksum:#{file_checksum}, " \
+                       "content.nil?:#{content.nil?}, content_checksum:#{content_checksum}")
+           return false
+         end
+       end
+
+       # Open a new stream.
+       def handle_new_stream(file_checksum, file_size)
+         # Temporary and final destination paths.
+         tmp_path = FileReceiver.destination_filename(
+             File.join(Params['backup_destination_folder'], 'tmp'),
+             file_checksum)
+         path = FileReceiver.destination_filename(Params['backup_destination_folder'],
+                                                  file_checksum)
+         if File.exists?(path)
+           Log.warning("File already exists (#{path}), not writing.")
+           @file_abort_clb.call(file_checksum) unless @file_abort_clb.nil?
+         else
+           # The file will be moved from the tmp location once the transfer is done.
+           # The system uses the checksum (plus possibly a unique key) for the tmp file name.
+           FileUtils.makedirs(File.dirname(tmp_path)) unless File.directory?(File.dirname(tmp_path))
+           tmp_file = File.new(tmp_path, 'wb')
+           @streams[file_checksum] = Stream.new(file_checksum, tmp_path, tmp_file, file_size)
+         end
+       end
+
+       # Write a chunk to the temp file.
+       def handle_new_chunk(file_checksum, offset, content)
+         if offset == @streams[file_checksum].file.pos
+           FileReceiver.write_string_to_file(content, @streams[file_checksum].file)
+           Log.info("Written already #{@streams[file_checksum].file.pos} bytes, " \
+                    "out of #{@streams[file_checksum].size} " \
+                    "(#{100.0*@streams[file_checksum].file.size/@streams[file_checksum].size}%)")
+           return true
+         else
+           # Offset is wrong, send reset/resume copy from the correct offset.
+           Log.warning("Received chunk with incorrect offset #{offset}, should " \
+                       "be #{@streams[file_checksum].file.pos}, file_checksum:#{file_checksum}")
+           @reset_copy.call(file_checksum, @streams[file_checksum].file.pos) unless @reset_copy.nil?
+           return false
+         end
+       end
+
+       # Move the file to its permanent location, close the stream,
+       # remove the temp file and check the written file.
+       def handle_last_chunk(file_checksum)
+         if @streams.key?(file_checksum)
+           # Make the directory if it does not exist.
+           path = FileReceiver.destination_filename(Params['backup_destination_folder'],
+                                                    file_checksum)
+           Log.debug1("Moving tmp file #{@streams[file_checksum].path} to #{path}")
+           Log.debug1("Creating directory: #{path}")
+           file_dir = File.dirname(path)
+           FileUtils.makedirs(file_dir) unless File.directory?(file_dir)
+           # Move tmp file to permanent location.
+           tmp_file_path = @streams[file_checksum].path
+           Stream.close_delete_stream(file_checksum, @streams)  # Temp file is closed here.
+
+           local_file_checksum = FileIndexing::IndexAgent.get_checksum(tmp_file_path)
+           message = "Local checksum (#{local_file_checksum}) received checksum (#{file_checksum})."
+           if local_file_checksum == file_checksum
+             Log.info(message)
+             begin
+               File.rename(tmp_file_path, path)
+               Log.info("End move tmp file to permanent location #{path}.")
+               @file_done_clb.call(local_file_checksum, path) unless @file_done_clb.nil?
+             rescue IOError => e
+               Log.warning("Could not move tmp file to permanent file #{path}. #{e.to_s}")
+             end
+           else
+             Log.error(message)
+             Log.debug1("Deleting tmp file: #{tmp_file_path}")
+             File.delete(tmp_file_path)
+           end
+         else
+           Log.error('Handling last chunk and tmp stream does not exist.')
+         end
+       end
+
+       def self.write_string_to_file(str, file)
+         bytes_to_write = str.bytesize
+         Log.info("writing to file: #{file.to_s}, #{bytes_to_write} bytes.")
+         # IO#write blocks until the whole string is written.
+         file.write(str)
+       end
+
+       # Creates the destination filename for the backup server from a base folder and a sha1.
+       # For example: folder: /mnt/hd1/bbbackup, sha1: d0be2dc421be4fcd0172e5afceea3970e2f3d940
+       # dest filename: /mnt/hd1/bbbackup/d0/be/2d/d0be2dc421be4fcd0172e5afceea3970e2f3d940
+       def self.destination_filename(folder, sha1)
+         File.join(folder, sha1[0,2], sha1[2,2], sha1)
+       end
+
+       private :handle_new_stream, :handle_new_chunk, :handle_last_chunk
+     end
+
+   end
+ end
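The sha1-sharded layout keeps directories small by fanning files out over two levels of two-hex-character folders. A quick usage example of destination_filename, matching the example in its comment:

    FileReceiver.destination_filename('/mnt/hd1/bbbackup',
                                      'd0be2dc421be4fcd0172e5afceea3970e2f3d940')
    # => "/mnt/hd1/bbbackup/d0/be/2d/d0be2dc421be4fcd0172e5afceea3970e2f3d940"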
lib/content_server/queue_copy.rb ADDED
@@ -0,0 +1,203 @@
+ require 'thread'
+
+ require 'content_server/file_streamer'
+ require 'file_indexing/index_agent'
+ require 'log'
+ require 'networking/tcp'
+
+ module BBFS
+   module ContentServer
+     Params.integer('ack_timeout', 5, 'Timeout of ack from backup server in seconds.')
+
+     # Copy message types (documentation only).
+     :ACK_MESSAGE
+     :COPY_MESSAGE
+     :SEND_COPY_MESSAGE
+     :COPY_CHUNK
+     :COPY_CHUNK_FROM_REMOTE
+     :ABORT_COPY  # Asks the sender to abort a file copy.
+     :RESET_RESUME_COPY  # Tells the stream sender to resend a chunk or resume from a different offset.
+
+     # Simple copier: gets input events (files to copy), requests an ack from the backup,
+     # then copies one file.
+     class FileCopyServer
+       def initialize(copy_input_queue, port)
+         # Local simple tcp connection.
+         @backup_tcp = Networking::TCPServer.new(port, method(:receive_message))
+         @copy_input_queue = copy_input_queue
+         # Stores the file source path for each checksum.
+         # TODO(kolman): If there are items in copy_prepare which timeout (don't get ack),
+         # resend the ack request.
+         @copy_prepare = {}
+         @file_streamer = FileStreamer.new(method(:send_chunk))
+       end
+
+       def send_chunk(*arg)
+         @copy_input_queue.push([:COPY_CHUNK, arg])
+       end
+
+       def receive_message(addr_info, message)
+         # Add ack message to copy queue.
+         Log.info("message received: #{message}")
+         @copy_input_queue.push(message)
+       end
+
+       def run()
+         threads = []
+         threads << @backup_tcp.tcp_thread if @backup_tcp != nil
+         threads << Thread.new do
+           while true do
+             Log.info 'Waiting on copy files events.'
+             message_type, message_content = @copy_input_queue.pop
+
+             if message_type == :COPY_MESSAGE
+               Log.info "Copy file event: #{message_content}"
+               # Prepare the source/dest map for the copy.
+               message_content.instances.each { |key, instance|
+                 # If not already sending.
+                 if !@copy_prepare.key?(instance.checksum) || !@copy_prepare[instance.checksum][1]
+                   @copy_prepare[instance.checksum] = [instance.full_path, false]
+                   Log.info("Sending ack for: #{instance.checksum}")
+                   @backup_tcp.send_obj([:ACK_MESSAGE, [instance.checksum, Time.now.to_i]])
+                 end
+               }
+             elsif message_type == :ACK_MESSAGE
+               # Received ack from backup, copy the file if all is good.
+               # Note: the timestamp is from the local content server, not the backup server!
+               timestamp, ack, checksum = message_content
+
+               Log.info("Ack (#{ack}) received for: #{checksum}, timestamp: #{timestamp} " \
+                        "now: #{Time.now.to_i}")
+
+               # Copy the file if acked (it does not exist on the backup and not too much time passed).
+               if ack && (Time.now.to_i - timestamp < Params['ack_timeout'])
+                 if !@copy_prepare.key?(checksum) || @copy_prepare[checksum][1]
+                   Log.warning("File was aborted, copied, or started copy just now: #{checksum}")
+                 else
+                   path = @copy_prepare[checksum][0]
+                   Log.info "Streaming file: #{checksum} #{path}."
+                   @file_streamer.start_streaming(checksum, path)
+                   # Ack received, setting prepare to true.
+                   @copy_prepare[checksum][1] = true
+                 end
+               else
+                 Log.debug1("Ack timed out, span: #{Time.now.to_i - timestamp} > " \
+                            "timeout: #{Params['ack_timeout']}")
+               end
+             elsif message_type == :COPY_CHUNK_FROM_REMOTE
+               checksum = message_content
+               @file_streamer.copy_another_chuck(checksum)
+             elsif message_type == :COPY_CHUNK
+               # We open the message here for printing info and deleting copy_prepare!
+               file_checksum, offset, file_size, content, content_checksum = message_content
+               Log.info("Send chunk for file #{file_checksum}, offset: #{offset} " \
+                        "filesize: #{file_size}.")
+               # Blocking send.
+               @backup_tcp.send_obj([:COPY_CHUNK, message_content])
+               if content.nil? && content_checksum.nil?
+                 @copy_prepare.delete(file_checksum)
+               end
+             elsif message_type == :ABORT_COPY
+               Log.info("Aborting file copy: #{message_content}")
+               if @copy_prepare.key?(message_content)
+                 Log.info("Aborting: #{@copy_prepare[message_content][0]}")
+                 @copy_prepare.delete(message_content)
+               end
+               @file_streamer.abort_streaming(message_content)
+             elsif message_type == :RESET_RESUME_COPY
+               file_checksum, new_offset = message_content
+               Log.info("Resetting/Resuming file (#{file_checksum}) copy to #{new_offset}")
+               @file_streamer.reset_streaming(file_checksum, new_offset)
+             else
+               Log.error("Copy event not supported: #{message_type}")
+             end # handle messages here
+           end
+           Log.error('Should not reach here, loop should continue.')
+         end
+       end
+     end # class FileCopyServer
+
+     class FileCopyClient
+       def initialize(host, port, dynamic_content_data)
+         @local_queue = Queue.new
+         @dynamic_content_data = dynamic_content_data
+         @tcp_server = Networking::TCPClient.new(host, port, method(:handle_message))
+         @file_receiver = FileReceiver.new(method(:done_copy),
+                                           method(:abort_copy),
+                                           method(:reset_copy))
+         @local_thread = Thread.new do
+           loop do
+             handle(@local_queue.pop)
+           end
+         end
+         @local_thread.abort_on_exception = true
+       end
+
+       def threads
+         ret = [@local_thread]
+         ret << @tcp_server.tcp_thread if @tcp_server != nil
+         return ret
+       end
+
+       def request_copy(content_data)
+         handle_message([:SEND_COPY_MESSAGE, content_data])
+       end
+
+       def abort_copy(checksum)
+         handle_message([:ABORT_COPY, checksum])
+       end
+
+       def reset_copy(checksum, new_offset)
+         handle_message([:RESET_RESUME_COPY, [checksum, new_offset]])
+       end
+
+       def done_copy(local_file_checksum, local_path)
+         Log.info("Done copy file: #{local_path}, #{local_file_checksum}")
+       end
+
+       def handle_message(message)
+         Log.debug2('QueueFileReceiver handle message')
+         @local_queue.push(message)
+       end
+
+       # Receives the messages (file or ack) and returns an answer in the ack case.
+       # Note: it is executed from the class thread only!
+       def handle(message)
+         message_type, message_content = message
+         if message_type == :SEND_COPY_MESSAGE
+           Log.debug1('Requesting file (content data) to copy.')
+           Log.debug3("File requested: #{message_content.to_s}")
+           bytes_written = @tcp_server.send_obj([:COPY_MESSAGE, message_content])
+           Log.debug1("Sending copy message succeeded? bytes_written: #{bytes_written}.")
+         elsif message_type == :COPY_CHUNK
+           Log.debug1('Chunk received.')
+           if @file_receiver.receive_chunk(*message_content)
+             file_checksum, offset, file_size, content, content_checksum = message_content
+             @tcp_server.send_obj([:COPY_CHUNK_FROM_REMOTE, file_checksum])
+           end
+         elsif message_type == :ACK_MESSAGE
+           checksum, timestamp = message_content
+           # Here we should check file existence.
+           Log.debug1("Returning ack for: #{checksum}, timestamp: #{timestamp}")
+           Log.debug1("Ack: #{!@dynamic_content_data.exists?(checksum)}")
+           @tcp_server.send_obj([:ACK_MESSAGE, [timestamp,
+                                                !@dynamic_content_data.exists?(checksum),
+                                                checksum]])
+         elsif message_type == :ABORT_COPY
+           @tcp_server.send_obj([:ABORT_COPY, message_content])
+         elsif message_type == :RESET_RESUME_COPY
+           @tcp_server.send_obj([:RESET_RESUME_COPY, message_content])
+         else
+           Log.error("Unexpected message type: #{message_type}")
+         end
+       end
+
+       # Creates the destination filename for the backup server from a base folder and a sha1,
+       # using the same sharding scheme as FileReceiver.destination_filename.
+       # For example: folder: /mnt/hd1/bbbackup, sha1: d0be2dc421be4fcd0172e5afceea3970e2f3d940
+       # dest filename: /mnt/hd1/bbbackup/d0/be/2d/d0be2dc421be4fcd0172e5afceea3970e2f3d940
+       def self.destination_filename(folder, sha1)
+         File.join(folder, sha1[0,2], sha1[2,2], sha1)
+       end
+     end # class FileCopyClient
+   end
+ end
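Putting FileCopyServer and FileCopyClient together, the copy handshake is a sequence of tagged tuples over the TCP link. A sketch of the message flow, derived from the handlers above (checksum, timestamp and sizes are placeholder values):

    # content server -> backup: "do you need this content?"
    [:ACK_MESSAGE, ['d0be2dc4...', 1350000000]]
    # backup -> content server: true means the content is missing on the backup
    [:ACK_MESSAGE, [1350000000, true, 'd0be2dc4...']]
    # content server -> backup: chunks follow; a nil content/checksum pair marks end of file
    [:COPY_CHUNK, ['d0be2dc4...', 0, 56, 'chunk bytes', 'chunk sha1']]
    [:COPY_CHUNK, ['d0be2dc4...', 56, 56, nil, nil]]
    # backup -> content server: request the next chunk
    [:COPY_CHUNK_FROM_REMOTE, 'd0be2dc4...']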
lib/content_server/queue_indexer.rb ADDED
@@ -0,0 +1,110 @@
+ require 'file_indexing/index_agent'
+ require 'file_indexing/indexer_patterns'
+ require 'log'
+
+ module BBFS
+   module ContentServer
+
+     # Simple indexer: gets input events (files to index) and pushes
+     # content data updates onto the output queue.
+     class QueueIndexer
+
+       def initialize(input_queue, output_queue, content_data_path)
+         @input_queue = input_queue
+         @output_queue = output_queue
+         @content_data_path = content_data_path
+       end
+
+       def run
+         server_content_data = ContentData::ContentData.new
+         # Shallow check content data files.
+         tmp_content_data = ContentData::ContentData.new
+         tmp_content_data.from_file(@content_data_path) if File.exists?(@content_data_path)
+         Log.info('Shallow checking content data:')
+         tmp_content_data.instances.each_value do |instance|
+           # Skip instances (files) which did not pass the shallow check.
+           if shallow_check(instance)
+             Log.info("exists: #{instance.full_path}")
+             server_content_data.add_content(tmp_content_data.contents[instance.checksum])
+             server_content_data.add_instance(instance)
+           else
+             Log.info("changed: #{instance.full_path}")
+             # Add non-existing and changed files to the index queue.
+             # Note: events are [state, is_dir, path] triplets, matching the pop below.
+             @input_queue.push([FileMonitoring::FileStatEnum::STABLE, false, instance.full_path])
+           end
+         end
+
+         # Start indexing on demand and write changes to the queue.
+         thread = Thread.new do
+           while true do
+             Log.info 'Waiting on index input queue.'
+             state, is_dir, path = @input_queue.pop
+             Log.info "event: #{state}, #{is_dir}, #{path}."
+
+             # Index files and add them to the copy queue,
+             # delete a directory with its sub files,
+             # or delete a file.
+             if state == FileMonitoring::FileStatEnum::STABLE && !is_dir
+               Log.info "Indexing content #{path}."
+               index_agent = FileIndexing::IndexAgent.new
+               indexer_patterns = FileIndexing::IndexerPatterns.new
+               indexer_patterns.add_pattern(path)
+               index_agent.index(indexer_patterns, server_content_data)
+               Log.info("Failed files: #{index_agent.failed_files.to_a.join(',')}.") \
+                   if !index_agent.failed_files.empty?
+               Log.info("indexed content #{index_agent.indexed_content}.")
+               server_content_data.merge index_agent.indexed_content
+             elsif ((state == FileMonitoring::FileStatEnum::NON_EXISTING ||
+                     state == FileMonitoring::FileStatEnum::CHANGED) && !is_dir)
+               # If the file content changed, we should remove the old instance.
+               key = FileIndexing::IndexAgent.global_path(path)
+               # Check if the deleted file exists in the content data.
+               Log.info("Instance to remove: #{key}")
+               if server_content_data.instances.key?(key)
+                 instance_to_remove = server_content_data.instances[key]
+                 # Remove the file from content data only if it does not pass the shallow
+                 # check, i.e., the content has changed or been removed.
+                 if !shallow_check(instance_to_remove)
+                   content_to_remove = server_content_data.contents[instance_to_remove.checksum]
+                   # Remove the deleted instance.
+                   content_data_to_remove = ContentData::ContentData.new
+                   content_data_to_remove.add_content(content_to_remove)
+                   content_data_to_remove.add_instance(instance_to_remove)
+                   # Remove the file.
+                   server_content_data = ContentData::ContentData.remove_instances(
+                       content_data_to_remove, server_content_data)
+                 end
+               end
+             elsif state == FileMonitoring::FileStatEnum::NON_EXISTING && is_dir
+               Log.info("NonExisting: #{path}")
+               # Remove a directory, but only when it is non-existing.
+               Log.info("Directory to remove: #{path}")
+               global_dir = FileIndexing::IndexAgent.global_path(path)
+               server_content_data = ContentData::ContentData.remove_directory(
+                   server_content_data, global_dir)
+             else
+               Log.info("This case should not be handled: #{state}, #{is_dir}, #{path}.")
+             end
+             # TODO(kolman): Don't write to file on each change?
+             Log.info "Writing server content data to #{@content_data_path}."
+             server_content_data.to_file(@content_data_path)
+
+             Log.info 'Adding server content data to queue.'
+             @output_queue.push(ContentData::ContentData.new(server_content_data))
+           end # while true do
+         end # Thread.new do
+         thread
+       end # def run
+
+       # Checks file existence and compares its size and modification date,
+       # returning true only when they all match the indexed instance.
+       def shallow_check(instance)
+         shallow_instance = FileIndexing::IndexAgent.create_shallow_instance(instance.full_path)
+         return false unless shallow_instance
+         return (shallow_instance.size == instance.size &&
+                 shallow_instance.modification_time == instance.modification_time)
+       end
+
+     end # class QueueIndexer
+   end
+ end
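The shallow check avoids re-hashing every file by comparing cheap stat metadata. A standalone illustration of the same idea (not the IndexAgent implementation; names are hypothetical):

    # Returns true when the file still matches its recorded size and mtime.
    def shallow_same?(path, expected_size, expected_mtime)
      stat = File.stat(path)
      stat.size == expected_size && stat.mtime.to_i == expected_mtime
    rescue Errno::ENOENT
      false
    end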
lib/content_server/remote_content.rb ADDED
@@ -0,0 +1,97 @@
+ require 'thread'
+
+ require 'content_data/dynamic_content_data'
+ require 'log'
+ require 'networking/tcp'
+ require 'params'
+
+ module BBFS
+   module ContentServer
+
+     Params.integer('remote_content_timeout', 10, 'Remote content desired freshness in seconds.')
+     Params.integer('max_content_timeout', 60*60, 'Remote content force refresh in seconds.')
+
+     # TODO(kolman): Use only one tcp/ip socket by utilizing one NQueue for many queues!
+     class RemoteContent
+       def initialize(dynamic_content_data, host, port, local_backup_folder)
+         @dynamic_content_data = dynamic_content_data
+         @remote_tcp = Networking::TCPClient.new(host, port, method(:receive_content))
+         @last_update_timestamp = nil
+         @content_server_content_data_path = File.join(local_backup_folder, 'remote',
+                                                       host + '_' + port.to_s)
+       end
+
+       def receive_content(message)
+         Log.debug1("Remote content data received: #{message.to_s}")
+         ref = @dynamic_content_data.last_content_data
+         @dynamic_content_data.update(message)
+
+         max_time_span = Params['max_content_timeout']
+         if !@last_update_timestamp.nil?
+           max_time_span = Time.now.to_i - @last_update_timestamp
+         end
+
+         @last_update_timestamp = Time.now.to_i
+
+         if ref != message || max_time_span >= Params['max_content_timeout']
+           Log.debug2('Remote content data changed or max time span is large, writing.')
+           Log.debug3("max_time_span: #{max_time_span}")
+           write_to = File.join(@content_server_content_data_path,
+                                @last_update_timestamp.to_s + '.cd')
+           FileUtils.makedirs(@content_server_content_data_path) unless \
+               File.directory?(@content_server_content_data_path)
+           File.open(write_to, 'wb') { |f| f.write(message.to_s) }
+         else
+           Log.debug2('No need to write remote content data, it has not changed.')
+         end
+       end
+
+       def run()
+         threads = []
+         threads << @remote_tcp.tcp_thread if @remote_tcp != nil
+         threads << Thread.new do
+           loop do
+             # Ping the content server when the local copy is no longer fresh.
+             if @last_update_timestamp.nil?
+               elapsed = Params['remote_content_timeout']
+             else
+               elapsed = Time.now.to_i - @last_update_timestamp
+             end
+
+             if elapsed >= Params['remote_content_timeout']
+               # Send ping!
+               Log.debug2('Sending remote content request.')
+               bytes_written = @remote_tcp.send_obj(nil)
+               Log.debug3("Bytes written #{bytes_written}.")
+             end
+
+             # Sleep until the next refresh is due.
+             elapsed = @last_update_timestamp.nil? ? 0 : Time.now.to_i - @last_update_timestamp
+             sleep_time_span = Params['remote_content_timeout'] - elapsed
+             Log.debug2("sleep_time_span: #{sleep_time_span}")
+             sleep(sleep_time_span) if sleep_time_span > 0
+           end
+         end
+       end
+     end
+
+     class RemoteContentClient
+       def initialize(dynamic_content_data, port)
+         @dynamic_content_data = dynamic_content_data
+         @tcp_server = Networking::TCPServer.new(port, method(:content_requested))
+       end
+
+       def content_requested(addr_info, message)
+         # Send response.
+         Log.debug1('Local content data requested.')
+         @tcp_server.send_obj(@dynamic_content_data.last_content_data)
+       end
+
+       def tcp_thread
+         return @tcp_server.tcp_thread if @tcp_server != nil
+         nil
+       end
+
+     end
+
+   end
+ end
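The two classes form a poll/serve pair: RemoteContentClient serves the local content data on a port, while RemoteContent on the other machine polls it and mirrors the result. A hedged sketch of wiring them up (host, port and folder are example values):

    # On the content server: serve local content data.
    local_data = ContentData::DynamicContentData.new
    serving = RemoteContentClient.new(local_data, 3333)

    # On the backup server: poll the content server and mirror its data.
    mirror = ContentData::DynamicContentData.new
    fetcher = RemoteContent.new(mirror, 'content.example.com', 3333, '/mnt/hd1/bbbackup')
    threads = fetcher.run + [serving.tcp_thread]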
lib/content_server/version.rb ADDED
@@ -0,0 +1,5 @@
+ module BBFS
+   module ContentServer
+     VERSION = "0.1.1"
+   end
+ end
spec/content_server/content_server_spec.rb ADDED
@@ -0,0 +1,27 @@
+ require 'rspec'
+
+ require_relative '../../lib/file_copy/copy.rb'
+
+ module BBFS
+   module ContentServer
+     module Spec
+
+       describe 'Backup Listener' do
+
+       end
+
+       describe 'Local file monitor' do
+
+       end
+
+       describe 'Local file indexer' do
+
+       end
+
+       describe 'File copier' do
+
+       end
+
+     end
+   end
+ end
spec/content_server/file_streamer_spec.rb ADDED
@@ -0,0 +1,71 @@
+ require 'log'
+ require 'rspec'
+ require 'stringio'
+
+ require_relative '../../lib/content_server/file_streamer'
+
+ # Set to true to debug the spec.
+ BBFS::Params['log_write_to_console'] = false
+ BBFS::Params['log_write_to_file'] = false
+ BBFS::Params['log_debug_level'] = 0
+ # Has to be small to test the chunking mechanism.
+ BBFS::Params['streaming_chunk_size'] = 5
+ BBFS::Params.init ARGV
+ BBFS::Log.init
+
+ module BBFS
+   module ContentServer
+     module Spec
+       describe 'FileStreamer' do
+         it 'should copy one file chunk by chunk and validate content' do
+           Log.info('#0 start')
+           orig_file = StringIO.new('Some content. Some content. Some content. Some content.')
+           Log.info("orig_file #{orig_file.to_s}.")
+
+           # Simulates a Tempfile object, so add to this object the Tempfile methods
+           # that are absent in StringIO but used in the tested ruby code.
+           dest_file = StringIO.new
+           def dest_file.path
+             '/tmp/path/tmp_basename'
+           end
+           def dest_file.unlink
+             true
+           end
+           Log.info("dest_file #{dest_file.to_s}.")
+
+           streamer = nil
+           done = lambda { |checksum, filename|
+             Log.info('#4 streaming done, check content ok.')
+             dest_file.string().should eq(orig_file.string())
+
+             Log.info('#5 exiting streamer thread.')
+             streamer.thread.exit
+           }
+
+           receiver = BBFS::ContentServer::FileReceiver.new(done)
+           send_chunk = lambda { |*args|
+             receiver.receive_chunk(*args)
+             streamer.copy_another_chuck('da39a3ee5e6b4b0d3255bfef95601890afd80709')
+           }
+
+           Log.info('#2 stub the filesystem calls.')
+           # This is for FileStreamer :NEW_STREAM and FileReceiver :receive_chunk.
+           ::File.stub(:new).and_return(orig_file, dest_file)
+           ::FileUtils.stub(:makedirs).and_return(true)
+           ::FileUtils.stub(:copy_file).and_return(true)
+           # This is for FileReceiver :handle_last_chunk.
+           ::File.stub(:rename)
+           # This is for the index agent's 'get_checksum', which opens the file,
+           # reads its content and validates the checksum.
+           ::File.stub(:open).and_return(dest_file)
+
+           streamer = BBFS::ContentServer::FileStreamer.new(send_chunk)
+           Log.info('#3 start streaming.')
+           streamer.start_streaming('da39a3ee5e6b4b0d3255bfef95601890afd80709', 'dummy')
+           streamer.thread.join()
+           sleep Params['log_param_max_elapsed_time_in_seconds_from_last_flush'] + 1
+         end
+       end
+     end
+   end
+ end
metadata ADDED
@@ -0,0 +1,136 @@
+ --- !ruby/object:Gem::Specification
+ name: cs_fix
+ version: !ruby/object:Gem::Version
+   version: 0.1.1
+   prerelease:
+ platform: x86-mingw32
+ authors:
+ - Gena Petelko, Kolman Vornovitsky
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2012-10-16 00:00:00.000000000Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: content_data
+   requirement: &9385656 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9385656
+ - !ruby/object:Gem::Dependency
+   name: file_indexing
+   requirement: &9385332 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9385332
+ - !ruby/object:Gem::Dependency
+   name: file_monitoring
+   requirement: &9384996 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9384996
+ - !ruby/object:Gem::Dependency
+   name: log
+   requirement: &9384684 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9384684
+ - !ruby/object:Gem::Dependency
+   name: networking
+   requirement: &9384384 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9384384
+ - !ruby/object:Gem::Dependency
+   name: params
+   requirement: &9384084 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9384084
+ - !ruby/object:Gem::Dependency
+   name: rib_fix
+   requirement: &9383748 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ! '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: *9383748
+ description: Monitor and Index a directory and back it up to backup server.
+ email: kolmanv@gmail.com
+ executables:
+ - content_server
+ - backup_server
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - lib/content_server.rb
+ - lib/content_server/content_receiver.rb
+ - lib/content_server/file_streamer.rb
+ - lib/content_server/queue_copy.rb
+ - lib/content_server/queue_indexer.rb
+ - lib/content_server/remote_content.rb
+ - lib/content_server/version.rb
+ - spec/content_server/content_server_spec.rb
+ - spec/content_server/file_streamer_spec.rb
+ - bin/content_server
+ - bin/backup_server
+ homepage: http://github.com/kolmanv/bbfs
+ licenses: []
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 1.8.8
+ signing_key:
+ specification_version: 3
+ summary: Servers for backing up content.
+ test_files:
+ - spec/content_server/content_server_spec.rb
+ - spec/content_server/file_streamer_spec.rb