myobie-starling 0.9.10
Sign up to get free protection for your applications and to get access to all the features.
- data/CHANGELOG +56 -0
- data/LICENSE +20 -0
- data/README.rdoc +106 -0
- data/Rakefile +56 -0
- data/bin/starling +6 -0
- data/bin/starling_top +57 -0
- data/etc/sample-config.yml +9 -0
- data/etc/starling.redhat +66 -0
- data/etc/starling.ubuntu +71 -0
- data/lib/starling.rb +164 -0
- data/lib/starling/handler.rb +237 -0
- data/lib/starling/persistent_queue.rb +156 -0
- data/lib/starling/queue_collection.rb +147 -0
- data/lib/starling/server.rb +125 -0
- data/lib/starling/server_runner.rb +317 -0
- data/spec/starling_server_spec.rb +216 -0
- metadata +107 -0
data/lib/starling.rb
ADDED
@@ -0,0 +1,164 @@
|
|
1
|
+
require 'memcache'
|
2
|
+
|
3
|
+
class Starling < MemCache

  # Polling interval (seconds) used by the blocking #get and between
  # #set retries.
  WAIT_TIME = 0.25

  # Keep handles on MemCache's own implementations so the blocking #get
  # and the cluster-wide #delete below can still reach them.
  alias_method :_original_get, :get
  alias_method :_original_delete, :delete

  ##
  # fetch an item from a queue.
  #
  # Blocks indefinitely: polls every WAIT_TIME seconds until a non-nil
  # item arrives. Use #fetch for the non-blocking variant.

  def get(*args)
    loop do
      response = _original_get(*args)
      return response unless response.nil?
      sleep WAIT_TIME
    end
  end

  ##
  # will return the next item or nil

  def fetch(*args)
    _original_get(*args)
  end

  ##
  # Delete the key (queue) from all Starling servers. This is necessary
  # because the random way a server is chosen in #get_server_for_key
  # implies that the queue could easily be spread across the entire
  # Starling cluster.

  def delete(key, expiry = 0)
    with_servers do
      _original_delete(key, expiry)
    end
  end

  ##
  # Provides a way to work with a specific list of servers by
  # forcing all calls to #get_server_for_key to use a specific
  # server, and changing that server each time that the call
  # yields to the block provided. This helps work around the
  # normally random nature of the #get_server_for_key method.
  #
  # Acquires the mutex for the entire duration of the call
  # since unrelated calls to #get_server_for_key might be
  # adversely affected by the non_random result.
  def with_servers(my_servers = @servers.dup)
    return unless block_given?
    with_lock do
      my_servers.each do |server|
        @force_server = server
        yield
      end
      # Restore normal server selection once every server has been visited.
      @force_server = nil
    end
  end

  ##
  # insert +value+ into +queue+.
  #
  # +expiry+ is expressed as a UNIX timestamp
  #
  # If +raw+ is true, +value+ will not be Marshalled. If +raw+ = :yaml, +value+
  # will be serialized with YAML, instead.
  #
  # Retries up to 3 times on MemCache::MemCacheError (sleeping WAIT_TIME
  # between attempts) before re-raising the last error.
  # NOTE(review): assumes YAML has been required by the caller when
  # +raw+ = :yaml is used — it is not required in this file.

  def set(queue, value, expiry = 0, raw = false)
    retries = 0
    begin
      if raw == :yaml
        value = YAML.dump(value)
        raw = true
      end

      super(queue, value, expiry, raw)
    rescue MemCache::MemCacheError => e
      retries += 1
      sleep WAIT_TIME
      retry unless retries > 3
      raise e
    end
  end

  ##
  # returns the number of items in +queue+. If +queue+ is +:all+, a hash of all
  # queue sizes will be returned.
  #
  # +statistics+ may be passed in to avoid re-fetching server stats on
  # the recursive calls below.

  def sizeof(queue, statistics = nil)
    statistics ||= stats

    if queue == :all
      queue_sizes = {}
      # NOTE(review): the block parameter shadows the +queue+ argument;
      # harmless here, but worth renaming.
      available_queues(statistics).each do |queue|
        queue_sizes[queue] = sizeof(queue, statistics)
      end
      return queue_sizes
    end

    # Sum this queue's item count across every server's stats hash.
    statistics.inject(0) { |m,(k,v)| m + v["queue_#{queue}_items"].to_i }
  end

  ##
  # returns a list of available (currently allocated) queues.

  def available_queues(statistics = nil)
    statistics ||= stats

    # Collect every stat key from every server, keep the queue_*_items
    # counters, strip the prefix/suffix, then drop the aggregate
    # *_total / *_expired pseudo-queues.
    statistics.map { |k,v|
      v.keys
    }.flatten.uniq.grep(/^queue_(.*)_items/).map { |v|
      v.gsub(/^queue_/, '').gsub(/_items$/, '')
    }.reject { |v|
      v =~ /_total$/ || v =~ /_expired$/
    }
  end

  ##
  # iterator to flush +queue+. Each element will be passed to the provided
  # +block+
  #
  # NOTE(review): uses the blocking #get, so it relies on the queue size
  # snapshot taken up front; concurrent consumers could make this block.

  def flush(queue)
    sizeof(queue).times do
      v = get(queue)
      yield v if block_given?
    end
  end

  private

  ##
  # Chooses the server for +key+. Honors @force_server (set by
  # #with_servers); otherwise returns the first live server rather than
  # hashing the key, so a client sticks to one server.
  def get_server_for_key(key)
    raise ArgumentError, "illegal character in key #{key.inspect}" if key =~ /\s/
    raise ArgumentError, "key too long #{key.inspect}" if key.length > 250
    raise MemCacheError, "No servers available" if @servers.empty?
    return @force_server if @force_server

    @servers.each do |server|
      return server if server.alive?
    end

    raise MemCacheError, "No servers available (all dead)"
  end
end
|
144
|
+
|
145
|
+
|
146
|
+
class MemCache

  protected

  ##
  # Ensure that everything within the given block is executed
  # within the locked mutex if this client is multithreaded.
  # If the client isn't multithreaded, the block is simply executed.
  def with_lock
    return unless block_given?
    if @multithread
      @mutex.synchronize { yield }
    else
      yield
    end
  end

end
|
@@ -0,0 +1,237 @@
|
|
1
|
+
module StarlingServer

  ##
  # This is an internal class that's used by Starling::Server to handle the
  # MemCache protocol and act as an interface between the Server and the
  # QueueCollection.

  class Handler < EventMachine::Connection

    # Wire format of a stored item: 32-bit native-endian expiry timestamp
    # followed by the raw payload bytes ("I" + "a*").
    DATA_PACK_FMT = "Ia*".freeze

    # ERROR responses
    ERR_UNKNOWN_COMMAND = "CLIENT_ERROR bad command line format\r\n".freeze

    # GET Responses
    GET_COMMAND = /\Aget (.{1,250})\s*\r\n/m
    GET_RESPONSE = "VALUE %s %s %s\r\n%s\r\nEND\r\n".freeze
    GET_RESPONSE_EMPTY = "END\r\n".freeze

    # SET Responses
    SET_COMMAND = /\Aset (.{1,250}) ([0-9]+) ([0-9]+) ([0-9]+)\r\n/m
    SET_RESPONSE_SUCCESS = "STORED\r\n".freeze
    SET_RESPONSE_FAILURE = "NOT STORED\r\n".freeze
    SET_CLIENT_DATA_ERROR = "CLIENT_ERROR bad data chunk\r\nERROR\r\n".freeze

    # DELETE Responses
    DELETE_COMMAND = /\Adelete (.{1,250}) ([0-9]+)\r\n/m
    DELETE_RESPONSE = "END\r\n".freeze

    # STAT Response
    STATS_COMMAND = /\Astats\r\n/m
    # Multi-line template; continuation lines are intentionally unindented
    # since any leading whitespace would become part of the response.
    STATS_RESPONSE = "STAT pid %d\r
STAT uptime %d\r
STAT time %d\r
STAT version %s\r
STAT rusage_user %0.6f\r
STAT rusage_system %0.6f\r
STAT curr_items %d\r
STAT total_items %d\r
STAT bytes %d\r
STAT curr_connections %d\r
STAT total_connections %d\r
STAT cmd_get %d\r
STAT cmd_set %d\r
STAT get_hits %d\r
STAT get_misses %d\r
STAT bytes_read %d\r
STAT bytes_written %d\r
STAT limit_maxbytes %d\r
%sEND\r\n".freeze
    QUEUE_STATS_RESPONSE = "STAT queue_%s_items %d\r
STAT queue_%s_total_items %d\r
STAT queue_%s_logsize %d\r
STAT queue_%s_expired_items %d\r
STAT queue_%s_age %d\r\n".freeze

    SHUTDOWN_COMMAND = /\Ashutdown\r\n/m

    QUIT_COMMAND = /\Aquit\r\n/m

    # NOTE(review): class variable (shared across any subclasses); a class
    # instance variable would be safer, though it works for this one class.
    @@next_session_id = 1

    ##
    # Creates a new handler for the MemCache protocol that communicates with a
    # given client.
    #
    # +options+ is stashed until EventMachine calls #post_init. Keys read
    # below: :server, :timeout, :queue.

    def initialize(options = {})
      @opts = options
    end

    ##
    # Process incoming commands from the attached client.

    def post_init
      @stash = []
      @data = ""
      @data_buf = ""
      @server = @opts[:server]
      @logger = StarlingServer::Base.logger
      @expiry_stats = Hash.new(0)
      # Non-nil while we are mid-SET, waiting for a data chunk of this length.
      @expected_length = nil
      @server.stats[:total_connections] += 1
      set_comm_inactivity_timeout @opts[:timeout]
      @queue_collection = @opts[:queue]

      @session_id = @@next_session_id
      @@next_session_id += 1

      peer = Socket.unpack_sockaddr_in(get_peername)
      #@logger.debug "(#{@session_id}) New session from #{peer[1]}:#{peer[0]}"
    end

    # Buffers incoming bytes and processes every complete CRLF-terminated
    # line accumulated so far.
    #
    # NOTE(review): only the response to the *last* processed line is sent;
    # if a client pipelines several commands in one packet, earlier
    # responses are silently dropped — confirm this is acceptable.
    def receive_data(incoming)
      @server.stats[:bytes_read] += incoming.size
      @data << incoming

      while data = @data.slice!(/.*?\r\n/m)
        response = process(data)
      end

      send_data response if response
    end

    # Dispatches a single protocol line (or a pending SET data chunk).
    # Returns the response string, or nil when no reply is due yet.
    def process(data)
      data = @data_buf + data if @data_buf.size > 0
      # our only non-normal state is consuming an object's data
      # when @expected_length is present
      if @expected_length && data.size == @expected_length
        response = set_data(data)
        @data_buf = ""
        return response
      elsif @expected_length
        # Still short of the promised length: keep accumulating.
        @data_buf = data
        return
      end
      case data
      when SET_COMMAND
        @server.stats[:set_requests] += 1
        set($1, $2, $3, $4.to_i)
      when GET_COMMAND
        @server.stats[:get_requests] += 1
        get($1)
      when STATS_COMMAND
        stats
      when SHUTDOWN_COMMAND
        # no point in responding, they'll never get it.
        Runner::shutdown
      when DELETE_COMMAND
        delete $1
      when QUIT_COMMAND
        # ignore the command, client is closing connection.
        return nil
      else
        logger.warn "Unknown command: #{data}."
        respond ERR_UNKNOWN_COMMAND
      end
    rescue => e
      # Any failure is answered with an empty GET response so the client
      # is not left hanging.
      logger.error "Error handling request: #{e}."
      logger.debug e.backtrace.join("\n")
      respond GET_RESPONSE_EMPTY
    end

    def unbind
      #@logger.debug "(#{@session_id}) connection ends"
    end

    private

    # DELETE: drop the queue and acknowledge with END.
    def delete(queue)
      @queue_collection.delete(queue)
      respond DELETE_RESPONSE
    end

    # Formats +str+ with +args+, accounts the outgoing bytes, and returns
    # the response string (#receive_data actually sends it).
    def respond(str, *args)
      response = sprintf(str, *args)
      @server.stats[:bytes_written] += response.length
      response
    end

    # SET phase one: stash key/flags/expiry and arm @expected_length so
    # #process treats the next +len+ bytes (plus CRLF) as the data chunk.
    # Returns nil — the reply is produced once the chunk arrives.
    def set(key, flags, expiry, len)
      @expected_length = len + 2
      @stash = [ key, flags, expiry ]
      nil
    end

    # SET phase two: strip the trailing CRLF, pack expiry + payload into
    # the internal envelope, and hand it to the queue collection.
    def set_data(incoming)
      key, flags, expiry = @stash
      data = incoming.slice(0...@expected_length-2)
      @stash = []
      @expected_length = nil

      internal_data = [expiry.to_i, data].pack(DATA_PACK_FMT)
      if @queue_collection.put(key, internal_data)
        respond SET_RESPONSE_SUCCESS
      else
        respond SET_RESPONSE_EMPTY = nil # (unreachable placeholder — see below)
      end
    end

    # GET: pop items until one is unexpired (expiry 0 = never expires),
    # counting expired items in @expiry_stats along the way.
    def get(key)
      now = Time.now.to_i

      while response = @queue_collection.take(key)
        expiry, data = response.unpack(DATA_PACK_FMT)

        break if expiry == 0 || expiry >= now

        @expiry_stats[key] += 1
        expiry, data = nil
      end

      if data
        respond GET_RESPONSE, key, 0, data.size, data
      else
        respond GET_RESPONSE_EMPTY
      end
    end

    # STATS: render the server-wide counters followed by per-queue stats.
    # NOTE(review): @server.stats is called with a key here but used as a
    # hash elsewhere — assumes Server#stats accepts an optional key; verify.
    def stats
      respond STATS_RESPONSE,
        Process.pid, # pid
        Time.now - @server.stats(:start_time), # uptime
        Time.now.to_i, # time
        StarlingServer::VERSION, # version
        Process.times.utime, # rusage_user
        Process.times.stime, # rusage_system
        @queue_collection.stats(:current_size), # curr_items
        @queue_collection.stats(:total_items), # total_items
        @queue_collection.stats(:current_bytes), # bytes
        @server.stats(:connections), # curr_connections
        @server.stats(:total_connections), # total_connections
        @server.stats(:get_requests), # get count
        @server.stats(:set_requests), # set count
        @queue_collection.stats(:get_hits),
        @queue_collection.stats(:get_misses),
        @server.stats(:bytes_read), # total bytes read
        @server.stats(:bytes_written), # total bytes written
        0, # limit_maxbytes
        queue_stats
    end

    # Builds the per-queue portion of the stats response.
    def queue_stats
      @queue_collection.queues.inject("") do |m,(k,v)|
        m + sprintf(QUEUE_STATS_RESPONSE,
                      k, v.length,
                      k, v.total_items,
                      k, v.logsize,
                      k, @expiry_stats[k],
                      k, v.current_age)
      end
    end

    def logger
      @logger
    end
  end
end
|
@@ -0,0 +1,156 @@
|
|
1
|
+
module StarlingServer

  ##
  # PersistentQueue is a subclass of Ruby's thread-safe Queue class. It adds a
  # transactional log to the in-memory Queue, which enables quickly rebuilding
  # the Queue in the event of a sever outage.

  class PersistentQueue < Queue

    ##
    # When a log reaches the SOFT_LOG_MAX_SIZE, the Queue will wait until
    # it is empty, and will then rotate the log file.

    SOFT_LOG_MAX_SIZE = 16 * (1024**2) # 16 MB

    # One-byte opcodes recognized when replaying the transaction log.
    TRX_CMD_PUSH = "\000".freeze
    TRX_CMD_POP = "\001".freeze

    # Serialized record formats written to the log:
    # push = opcode + 4-byte length + payload; pop = bare opcode.
    TRX_PUSH = "\000%s%s".freeze
    TRX_POP = "\001".freeze

    # payload bytes recovered from the log at startup
    attr_reader :initial_bytes
    # number of items ever pushed to this queue
    attr_reader :total_items
    # current size of the transaction log in bytes
    attr_reader :logsize
    # age (ms) of the most recently popped item
    attr_reader :current_age

    ##
    # Create a new PersistentQueue at +persistence_path+/+queue_name+.
    # If a queue log exists at that path, the Queue will be loaded from
    # disk before being available for use.

    def initialize(persistence_path, queue_name, debug = false)
      @persistence_path = persistence_path
      @queue_name = queue_name
      @total_items = 0
      super()
      @initial_bytes = replay_transaction_log(debug)
      @current_age = 0
    end

    ##
    # Pushes +value+ to the queue. By default, +push+ will write to the
    # transactional log. Set +log_trx=false+ to override this behaviour.
    #
    # NOTE(review): NoTransactionLog is defined elsewhere in the gem
    # (see queue_collection.rb in the file listing) — confirm it is
    # loaded before this class is used.

    def push(value, log_trx = true)
      if log_trx
        raise NoTransactionLog unless @trx
        size = [value.size].pack("I")
        transaction sprintf(TRX_PUSH, size, value)
      end

      @total_items += 1
      # Store the enqueue time alongside the value so #pop can compute age.
      super([now_usec, value])
    end

    ##
    # Retrieves data from the queue.
    #
    # When +log_trx+ is false (replay mode) the underlying Queue#pop is
    # called non-blocking, so an empty queue raises ThreadError (handled
    # below with a warning and an empty placeholder item).

    def pop(log_trx = true)
      raise NoTransactionLog if log_trx && !@trx

      begin
        rv = super(!log_trx)
      rescue ThreadError
        puts "WARNING: The queue was empty when trying to pop(). Technically this shouldn't ever happen. Probably a bug in the transactional underpinnings. Or maybe shutdown didn't happen cleanly at some point. Ignoring."
        rv = [now_usec, '']
      end
      transaction "\001" if log_trx
      @current_age = (now_usec - rv[0]) / 1000
      rv[1]
    end

    ##
    # Safely closes the transactional queue.

    def close
      # Ok, yeah, this is lame, and is *technically* a race condition. HOWEVER,
      # the QueueCollection *should* have stopped processing requests, and I don't
      # want to add yet another Mutex around all the push and pop methods. So we
      # do the next simplest thing, and minimize the time we'll stick around before
      # @trx is nil.
      @not_trx = @trx
      @trx = nil
      @not_trx.close
    end

    # Closes the queue and removes its transaction log from disk.
    def purge
      close
      File.delete(log_path)
    end

    private

    def log_path #:nodoc:
      File.join(@persistence_path, @queue_name)
    end

    # (Re)opens the on-disk log, creating it if needed, and records its size.
    def reopen_log #:nodoc:
      @trx = File.new(log_path, File::CREAT|File::RDWR)
      @logsize = File.size(log_path)
    end

    # Rotates once the queue has drained: move the current log aside, start
    # a fresh one, then delete the backup (its contents are fully consumed).
    def rotate_log #:nodoc:
      @trx.close
      backup_logfile = "#{log_path}.#{Time.now.to_i}"
      File.rename(log_path, backup_logfile)
      reopen_log
      File.unlink(backup_logfile)
    end

    # Replays the on-disk log through push/pop (with logging disabled) to
    # rebuild in-memory state. Returns the net payload bytes recovered.
    def replay_transaction_log(debug) #:nodoc:
      reopen_log
      bytes_read = 0

      print "Reading back transaction log for #{@queue_name} " if debug

      while !@trx.eof?
        cmd = @trx.read(1)
        case cmd
        when TRX_CMD_PUSH
          print ">" if debug
          raw_size = @trx.read(4)
          # A truncated record (e.g. interrupted write) is skipped silently.
          next unless raw_size
          size = raw_size.unpack("I").first
          data = @trx.read(size)
          next unless data
          push(data, false)
          bytes_read += data.size
        when TRX_CMD_POP
          print "<" if debug
          bytes_read -= pop(false).size
        else
          puts "Error reading transaction log: " +
            "I don't understand '#{cmd}' (skipping)." if debug
        end
      end

      print " done.\n" if debug

      return bytes_read
    end

    # Appends +data+ to the log (non-blocking write) and rotates when the
    # log is oversized and the in-memory queue is empty.
    def transaction(data) #:nodoc:
      raise "no transaction log handle. that totally sucks." unless @trx

      @trx.write_nonblock data
      @logsize += data.size
      rotate_log if @logsize > SOFT_LOG_MAX_SIZE && self.length == 0
    end

    # Current wall-clock time in microseconds.
    def now_usec
      now = Time.now
      now.to_i * 1000000 + now.usec
    end
  end
end
|