syncache 1.0.0 → 1.2
- data/COPYING +676 -0
- data/ChangeLog.mtn +183 -0
- data/README.rdoc +21 -14
- data/bin/syncache-drb +204 -0
- data/lib/syncache/remote.rb +89 -0
- data/lib/syncache/syncache.rb +283 -0
- data/lib/syncache.rb +3 -248
- data/man/syncache-drb.1 +44 -0
- data/setup.rb +1360 -0
- data/syncache.gemspec +24 -62
- data/test/tc_remote.rb +86 -0
- data/test/{test_syncache.rb → tc_syncache.rb} +5 -8
- data/test/ts_syncache.rb +15 -0
- metadata +49 -57
- data/.document +0 -5
- data/.gitignore +0 -21
- data/LICENSE +0 -20
- data/Rakefile +0 -63
- data/VERSION +0 -1
- data/lib/syncache_sync_patch.rb +0 -36
- data/test/helper.rb +0 -9
data/lib/syncache/syncache.rb
ADDED
@@ -0,0 +1,283 @@
+# SynCache: thread-safe time-limited cache with flexible replacement policy
+# (originally written for Samizdat project)
+#
+# Copyright (c) 2002-2011 Dmitry Borodaenko <angdraug@debian.org>
+#
+# This program is free software.
+# You can distribute/modify this program under the terms of
+# the GNU General Public License version 3 or later.
+#
+# vim: et sw=2 sts=2 ts=8 tw=0
+
+module SynCache
+
+FOREVER = 60 * 60 * 24 * 365 * 5   # 5 years
+
+class CacheError < RuntimeError; end
+
+class CacheEntry
+  def initialize(ttl = nil, value = nil)
+    @value = value
+    @ttl = ttl
+    @dirty = false
+    record_access
+
+    @sync = Mutex.new
+  end
+
+  # stores the value object
+  attr_accessor :value
+
+  # change this to make the entry expire sooner
+  attr_accessor :ttl
+
+  # use this to synchronize access to +value+
+  attr_reader :sync
+
+  # record the fact that the entry was accessed
+  #
+  def record_access
+    return if @dirty
+    @expires = Time.now + (@ttl or FOREVER)
+  end
+
+  # entries with lowest index will be replaced first
+  #
+  def replacement_index
+    @expires
+  end
+
+  # check if entry is stale
+  #
+  def stale?
+    @expires < Time.now
+  end
+
+  # mark entry as dirty and schedule it to expire at given time
+  #
+  def expire_at(time)
+    @expires = time if @expires > time
+    @dirty = true
+  end
+end
+
+class Cache
+
+  # a float number of seconds to sleep when a race condition is detected
+  # (actual delay is randomized to avoid live lock situation)
+  #
+  LOCK_SLEEP = 0.2
+
+  # _ttl_ (time to live) is time in seconds from the last access until cache
+  # entry is expired (set to _nil_ to disable time limit)
+  #
+  # _max_size_ is max number of objects in cache
+  #
+  # _flush_delay_ is used to rate-limit flush operations: if less than that
+  # number of seconds has passed since last flush, next flush will be delayed;
+  # default is no rate limit
+  #
+  def initialize(ttl = 60*60, max_size = 5000, flush_delay = nil)
+    @ttl = ttl
+    @max_size = max_size
+    @debug = false
+
+    if @flush_delay = flush_delay
+      @last_flush = Time.now
+    end
+
+    @sync = Mutex.new
+    @cache = {}
+  end
+
+  # set to _true_ to report every single cache operation
+  #
+  attr_accessor :debug
+
+  # remove all values from cache
+  #
+  # if _base_ is given, only values with keys matching the base (using
+  # <tt>===</tt> operator) are removed
+  #
+  def flush(base = nil)
+    debug { 'flush ' << base.to_s }
+
+    @sync.synchronize do
+
+      if @flush_delay
+        next_flush = @last_flush + @flush_delay
+
+        if next_flush > Time.now
+          flush_at(next_flush, base)
+        else
+          flush_now(base)
+          @last_flush = Time.now
+        end
+
+      else
+        flush_now(base)
+      end
+    end
+  end
+
+  # remove single value from cache
+  #
+  def delete(key)
+    debug { 'delete ' << key.to_s }
+
+    @sync.synchronize do
+      @cache.delete(key)
+    end
+  end
+
+  # store new value in cache
+  #
+  # see also Cache#fetch_or_add
+  #
+  def []=(key, value)
+    debug { '[]= ' << key.to_s }
+
+    entry = get_locked_entry(key)
+    begin
+      return entry.value = value
+    ensure
+      entry.sync.unlock
+    end
+  end
+
+  # retrieve value from cache if it's still fresh
+  #
+  # see also Cache#fetch_or_add
+  #
+  def [](key)
+    debug { '[] ' << key.to_s }
+
+    entry = get_locked_entry(key, false)
+    unless entry.nil?
+      begin
+        return entry.value
+      ensure
+        entry.sync.unlock
+      end
+    end
+  end
+
+  # initialize missing cache entry from supplied block
+  #
+  # this is the preferred method of adding values to the cache as it locks the
+  # key for the duration of computation of the supplied block to prevent
+  # parallel execution of resource-intensive actions
+  #
+  def fetch_or_add(key)
+    debug { 'fetch_or_add ' << key.to_s }
+
+    entry = get_locked_entry(key)
+    begin
+      if entry.value.nil?
+        entry.value = yield
+      end
+      return entry.value
+    ensure
+      entry.sync.unlock
+    end
+  end
+
+  private
+
+  # immediate flush (delete all entries matching _base_)
+  #
+  # must be run from inside global lock, see #flush
+  #
+  def flush_now(base = nil)
+    if base
+      @cache.delete_if {|key, entry| base === key }
+    else
+      @cache = {}
+    end
+  end
+
+  # delayed flush (ensure all entries matching _base_ expire no later than _next_flush_)
+  #
+  # must be run from inside global lock, see #flush
+  #
+  def flush_at(next_flush, base = nil)
+    @cache.each do |key, entry|
+      next if base and not base === key
+      entry.expire_at(next_flush)
+    end
+  end
+
+  def add_blank_entry(key)
+    @sync.locked? or raise CacheError,
+      'add_entry called while @sync is not locked'
+
+    had_same_key = @cache.has_key?(key)
+    entry = @cache[key] = CacheEntry.new(@ttl)
+    check_size unless had_same_key
+    entry
+  end
+
+  def get_locked_entry(key, add_if_missing=true)
+    debug { "get_locked_entry #{key}, #{add_if_missing}" }
+
+    entry = nil   # scope fix
+    entry_locked = false
+    until entry_locked do
+      @sync.synchronize do
+        entry = @cache[key]
+
+        if entry.nil? or entry.stale?
+          if add_if_missing
+            entry = add_blank_entry(key)
+          else
+            @cache.delete(key) unless entry.nil?
+            return nil
+          end
+        end
+
+        entry_locked = entry.sync.try_lock
+      end
+      sleep(rand * LOCK_SLEEP) unless entry_locked
+    end
+
+    entry.record_access
+    entry
+  end
+
+  # remove oldest item from cache if size limit reached
+  #
+  def check_size
+    debug { 'check_size' }
+
+    return unless @max_size.kind_of? Numeric
+
+    if @sync.locked?
+      check_size_internal
+    else
+      @sync.synchronize { check_size_internal }
+    end
+  end
+
+  def check_size_internal
+    while @cache.size > @max_size do
+      # optimize: supplement hash with queue
+      oldest = @cache.keys.min {|a, b| @cache[a].replacement_index <=> @cache[b].replacement_index }
+      @cache.delete(oldest)
+    end
+  end
+
+  # send debug output to syslog if enabled
+  #
+  def debug
+    return unless @debug
+    message = Thread.current.to_s + ' ' + yield
+    if defined?(Syslog) and Syslog.opened?
+      Syslog.debug(message)
+    else
+      STDERR << 'syncache: ' + message + "\n"
+      STDERR.flush
+    end
+  end
+end
+
+end # module SynCache
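For orientation, here is a short usage sketch of the `SynCache::Cache` API added above; the key names and block body are made up for illustration, but the constructor arguments and the `fetch_or_add`, `[]=`, `[]`, `flush` and `debug` calls are the ones defined in the new file.

```ruby
require 'syncache'

# 10 minute TTL, at most 1000 entries, no flush rate limit
cache = SynCache::Cache.new(600, 1000)

# preferred way to populate the cache: the entry stays locked while the
# block runs, so concurrent callers wait instead of recomputing
page = cache.fetch_or_add('page:/index') do
  'expensive render result'   # placeholder for a slow computation
end

cache['counter'] = 1     # plain store
cache['counter']         # => 1, or nil once the entry goes stale

cache.flush(/\Apage:/)   # drop only keys matching the base (compared with ===)
cache.flush              # drop everything
```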
data/lib/syncache.rb
CHANGED
@@ -1,7 +1,7 @@
 # SynCache: thread-safe time-limited cache with flexible replacement policy
 # (originally written for Samizdat project)
 #
-# Copyright (c) 2002-
+# Copyright (c) 2002-2011 Dmitry Borodaenko <angdraug@debian.org>
 #
 # This program is free software.
 # You can distribute/modify this program under the terms of
@@ -9,250 +9,5 @@
 #
 # vim: et sw=2 sts=2 ts=8 tw=0

-require '
-require '
-
-module SynCache
-
-FOREVER = 60 * 60 * 24 * 365 * 5   # 5 years
-
-class CacheEntry
-  def initialize(ttl = nil, value = nil)
-    @value = value
-    @ttl = ttl
-    @dirty = false
-    record_access
-
-    @sync = Sync.new
-  end
-
-  # stores the value object
-  attr_accessor :value
-
-  # change this to make the entry expire sooner
-  attr_accessor :ttl
-
-  # use this to synchronize access to +value+
-  attr_reader :sync
-
-  # record the fact that the entry was accessed
-  #
-  def record_access
-    return if @dirty
-    @expires = Time.now + (@ttl or FOREVER)
-  end
-
-  # entries with lowest index will be replaced first
-  #
-  def replacement_index
-    @expires
-  end
-
-  # check if entry is stale
-  #
-  def stale?
-    @expires < Time.now
-  end
-
-  # mark entry as dirty and schedule it to expire at given time
-  #
-  def expire_at(time)
-    @expires = time if @expires > time
-    @dirty = true
-  end
-end
-
-class Cache
-
-  # set to _true_ to report every single cache operation to syslog
-  #
-  DEBUG = false
-
-  # a float number of seconds to sleep when a race condition is detected
-  # (actual delay is randomized to avoid live lock situation)
-  #
-  LOCK_SLEEP = 0.2
-
-  # _ttl_ (time to live) is time in seconds from the last access until cache
-  # entry is expired (set to _nil_ to disable time limit)
-  #
-  # _max_size_ is max number of objects in cache
-  #
-  # _flush_delay_ is used to rate-limit flush operations: if less than that
-  # number of seconds has passed since last flush, next flush will be delayed;
-  # default is no rate limit
-  #
-  def initialize(ttl = 60*60, max_size = 5000, flush_delay = nil)
-    @ttl = ttl
-    @max_size = max_size
-
-    if @flush_delay = flush_delay
-      @last_flush = Time.now
-    end
-
-    @sync = Sync.new
-    @cache = {}
-  end
-
-  # remove all values from cache
-  #
-  # if _base_ is given, only values with keys matching the base (using
-  # <tt>===</tt> operator) are removed
-  #
-  def flush(base = nil)
-    debug('flush ' << base.to_s)
-
-    @sync.synchronize do
-
-      if @flush_delay
-        next_flush = @last_flush + @flush_delay
-
-        if next_flush > Time.now
-          flush_at(next_flush, base)
-        else
-          flush_now(base)
-          @last_flush = Time.now
-        end
-
-      else
-        flush_now(base)
-      end
-    end
-  end
-
-  # remove single value from cache
-  #
-  def delete(key)
-    debug('delete ' << key.to_s)
-
-    @sync.synchronize do
-      @cache.delete(key)
-    end
-  end
-
-  # store new value in cache
-  #
-  # see also Cache#fetch_or_add
-  #
-  def []=(key, value)
-    debug('[]= ' << key.to_s)
-
-    entry = get_entry(key)
-    entry.sync.synchronize do
-      entry.value = value
-    end
-    value
-  end
-
-  # retrieve value from cache if it's still fresh
-  #
-  # see also Cache#fetch_or_add
-  #
-  def [](key)
-    debug('[] ' << key.to_s)
-
-    entry = get_entry(key)
-    entry.sync.synchronize(:SH) do
-      entry.value
-    end
-  end
-
-  # initialize missing cache entry from supplied block
-  #
-  # this is the preferred method of adding values to the cache as it locks the
-  # key for the duration of computation of the supplied block to prevent
-  # parallel execution of resource-intensive actions
-  #
-  def fetch_or_add(key)
-    debug('fetch_or_add ' << key.to_s)
-
-    entry = nil   # scope fix
-    entry_locked = false
-    until entry_locked do
-      @sync.synchronize do
-        entry = get_entry(key)
-        entry_locked = entry.sync.try_lock   # fixme
-      end
-      sleep(rand * LOCK_SLEEP) unless entry_locked
-    end
-
-    begin
-      entry.record_access
-      entry.value ||= yield
-    ensure
-      entry.sync.unlock
-    end
-  end
-
-  private
-
-  # immediate flush (delete all entries matching _base_)
-  #
-  # must be run from inside global lock, see #flush
-  #
-  def flush_now(base = nil)
-    if base
-      @cache.delete_if {|key, entry| base === key }
-    else
-      @cache = {}
-    end
-  end
-
-  # delayed flush (ensure all entries matching _base_ expire no later than _next_flush_)
-  #
-  # must be run from inside global lock, see #flush
-  #
-  def flush_at(next_flush, base = nil)
-    @cache.each do |key, entry|
-      next if base and not base === key
-      entry.expire_at(next_flush)
-    end
-  end
-
-  def get_entry(key)
-    debug('get_entry ' << key.to_s)
-
-    @sync.synchronize do
-      entry = @cache[key]
-
-      if entry.kind_of?(CacheEntry)
-        if entry.stale?
-          @cache[key] = entry = CacheEntry.new(@ttl)
-        end
-      else
-        @cache[key] = entry = CacheEntry.new(@ttl)
-        check_size
-      end
-
-      entry.record_access
-      entry
-    end
-  end
-
-  # remove oldest item from cache if size limit reached
-  #
-  def check_size
-    debug('check_size')
-
-    return unless @max_size.kind_of? Numeric
-
-    @sync.synchronize do
-      while @cache.size > @max_size do
-        # optimize: supplement hash with queue
-        oldest = @cache.keys.min {|a, b| @cache[a].replacement_index <=> @cache[b].replacement_index }
-
-        @cache.delete(oldest)
-      end
-    end
-  end
-
-  # send debug output to syslog if enabled
-  #
-  def debug(message)
-    if DEBUG and defined?(Syslog) and Syslog.opened?
-      Syslog.debug(Thread.current.to_s << ' ' << message)
-    end
-  end
-end
-
-end # module SynCache
+require 'syncache/syncache'
+require 'syncache/remote'
data/man/syncache-drb.1
ADDED
@@ -0,0 +1,44 @@
+.TH "SYNCACHE-DRB" "1"
+.SH "NAME"
+syncache-drb - SynCache dRuby object cache server
+.SH "SYNOPSIS"
+.PP
+\fBsyncache-drb\fP [ \fBoptions\fP ] [ \fBURI\fP ]
+.SH "DESCRIPTION"
+.PP
+\fBsyncache-drb\fP starts a Distributed Ruby server providing a
+SynCache::Cache object.
+.PP
+SynCache::Cache is a thread-safe time-limited object cache with flexible
+replacement strategy.
+.SH "OPTIONS"
+.IP "\fBURI\fP" 4
+A URI with druby: schema that the DRb server binds to, default is
+\fBdruby://localhost:9000\fP
+.IP "\fB--help\fP" 4
+Display usage information and quit.
+.IP "\fB--ttl\fP SECONDS" 4
+Time-to-live value for cache entries, default is 24 hours.
+.IP "\fB--size\fP ENTRIES" 4
+Maximum number of objects in cache, default is 10000.
+.IP "\fB--flush-delay\fP SECONDS" 4
+Rate-limit flush operations. If less than that number of seconds has passed
+since last flush, next flush will be delayed. Default is no rate limit.
+.IP "\fB--user\fP USER" 4
+Run as USER if started as root. Default is nobody.
+.IP "\fB--error-log\fP ERROR_LOG_PATH" 4
+File to write errors to. Default is /dev/null. When run as root,
+the file is chowned to USER:adm.
+.IP "\fB--debug\fP" 4
+Enable debug mode. If an error log is specified with --error-log, all
+messages will be sent there instead of syslog.
+.IP "\fB--pidfile\fP PATH" 4
+Path to pidfile. By default, pidfile is created under /var/run/syncache-drb/
+when run as root, or under $TMPDIR otherwise. Location should be writeable by
+USER.
+
+.SH "AUTHOR"
+.PP
+This manual page was written by Dmitry Borodaenko <angdraug@debian.org>.
+Permission is granted to copy, distribute and/or modify this document
+under the terms of the GNU GPL version 3 or later.
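The man page above describes the server side only. Below is a minimal client sketch; it assumes a syncache-drb instance is already listening on the default URI and that the DRb front object it exports responds to the Cache methods shown earlier (the gem also ships a SynCache::RemoteCache wrapper in data/lib/syncache/remote.rb, whose contents are not included in this diff).

```ruby
require 'drb'

# client-side service needed for DRb callbacks and remote references
DRb.start_service

# connect to a running syncache-drb server (default URI from the man page)
cache = DRbObject.new_with_uri('druby://localhost:9000')

# assuming the front object is the shared SynCache::Cache, remote calls
# mirror the local API
cache['session:42'] = { 'user' => 'alice' }
puts cache['session:42'].inspect
cache.flush('session:42')   # base is compared with ===, so an exact key works too
```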