lumberjack_mongo_device 1.0.0
Sign up to get free protection for your applications and to get access to all the features.
- data/MIT_LICENSE +20 -0
- data/README.rdoc +17 -0
- data/Rakefile +56 -0
- data/VERSION +1 -0
- data/lib/lumberjack_mongo_device.rb +145 -0
- data/spec/lumberjack_mongo_device_spec.rb +221 -0
- data/spec/spec_helper.rb +40 -0
- data/spec/tmp/db/mongod.lock +0 -0
- data/spec/tmp/db/test.0 +0 -0
- data/spec/tmp/db/test.1 +0 -0
- data/spec/tmp/db/test.ns +0 -0
- data/spec/tmp/mongo.log +253 -0
- data/spec/tmp/mongo.pid +0 -0
- metadata +112 -0
data/MIT_LICENSE
ADDED
@@ -0,0 +1,20 @@
Copyright (c) 2011 Brian Durand

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.rdoc
ADDED
@@ -0,0 +1,17 @@
= Lumberjack Mongo Device

This gem provides a logging device for the lumberjack gem that will log to a MongoDB collection.

MongoDB can make a good destination for centralized log data because it is fast, clients don't need to wait for confirmation that log messages are persisted, and it has a concept of capped collections which are fixed in size and will simply roll over and start writing from the beginning when they are full.

== Example Usage

  require 'lumberjack_mongo_device'

  # Connect to mongodb on localhost:27017 and use the "log" collection in the "app" database.
  # The log collection will be created as a capped collection with a maximum size of 1GB.
  device = Lumberjack::MongoDevice.new(:db => "app", :collection => "log", :size => 1024 ** 3)
  logger = Lumberjack::Logger.new(device)
  logger.info("Write me to MongoDB!")

See MongoDevice for more details.
data/Rakefile
ADDED
@@ -0,0 +1,56 @@
require 'rubygems'
require 'rake'
require 'rake/gempackagetask'
require 'rake/rdoctask'

desc 'Default: run unit tests.'
task :default => :test

desc 'RVM likes to call it tests'
task :tests => :test

# Define the :test task with rspec's rake integration when rspec 2 is
# available; otherwise fall back to a stub task that explains what is missing.
begin
  require 'rspec'
  require 'rspec/core/rake_task'
  desc 'Run the unit tests'
  RSpec::Core::RakeTask.new(:test)
rescue LoadError
  task :test do
    STDERR.puts "You must have rspec 2.0 installed to run the tests"
  end
end

desc 'Generate rdoc.'
Rake::RDocTask.new(:rdoc) do |rdoc|
  rdoc.rdoc_dir = 'rdoc'
  rdoc.options << '--title' << 'Lumberjack Mongo Device' << '--line-numbers' << '--inline-source' << '--main' << 'README.rdoc'
  rdoc.rdoc_files.include('README.rdoc')
  rdoc.rdoc_files.include('lib/**/*.rb')
end

# Rubinius leaves compiled *.rbc files next to the sources; remove them so
# they are not accidentally packaged into the gem.
namespace :rbx do
  desc "Cleanup *.rbc files in lib directory"
  task :delete_rbc_files do
    FileList["lib/**/*.rbc"].each do |rbc_file|
      File.delete(rbc_file)
    end
    nil
  end
end

# Packaging/release tasks are only defined when the gemspec file is present
# (it is not shipped inside the packaged gem itself).
spec_file = File.expand_path('../lumberjack_mongo_device.gemspec', __FILE__)
if File.exist?(spec_file)
  spec = eval(File.read(spec_file))

  Rake::GemPackageTask.new(spec) do |p|
    p.gem_spec = spec
  end
  # Make sure stray .rbc files are cleaned up before the gem is built.
  Rake.application["package"].prerequisites.unshift("rbx:delete_rbc_files")

  desc "Release to rubygems.org"
  task :release => :package do
    require 'rake/gemcutter'
    Rake::Gemcutter::Tasks.new(spec).define
    Rake::Task['gem:push'].invoke
  end
end
data/VERSION
ADDED
@@ -0,0 +1 @@
|
|
1
|
+
1.0.0
|
@@ -0,0 +1,145 @@
require 'mongo'
require 'lumberjack'

module Lumberjack
  # Write Lumberjack log entries to a MongoDB collection.
  #
  # Log entries will be stored as documents in a collection with fields for:
  #
  # * time
  # * severity (as a string i.e. "DEBUG")
  # * progname
  # * pid
  # * unit_of_work_id
  # * message
  class MongoDevice < Device
    # Document field names used when persisting log entries.
    TIME = "time"
    SEVERITY = "severity"
    PROGNAME = "progname"
    PID = "pid"
    UNIT_OF_WORK_ID = "unit_of_work_id"
    MESSAGE = "message"

    # Number of entries buffered before an automatic flush.
    DEFAULT_BUFFER_SIZE = 50

    # Get the MongoDB collection that is being written to.
    attr_reader :collection

    # The size of the internal buffer. Log entries are buffered so they can be sent to MongoDB in batches for efficiency.
    attr_accessor :buffer_size

    # Initialize the device by passing in either a Mongo::Collection object or a hash of options
    # to create the collection. Available options are:
    #
    # * <tt>:host</tt> - The host name to connect to (defaults to localhost).
    # * <tt>:port</tt> - The port to connect to (defaults to 27017).
    # * <tt>:db</tt> - The database name to use (required).
    # * <tt>:collection</tt> - The collection name to use (required).
    # * <tt>:username</tt> - The username to authenticate with for database connections (optional).
    # * <tt>:password</tt> - The password to authenticate with for database connections (optional).
    # * <tt>:max</tt> - If the collection does not aleady exist it will be capped at this number of records.
    # * <tt>:size</tt> - If the collection does not aleady exist it will be capped at this size in bytes.
    # * <tt>:buffer_size</tt> - The number of entries that will be buffered before they are sent to MongoDB.
    #
    # If the collection does not already exist, it will be created. If either the <tt>:max</tt> or <tt>:size</tt>
    # options are provided, it will be created as a capped collection. Indexes will be created on +unit_of_work_id+
    # and +time+.
    def initialize(collection_or_options, options = nil)
      if collection_or_options.is_a?(Hash)
        options = collection_or_options.dup
        host = options.delete(:host)
        port = options.delete(:port)
        db_name = options.delete(:db)
        collection = options.delete(:collection)
        username = options.delete(:username)
        password = options.delete(:password)
        max = options.delete(:max)
        size = options.delete(:size)

        @buffer_size = options.delete(:buffer_size) || DEFAULT_BUFFER_SIZE

        # Any options not consumed above (e.g. :safe) are passed straight
        # through to the Mongo connection.
        connection = Mongo::Connection.new(host, port, options)
        db = connection.db(db_name)
        db.authenticate(username, password) if username && password
        if db.collections.collect{|coll| coll.name}.include?(collection.to_s)
          @collection = db.collection(collection)
        else
          begin
            @collection = db.create_collection(collection, :capped => (max || size), :max => max, :size => size)
            @collection.ensure_index(:time)
            @collection.ensure_index(:unit_of_work_id)
          rescue Mongo::OperationFailure
            # Create collection can fail if multiple processes try to create it at once.
            @collection = db.collection(collection)
            raise unless @collection
          end
        end
      else
        @collection = collection_or_options
        @buffer_size = options[:buffer_size] if options
        @buffer_size ||= DEFAULT_BUFFER_SIZE
      end

      @buffer = []
      @lock = Mutex.new
    end

    # Buffer the entry; the buffer is automatically flushed to MongoDB once it
    # reaches +buffer_size+ entries.
    def write(entry)
      @lock.synchronize do
        @buffer << entry
      end
      # NOTE: the size check is deliberately outside the lock (flush acquires
      # the same mutex). Worst case two threads both call flush; that is safe
      # because flush clears the buffer under the lock.
      flush if @buffer.size >= @buffer_size
    end

    # Send all buffered entries to MongoDB in a single batch insert. If the
    # insert fails, the error and the buffered entries are dumped to STDERR
    # instead of raising, so a logging outage cannot crash the application.
    # The buffer is always cleared.
    def flush
      docs = []
      @lock.synchronize do
        @buffer.each do |entry|
          docs << {:time => entry.time, :severity => entry.severity_label, :progname => entry.progname, :pid => entry.pid, :unit_of_work_id => entry.unit_of_work_id, :message => entry.message}
        end
        begin
          @collection.insert(docs)
        rescue => e
          # Report the failure and dump the lost entries to STDERR. (Leftover
          # debug `puts` calls that duplicated this output on STDOUT have been
          # removed; a newline terminates the error line so entries start on
          # their own lines.)
          $stderr.write("#{e.class.name}: #{e.message}#{' at ' + e.backtrace.first if e.backtrace}\n")
          @buffer.each do |entry|
            $stderr.puts(entry.to_s)
          end
          $stderr.flush
        ensure
          @buffer.clear
        end
      end
    end

    # Flush any buffered entries and close the underlying MongoDB connection.
    def close
      flush
      @lock.synchronize do
        @collection.db.connection.close
      end
    end

    # Retrieve Lumberjack::LogEntry objects from the MongoDB collection. If a block is given, it will be yielded to
    # with each entry. Otherwise, it will return an array of all the entries.
    def find(selector, options = {}, &block)
      entries = []
      @collection.find(selector, options) do |cursor|
        cursor.each do |doc|
          entry = LogEntry.new(doc[TIME], doc[SEVERITY], doc[MESSAGE], doc[PROGNAME], doc[PID], doc[UNIT_OF_WORK_ID])
          if block_given?
            yield entry
          else
            entries << entry
          end
        end
      end
      block_given? ? nil : entries
    end

    # Retrieve the last entries from the log, oldest first.
    def last(number_of_entries = 1)
      # Sort by _id descending (insertion order) to grab the newest documents,
      # then reverse so callers see them in chronological order.
      find(nil, :sort => [:_id, :descending], :limit => number_of_entries).reverse
    end
  end
end
@@ -0,0 +1,221 @@
require 'spec_helper'

# Integration specs for Lumberjack::MongoDevice. A real mongod instance is
# forked for the duration of the suite (see spec_helper), and the "log"
# collection in the "test" database is dropped after each example.
describe Lumberjack::MongoDevice do

  before :all do
    start_mongo
  end

  after :all do
    stop_mongo
  end

  after :each do
    collection.drop
    collection.db.connection.close
  end

  # Fixture times and log entries reused across examples.
  let(:time_1){ Time.parse("2011-02-01T18:32:31Z") }
  let(:time_2){ Time.parse("2011-02-01T18:32:32Z") }
  let(:time_3){ Time.parse("2011-02-01T18:32:33Z") }
  let(:entry_1){ Lumberjack::LogEntry.new(time_1, Lumberjack::Severity::INFO, "message 1", "test", 12345, "ABCD") }
  let(:entry_2){ Lumberjack::LogEntry.new(time_2, Lumberjack::Severity::WARN, "message 2", "spec", 4321, "1234") }
  let(:entry_3){ Lumberjack::LogEntry.new(time_3, Lumberjack::Severity::ERROR, "message 3", "test", 12345, "ABCD") }

  let(:db){ Mongo::Connection.new.db("test") }
  let(:collection){ db.collection("log") }

  it "should use an existing collection" do
    device = Lumberjack::MongoDevice.new(collection)
    device.write(entry_1)
    device.flush
    collection.count.should == 1
  end

  it "should connect to a specified database collection on a specified host and port" do
    connection = db.connection
    # Force the lazy `collection` let to evaluate before Mongo::Connection.new is stubbed.
    collection
    Mongo::Connection.should_receive(:new).with("127.0.0.1", 12345, :safe => true).and_return(connection)
    connection.should_receive(:db).with("test").and_return(db)
    db.should_receive(:create_collection).with("log", :capped => nil, :size => nil, :max => nil).and_return(collection)
    device = Lumberjack::MongoDevice.new(:host => "127.0.0.1", :port => 12345, :safe => true, :db => "test", :collection => "log")
  end

  it "should connect to a server and authenticate to use the database if :username and :password are specified" do
    connection = db.connection
    collection
    Mongo::Connection.should_receive(:new).and_return(connection)
    connection.should_receive(:db).with("test").and_return(db)
    db.should_receive(:authenticate).with("user", "pass")
    device = Lumberjack::MongoDevice.new(:username => "user", :password => "pass", :db => "test", :collection => "log")
  end

  it "should connect to a server and create a new collection if it doesn't exist" do
    db.collections.collect{|c| c.name}.should_not include("log")
    device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log")
    db.collections.collect{|c| c.name}.should include("log")
    # The device should create indexes on time and unit_of_work_id (plus the implicit _id index).
    collection.index_information.collect{|k, v| v["key"].collect{|k1, v1| k1}.join}.sort.should == ["_id", "time", "unit_of_work_id"]
  end

  it "should connect to a server and create a new capped collection if it doesn't exist and :size is specified" do
    db.collections.collect{|c| c.name}.should_not include("log")
    device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log", :size => 32768)
    db.collections.collect{|c| c.name}.should include("log")
    # NOTE(review): `.should` with no matcher asserts nothing — this line is a no-op.
    collection.options["capped"].should
    collection.options["size"].should == 32768
  end

  it "should connect to a server and create a new capped collection if it doesn't exist and :max is specified" do
    db.collections.collect{|c| c.name}.should_not include("log")
    device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log", :max => 1000)
    db.collections.collect{|c| c.name}.should include("log")
    # NOTE(review): `.should` with no matcher asserts nothing — this line is a no-op.
    collection.options["capped"].should
    collection.options["max"].should == 1000
  end

  it "should connect to a server and use an existing collection if it exists" do
    # Pre-create the collection (via create_index) so the device finds it
    # and does not apply the capped options or create its own indexes.
    collection.create_index(:pid)
    db.collections.collect{|c| c.name}.should include("log")
    device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log", :max => 1000)
    db.collections.collect{|c| c.name}.should include("log")
    collection.options.should == nil
    collection.index_information.collect{|k, v| v["key"].collect{|k1, v1| k1}.join}.sort.should == ["_id", "pid"]
  end

  it "should write entries to the collection" do
    device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log")
    device.write(entry_1)
    device.write(entry_2)
    device.write(entry_3)
    device.flush
    collection.count.should == 3
    doc = collection.find_one(:message => "message 1")
    doc["time"].should == time_1
    doc["severity"].should == "INFO"
    doc["progname"].should == entry_1.progname
    doc["pid"].should == entry_1.pid
    doc["unit_of_work_id"].should == entry_1.unit_of_work_id
    doc["message"].should == entry_1.message
  end

  it "should close and flush a connection" do
    device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log")
    connection = device.collection.db.connection
    connection.should_receive(:close)
    device.should_receive(:flush)
    device.close
  end

  context "buffering" do
    it "should buffer log entries and insert them in batches" do
      device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log")
      device.write(entry_1)
      device.write(entry_2)
      device.write(entry_3)
      collection.count.should == 0
      device.flush
      collection.count.should == 3
    end

    it "should be able to control the buffer size" do
      device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log")
      device.buffer_size = 2
      device.write(entry_1)
      collection.count.should == 0
      device.write(entry_2)
      collection.count.should == 2
      device.write(entry_3)
      collection.count.should == 2
      device.flush
      collection.count.should == 3
    end

    it "should set the buffer size on initialize with options" do
      device = Lumberjack::MongoDevice.new(:db => "test", :collection => "log", :buffer_size => 2)
      device.buffer_size = 2
      device.write(entry_1)
      collection.count.should == 0
      device.write(entry_2)
      collection.count.should == 2
      device.write(entry_3)
      collection.count.should == 2
      device.flush
      collection.count.should == 3
    end

    it "should set the buffer size on initialize with a collection" do
      device = Lumberjack::MongoDevice.new(collection, :buffer_size => 2)
      device.buffer_size = 2
      device.write(entry_1)
      collection.count.should == 0
      device.write(entry_2)
      collection.count.should == 2
      device.write(entry_3)
      collection.count.should == 2
      device.flush
      collection.count.should == 3
    end

    # NOTE(review): pending example with no body (and a stray quote in the
    # description) — the STDERR fallback path in MongoDevice#flush is untested.
    it "should output to standard error if the buffer can't be written'" do
    end
  end

  context "finding" do
    let(:device){ Lumberjack::MongoDevice.new(:db => "test", :collection => "log") }

    before :each do
      device.write(entry_1)
      device.write(entry_2)
      device.write(entry_3)
      device.flush
    end

    it "should find entries and yield them to a block" do
      entries = []
      device.find({:progname => "test"}, :sort => :time) do |entry|
        entries << entry
      end

      entries.size.should == 2

      entries.first.time.should == entry_1.time
      entries.first.severity.should == entry_1.severity
      entries.first.progname.should == entry_1.progname
      entries.first.pid.should == entry_1.pid
      entries.first.unit_of_work_id.should == entry_1.unit_of_work_id
      entries.first.message.should == entry_1.message

      entries.last.time.should == entry_3.time
      entries.last.severity.should == entry_3.severity
      entries.last.progname.should == entry_3.progname
      entries.last.pid.should == entry_3.pid
      entries.last.unit_of_work_id.should == entry_3.unit_of_work_id
      entries.last.message.should == entry_3.message
    end

    it "should find entries and return them as an array" do
      entries = device.find({:progname => "test"}, :sort => :time)

      entries.size.should == 2

      entries.first.time.should == entry_1.time
      entries.first.severity.should == entry_1.severity
      entries.first.progname.should == entry_1.progname
      entries.first.pid.should == entry_1.pid
      entries.first.unit_of_work_id.should == entry_1.unit_of_work_id
      entries.first.message.should == entry_1.message

      entries.last.time.should == entry_3.time
      entries.last.severity.should == entry_3.severity
      entries.last.progname.should == entry_3.progname
      entries.last.pid.should == entry_3.pid
      entries.last.unit_of_work_id.should == entry_3.unit_of_work_id
      entries.last.message.should == entry_3.message
    end

    it "should find the last entries in the log" do
      device.last.collect{|e| e.message}.should == [entry_3.message]
      device.last(2).collect{|e| e.message}.should == [entry_2.message, entry_3.message]
    end
  end
end
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,40 @@
require 'rubygems'
require 'fileutils'

# Allow testing against a specific version of the mongo driver by setting
# the MONGO_VERSION environment variable.
if ENV["MONGO_VERSION"]
  gem "mongo", ENV["MONGO_VERSION"]
else
  gem "mongo"
end

# Directory containing the mongod binary; defaults to wherever `which` finds it.
MONGO_PATH = ENV["MONGO_PATH"] || File.dirname(`which mongod`.chomp)

require File.expand_path("../../lib/lumberjack_mongo_device.rb", __FILE__)

# Scratch directory holding the test database files, mongod log, and pid file.
def tmp_dir
  File.expand_path("../tmp", __FILE__)
end

# Fork a fresh mongod process for the test suite, wiping any previous scratch
# data first. Raises if the server is not accepting connections within 10 seconds.
def start_mongo
  FileUtils.rm_r(tmp_dir) if File.exist?(tmp_dir)
  FileUtils.mkdir_p(File.join(tmp_dir, "db"))
  `'#{MONGO_PATH}/mongod' --logpath '#{tmp_dir}/mongo.log' --pidfilepath '#{tmp_dir}/mongo.pid' --dbpath '#{tmp_dir}/db' --nohttpinterface --fork`

  # Ensure server is accepting connections before returning.
  stop_time = Time.now + 10
  loop do
    raise "Mongo server failed to start up in 10 seconds" if Time.now >= stop_time
    begin
      connection = Mongo::Connection.new
      connection.close
      break
    rescue Mongo::ConnectionFailure
      sleep(0.1)
    end
  end
end

# Shut down the forked mongod by sending it SIGTERM.
def stop_mongo
  pid = File.read("#{tmp_dir}/mongo.pid").chomp
  # Signal the process directly instead of shelling out to `kill`, so a bad
  # pid raises instead of being silently ignored by the shell.
  Process.kill("TERM", pid.to_i) unless pid.empty?
end
File without changes
|
data/spec/tmp/db/test.0
ADDED
Binary file
|
data/spec/tmp/db/test.1
ADDED
Binary file
|
data/spec/tmp/db/test.ns
ADDED
Binary file
|
data/spec/tmp/mongo.log
ADDED
@@ -0,0 +1,253 @@
|
|
1
|
+
Wed Feb 9 12:38:46 MongoDB starting : pid=62193 port=27017 dbpath=/Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db 64-bit
|
2
|
+
Wed Feb 9 12:38:46 db version v1.6.5, pdfile version 4.5
|
3
|
+
Wed Feb 9 12:38:46 git version: 0eb017e9b2828155a67c5612183337b89e12e291
|
4
|
+
Wed Feb 9 12:38:46 sys info: Darwin erh2.10gen.cc 9.6.0 Darwin Kernel Version 9.6.0: Mon Nov 24 17:37:00 PST 2008; root:xnu-1228.9.59~1/RELEASE_I386 i386 BOOST_LIB_VERSION=1_40
|
5
|
+
Wed Feb 9 12:38:46 [initandlisten] waiting for connections on port 27017
|
6
|
+
Wed Feb 9 12:38:46 [initandlisten] connection accepted from 127.0.0.1:62975 #1
|
7
|
+
Wed Feb 9 12:38:46 [conn1] end connection 127.0.0.1:62975
|
8
|
+
Wed Feb 9 12:38:46 [initandlisten] connection accepted from 127.0.0.1:62978 #2
|
9
|
+
Wed Feb 9 12:38:46 [conn2] end connection 127.0.0.1:62978
|
10
|
+
Wed Feb 9 12:38:46 [initandlisten] connection accepted from 127.0.0.1:62981 #3
|
11
|
+
Wed Feb 9 12:38:46 [conn3] end connection 127.0.0.1:62981
|
12
|
+
Wed Feb 9 12:38:46 [initandlisten] connection accepted from 127.0.0.1:62984 #4
|
13
|
+
Wed Feb 9 12:38:46 allocating new datafile /Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db/test.ns, filling with zeroes...
|
14
|
+
Wed Feb 9 12:38:46 done allocating datafile /Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db/test.ns, size: 16MB, took 0.035 secs
|
15
|
+
Wed Feb 9 12:38:47 allocating new datafile /Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db/test.0, filling with zeroes...
|
16
|
+
Wed Feb 9 12:38:47 done allocating datafile /Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db/test.0, size: 64MB, took 0.361 secs
|
17
|
+
Wed Feb 9 12:38:50 allocating new datafile /Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db/test.1, filling with zeroes...
|
18
|
+
Wed Feb 9 12:38:50 [conn4] building new index on { _id: 1 } for test.log
|
19
|
+
Wed Feb 9 12:38:50 [conn4] done for 0 records 0.022secs
|
20
|
+
Wed Feb 9 12:38:50 [conn4] insert test.log 3795ms
|
21
|
+
Wed Feb 9 12:38:50 [conn4] CMD: drop test.log
|
22
|
+
Wed Feb 9 12:38:50 [conn4] end connection 127.0.0.1:62984
|
23
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:62987 #5
|
24
|
+
Wed Feb 9 12:38:50 [conn5] end connection 127.0.0.1:62987
|
25
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:62990 #6
|
26
|
+
Wed Feb 9 12:38:50 [conn6] building new index on { _id: 1 } for test.log
|
27
|
+
Wed Feb 9 12:38:50 [conn6] done for 0 records 0secs
|
28
|
+
Wed Feb 9 12:38:50 [conn6] info: creating collection test.log on add index
|
29
|
+
building new index on { time: 1 } for test.log
|
30
|
+
Wed Feb 9 12:38:50 [conn6] done for 0 records 0secs
|
31
|
+
Wed Feb 9 12:38:50 [conn6] building new index on { unit_of_work_id: 1 } for test.log
|
32
|
+
Wed Feb 9 12:38:50 [conn6] done for 0 records 0.001secs
|
33
|
+
Wed Feb 9 12:38:50 [conn6] CMD: drop test.log
|
34
|
+
Wed Feb 9 12:38:50 [conn6] end connection 127.0.0.1:62990
|
35
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:62993 #7
|
36
|
+
Wed Feb 9 12:38:50 [conn7] end connection 127.0.0.1:62993
|
37
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:62996 #8
|
38
|
+
Wed Feb 9 12:38:50 [conn8] building new index on { _id: 1 } for test.log
|
39
|
+
Wed Feb 9 12:38:50 [conn8] done for 0 records 0secs
|
40
|
+
Wed Feb 9 12:38:50 [conn8] building new index on { time: 1 } for test.log
|
41
|
+
Wed Feb 9 12:38:50 [conn8] done for 0 records 0secs
|
42
|
+
Wed Feb 9 12:38:50 [conn8] building new index on { unit_of_work_id: 1 } for test.log
|
43
|
+
Wed Feb 9 12:38:50 [conn8] done for 0 records 0secs
|
44
|
+
Wed Feb 9 12:38:50 [conn8] CMD: drop test.log
|
45
|
+
Wed Feb 9 12:38:50 [conn8] end connection 127.0.0.1:62996
|
46
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:62999 #9
|
47
|
+
Wed Feb 9 12:38:50 [conn9] end connection 127.0.0.1:62999
|
48
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63002 #10
|
49
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63005 #11
|
50
|
+
Wed Feb 9 12:38:50 [conn11] end connection 127.0.0.1:63005
|
51
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63008 #12
|
52
|
+
Wed Feb 9 12:38:50 [conn12] building new index on { _id: 1 } for test.log
|
53
|
+
Wed Feb 9 12:38:50 [conn12] done for 0 records 0secs
|
54
|
+
Wed Feb 9 12:38:50 [conn12] building new index on { time: 1 } for test.log
|
55
|
+
Wed Feb 9 12:38:50 [conn12] done for 0 records 0secs
|
56
|
+
Wed Feb 9 12:38:50 [conn12] building new index on { unit_of_work_id: 1 } for test.log
|
57
|
+
Wed Feb 9 12:38:50 [conn12] done for 0 records 0secs
|
58
|
+
Wed Feb 9 12:38:50 done allocating datafile /Users/bdurand/dev/projects/lumberjack_mongo_device/spec/tmp/db/test.1, size: 128MB, took 0.108 secs
|
59
|
+
Wed Feb 9 12:38:50 [conn10] CMD: drop test.log
|
60
|
+
Wed Feb 9 12:38:50 [conn10] end connection 127.0.0.1:63002
|
61
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63011 #13
|
62
|
+
Wed Feb 9 12:38:50 [conn13] end connection 127.0.0.1:63011
|
63
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63014 #14
|
64
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63017 #15
|
65
|
+
Wed Feb 9 12:38:50 [conn15] end connection 127.0.0.1:63017
|
66
|
+
Wed Feb 9 12:38:50 [initandlisten] connection accepted from 127.0.0.1:63020 #16
|
67
|
+
Wed Feb 9 12:38:51 [conn16] building new index on { _id: 1 } for test.log
|
68
|
+
Wed Feb 9 12:38:51 [conn16] done for 0 records 0secs
|
69
|
+
Wed Feb 9 12:38:51 [conn16] building new index on { time: 1 } for test.log
|
70
|
+
Wed Feb 9 12:38:51 [conn16] done for 0 records 0secs
|
71
|
+
Wed Feb 9 12:38:51 [conn16] building new index on { unit_of_work_id: 1 } for test.log
|
72
|
+
Wed Feb 9 12:38:51 [conn16] done for 0 records 0secs
|
73
|
+
Wed Feb 9 12:38:51 [conn14] CMD: drop test.log
|
74
|
+
Wed Feb 9 12:38:51 [conn14] end connection 127.0.0.1:63014
|
75
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63023 #17
|
76
|
+
Wed Feb 9 12:38:51 [conn17] end connection 127.0.0.1:63023
|
77
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63026 #18
|
78
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63029 #19
|
79
|
+
Wed Feb 9 12:38:51 [conn19] end connection 127.0.0.1:63029
|
80
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63032 #20
|
81
|
+
Wed Feb 9 12:38:51 [conn20] building new index on { _id: 1 } for test.log
|
82
|
+
Wed Feb 9 12:38:51 [conn20] done for 0 records 0secs
|
83
|
+
Wed Feb 9 12:38:51 [conn20] building new index on { time: 1 } for test.log
|
84
|
+
Wed Feb 9 12:38:51 [conn20] done for 0 records 0secs
|
85
|
+
Wed Feb 9 12:38:51 [conn20] building new index on { unit_of_work_id: 1 } for test.log
|
86
|
+
Wed Feb 9 12:38:51 [conn20] done for 0 records 0secs
|
87
|
+
Wed Feb 9 12:38:51 [conn18] CMD: drop test.log
|
88
|
+
Wed Feb 9 12:38:51 [conn18] end connection 127.0.0.1:63026
|
89
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63035 #21
|
90
|
+
Wed Feb 9 12:38:51 [conn21] end connection 127.0.0.1:63035
|
91
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63038 #22
|
92
|
+
Wed Feb 9 12:38:51 [conn22] building new index on { _id: 1 } for test.log
|
93
|
+
Wed Feb 9 12:38:51 [conn22] done for 0 records 0secs
|
94
|
+
Wed Feb 9 12:38:51 [conn22] info: creating collection test.log on add index
|
95
|
+
building new index on { pid: 1 } for test.log
|
96
|
+
Wed Feb 9 12:38:51 [conn22] done for 0 records 0secs
|
97
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63041 #23
|
98
|
+
Wed Feb 9 12:38:51 [conn23] end connection 127.0.0.1:63041
|
99
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63044 #24
|
100
|
+
Wed Feb 9 12:38:51 [conn22] CMD: drop test.log
|
101
|
+
Wed Feb 9 12:38:51 [conn22] end connection 127.0.0.1:63038
|
102
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63047 #25
|
103
|
+
Wed Feb 9 12:38:51 [conn25] end connection 127.0.0.1:63047
|
104
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63050 #26
|
105
|
+
Wed Feb 9 12:38:51 [conn26] building new index on { _id: 1 } for test.log
|
106
|
+
Wed Feb 9 12:38:51 [conn26] done for 0 records 0secs
|
107
|
+
Wed Feb 9 12:38:51 [conn26] building new index on { time: 1 } for test.log
|
108
|
+
Wed Feb 9 12:38:51 [conn26] done for 0 records 0secs
|
109
|
+
Wed Feb 9 12:38:51 [conn26] building new index on { unit_of_work_id: 1 } for test.log
|
110
|
+
Wed Feb 9 12:38:51 [conn26] done for 0 records 0secs
|
111
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63053 #27
|
112
|
+
Wed Feb 9 12:38:51 [conn27] end connection 127.0.0.1:63053
|
113
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63056 #28
|
114
|
+
Wed Feb 9 12:38:51 [conn28] query test.log ntoreturn:1 reslen:168 nscanned:1 { message: "message 1" } nreturned:1 131ms
|
115
|
+
Wed Feb 9 12:38:51 [conn28] CMD: drop test.log
|
116
|
+
Wed Feb 9 12:38:51 [conn28] end connection 127.0.0.1:63056
|
117
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63059 #29
|
118
|
+
Wed Feb 9 12:38:51 [conn29] end connection 127.0.0.1:63059
|
119
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63062 #30
|
120
|
+
Wed Feb 9 12:38:51 [conn30] building new index on { _id: 1 } for test.log
|
121
|
+
Wed Feb 9 12:38:51 [conn30] done for 0 records 0secs
|
122
|
+
Wed Feb 9 12:38:51 [conn30] building new index on { time: 1 } for test.log
|
123
|
+
Wed Feb 9 12:38:51 [conn30] done for 0 records 0secs
|
124
|
+
Wed Feb 9 12:38:51 [conn30] building new index on { unit_of_work_id: 1 } for test.log
|
125
|
+
Wed Feb 9 12:38:51 [conn30] done for 0 records 0secs
|
126
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63065 #31
|
127
|
+
Wed Feb 9 12:38:51 [conn31] end connection 127.0.0.1:63065
|
128
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63068 #32
|
129
|
+
Wed Feb 9 12:38:51 [conn32] CMD: drop test.log
|
130
|
+
Wed Feb 9 12:38:51 [conn32] end connection 127.0.0.1:63068
|
131
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63071 #33
|
132
|
+
Wed Feb 9 12:38:51 [conn33] end connection 127.0.0.1:63071
|
133
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63074 #34
|
134
|
+
Wed Feb 9 12:38:51 [conn34] building new index on { _id: 1 } for test.log
|
135
|
+
Wed Feb 9 12:38:51 [conn34] done for 0 records 0secs
|
136
|
+
Wed Feb 9 12:38:51 [conn34] building new index on { time: 1 } for test.log
|
137
|
+
Wed Feb 9 12:38:51 [conn34] done for 0 records 0secs
|
138
|
+
Wed Feb 9 12:38:51 [conn34] building new index on { unit_of_work_id: 1 } for test.log
|
139
|
+
Wed Feb 9 12:38:51 [conn34] done for 0 records 0secs
|
140
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63077 #35
|
141
|
+
Wed Feb 9 12:38:51 [conn35] end connection 127.0.0.1:63077
|
142
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63080 #36
|
143
|
+
Wed Feb 9 12:38:51 [conn36] CMD: drop test.log
|
144
|
+
Wed Feb 9 12:38:51 [conn36] end connection 127.0.0.1:63080
|
145
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63083 #37
|
146
|
+
Wed Feb 9 12:38:51 [conn37] end connection 127.0.0.1:63083
|
147
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63086 #38
|
148
|
+
Wed Feb 9 12:38:51 [conn38] building new index on { _id: 1 } for test.log
|
149
|
+
Wed Feb 9 12:38:51 [conn38] done for 0 records 0secs
|
150
|
+
Wed Feb 9 12:38:51 [conn38] building new index on { time: 1 } for test.log
|
151
|
+
Wed Feb 9 12:38:51 [conn38] done for 0 records 0secs
|
152
|
+
Wed Feb 9 12:38:51 [conn38] building new index on { unit_of_work_id: 1 } for test.log
|
153
|
+
Wed Feb 9 12:38:51 [conn38] done for 0 records 0secs
|
154
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63089 #39
|
155
|
+
Wed Feb 9 12:38:51 [conn39] end connection 127.0.0.1:63089
|
156
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63092 #40
|
157
|
+
Wed Feb 9 12:38:51 [conn40] CMD: drop test.log
|
158
|
+
Wed Feb 9 12:38:51 [conn40] end connection 127.0.0.1:63092
|
159
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63095 #41
|
160
|
+
Wed Feb 9 12:38:51 [conn41] end connection 127.0.0.1:63095
|
161
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63098 #42
|
162
|
+
Wed Feb 9 12:38:51 [conn42] building new index on { _id: 1 } for test.log
|
163
|
+
Wed Feb 9 12:38:51 [conn42] done for 0 records 0secs
|
164
|
+
Wed Feb 9 12:38:51 [conn42] building new index on { time: 1 } for test.log
|
165
|
+
Wed Feb 9 12:38:51 [conn42] done for 0 records 0secs
|
166
|
+
Wed Feb 9 12:38:51 [conn42] building new index on { unit_of_work_id: 1 } for test.log
|
167
|
+
Wed Feb 9 12:38:51 [conn42] done for 0 records 0secs
|
168
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63101 #43
|
169
|
+
Wed Feb 9 12:38:51 [conn43] end connection 127.0.0.1:63101
|
170
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63104 #44
|
171
|
+
Wed Feb 9 12:38:51 [conn44] CMD: drop test.log
|
172
|
+
Wed Feb 9 12:38:51 [conn44] end connection 127.0.0.1:63104
|
173
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63107 #45
|
174
|
+
Wed Feb 9 12:38:51 [conn45] end connection 127.0.0.1:63107
|
175
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63110 #46
|
176
|
+
Wed Feb 9 12:38:51 [conn46] building new index on { _id: 1 } for test.log
|
177
|
+
Wed Feb 9 12:38:51 [conn46] done for 0 records 0secs
|
178
|
+
Wed Feb 9 12:38:51 [conn46] CMD: drop test.log
|
179
|
+
Wed Feb 9 12:38:51 [conn46] end connection 127.0.0.1:63110
|
180
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63113 #47
|
181
|
+
Wed Feb 9 12:38:51 [conn47] end connection 127.0.0.1:63113
|
182
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63116 #48
|
183
|
+
Wed Feb 9 12:38:51 [conn48] end connection 127.0.0.1:63116
|
184
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63119 #49
|
185
|
+
Wed Feb 9 12:38:51 [conn49] end connection 127.0.0.1:63119
|
186
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63122 #50
|
187
|
+
Wed Feb 9 12:38:51 [conn50] building new index on { _id: 1 } for test.log
|
188
|
+
Wed Feb 9 12:38:51 [conn50] done for 0 records 0secs
|
189
|
+
Wed Feb 9 12:38:51 [conn50] building new index on { time: 1 } for test.log
|
190
|
+
Wed Feb 9 12:38:51 [conn50] done for 0 records 0secs
|
191
|
+
Wed Feb 9 12:38:51 [conn50] building new index on { unit_of_work_id: 1 } for test.log
|
192
|
+
Wed Feb 9 12:38:51 [conn50] done for 0 records 0secs
|
193
|
+
Wed Feb 9 12:38:51 [conn50] query test.log reslen:301 nscanned:3 { $query: { progname: "test" }, $orderby: { time: 1 } } nreturned:2 173ms
|
194
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63125 #51
|
195
|
+
Wed Feb 9 12:38:51 [conn51] end connection 127.0.0.1:63125
|
196
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63128 #52
|
197
|
+
Wed Feb 9 12:38:51 [conn52] CMD: drop test.log
|
198
|
+
Wed Feb 9 12:38:51 [conn52] end connection 127.0.0.1:63128
|
199
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63131 #53
|
200
|
+
Wed Feb 9 12:38:51 [conn53] end connection 127.0.0.1:63131
|
201
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63134 #54
|
202
|
+
Wed Feb 9 12:38:51 [conn54] building new index on { _id: 1 } for test.log
|
203
|
+
Wed Feb 9 12:38:51 [conn54] done for 0 records 0secs
|
204
|
+
Wed Feb 9 12:38:51 [conn54] building new index on { time: 1 } for test.log
|
205
|
+
Wed Feb 9 12:38:51 [conn54] done for 0 records 0secs
|
206
|
+
Wed Feb 9 12:38:51 [conn54] building new index on { unit_of_work_id: 1 } for test.log
|
207
|
+
Wed Feb 9 12:38:51 [conn54] done for 0 records 0secs
|
208
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63137 #55
|
209
|
+
Wed Feb 9 12:38:51 [conn55] end connection 127.0.0.1:63137
|
210
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63140 #56
|
211
|
+
Wed Feb 9 12:38:51 [conn56] CMD: drop test.log
|
212
|
+
Wed Feb 9 12:38:51 [conn56] end connection 127.0.0.1:63140
|
213
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63143 #57
|
214
|
+
Wed Feb 9 12:38:51 [conn57] end connection 127.0.0.1:63143
|
215
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63146 #58
|
216
|
+
Wed Feb 9 12:38:51 [conn58] building new index on { _id: 1 } for test.log
|
217
|
+
Wed Feb 9 12:38:51 [conn58] done for 0 records 0secs
|
218
|
+
Wed Feb 9 12:38:51 [conn58] building new index on { time: 1 } for test.log
|
219
|
+
Wed Feb 9 12:38:51 [conn58] done for 0 records 0secs
|
220
|
+
Wed Feb 9 12:38:51 [conn58] building new index on { unit_of_work_id: 1 } for test.log
|
221
|
+
Wed Feb 9 12:38:51 [conn58] done for 0 records 0secs
|
222
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63149 #59
|
223
|
+
Wed Feb 9 12:38:51 [conn59] end connection 127.0.0.1:63149
|
224
|
+
Wed Feb 9 12:38:51 [initandlisten] connection accepted from 127.0.0.1:63152 #60
|
225
|
+
Wed Feb 9 12:38:51 [conn60] CMD: drop test.log
|
226
|
+
Wed Feb 9 12:38:51 [conn60] end connection 127.0.0.1:63152
|
227
|
+
Wed Feb 9 12:38:51 got kill or ctrl c or hup signal 15 (Terminated), will terminate after current cmd ends
|
228
|
+
Wed Feb 9 12:38:51 [interruptThread] now exiting
|
229
|
+
Wed Feb 9 12:38:51 dbexit:
|
230
|
+
|
231
|
+
Wed Feb 9 12:38:51 [interruptThread] shutdown: going to close listening sockets...
|
232
|
+
Wed Feb 9 12:38:51 [interruptThread] closing listening socket: 4
|
233
|
+
Wed Feb 9 12:38:51 [interruptThread] closing listening socket: 5
|
234
|
+
Wed Feb 9 12:38:51 [interruptThread] shutdown: going to flush oplog...
|
235
|
+
Wed Feb 9 12:38:51 [interruptThread] shutdown: going to close sockets...
|
236
|
+
Wed Feb 9 12:38:51 [interruptThread] shutdown: waiting for fs preallocator...
|
237
|
+
Wed Feb 9 12:38:51 [conn50] end connection 127.0.0.1:63122
|
238
|
+
Wed Feb 9 12:38:51 [conn42] end connection 127.0.0.1:63098
|
239
|
+
Wed Feb 9 12:38:51 [conn24] end connection 127.0.0.1:63044
|
240
|
+
Wed Feb 9 12:38:51 [conn12] end connection 127.0.0.1:63008
|
241
|
+
Wed Feb 9 12:38:51 [conn20] end connection 127.0.0.1:63032
|
242
|
+
Wed Feb 9 12:38:51 [conn26] end connection 127.0.0.1:63050
|
243
|
+
Wed Feb 9 12:38:51 [conn16] end connection 127.0.0.1:63020
|
244
|
+
Wed Feb 9 12:38:51 [conn30] end connection 127.0.0.1:63062
|
245
|
+
Wed Feb 9 12:38:51 [conn34] end connection 127.0.0.1:63074
|
246
|
+
Wed Feb 9 12:38:51 [conn54] end connection 127.0.0.1:63134
|
247
|
+
Wed Feb 9 12:38:51 [conn38] end connection 127.0.0.1:63086
|
248
|
+
Wed Feb 9 12:38:51 [conn58] end connection 127.0.0.1:63146
|
249
|
+
Wed Feb 9 12:38:53 [interruptThread] shutdown: closing all files...
|
250
|
+
Wed Feb 9 12:38:53 closeAllFiles() finished
|
251
|
+
|
252
|
+
Wed Feb 9 12:38:53 [interruptThread] shutdown: removing fs lock...
|
253
|
+
Wed Feb 9 12:38:53 dbexit: really exiting now
|
data/spec/tmp/mongo.pid
ADDED
File without changes
|
metadata
ADDED
@@ -0,0 +1,112 @@
|
|
1
|
+
--- !ruby/object:Gem::Specification
|
2
|
+
name: lumberjack_mongo_device
|
3
|
+
version: !ruby/object:Gem::Version
|
4
|
+
hash: 23
|
5
|
+
prerelease:
|
6
|
+
segments:
|
7
|
+
- 1
|
8
|
+
- 0
|
9
|
+
- 0
|
10
|
+
version: 1.0.0
|
11
|
+
platform: ruby
|
12
|
+
authors:
|
13
|
+
- Brian Durand
|
14
|
+
autorequire:
|
15
|
+
bindir: bin
|
16
|
+
cert_chain: []
|
17
|
+
|
18
|
+
date: 2011-02-11 00:00:00 -06:00
|
19
|
+
default_executable:
|
20
|
+
dependencies:
|
21
|
+
- !ruby/object:Gem::Dependency
|
22
|
+
name: mongo
|
23
|
+
prerelease: false
|
24
|
+
requirement: &id001 !ruby/object:Gem::Requirement
|
25
|
+
none: false
|
26
|
+
requirements:
|
27
|
+
- - ">="
|
28
|
+
- !ruby/object:Gem::Version
|
29
|
+
hash: 25
|
30
|
+
segments:
|
31
|
+
- 1
|
32
|
+
- 1
|
33
|
+
- 5
|
34
|
+
version: 1.1.5
|
35
|
+
type: :runtime
|
36
|
+
version_requirements: *id001
|
37
|
+
- !ruby/object:Gem::Dependency
|
38
|
+
name: lumberjack
|
39
|
+
prerelease: false
|
40
|
+
requirement: &id002 !ruby/object:Gem::Requirement
|
41
|
+
none: false
|
42
|
+
requirements:
|
43
|
+
- - ~>
|
44
|
+
- !ruby/object:Gem::Version
|
45
|
+
hash: 15
|
46
|
+
segments:
|
47
|
+
- 1
|
48
|
+
- 0
|
49
|
+
version: "1.0"
|
50
|
+
type: :runtime
|
51
|
+
version_requirements: *id002
|
52
|
+
description: A logging device for the lumberjack gem that writes log entries to a MongoDB collection.
|
53
|
+
email:
|
54
|
+
- bdurand@embellishedvisions.com
|
55
|
+
executables: []
|
56
|
+
|
57
|
+
extensions: []
|
58
|
+
|
59
|
+
extra_rdoc_files:
|
60
|
+
- README.rdoc
|
61
|
+
files:
|
62
|
+
- README.rdoc
|
63
|
+
- VERSION
|
64
|
+
- Rakefile
|
65
|
+
- MIT_LICENSE
|
66
|
+
- lib/lumberjack_mongo_device.rb
|
67
|
+
- spec/lumberjack_mongo_device_spec.rb
|
68
|
+
- spec/spec_helper.rb
|
69
|
+
- spec/tmp/db/mongod.lock
|
70
|
+
- spec/tmp/db/test.0
|
71
|
+
- spec/tmp/db/test.1
|
72
|
+
- spec/tmp/db/test.ns
|
73
|
+
- spec/tmp/mongo.log
|
74
|
+
- spec/tmp/mongo.pid
|
75
|
+
has_rdoc: true
|
76
|
+
homepage: http://github.com/bdurand/lumberjack_mongo_device
|
77
|
+
licenses: []
|
78
|
+
|
79
|
+
post_install_message:
|
80
|
+
rdoc_options:
|
81
|
+
- --charset=UTF-8
|
82
|
+
- --main
|
83
|
+
- README.rdoc
|
84
|
+
require_paths:
|
85
|
+
- lib
|
86
|
+
required_ruby_version: !ruby/object:Gem::Requirement
|
87
|
+
none: false
|
88
|
+
requirements:
|
89
|
+
- - ">="
|
90
|
+
- !ruby/object:Gem::Version
|
91
|
+
hash: 3
|
92
|
+
segments:
|
93
|
+
- 0
|
94
|
+
version: "0"
|
95
|
+
required_rubygems_version: !ruby/object:Gem::Requirement
|
96
|
+
none: false
|
97
|
+
requirements:
|
98
|
+
- - ">="
|
99
|
+
- !ruby/object:Gem::Version
|
100
|
+
hash: 3
|
101
|
+
segments:
|
102
|
+
- 0
|
103
|
+
version: "0"
|
104
|
+
requirements: []
|
105
|
+
|
106
|
+
rubyforge_project:
|
107
|
+
rubygems_version: 1.5.0
|
108
|
+
signing_key:
|
109
|
+
specification_version: 3
|
110
|
+
summary: A logging device for the lumberjack gem that writes log entries to a MongoDB collection.
|
111
|
+
test_files: []
|
112
|
+
|