lpm 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/LICENSE +3 -0
- data/README +3 -0
- data/Rakefile +45 -0
- data/bin/lpm +121 -0
- data/lib/config/stipulation.rb +1 -0
- data/lib/lpm.rb +8 -0
- data/lib/lpm/error.rb +3 -0
- data/lib/lpm/proc.rb +67 -0
- data/lib/lpm/process.rb +136 -0
- data/lib/lpm/utils.rb +170 -0
- data/lib/server.rb +70 -0
- data/lib/utils.rb +13 -0
- data/lib/xiaoqian.rb +257 -0
- data/test/self_test/em_periodic.rb +38 -0
- data/test/test_proc.rb +17 -0
- data/test/test_process.rb +11 -0
- data/test/test_utils.rb +14 -0
- metadata +72 -0
data/LICENSE
ADDED
data/README
ADDED
data/Rakefile
ADDED
@@ -0,0 +1,45 @@
+#
+# To change this template, choose Tools | Templates
+# and open the template in the editor.
+
+
+require 'rubygems'
+require 'rake'
+require 'rake/clean'
+require 'rake/gempackagetask'
+require 'rake/rdoctask'
+require 'rake/testtask'
+
+spec = Gem::Specification.new do |s|
+  s.name = 'lpm'
+  s.version = '1.0.0'
+  s.has_rdoc = true
+  s.extra_rdoc_files = ['README', 'LICENSE']
+  s.summary = 'Linux Proc Monitor'
+  s.description = s.summary
+  s.author = 'Charles Cui'
+  s.email = 'zheng.cuizh@gmail.com'
+  s.executables = ['lpm']
+  s.files = %w(LICENSE README Rakefile) + Dir.glob("{bin,lib,spec,test}/**/*")
+  s.require_path = "lib"
+  s.bindir = "bin"
+end
+
+Rake::GemPackageTask.new(spec) do |p|
+  p.gem_spec = spec
+  p.need_tar = true
+  p.need_zip = true
+end
+
+Rake::RDocTask.new do |rdoc|
+  files =['README', 'LICENSE', 'lib/**/*.rb']
+  rdoc.rdoc_files.add(files)
+  rdoc.main = "README" # page to start on
+  rdoc.title = "lpm Docs"
+  rdoc.rdoc_dir = 'doc/rdoc' # rdoc output folder
+  rdoc.options << '--line-numbers'
+end
+
+Rake::TestTask.new do |t|
+  t.test_files = FileList['test/**/*.rb']
+end
data/bin/lpm
ADDED
@@ -0,0 +1,121 @@
+#!/usr/bin/env ruby
+
+#--
+# Copyright (c) 2009, Charles Cui
+# All rights reserved. Licensed under the BSD license. See LICENSE for details
+#++
+
+# $: << File.expand_path(File.join(File.dirname(__FILE__),"..","lib"))
+require 'rubygems'
+require 'socket'
+require 'optparse'
+require "logger"
+
+require File.join(File.dirname(__FILE__),"..","lib","utils.rb")
+require File.join(File.dirname(__FILE__),"..","lib","xiaoqian.rb")
+require File.join(File.dirname(__FILE__),"..","lib","server.rb")
+
+vars = {}
+ARGV.options do
+  |opts|
+  opts.banner = <<DOC
+XiaoQian [also named => L(inux) P(roc) M(onitor)] :
+Mail/Gtalk: zheng.cuizh@gmail.com
+
+Usage: #{File.basename($0)} [options] argv...
+(sudo )lpm -t 15 --memcache=10.2.226.133:11011,10.2.226.133:11012 -P --pids=28261,28263 --log_level=1
+DOC
+
+  opts.on_head("LPM optins useage:")
+
+  vars[:port] = 9876
+  opts.on("-p", "--port=PORT", Integer,
+    "lpm rest server requires a port number.","default port=#{vars[:port]}") {|vars[:port]|}
+
+  # separater
+  opts.on()
+
+  vars[:push] = false
+  opts.on("-P","--[no-]push",
+    "if push is given, the --memcache must be set,", # multiline description
+    "and a job will push server performance data to memcache server.",
+    "default --push=#{vars[:push]}.") {|vars[:push]|}
+
+  vars[:timer] = 10
+  vars[:default_timer] = 10
+  opts.on("-t","--timer=Second", Integer,
+    "periodic timer to push cache",
+    "default --timer=#{vars[:default_timer]}(second).") {|vars[:timer]|}
+
+  # array
+  opts.on("-m", "--memcache=[LIST,LIST]", Array,
+    "memcache list,such as : --memcache=10.2.226.38:11211,10.2.226.37:11211","memcache server(s) must be alive.") {|vars[:memcache]|}
+
+  vars[:log] = "/var/log/lpm.log"
+  opts.on("-l","--log=LogPath",
+    "default --log=#{vars[:log]}"){|vars[:log]|}
+
+  vars[:default_log_level] = 3
+  opts.on("--log_level=LogLevel",Integer,"1 => DEBUG","2 => INFO","3 => WARN","4 => ERROR","5 => FATAL",
+    "default --log_level=#{vars[:default_log_level]}"){|vars[:log_level]|}
+
+  opts.on("--pids=[PID,PID]", Array,
+    "pids list,such as : --pids=1234,2134,4321") {|vars[:pids]|}
+
+  # separater
+  opts.on_tail
+  opts.on_tail("common options:")
+
+  # no argument, shows at tail
+  opts.on_tail("--help", "show this message") {puts opts; exit}
+
+  # no argument
+  opts.on_tail("--version", "show version") do
+    puts "XiaoQian [L(inux) P(roc) M(onitor)] : 1.0.1"
+    exit
+  end
+  opts.parse!
+end
+
+$logger = nil
+
+begin
+  # $logger = Logger.new(vars[:log])
+  Logger::Formatter::Format = "%s, [%s#%d] %5s -- %s: %s#{$/}"
+  $logger = Logger.new(vars[:log],'daily',1048576)
+  if vars[:log_level] && (vars[:log_level] >= 1 || vars[:log_level] <= 5) then
+    $logger.level = vars[:log_level]
+  else
+    $logger.level = vars[:default_log_level]
+  end
+rescue Exception => e
+  $logger = nil
+  p "params error:#{e.to_s}"
+  p "can't access log file => #{vars[:log]}"
+end
+
+$logger.info "LPMServer is starting..."
+$logger.info "Server parameters are below : #{vars.inspect}"
+$logger.info "LPMServer will running @ 0.0.0.0:#{vars[:port]}"
+
+Daemon.start do
+  pid = []
+  pid << Process.fork do
+    $logger.info "LPMServer is starting..."
+    LPMServer.run('0.0.0.0',vars[:port])
+  end
+
+  if pid[0] then
+    pid << Process.fork do
+      $logger.info "Xiaoqian is starting..."
+      if vars[:push] then
+        vars[:timer] = vars[:default_timer] if (vars[:timer].to_i < 0)
+        Xiaoqian.run(vars[:timer].to_i,{:pids=>vars[:pids],:caches=>vars[:memcache],:log=>$logger}) if $logger
+      end
+    end
+  end
+
+  $logger.info "LPMServer,Xiaoqian = #{pid.inspect}"
+  Process.waitall
+  $logger.info "LPM main process exit."
+end
data/lib/config/stipulation.rb
ADDED
@@ -0,0 +1 @@
+# relations with LPMP(Caichen) in memcache
data/lib/lpm.rb
ADDED
@@ -0,0 +1,8 @@
+require File.join(File.dirname(__FILE__),'lpm','proc.rb')
+require File.join(File.dirname(__FILE__),'lpm','process.rb')
+require File.join(File.dirname(__FILE__),'lpm','error.rb')
+require File.join(File.dirname(__FILE__),'lpm','utils.rb')
+require File.join(File.dirname(__FILE__),'utils.rb')
+require File.join(File.dirname(__FILE__),'xiaoqian.rb')
+
+raise PlatFormError,'The LPM works on Linux only.' unless RUBY_PLATFORM =~ /linux/
data/lib/lpm/error.rb
ADDED
data/lib/lpm/proc.rb
ADDED
@@ -0,0 +1,67 @@
+module LPM
+  class Proc
+
+    class << self
+      def cpuinfo(*arguments)
+        begin
+          cpus_info = IO.read('/proc/cpuinfo').split("#{$/*2}")
+          return cpus_info.map do |cpu_info|
+            h = {}
+            cpu_info.split($/).each do |line|
+              kv = line.split(/\s*\:\s*/)
+              h[kv[0]] = kv[1]
+            end
+            h
+          end
+        rescue Exception => e
+          # => do nothing
+          return "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+        ensure
+        end
+      end
+
+      def meminfo(*arguments)
+        begin
+          mem_info = IO.read('/proc/meminfo')
+          h = {}
+          mem_info.split($/).each do |line|
+            kv = line.split(/\s*\:\s*/)
+            h[kv[0].to_sym] = kv[1].chomp(' kB')
+          end
+          h
+        rescue Exception => e
+          # => do nothing
+          return "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+        ensure
+        end
+      end
+
+      def loadavg(*arguments)
+        begin
+          load_avg = IO.read('/proc/loadavg').split(/\s+/)
+          h = {}
+          h[:last] = load_avg.pop
+          h[:running],h[:total] = load_avg.pop.split(/\//)
+          h[:load] = load_avg
+          h
+        rescue Exception => e
+          # => do nothing
+          return "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+        ensure
+        end
+      end
+
+      def uptime(*arguments)
+        begin
+          total,idle = IO.read('/proc/uptime').split
+          {:total=>total,:idle=>idle}
+        rescue Exception => e
+          # => do nothing
+          return "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+        ensure
+        end
+      end
+    end
+
+  end
+end
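For orientation, a minimal usage sketch of the LPM::Proc readers above, assuming a Linux host with /proc mounted (test/test_proc.rb in this same diff exercises the same calls):

    require 'rubygems'
    require 'lpm'
    require 'pp'

    pp LPM::Proc.cpuinfo  # one hash per CPU, parsed from /proc/cpuinfo
    pp LPM::Proc.meminfo  # /proc/meminfo keys => values, with the ' kB' suffix stripped
    pp LPM::Proc.loadavg  # {:load=>[...], :running=>..., :total=>..., :last=>...}
    pp LPM::Proc.uptime   # {:total=>..., :idle=>...} read from /proc/uptime

On any read or parse error each method returns a small JSON-style error string instead of raising.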
data/lib/lpm/process.rb
ADDED
@@ -0,0 +1,136 @@
+
+
+module LPM
+  class Process
+    attr :pid
+    def initialize pid
+      @pid = pid
+      @pwd = "/proc/#{@pid}"
+      raise NoPidExist,"The process which pid is #{@pid} does not exist." unless File.directory? @pwd
+    end
+
+    def attr(*arguments)
+      h = {}
+      begin
+        Dir.chdir("#{@pwd}/attr")
+        Dir.entries("#{@pwd}/attr").each do |d|
+          next if (d == '.') || (d == '..')
+          h[d.to_sym] = IO.read("#{@pwd}/attr/#{d}")
+        end
+      rescue Exception => e
+        # => do nothing
+        h = "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      end
+      h
+    end
+
+    def cwd(*arguments)
+      begin
+        File.readlink("#{@pwd}/cwd")
+      rescue Exception => e
+        # => do nothing
+        "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      end
+    end
+
+    def exe(*arguments)
+      begin
+        File.readlink("#{@pwd}/exe")
+      rescue Exception => e
+        # => do nothing
+        "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      end
+    end
+
+    def root(*arguments)
+      begin
+        File.readlink("#{@pwd}/root")
+      rescue Exception => e
+        # => do nothing
+        "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      end
+    end
+
+    def fd(*arguments)
+      r = []
+      begin
+        Dir.chdir("#{@pwd}/fd")
+        Dir.entries("#{@pwd}/fd").each do |d|
+          next if (d == '.') || (d == '..')
+          if FileTest.symlink?(d)
+            r << {d.to_sym => File.readlink(d)}
+          else
+            r << {d.to_sym => d}
+          end
+        end
+      rescue Exception => e
+        # => do nothing
+        r = "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      ensure
+        r
+      end
+    end
+
+    def smaps(*arguments)
+      r = []
+      begin
+        _fd = File.open("#{@pwd}/#{smaps}",'r')
+        _ls = _fd.readlines
+
+        while(_ls!=[])
+          h = {}
+          8.times do
+            k,v = _ls.shift.split(/\s*\:\s*/)
+            h[k.to_sym] = v
+          end
+          r << h
+        end
+
+      rescue Exception => e
+        # => do nothing
+        r = "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      ensure
+        _fd.close
+      end
+      r
+    end
+
+    def status(*arguments)
+      begin
+        _status = IO.read("#{@pwd}/status").split($/)
+        h = {}
+        _status.each do |line|
+          kv = line.split(/\s*\:\s*/)
+          h[kv[0].to_sym] = kv[1]
+        end
+        h
+      rescue Exception => e
+        # => do nothing
+        h = "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      ensure
+      end
+    end
+
+    def method_missing(method_name,*args)
+      begin
+        IO.read("#{@pwd}/#{method_name}")
+      rescue Exception => e
+        # => do nothing
+        "{\"exception\":\"#{e.class}\",\"msg\":\"#{e}\"}"
+      end
+    end
+  end
+
+  module ClassMethods
+
+  end
+
+  module InstanceMethods
+
+  end
+
+  def self.included(receiver)
+    receiver.extend ClassMethods
+    receiver.send :include, InstanceMethods
+  end
+end
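A minimal sketch of using LPM::Process directly, assuming the pid below actually exists under /proc; any method name not defined above falls through method_missing and returns the raw contents of the matching /proc/&lt;pid&gt; file:

    require 'rubygems'
    require 'lpm'

    ps = LPM::Process.new(1234)  # hypothetical pid; raises NoPidExist if /proc/1234 is absent
    ps.status                    # hash parsed from /proc/1234/status
    ps.cwd                       # target of the /proc/1234/cwd symlink
    ps.cmdline                   # via method_missing => raw /proc/1234/cmdline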
data/lib/lpm/utils.rb
ADDED
@@ -0,0 +1,170 @@
+module LPM
+
+  # you could use utils like this:
+  # LPM::Utils.cpu(5)
+  # => {"cpu"=>100.0, "cpu0"=>100.0, "cpu1"=>100.0}
+
+  # also could use Utils as below:
+  # LPM::Proc.cpu(5)
+  # => {"cpu"=>100.0, "cpu0"=>100.0, "cpu1"=>100.0}
+  module Utils
+    module Proc
+      def pids(*arguments)
+        Dir.chdir '/proc'
+        Dir['[0-9]*'].inject([]) { |r, p|
+          r << {:pid => p, :cmdline => IO.read("#{p}/cmdline").gsub(/\000/,' ')}
+        }
+      end
+
+      def date(*arguments)
+        `date +"%F %T"`.chomp
+      end
+
+      def df(*arguments)
+        size,use,avai = 0,0,0
+        str = `df`
+        arr = str.split($/)
+        arr.shift
+        arr.each { |l| x = l.split; size += x[1].to_i; use += x[2].to_i; avai += x[3].to_i }
+        {:size => size,:use => use,:avai => avai}
+      end
+
+      # ulimit -n is the most useful.
+      # it limitted the count of total available socket.
+      def ulimit(*arguments)
+        `ulimit -a`.split($/).map do |line|
+          a,b = line.split(/\s*\(.*?\)\s*/)
+          {a=>b}
+        end
+      end
+
+      # attention: sysctl seperator is ':' in macos.
+      def sysctl(*arguments)
+        `sysctl -a`.split($/).map do |line|
+          a,b = line.split(/\s*=\s*/)
+          {a=>b}
+        end
+      end
+
+      def locale(*arguments)
+        `locale`.split($/).map do |line|
+          a,b = line.split(/\s*=\s*/)
+          {a=>b}
+        end
+      end
+
+      # get cpu useage.
+      # total_0=USER[0]+NICE[0]+SYSTEM[0]+IDLE[0]+IOWAIT[0]+IRQ[0]+SOFTIRQ[0]
+      # sleep x
+      # total_1=USER[1]+NICE[1]+SYSTEM[1]+IDLE[1]+IOWAIT[1]+IRQ[1]+SOFTIRQ[1]
+      # cpu_usage=(IDLE[0]-IDLE[1]) / (total_0-total_1) * 100
+
+      # scale is 100
+      # cpu(5) => {"cpu3"=>99.3333333333333, "cpu"=>99.75, "cpu0"=>100.0, "cpu1"=>100.0, "cpu2"=>99.3355481727575}
+
+      def cpu(interv=3,debug=false,*arguments)
+        begin
+          interv = interv.to_i
+        rescue NoMethodError => e
+          interv = 3
+        end
+
+        def getCpuStat(debug=false)
+          h = {}
+          IO.read('/proc/stat').split($/).map do|line|
+            a = line.split
+            # p "line => #{a.inspect}" if debug
+            # user nice system idle iowait irq softirq
+            #cpu 1961269 660 285319 1290544224 1522385 2383 0
+            if(a[0]=~/cpu/)
+              # raise CpuLineSizeError,"The count of words each line is not equal 8." if a.size != 8
+              # p "cpu line => #{a.inspect}" if debug
+              h[a[0]] = {:total => a[1..-1].inject(0) do |sum,i|
+                  sum += i.to_f
+                end,
+                :idle => a[4].to_f}
+              # p "idle => #{h[a[0]][:idle]}" if debug
+            end
+          end
+          h
+        end
+
+        def getCpuUseage(h1,h2,debug=false)
+          # p h1.inspect if debug
+          # p h2.inspect if debug
+          # p "h2[:idle]-h1[:idle] = #{(h2[:idle]-h1[:idle]).to_f}" if debug
+          # p "h2[:total]-h1[:total] = #{(h2[:total]-h1[:total]).to_f}" if debug
+          100 - (h2[:idle]-h1[:idle]).to_f/(h2[:total]-h1[:total]).to_f * 100
+        end
+
+        cpu_old,cpu_new = {},{}
+
+        th = Thread.fork do
+          cpu_old = getCpuStat(debug)
+          sleep interv
+          cpu_new = getCpuStat(debug)
+        end
+
+        th.join
+
+        r = {}
+        cpu_old.each do |k,v|
+          r[k.to_sym] =getCpuUseage(cpu_old[k],cpu_new[k],debug)
+        end
+        r
+      end
+
+      def hostname(*arguments)
+        `hostname`.chomp
+      end
+    end
+
+    # LPM::Process included the Process module,
+    # you can use as:
+    # => LPM::Process.new(pid).cpu
+    # then you will get that pid process's cpu total useage.
+    module Process
+
+      # rate = ((utime2+stime2) - (utime1+stime1)) / (total2 - total1)
+      # total = user + sys + idle + nice or get value from /proc/uptime
+      def cpu(interv=3,*arguments)
+        utime1,stime1,starttime1,total1,idle1 = nil
+        utime2,stime2,starttime2,total2,idle2 = nil
+
+        # t = Thread.fork do
+        utime1,stime1,starttime1 = IO.read("#{@pwd}/stat").split.values_at(15,16,23).map{|x|x.to_f}
+        # Attention : uptime file's end contain $/
+        # "3394903.84 3389669.77\n"
+        # if run in Thread,
+        # Process will hang on IO.read('/proc/uptime')
+        # like this => http://blade.nagaokaut.ac.jp/cgi-bin/scat.rb/ruby/ruby-talk/125125
+        # i don't know why.
+        total1,idle1 = IO.read('/proc/uptime').chop.split.map{|x|x.to_f}
+        sleep interv
+        utime2,stime2,starttime2 = IO.read("#{@pwd}/stat").split.values_at(15,16,23).map{|x|x.to_f}
+        total2,idle2 = IO.read('/proc/uptime').chop.split.map{|x|x.to_f}
+        # end
+
+        # t.join
+        ((utime2+stime2) - (utime1+stime1)).to_f / (total2 - total1).to_f
+      end
+
+      def mem(*arguments)
+        (IO.read("#{@pwd}/statm").split.shift.to_f/1024).to_s
+      end
+    end
+
+    class << self;include Proc;end
+  end
+
+  # insert into LPM::Proc's meta class.
+  begin
+    (class << LPM::Proc;include Utils::Proc;end) unless LPM::Proc.include? Utils::Proc
+    LPM::Process.send :include,Utils::Process
+  rescue NameError
+    require File.join(File.dirname(__FILE__),'proc.rb')
+    require File.join(File.dirname(__FILE__),'process.rb')
+    retry
+  end
+
+end
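As the comments in this file note, the helpers are also mixed into LPM::Proc and LPM::Process by the begin/rescue block at the bottom, so the two-sample CPU reading can be taken at host or process level; a short sketch, assuming Linux and a real pid:

    require 'rubygems'
    require 'lpm'

    LPM::Utils.cpu(5)            # percent busy per CPU over a 5-second window, e.g. {:cpu=>..., :cpu0=>...}
    LPM::Proc.cpu(5)             # same data through the mixed-in module
    LPM::Proc.df                 # {:size=>..., :use=>..., :avai=>...} summed over `df` output
    ps = LPM::Process.new(1234)  # hypothetical pid
    ps.cpu(3)                    # (utime+stime) delta over the /proc/uptime delta, sampled 3 seconds apart
    ps.mem                       # first field of /proc/1234/statm divided by 1024, returned as a string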
data/lib/server.rb
ADDED
@@ -0,0 +1,70 @@
+require 'rubygems'
+require File.join(File.dirname(__FILE__),'lpm.rb')
+require File.join(File.dirname(__FILE__),'utils.rb')
+
+begin
+  gem 'sinatra'
+  require 'sinatra/base'
+  require 'activesupport' unless ({}.respond_to? :to_json)
+rescue LoadError => e
+  p <<DOC
+-----------------------------------------------------------------------
+LPM say:
+The LPM server require sinatra and activesupport.
+Please exec "gem install sinatra" or "gem install activesupport" first.
+-----------------------------------------------------------------------
+Ruby say:
+#{e}
+-----------------------------------------------------------------------
+DOC
+  exit(-1)
+end
+
+$LPMPool = {}
+
+class AppServer < Sinatra::Base
+  get '/' do
+    "{\"msg\":\"Welcome using LPM(Linux Proc Monitor)\"}"
+  end
+
+  get '/lpm/:verbose' do
+    r = ''
+    case params[:verbose]
+    when /ver/i
+      r = "0.1.2";break;
+    when /proc/i
+      redirect '/proc'
+    when /process/i
+      redirect '/process'
+    end
+    r
+  end
+
+  get '/proc/:method/*' do
+    if params[:method] == nil then
+      "Monitor target is needed,such as <a href='/proc/cpuinfo'>/proc/cpuinfo</a>"
+    else
+      (LPM::Proc.send params[:method],*(params[:splat].split('/'))).to_json
+    end
+  end
+
+  get '/process/:pid/:method/*' do
+    if params[:method] != nil and params[:pid].to_i >= 0 then
+      begin
+        a_lpm = $LPMPool[params[:pid]]||($LPMPool[params[:pid]] = LPM::Process.new(params[:pid]))
+        (a_lpm.send params[:method],*(params[:splat].split('/'))).to_json
+      rescue NoPidExist => e
+        %Q-{"success":false,"msg":#{e}}-
+      end
+    else
+      "Monitor target and pid is needed,such as <a href='/process/909/status'>/proc/cpuinfo</a>"
+    end
+  end
+end
+
+class LPMServer
+  def self.run(host='0.0.0.0',port=9876)
+    # Daemon.start{AppServer.run! :host => host, :port => port}
+    AppServer.run! :host => host, :port => port
+  end
+end
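A minimal sketch of querying the routes above once the server is running (bin/lpm starts it on 0.0.0.0:9876 by default; responses are JSON strings). The host and pid here are assumptions:

    require 'net/http'
    require 'uri'

    base = 'http://127.0.0.1:9876'
    puts Net::HTTP.get(URI.parse("#{base}/"))                     # welcome message
    puts Net::HTTP.get(URI.parse("#{base}/proc/meminfo/"))        # LPM::Proc.meminfo as JSON
    puts Net::HTTP.get(URI.parse("#{base}/process/1234/status/")) # LPM::Process#status for a hypothetical pid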
data/lib/utils.rb
ADDED
@@ -0,0 +1,13 @@
+class Daemon
+  def Daemon.start
+    exit!(0) if fork
+    Process::setsid
+    exit!(0) if fork
+    Dir::chdir("/")
+    File::umask(0)
+    STDIN.reopen("/dev/null")
+    STDOUT.reopen("/dev/null", "w")
+    STDERR.reopen("/dev/null", "w")
+    yield if block_given?
+  end
+end
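Daemon.start is a classic double-fork daemonizer and is how bin/lpm detaches the whole service. A minimal sketch of using it on its own; the log path is an assumption:

    require 'rubygems'
    require 'lpm'   # lib/lpm.rb pulls in this utils.rb alongside the LPM modules

    Daemon.start do
      # detached here: new session, cwd is /, stdin/stdout/stderr point at /dev/null
      File.open('/tmp/daemon_heartbeat.log', 'a') { |f| f.puts Time.now.to_s }
    end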
data/lib/xiaoqian.rb
ADDED
@@ -0,0 +1,257 @@
+require "rubygems"
+require "eventmachine"
+require "memcache"
+require "logger"
+require 'activesupport' unless ({}.respond_to? :to_json)
+
+require File.join(File.dirname(__FILE__),'lpm.rb')
+
+class Xiaoqian
+
+  # in memcache
+  # XQList:
+  # [
+  # '10.2.226.37',
+  # '10.2.226.38',
+  # '10.2.226.39'->pids:
+  # [
+  # 12345,
+  # 23456
+  # 34567
+  # ]
+  # ]
+
+  # based on pids in memcache,XiaoQian server will periodic update processes array.
+
+  # caches => ['10.2.226.38:11211','10.2.226.38:11212','10.2.226.39:11211']
+  # cacches => ['127.0.0.1:11011','127.0.0.1:11012']
+  # Xiaoqian.new(15,{:pids=>[28261,28263],:caches=>['10.2.226.133:11011','10.2.226.133:11012']})
+  def initialize(timer,opts = {:pids=>[],:caches=>[]})
+    @local_ip = get_local_ip
+    @timer = timer
+    @logger = opts[:log]
+
+    # @XQcaches = get_cache_clients(caches)
+    @XQcaches = MemCache.new(opts[:caches],:namespace=>"XQ#{@local_ip}")
+    @QNYHcaches = MemCache.new(opts[:caches],:namespace=>"QNYH")
+
+    @process = get_processes_instance(opts[:pids])
+    init_cache_config(opts[:pids],@local_ip)
+  end
+
+  def init_cache_config(pids,local_ip)
+    # load File.join(File.dirname(__FILE__),'config','stipulation.rb')
+    @XQcaches.set :localhost,local_ip
+    @XQcaches.set :pids,pids
+
+
+    register_QNYH_XQList
+  end
+
+  def register_QNYH_XQList
+    list = @QNYHcaches.get(:XQList) || []
+    unless list.include?(@local_ip) then
+      list << @local_ip
+      @QNYHcaches.set(:XQList,list)
+    end
+    list
+  end
+
+  def get_local_ip
+    require 'socket'
+    IPSocket.getaddress(Socket.gethostname)
+  end
+
+  # proIns =>
+  # {12345 => <LPM::Process>,67890 => <LPM::Process>}
+  def update_processes_instance(proIns={},pids=[])
+    keys = proIns.keys
+    pids.each do |pid|
+      begin
+        proIns[pid] = LPM::Process.new(pid) unless keys.include?(pid)
+      rescue Exception => e
+        @logger.error "update_processes_instance(#{proIns.inspect},#{pids.inspect})"
+        @logger.error e
+      end
+    end
+    keys = proIns.keys
+    (keys - pids).each{ |var| proIns.delete var }
+    proIns
+  end
+
+  def get_processes_instance(pids=[])
+    h = {}
+    if pids then
+      pids.each{ |pid|
+        begin
+          h[pid] = LPM::Process.new(pid)
+        rescue Exception => e
+          @logger.error "get_processes_instance(#{pids.inspect})"
+          @logger.error e
+        end
+      }
+    end
+    h
+  end
+
+  def get_cache_clients(caches=[])
+
+    # EM::P::Memcache.connect =>
+    # # Connect to a memcached server (must support NOREPLY, memcached >= 1.2.4)
+    # def self.connect host = 'localhost', port = 11211
+    # EM.connect host, port, self, host, port
+    # end
+    caches.inject([]){|arr,ca|
+      host,port = ca.split(':')
+      if port then
+        arr << EM::P::Memcache.connect(host,port.to_i)
+      else
+        arr << EM::P::Memcache.connect(host)
+      end
+    }
+  end
+
+  def wrap_data(data)
+    {:ts => Time.now.to_i,:data => data}
+  end
+
+  # Data from Job
+  # => {:ts=>123456789,:data=><Hash>}
+
+  def process_job(timer)
+    @logger.error "[Pid => #{Process.pid}] -- Process Job start."
+    EM.run{
+      EM.add_periodic_timer(timer){
+        @logger.info "process_job is called."
+        @process = update_processes_instance(@process,@XQcaches.get(:pids))
+        @logger.info "@process => #{@process.inspect}."
+        r = []
+        t = []
+
+        @process.each do |key,ins|
+          tmp = {}
+          t << Thread.fork{
+            tmp.update({:cpu => ins.cpu})
+          }
+          tmp.update({
+            :pid => ins.pid,
+            :cwd => ins.cwd,
+            :exe => ins.exe,
+            :root => ins.root,
+            :cmdline => ins.cmdline,
+            :memory => ins.mem
+          })
+          r << tmp
+        end
+        t.each{|x|x.join}
+        @logger.info "@process result => #{r.inspect}"
+        @XQcaches.set :host_process_grid,wrap_data({:root => r,:total => r.size}.to_json)
+      }
+    }
+  end
+
+  # LPM::Proc or LPM::Process data will be wraped in json format.
+  # This format must be fit to the require of LPMP's javascript.
+
+  def proc_job(timer)
+    @logger.error "[Pid => #{Process.pid}] -- Proc Job start."
+    EM.run{
+      # cpu
+      EM.add_periodic_timer(timer){
+        # cpu
+        @logger.debug "proc_job's cpu is called."
+        @XQcaches.set :host_proc_cpu,wrap_data({:value => LPM::Proc.cpu[:cpu].to_f,:time => Time.now.strftime("%m-%d %Hh:%Mm:%Ss")}.to_json)
+      }
+      # loadavg
+      EM.add_periodic_timer(timer){
+        # loadavg
+        @logger.debug "proc_job's loadavg is called."
+        @XQcaches.set :host_proc_loadavg,wrap_data({:value => LPM::Proc.loadavg[:load][0].to_f,:time => Time.now.strftime("%m-%d %Hh:%Mm:%Ss")}.to_json)
+      }
+      # meminfo
+      EM.add_periodic_timer(timer){
+        # meminfo
+        @logger.debug "proc_job's meminfo is called."
+        @XQcaches.set :host_proc_meminfo,wrap_data({:root => LPM::Proc.meminfo.inject([]){|a,e|
+          a << {:type => e[0],:count => e[1]}
+        },:time => Time.now.strftime("%m-%d %Hh:%Mm:%Ss")}.to_json)
+      }
+      # df
+      EM.add_periodic_timer(timer){
+        # df
+        @logger.debug "proc_job's df is called."
+        @XQcaches.set :host_proc_df,wrap_data({:root => LPM::Proc.df.inject([]){|a,e|
+          a << {:type => e[0],:count => e[1]}
+        },:time => Time.now.strftime("%m-%d %Hh:%Mm:%Ss")}.to_json)
+      }
+      # pids
+      EM.add_periodic_timer(timer){
+        # pids
+        @logger.debug "proc_job's pids is called."
+        @XQcaches.set :host_proc_pids,wrap_data(LPM::Proc.pids.map{ |e| {:text => e[:cmdline],:leaf => true,:cls => "file",:qtip => e[:pid]} }.to_json)
+      }
+
+      # grid
+      EM.add_periodic_timer(timer){
+        @logger.info "proc_job's grid is called."
+        tmp = {}
+        t = Thread.fork{
+          tmp.update({:cpu => LPM::Proc.cpu["cpu"]})
+        }
+        tmp.update({
+          :ip => @local_ip,
+          :hostname => LPM::Proc.hostname,
+          :loadavg => LPM::Proc.loadavg[:load][0],
+          :mem_total => LPM::Proc.meminfo[:MemTotal],#.chomp(' kB'),
+          :mem_free => LPM::Proc.meminfo[:MemFree],#.chomp(' kB'),
+          :uptime_total => LPM::Proc.uptime[:total],
+          :uptime_idle => LPM::Proc.uptime[:idle],
+          :disk_size => LPM::Proc.df[:size],
+          :disk_use => LPM::Proc.df[:use],
+          :date => LPM::Proc.date
+        })
+        t.join
+        @logger.info "proc_grid => #{tmp.inspect}."
+        @XQcaches.set :host_proc_grid,wrap_data(tmp)
+      }
+    }
+  end
+
+  def work_job
+    timer = @timer
+    @logger.error "work_job will run,and interval timer is #{timer}."
+    p = []
+    p << Process.fork{
+      begin
+        self.proc_job(timer)
+      rescue Exception => e
+        @logger.fatal "Proc Job fatal error!"
+        @logger.fatal e.to_s
+      end
+    }
+    if p[0] then
+      p << Process.fork{
+        begin
+          self.process_job(timer)
+        rescue Exception => e
+          @logger.fatal "Process Job fatal error!"
+          @logger.fatal e.to_s
+        end
+      }
+    end
+    @logger.error "work_job is running."
+    @logger.error "process and proc job's pid => #{p.inspect}."
+    Process.waitall
+    @logger.error "work_job shutdown."
+  end
+
+  def self.run(timer,opts = {:pids=>[],:caches=>[]})
+    begin
+      # Daemon.start{Xiaoqian.new(timer,{:pids=>opts[:pisd],:caches=>opts[:caches],:log=>opts[:log]}).work_job}
+      Xiaoqian.new(timer,{:pids=>opts[:pids],:caches=>opts[:caches],:log=>opts[:log]}).work_job
+    rescue Exception => e
+      opts[:log].error e
+      opts[:log].error "Xiaoqian.new(#{timer},#{{:pids=>opts[:pids],:caches=>opts[:caches],:log=>opts[:log]}}).work_job"
+    end
+  end
+end
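A minimal sketch of starting the pusher directly, mirroring the call bin/lpm makes; the memcached address, pids, and log path are assumptions, the eventmachine and memcache client gems must be installed, and Xiaoqian.run blocks because work_job forks the two jobs and waits on them:

    require 'rubygems'
    require 'logger'
    require 'lpm'   # lib/lpm.rb also loads xiaoqian.rb

    log = Logger.new('/tmp/lpm_xiaoqian.log')            # assumed log path
    Xiaoqian.run(15, {:pids   => [28261, 28263],         # pids as in the class comment above
                      :caches => ['10.2.226.133:11011'], # memcached host:port, must be reachable
                      :log    => log})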
data/test/self_test/em_periodic.rb
ADDED
@@ -0,0 +1,38 @@
+require "rubygems"
+require "eventmachine"
+require "memcache"
+
+@cache = MemCache.new(['127.0.0.1:11011'],:namespace=>"XQ#{@local_ip}")
+
+EM.run{
+  p 'EM.run 1st block.'
+  EM.add_periodic_timer(5){
+    @cache.set :Timer1,"Time now => #{Time.now.to_s}"
+    p 'set Timer1'
+  }
+
+  EM.add_periodic_timer(5){
+    puts @cache.get(:Timer1)
+  }
+
+  EM.add_periodic_timer(7){
+    @cache.set :Timer2,"Hello dear All. I'm logger :)"
+    p 'set Timer2'
+  }
+
+  EM.add_periodic_timer(7){
+    puts @cache.get(:Timer2)
+  }
+}
+
+EM.run{
+  p 'EM.run 2nd block.'
+  EM.add_periodic_timer(3){
+    @cache.set :Timer3,"Time now => #{Time.now.to_s}"
+    p 'set Timer1'
+  }
+
+  EM.add_periodic_timer(3){
+    puts @cache.get(:Timer1)
+  }
+}
data/test/test_proc.rb
ADDED
@@ -0,0 +1,17 @@
+require File.join(File.dirname(__FILE__),'..','lib','lpm.rb')
+
+require "Benchmark"
+require "pp"
+
+Benchmark.bm do |x|
+  x.report("CPU"){pp LPM::Proc.cpu}
+  x.report("CPU(5)"){pp LPM::Proc.cpu(5)}
+  x.report("CPU(10)"){pp LPM::Proc.cpu(10)}
+  x.report("CPU(15)"){pp LPM::Proc.cpu(15)}
+end
+
+# => below test is for LPM::Proc
+pp "LPM::Proc.loadavg => #{LPM::Proc.loadavg}"
+pp "LPM::Proc.uptime => #{LPM::Proc.uptime}"
+pp "LPM::Proc.meminfo => #{LPM::Proc.meminfo}"
+pp "LPM::Proc.cpuinfo => #{LPM::Proc.cpuinfo}"
data/test/test_utils.rb
ADDED
@@ -0,0 +1,14 @@
+require File.join(File.dirname(__FILE__),'..','lib','lpm.rb')
+
+require "Benchmark"
+require "pp"
+
+# => below test is for LPM::Utils
+pp "LPM::Utils.ulimit => #{LPM::Utils.ulimit}"
+pp "LPM::Utils.locale => #{LPM::Utils.locale}"
+pp "LPM::Utils.sysctl => #{LPM::Utils.sysctl}"
+
+# => LPM::Utils can also be used as
+pp "LPM::Proc.ulimit => #{LPM::Proc.ulimit}"
+pp "LPM::Proc.locale => #{LPM::Proc.locale}"
+pp "LPM::Proc.sysctl => #{LPM::Proc.sysctl}"
metadata
ADDED
@@ -0,0 +1,72 @@
+--- !ruby/object:Gem::Specification
+name: lpm
+version: !ruby/object:Gem::Version
+  version: 1.0.0
+platform: ruby
+authors:
+- Charles Cui
+autorequire:
+bindir: bin
+cert_chain: []
+
+date: 2009-10-29 00:00:00 +08:00
+default_executable:
+dependencies: []
+
+description: Linux Proc Monitor
+email: zheng.cuizh@gmail.com
+executables:
+- lpm
+extensions: []
+
+extra_rdoc_files:
+- README
+- LICENSE
+files:
+- LICENSE
+- README
+- Rakefile
+- bin/lpm
+- lib/config/stipulation.rb
+- lib/lpm/error.rb
+- lib/lpm/proc.rb
+- lib/lpm/process.rb
+- lib/lpm/utils.rb
+- lib/lpm.rb
+- lib/server.rb
+- lib/utils.rb
+- lib/xiaoqian.rb
+- test/self_test/em_periodic.rb
+- test/test_proc.rb
+- test/test_process.rb
+- test/test_utils.rb
+has_rdoc: true
+homepage:
+licenses: []
+
+post_install_message:
+rdoc_options: []
+
+require_paths:
+- lib
+required_ruby_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+required_rubygems_version: !ruby/object:Gem::Requirement
+  requirements:
+  - - ">="
+    - !ruby/object:Gem::Version
+      version: "0"
+  version:
+requirements: []
+
+rubyforge_project:
+rubygems_version: 1.3.5
+signing_key:
+specification_version: 3
+summary: Linux Proc Monitor
+test_files: []
+