rbbt 1.2.5 → 2.0.0
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/README.rdoc +2 -138
- metadata +69 -214
- data/LICENSE +0 -20
- data/bin/rbbt_config +0 -245
- data/install_scripts/classifier/R/classify.R +0 -36
- data/install_scripts/classifier/Rakefile +0 -140
- data/install_scripts/get_abner.sh +0 -2
- data/install_scripts/get_banner.sh +0 -25
- data/install_scripts/get_biocreative.sh +0 -72
- data/install_scripts/get_crf++.sh +0 -26
- data/install_scripts/get_entrez.sh +0 -4
- data/install_scripts/get_go.sh +0 -4
- data/install_scripts/get_polysearch.sh +0 -8
- data/install_scripts/ner/Rakefile +0 -206
- data/install_scripts/ner/config/default.rb +0 -52
- data/install_scripts/norm/Rakefile +0 -219
- data/install_scripts/norm/config/cue_default.rb +0 -10
- data/install_scripts/norm/config/tokens_default.rb +0 -86
- data/install_scripts/norm/functions.sh +0 -23
- data/install_scripts/organisms/Ath.Rakefile +0 -55
- data/install_scripts/organisms/Cal.Rakefile +0 -84
- data/install_scripts/organisms/Cel.Rakefile +0 -109
- data/install_scripts/organisms/Hsa.Rakefile +0 -140
- data/install_scripts/organisms/Mmu.Rakefile +0 -77
- data/install_scripts/organisms/Rakefile +0 -43
- data/install_scripts/organisms/Rno.Rakefile +0 -88
- data/install_scripts/organisms/Sce.Rakefile +0 -66
- data/install_scripts/organisms/Spo.Rakefile +0 -40
- data/install_scripts/organisms/rake-include.rb +0 -252
- data/install_scripts/wordlists/consonants +0 -897
- data/install_scripts/wordlists/stopwords +0 -1
- data/lib/rbbt.rb +0 -83
- data/lib/rbbt/bow/bow.rb +0 -88
- data/lib/rbbt/bow/classifier.rb +0 -116
- data/lib/rbbt/bow/dictionary.rb +0 -187
- data/lib/rbbt/ner/abner.rb +0 -34
- data/lib/rbbt/ner/banner.rb +0 -73
- data/lib/rbbt/ner/dictionaryNER.rb +0 -98
- data/lib/rbbt/ner/regexpNER.rb +0 -70
- data/lib/rbbt/ner/rner.rb +0 -227
- data/lib/rbbt/ner/rnorm.rb +0 -143
- data/lib/rbbt/ner/rnorm/cue_index.rb +0 -80
- data/lib/rbbt/ner/rnorm/tokens.rb +0 -217
- data/lib/rbbt/sources/biocreative.rb +0 -75
- data/lib/rbbt/sources/biomart.rb +0 -105
- data/lib/rbbt/sources/entrez.rb +0 -211
- data/lib/rbbt/sources/go.rb +0 -85
- data/lib/rbbt/sources/gscholar.rb +0 -74
- data/lib/rbbt/sources/organism.rb +0 -241
- data/lib/rbbt/sources/polysearch.rb +0 -117
- data/lib/rbbt/sources/pubmed.rb +0 -248
- data/lib/rbbt/util/arrayHash.rb +0 -266
- data/lib/rbbt/util/filecache.rb +0 -72
- data/lib/rbbt/util/index.rb +0 -47
- data/lib/rbbt/util/misc.rb +0 -106
- data/lib/rbbt/util/open.rb +0 -251
- data/lib/rbbt/util/rake.rb +0 -183
- data/lib/rbbt/util/simpleDSL.rb +0 -87
- data/lib/rbbt/util/tmpfile.rb +0 -35
- data/tasks/install.rake +0 -124
- data/test/rbbt/bow/test_bow.rb +0 -33
- data/test/rbbt/bow/test_classifier.rb +0 -72
- data/test/rbbt/bow/test_dictionary.rb +0 -91
- data/test/rbbt/ner/rnorm/test_cue_index.rb +0 -57
- data/test/rbbt/ner/rnorm/test_tokens.rb +0 -70
- data/test/rbbt/ner/test_abner.rb +0 -17
- data/test/rbbt/ner/test_banner.rb +0 -17
- data/test/rbbt/ner/test_dictionaryNER.rb +0 -122
- data/test/rbbt/ner/test_regexpNER.rb +0 -33
- data/test/rbbt/ner/test_rner.rb +0 -126
- data/test/rbbt/ner/test_rnorm.rb +0 -47
- data/test/rbbt/sources/test_biocreative.rb +0 -38
- data/test/rbbt/sources/test_biomart.rb +0 -31
- data/test/rbbt/sources/test_entrez.rb +0 -49
- data/test/rbbt/sources/test_go.rb +0 -24
- data/test/rbbt/sources/test_organism.rb +0 -59
- data/test/rbbt/sources/test_polysearch.rb +0 -27
- data/test/rbbt/sources/test_pubmed.rb +0 -39
- data/test/rbbt/util/test_arrayHash.rb +0 -257
- data/test/rbbt/util/test_filecache.rb +0 -37
- data/test/rbbt/util/test_index.rb +0 -31
- data/test/rbbt/util/test_misc.rb +0 -20
- data/test/rbbt/util/test_open.rb +0 -110
- data/test/rbbt/util/test_simpleDSL.rb +0 -57
- data/test/rbbt/util/test_tmpfile.rb +0 -21
- data/test/test_helper.rb +0 -4
- data/test/test_rbbt.rb +0 -11
@@ -1 +0,0 @@
|
|
1
|
-
a been get least our them whether about before getting left ourselves then which after being go less out there while again between goes let over these who ago but going like per they whoever all by gone make put this whom almost came got many putting those whose also can gotten may same through why always cannot had maybe saw till will am come has me see to with an could have mine seen too within and did having more shall two without another do he most she unless won't any does her much should until would anybody doing here my so up wouldn't anyhow done him myself some upon yet anyone down his never somebody us you anything each how no someone very your anyway else i none something was are even if not stand we as ever in now such went at every into of sure were away everyone is off take what back everything isn't on than whatever be for it one that what's became from just onto the when because front last or their where
|
data/lib/rbbt.rb
DELETED
@@ -1,83 +0,0 @@
|
|
1
|
-
require 'fileutils'
|
2
|
-
require 'yaml'
|
3
|
-
|
4
|
-
|
5
|
-
# This module implements a number of utilities aimed at performing Text
# Mining of BioMedical data. It includes the following:
#
# * Multi-purpose Named Entity Recognition and Normalization. And training data for
#   Gene Mention from the BioCreative competition.
# * Document Classification
# * Interfaces to Gene Ontology, Entrez Gene, BioMart and PubMed
#
# There are a number of classes to help gather and integrate the
# information from all the sources. It is designed to be very flexible,
# but with a sensible set of defaults.
#
module Rbbt

  # Raised when no complete datadir/cachedir/tmpdir configuration can be
  # found. Subclasses StandardError (instead of Exception) so that a plain
  # `rescue` in callers can catch it.
  class NoConfig < StandardError; end

  @@rootdir = File.dirname(File.dirname(__FILE__))

  @@datadir = @@cachedir = @@tmpdir = nil

  # Merge the 'datadir', 'cachedir' and 'tmpdir' entries of the YAML file at
  # +path+ (if it exists and parses to a Hash) into the module settings.
  def self.read_config_file(path)
    return unless File.exist?(path)

    # NOTE(review): YAML.load_file can instantiate arbitrary objects on old
    # Rubies; acceptable here since the file is local user configuration.
    config = YAML.load_file(path)
    return unless config.is_a? Hash

    @@datadir  = config['datadir']  if config['datadir']
    @@cachedir = config['cachedir'] if config['cachedir']
    @@tmpdir   = config['tmpdir']   if config['tmpdir']
  end

  # Load configuration from <rootdir>/rbbt.config and then from $HOME/.rbbt
  # (the latter overriding the former), create the configured directories,
  # and extend the Java CLASSPATH with the bundled NER jars.
  #
  # Raises NoConfig when any of datadir/cachedir/tmpdir remains unset.
  def self.load_config
    read_config_file(File.join(@@rootdir, 'rbbt.config'))
    # Guard against environments where $HOME is unset (daemons, some CI).
    read_config_file(File.join(ENV['HOME'], '.rbbt')) if ENV['HOME']

    if @@datadir.nil? || @@cachedir.nil? || @@tmpdir.nil?
      raise NoConfig, "rbbt not configured. Edit #{File.join(@@rootdir, 'rbbt.config')} or $HOME/.rbbt"
    end

    # mkdir_p is a no-op for existing directories; no File.exist? check needed.
    FileUtils.mkdir_p @@datadir
    FileUtils.mkdir_p @@cachedir
    FileUtils.mkdir_p @@tmpdir

    # For some reason banner.jar must be loaded before abner.jar
    ENV['CLASSPATH'] ||= ""
    ENV['CLASSPATH'] += ":" + %w(banner abner).collect{|pkg| File.join(datadir, "third_party", pkg, "#{ pkg }.jar")}.join(":")
  end

  # Root directory of the rbbt installation.
  def self.rootdir
    @@rootdir
  end

  # Directory holding installed data files.
  def self.datadir
    @@datadir
  end

  # Directory used for cached downloads and intermediate results.
  def self.cachedir
    @@cachedir
  end

  # Directory for temporary files.
  def self.tmpdir
    @@tmpdir
  end

  self.load_config
end
data/lib/rbbt/bow/bow.rb
DELETED
@@ -1,88 +0,0 @@
|
|
1
|
-
require 'rbbt'
|
2
|
-
require 'stemmer'
|
3
|
-
require 'rbbt/util/misc'
|
4
|
-
|
5
|
-
# This module provides methods to extract a bag of words (or bag of bigrams)
# representation for strings of text, and to produce a vector representation
# of that bag of words for a given list of terms. The BOW representation of
# a text is usually first used to build a Dictionary, and then, with the best
# selection of terms as determined by the Dictionary::TF_IDF.best or
# Dictionary::KL.best methods, determine the vector representation for that
# text.
module BagOfWords

  # Split the input string into an array of words (runs of \w characters).
  # Each word is downcased and stemmed; stopwords, words shorter than three
  # characters, and words without any a-z letter are dropped. The stopword
  # list is the global $stopwords defined in 'rbbt/util/misc'.
  def self.words(text)
    return [] if text.nil?
    raise "Stopword list not loaded. Have you installed the wordlists? (rbbt_config prepare wordlists)" if $stopwords.nil?

    text.scan(/\w+/).
      map { |token| token.downcase.stem }.
      reject { |stem| $stopwords.include?(stem) || stem.length <= 2 || stem !~ /[a-z]/ }
  end

  # Return the words of the text followed by every consecutive word pair
  # joined with a single space.
  def self.bigrams(text)
    tokens = words(text)
    pairs = tokens.each_cons(2).collect { |prev, curr| "#{prev} #{curr}" }
    tokens + pairs
  end

  # Tally the number of appearances of each term. The returned hash yields 0
  # for terms that never appeared.
  def self.count(terms)
    terms.each_with_object(Hash.new(0)) { |term, tally| tally[term] += 1 }
  end

  # Count all the words (or, when bigrams is true, words plus bigrams) of a
  # string of text and return a {term => count} hash.
  def self.terms(text, bigrams = true)
    tokens = bigrams ? bigrams(text) : words(text)
    count(tokens)
  end

  # Given a string of text and a list of terms (which may or may not contain
  # bigrams), return one entry per term with the number of occurrences of
  # that term in the text. When the bigrams flag is not given, it is inferred
  # from the presence of spaces in the term list.
  def self.features(text, terms, bigrams = nil)
    bigrams = terms.any? { |term| term =~ / / } unless bigrams
    counts = bigrams ? count(bigrams(text)) : count(words(text))
    counts.values_at(*terms)
  end
end

class String
  # Shortcut for BagOfWords.words(self)
  def words
    BagOfWords.words self
  end

  # Shortcut for BagOfWords.bigrams(self)
  def bigrams
    BagOfWords.bigrams self
  end
end
data/lib/rbbt/bow/classifier.rb
DELETED
@@ -1,116 +0,0 @@
|
|
1
|
-
require 'rbbt/bow/bow'
|
2
|
-
require 'rsruby'
|
3
|
-
|
4
|
-
# This class uses R to build and use classification models. It needs the
# 'e1071' R package.
class Classifier

  # Given the path to a features file, which specifies a number of instances
  # along with their classes and features in a tab separated format, it uses R
  # to build a svm model which is save to file in the path specified as
  # modelfile. Always returns nil.
  def self.create_model(featuresfile, modelfile, dictfile = nil)
    # NOTE(review): dictfile is accepted but never used in this method —
    # confirm whether any caller still relies on passing it.
    r = RSRuby.instance
    r.source(File.join(Rbbt.datadir, 'classifier/R/classify.R'))
    r.BOW_classification_model(featuresfile, modelfile)

    nil
  end

  # Feature names (terms) expected by the loaded svm model, extracted from
  # the R model object in #initialize.
  attr_reader :terms

  # Loads an R interpreter which loads the svm model under modelfile.
  def initialize(modelfile)
    @r = RSRuby.instance
    @r.library('e1071')
    @r.source(File.join(Rbbt.datadir, 'classifier/R/classify.R'))

    @r.load(modelfile)

    @model = @r.svm_model
    # Extracts the model's feature names and, as a side effect, leaves the R
    # variable `terms` defined in the session for later eval_R calls.
    @terms = @r.eval_R("terms = unlist(attr(attr(svm.model$terms,'factors'),'dimnames')[2])")
  end

  def classify_feature_array(input) #:nodoc:
    @r.assign('input', input)

    # Build an R matrix from the feature vectors; column names must match
    # the model terms set up by #initialize. Statement order matters.
    @r.eval_R('input = t(as.data.frame(input))')
    @r.eval_R('rownames(input) <- NULL')
    @r.eval_R('colnames(input) <- terms')

    results = @r.eval_R('BOW.classification.classify(svm.model, input, svm.weights)')
    # Sort by row key and keep only the predicted class labels.
    results.sort.collect{|p| p[1]}
  end

  def classify_feature_hash(input) #:nodoc:
    # Split the hash into parallel name/feature arrays so the row names of
    # the R matrix can be set to the input keys.
    names = []
    features = []
    input.each{|name, feats|
      names << name.to_s
      features << feats
    }

    @r.assign('input', features)
    @r.assign('input.names', names)

    @r.eval_R('input = t(as.data.frame(input))')
    @r.eval_R('rownames(input) <- input.names')
    @r.eval_R('colnames(input) <- terms')

    @r.eval_R('BOW.classification.classify(svm.model, input, svm.weights)')
  end

  def classify_text_array(input) #:nodoc:
    # Turn each text into the feature vector the model expects.
    features = input.collect{|text|
      BagOfWords.features(text, @terms)
    }

    classify_feature_array(features)
  end

  def classify_text_hash(input) #:nodoc:
    features = {}
    input.each{|key,text|
      features[key] = BagOfWords.features(text, @terms)
    }

    classify_feature_hash(features)
  end


  # This is a polymorphic method. The input variable may be a single input, in
  # which case the results will be just the class, a hash of inputs, in which
  # case the result will be a hash with the results for each input, or an
  # array, in which case the result is an array of the results in the same
  # order. Each input may also be in the form of a string, in which case it
  # will be transformed into a feature vector, or an array in which case it
  # will be considered as an feature vector itself.
  def classify(input)
    if input.is_a? String
      return classify_text_array([input]).first
    end


    if input.is_a? Hash
      return {} if input.empty?
      if input.values.first.is_a? String
        return classify_text_hash(input)
      elsif input.values.first.is_a? Array
        return classify_feature_hash(input)
      end
    end

    if input.is_a? Array
      return [] if input.empty?
      if input.first.is_a? String
        return classify_text_array(input)
      elsif input.first.is_a? Array
        return classify_feature_array(input)
      end
    end

    # NOTE(review): inputs matching none of the branches fall through and
    # return nil — confirm whether that is intended.
  end

end
data/lib/rbbt/bow/dictionary.rb
DELETED
@@ -1,187 +0,0 @@
|
|
1
|
-
# Plain term-frequency dictionary: accumulates term counts across every
# document added, with no per-document bookkeeping.
class Dictionary
  attr_reader :terms

  def initialize
    @terms = Hash.new(0)
  end

  # Fold a {term => count} hash into the running totals.
  def add(terms, &block)
    terms.each do |term, count|
      @terms[term] += count
    end
  end
end

# Dictionary that also tracks per-document statistics, so TF, DF, IDF and
# TF-IDF scores can be computed over the corpus.
class Dictionary::TF_IDF
  attr_reader :terms, :docs, :total_terms, :num_docs

  def initialize(options = {})
    @term_limit = { :limit => 500_000 }.merge(options)[:limit]

    @terms = Hash.new(0)
    @docs = Hash.new(0)
    @num_docs = 0
    @total_terms = 0
  end

  # Add one document given as a {term => count} hash. Once the vocabulary
  # exceeds the configured limit, terms not already known are dropped
  # (note: this mutates the caller's hash, as the original did).
  def add(terms)
    terms = terms.delete_if { |term, _count| !@terms.include?(term) } if @term_limit && @terms.length > @term_limit

    terms.each do |term, count|
      @terms[term] += count
      @total_terms += count
      @docs[term] += 1
    end
    @num_docs += 1
  end

  # Document frequency: fraction of documents containing each term.
  # Missing terms default to 0.
  def df
    @docs.each_with_object(Hash.new(0)) do |(term, count), acc|
      acc[term] = count.to_f / @num_docs
    end
  end

  # Term frequency: fraction of all term occurrences held by each term.
  def tf
    @terms.each_with_object(Hash.new(0)) do |(term, count), acc|
      acc[term] = count.to_f / @total_terms
    end
  end

  # Inverse document frequency per term.
  def idf
    total = @num_docs.to_f
    @docs.each_with_object(Hash.new(0)) do |(term, count), acc|
      acc[term] = Math::log(total / count)
    end
  end

  # TF * IDF score per term.
  def tf_idf
    total = @num_docs.to_f
    @docs.each_with_object(Hash.new(0)) do |(term, count), acc|
      acc[term] = @terms[term].to_f / @total_terms * Math::log(total / count)
    end
  end

  # Score the terms whose document frequency lies within [low, hi] and
  # return them as a {term => score} hash; :limit keeps only the
  # highest-scoring terms.
  def best(options = {})
    hi, low, limit = { :low => 0, :hi => 1 }.merge(options).values_at(:hi, :low, :limit)

    total = @num_docs.to_f
    scored = df.select { |_term, value| value >= low && value <= hi }.collect do |term, df_value|
      [term, @terms[term].to_f / total * Math::log(1.0 / df_value)]
    end

    if limit
      Hash[*scored.sort { |a, b| b[1] <=> a[1] }.slice(0, limit).flatten]
    else
      Hash[*scored.flatten]
    end
  end

  # IDF weights restricted to the terms selected by #best.
  def weights(options = {})
    total = @num_docs.to_f
    best(options).keys.each_with_object({}) do |term, acc|
      acc[term] = Math::log(total / @docs[term])
    end
  end
end

# Symmetric Kullback-Leibler divergence between the document frequencies of
# a positive and a negative corpus; used to rank discriminative terms.
class Dictionary::KL
  attr_reader :pos_dict, :neg_dict

  def initialize(options = {})
    @pos_dict = Dictionary::TF_IDF.new(options)
    @neg_dict = Dictionary::TF_IDF.new(options)
  end

  # Union of the vocabularies of both corpora.
  def terms
    (pos_dict.terms.keys + neg_dict.terms.keys).uniq
  end

  # Add a document to the positive corpus when c is :+ or '+', otherwise to
  # the negative corpus.
  def add(terms, c)
    target = (c == :+ || c == '+') ? @pos_dict : @neg_dict
    target.add(terms)
  end

  # Symmetric KL divergence per term, over the whole vocabulary.
  def kl
    pos_df = @pos_dict.df
    neg_df = @neg_dict.df

    terms.each_with_object({}) do |term, scores|
      scores[term] = divergence(pos_df[term], neg_df[term])
    end
  end

  # Same as #kl, restricted to terms whose document frequency falls inside
  # [low, hi] in either corpus; :limit keeps only the highest-scoring terms.
  def best(options = {})
    hi, low, limit = { :low => 0, :hi => 1 }.merge(options).values_at(:hi, :low, :limit)

    pos_df = @pos_dict.df
    neg_df = @neg_dict.df

    best = {}
    terms.select do |term|
      pos_df[term] >= low && pos_df[term] <= hi ||
        neg_df[term] >= low && neg_df[term] <= hi
    end.each do |term|
      best[term] = divergence(pos_df[term], neg_df[term])
    end

    if limit
      Hash[*best.sort { |a, b| b[1] <=> a[1] }.slice(0, limit).flatten]
    else
      Hash[*best.flatten]
    end
  end

  def weights(options = {})
    best(options)
  end

  private

  # Clamp probabilities away from exactly 0 and 1, then compute the
  # symmetric KL contribution for one term.
  def divergence(pos, neg)
    pos = 0.000001 if pos == 0
    pos = 0.999999 if pos == 1
    neg = 0.000001 if neg == 0
    neg = 0.999999 if neg == 1

    pos * Math::log(pos / neg) + neg * Math::log(neg / pos)
  end
end