retreval 0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +7 -0
- data/README.md +321 -0
- data/TODO +5 -0
- data/bin/retreval +5 -0
- data/example/gold_standard.yml +48 -0
- data/example/query_results.yml +23 -0
- data/lib/retreval/gold_standard.rb +424 -0
- data/lib/retreval/options.rb +66 -0
- data/lib/retreval/query_result.rb +511 -0
- data/lib/retreval/runner.rb +121 -0
- data/output_avg_precision.yml +2 -0
- data/output_statistics.yml +82 -0
- data/retreval.gemspec +16 -0
- data/test/test_gold_standard.rb +111 -0
- data/test/test_query_result.rb +166 -0
- metadata +390 -0
@@ -0,0 +1,66 @@
|
|
1
|
+
require 'optparse'
require 'ostruct'

module Retreval

  # Commandline options accepted by the retreval script.
  class Options

    attr_accessor :query_result_set_file, :gold_standard_file, :format, :interactive, :output

    # Parses the given commandline arguments immediately on construction.
    def initialize(args)
      parse(args)
    end

    private

    # Builds the OptionParser and applies it to +args+.
    # With no arguments the help text is printed and the process exits;
    # on a parse error the message plus usage is printed to STDERR and
    # the process exits with -1.
    def parse(args)
      OptionParser.new do |opts|
        opts.banner = "Usage: retreval [options]"
        opts.separator "Mandatory options (choose one):"

        opts.on("-l", "--load <gold-standard-file>", "Load the gold standard from this file") do |file|
          @gold_standard_file = file
        end

        opts.on("-q", "--queries <query-results-file>", "Load the query result set from this file") do |file|
          @query_result_set_file = file
        end

        opts.on("-f", "--format <format>", "Use this data format when parsing files. Can be one of <yaml|plain>") do |format|
          @format = format
        end

        opts.on("-o", "--output <output-file-prefix>", "Use this prefix for creating output files, default is 'output'.") do |file|
          @output = file
        end

        opts.separator "Common options:"

        opts.on_tail("-v", "--verbose", "Verbose (debug) mode") do
          $verbose = true
        end

        opts.on_tail("-h", "--help", "Show this message") do
          puts opts
          exit
        end

        begin
          args = ["-h"] if args.empty?
          opts.parse!(args)

          # Default assumption: write to files prefixed with 'output'
          @output ||= "output"
        rescue OptionParser::ParseError => e
          STDERR.puts e.message, "\n", opts
          exit(-1)
        end
      end
    end
  end
end
@@ -0,0 +1,511 @@
|
|
1
|
+
require 'ostruct'

module Retreval

  # A QueryResultSet combines multiple QueryResults and stores them.
  # It is possible to load a set of QueryResults from a YAML file.
  class QueryResultSet

    attr_reader :query_results

    # Creates a new QueryResultSet, with a specified GoldStandard.
    # Called by:
    #   QueryResultSet.new :gold_standard => my_gold_standard
    # Raises when no gold standard is supplied.
    def initialize(args)
      @query_results = Array.new
      @gold_standard = args[:gold_standard]
      raise "Can not create a Query Result set without a gold standard that they belong to" if args[:gold_standard].nil?
    end

    # Parses a YAML file containing many result sets, either ranked or unranked.
    # The file should look like this:
    #   - query: Test query
    #     ranked: true
    #     documents:
    #     - id: first_document.txt
    #       score: 95
    #     - id: second_document.txt
    #       score: 38
    #   - query: Second query
    #     ...
    # Each entry becomes a RankedQueryResult or UnrankedQueryResult
    # (depending on its "ranked" flag) and is appended to this set.
    def load_from_yaml_file(file)
      ydoc = YAML.load(File.open(file, "r"))

      ydoc.each do |entry|
        query = entry["query"]         # => the query string
        ranked = entry["ranked"]       # => a boolean flag if ranked or not
        documents = entry["documents"] # => an array of documents

        # Determine whether this will be a ranked or unranked result set
        klass = ranked ? RankedQueryResult : UnrankedQueryResult
        resultset = klass.new :query => query, :gold_standard => @gold_standard

        # Add every document listed for this query result
        documents.each do |document_element|
          resultset.add_document :document => document_element["id"], :score => document_element["score"]
        end

        @query_results << resultset
      end
    rescue StandardError => e
      # was "rescue Exception", which also swallowed SystemExit/Interrupt
      raise "Error while parsing the YAML document: " + e.message
    end

    # Adds a QueryResult to the list of results for this set
    def add_result(result)
      @query_results << result
    end

    # Calculates the Mean Average Precision over every RankedQueryResult in
    # this set. Returns 0.0 when the set contains no ranked results (the
    # original performed a 0.0/0 division and returned NaN here).
    # This method should only be called when all sets have been calculated,
    # or else it will take a really long time to perform all calculations.
    def mean_average_precision
      ranked = @query_results.select { |result| result.class == RankedQueryResult }
      return @mean_average_precision = 0.0 if ranked.empty?
      @mean_average_precision = ranked.inject(0.0) { |sum, result| sum + result.average_precision } / ranked.length
    end

  end
end
|
86
|
+
module Retreval

  # A QueryResult contains a list of results for a given query. It can either be
  # ranked or unranked. You can't instantiate such a class - use the subclasses
  # RankedQueryResult and UnrankedQueryResult instead.
  class QueryResult

    attr_reader :query, :documents
    attr_accessor :gold_standard

    # Creates a new QueryResult with a specified query string and an optional
    # array of documents. This class is abstract, so you have to create an
    # UnrankedQueryResult or a RankedQueryResult instead.
    #
    # Expected keys in +args+:
    # * :query         - the query string (mandatory)
    # * :documents     - optional, each element is passed on to add_document
    # * :gold_standard - the GoldStandard these results belong to (mandatory)
    def initialize(args)
      if self.class == QueryResult
        raise "Can not instantiate a QueryResult. Use a RankedQueryResult or UnrankedQueryResult instead."
      end

      # Validate before use - the original built the Query first and only
      # afterwards checked for a missing query string
      raise "Can not create a Query Result without a query string specified" if args[:query].nil?
      @query = Query.new :querystring => args[:query]

      # documents maps a document identifier to its ResultDocument.
      # Documents can also be omitted here and added later via add_document.
      @documents = Hash.new
      args[:documents].each { |document| add_document(document) } unless args[:documents].nil?

      raise "Can not create a Query Result without a gold standard that it belongs to" if args[:gold_standard].nil?
      @gold_standard = args[:gold_standard]
    end

    # Loads Documents from a simple YAML file.
    # Each entry should contain:
    # * "document" The identifier of the Document
    # * "score" The relevancy score for this Document
    def load_from_yaml_file(file)
      @ydoc = YAML::load(File.open(file, "r"))
      @ydoc.each do |entry|
        add_document :document => entry["document"], :score => entry["score"]
      end
    rescue StandardError => e
      # was "rescue Exception", which also swallowed SystemExit/Interrupt
      raise "Error while parsing the YAML document: " + e.message
    end

    # Adds a single ResultDocument to the result.
    # Call this with:
    #   add_document :document => "test_document", :score => 13
    # Alternatively:
    #   add_document :id => "test_document", :score => 13
    def add_document(args)
      document_id = args[:document] || args[:id]
      raise "Can not add a new Document to a Query Result without a document identifier" if document_id.nil?
      @documents[document_id] = ResultDocument.new(:id => document_id, :score => args[:score])
    end

    # This is essentially the same as add_document
    def <<(args)
      add_document args
    end

    # Prints a pretty contingency table summary
    def print_contingency_table
      results = calculate

      tp = results[:true_positives]
      fp = results[:false_positives]
      tn = results[:true_negatives]
      fn = results[:false_negatives]

      print "\t\t"
      print "| Relevant\t| Nonrelevant\t| Total\n"
      print "---------------------------------------------------------\n"
      print "Retrieved\t| " + tp.to_s + " \t\t| " + fp.to_s + " \t\t| " + (tp+fp).to_s + " \n"
      print "Not Retrieved\t| " + fn.to_s + " \t\t| " + tn.to_s + " \t\t| " + (fn+tn).to_s + " \n"
      print "---------------------------------------------------------\n"
      print "\t\t| " + (tp+fn).to_s + " \t\t| " + (fp+tn).to_s + " \t\t| " + (tp+fp+tn+fn).to_s + "\n"
      print "\n"
    end

    # Calculates the F-measure, weighing precision and recall.
    # Supply either :alpha or :beta (beta defaults to 1, the balanced F1).
    # See: http://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-unranked-retrieval-sets-1.html
    def f_measure(args = {:beta => 1})
      # Get intermediate results for both un/ranked results
      results = calculate
      precision = results[:precision]
      recall = results[:recall]

      begin
        # The user has the option to supply either alpha or beta (or both)
        unless args[:alpha].nil?
          alpha = args[:alpha].to_f
          beta_squared = (1 - alpha) / alpha
        end

        unless args[:beta].nil?
          beta = args[:beta].to_f
          beta_squared = beta * beta
        end

        ((beta_squared + 1) * precision * recall) / ((beta_squared * precision) + recall)
      rescue StandardError => e
        # was "rescue Exception"
        raise "Error while calculating F-Measure: " + e.message
      end
    end

    # Clean up every ResultDocument from this QueryResult that does not appear
    # to have a Judgement in the GoldStandard.
    def cleanup
      @documents.keep_if { |key, document| @gold_standard.contains_judgement? :document => document.id, :query => @query.querystring }
    end

    private

    # Performs all necessary contingency calculations on a set of results and
    # returns them as a Hash (:precision, :recall, :true_positives, ...).
    # Never call this directly - the statistics methods of the subclasses do.
    def calculate(resultset = nil)
      # Use the gold standard we initially received
      standard = @gold_standard

      # With no explicit subset we evaluate the whole result, unranked
      if resultset.nil?
        unranked = true
        resultset = OpenStruct.new
        resultset.documents = @documents.values
        resultset.query = @query
      end

      all_items = standard.documents.length            # => all documents in the gold standard
      retrieved_items = resultset.documents.length     # => items retrieved for this information need
      not_retrieved_items = all_items - retrieved_items
      retrieved_relevant_items = 0
      not_retrieved_relevant_items = 0

      # Get the query we are working on
      query = resultset.query

      # Get the document sets we are working on
      retrieved_documents = resultset.documents # => always an Array
      # NOTE(review): this compares ResultDocument objects against the gold
      # standard's documents via include?; unless Document defines ==, that
      # matches by object identity only - verify against gold_standard.rb.
      not_retrieved_documents = standard.documents.reject { |key, doc| retrieved_documents.include? doc } # => a Hash

      # Check whether each of the retrieved documents is relevant or not ...
      retrieved_documents.each do |doc|
        relevant = standard.relevant? :document => doc.id, :query => query.querystring
        retrieved_relevant_items += 1 if relevant
      end
      retrieved_nonrelevant_items = retrieved_items - retrieved_relevant_items

      # ... and the same for nonretrieved documents (a Hash, so use values)
      not_retrieved_documents.values.each do |doc|
        relevant = standard.relevant? :document => doc.id, :query => query.querystring
        not_retrieved_relevant_items += 1 if relevant
      end
      not_retrieved_nonrelevant_items = not_retrieved_items - not_retrieved_relevant_items

      relevant_items = retrieved_relevant_items + not_retrieved_relevant_items

      # Finally, calculate precision and recall (recall is 0 when the gold
      # standard holds no relevant items, avoiding a division by zero)
      precision = retrieved_relevant_items.to_f / retrieved_items.to_f
      recall = relevant_items != 0 ? retrieved_relevant_items.to_f / relevant_items.to_f : 0

      results = {
        :precision => precision,
        :recall => recall,
        :false_negatives => not_retrieved_relevant_items,
        :false_positives => retrieved_nonrelevant_items,
        :true_negatives => not_retrieved_nonrelevant_items,
        :true_positives => retrieved_relevant_items
      }

      # For a ranked evaluation also report whether the most recently added
      # document was relevant (it drives precision/recall at this rank)
      unless unranked
        results[:document] = retrieved_documents.last.id
        results[:relevant] = standard.relevant? :document => results[:document], :query => query.querystring
      end

      results
    end
  end
end
|
304
|
+
|
305
|
+
|
306
|
+
module Retreval

  # A RankedQueryResult is a QueryResult with special functions
  # for ranked retrieval evaluation.
  class RankedQueryResult < QueryResult

    # Creates a new RankedQueryResult. One has to specify the query string and
    # can optionally pass a Document array too. The rank of a Document is
    # defined by its position in the array only.
    # Called by:
    #   RankedQueryResult.new :documents => array_of_document_ids, :query => "my query"
    #   RankedQueryResult.new :query => "my query"
    def initialize(args)
      super(args)
    end

    # Calculates the 11-point interpolated precision and the average
    # interpolated precision. Returns a Hash mapping each recall level
    # (0.0, 0.1, ..., 1.0) to its interpolated precision; the average is
    # stored in @eleven_point_average.
    # See: http://nlp.stanford.edu/IR-book/html/htmledition/evaluation-of-ranked-retrieval-results-1.html
    def eleven_point_precision
      statistics unless @calculated

      # Collect the recall/precision pairs we already know
      @recall_levels = Hash.new
      @results.each do |row|
        @recall_levels[row[:recall]] = row[:precision]
      end

      begin
        @eleven_point_precision = Hash.new

        # For each of the 11 levels (0.0 .. 1.0), take the precision at the
        # first known recall equal or higher - e.g. for level 0.3 a known
        # precision at recall 0.5 serves as the interpolated precision.
        (0..10).each do |level|
          recall_level = level.to_f / 10
          @eleven_point_precision[recall_level] = 0

          @recall_levels.keys.each do |known_recall|
            if known_recall >= recall_level
              @eleven_point_precision[recall_level] = @recall_levels[known_recall]
              break
            end
          end
        end

        # Average interpolated precision over all 11 levels
        @eleven_point_average = @eleven_point_precision.values.inject(0.0) { |sum, precision| sum + precision } / 11

      rescue StandardError => e
        # was a bare rescue that discarded the underlying error message
        raise "Error while calculating the 11-point precision map: " + e.message
      end

      @eleven_point_precision
    end

    # Calculates precision and recall at each rank (1 .. max) and returns the
    # Array of per-rank result Hashes; results are memoized in @results.
    # +max+ limits how many top-ranked documents are evaluated; 0 (the
    # default) means all of them.
    def statistics(max = 0)
      return @results if @calculated

      begin
        # If no maximum parameter is given, all documents are evaluated -
        # this is the default for normal evaluations
        max = @documents.length if max == 0 or max > @documents.length

        # Evaluate the subset consisting of the top i documents at each rank
        @results = Array.new
        ranked_documents = @documents.values
        (1..max).each do |i|
          subset = OpenStruct.new
          subset.documents = ranked_documents.take(i)
          subset.query = @query
          @results << calculate(subset)
        end

        # Now mark everything as calculated and return it
        @calculated = true
        @results
      rescue StandardError => e
        # was "rescue Exception"
        raise "Error while calculating results: " + e.message
      end
    end

    # Returns the average precision: the mean of the precisions computed at
    # the rank of each relevant document in the ranked sequence. Returns 0
    # when the gold standard holds no relevant documents for this query.
    def average_precision
      # Calculate the results first if we haven't done this before
      statistics unless @calculated

      total_relevant_documents = @gold_standard.documents.values.count { |doc| @gold_standard.relevant? :document => doc.id, :query => @query.querystring }

      if total_relevant_documents > 0
        # Sum the precision at each relevant document, 0 for nonrelevant ones
        @average_precision = @results.inject(0.0) { |sum, row| row[:relevant] ? row[:precision] + sum : sum } / total_relevant_documents
      else
        @average_precision = 0
      end
    rescue StandardError => e
      # was "rescue Exception"
      raise "Error while calculating average precision: " + e.message
    end

    # Prints a pretty table for 11-point interpolated precision
    def print_eleven_point_precision_table
      # Calculate the results first if we haven't done this before
      statistics unless @calculated

      data = eleven_point_precision
      print "Recall\tInterpolated Precision\n"
      data.each_pair do |recall, precision|
        print recall.to_s + "\t" + "%.3f" % precision + "\n"
      end
      print "--------------------------------------\n"
      print "Avg.\t" + "%.3f" % @eleven_point_average + "\n"
      print "\n"
    end

    # Prints a pretty table for ranked results
    def print_ranked_table
      # Calculate the results first if we haven't done this before
      statistics unless @calculated

      # Use the results to print a table
      print "Query: #{@query.querystring}\n"
      print "Index\tRelevant\tPrecision\tRecall\tScore\t\tDocument ID\n"
      @results.each_with_index do |row, index|
        precision = "%.3f" % row[:precision]
        document = @documents.values[index].id
        recall = "%.3f" % row[:recall]
        relevant = row[:relevant] ? "[X]" : "[ ]"
        print "#{index+1}\t" + relevant + "\t\t" + precision + "\t\t" + recall + "\t" + @documents.values[index].score.to_s + "\t" + document + "\n"
      end
      print "\n"
    end

  end
end
|
474
|
+
|
475
|
+
|
476
|
+
module Retreval

  # An UnrankedQueryResult is a QueryResult with no special functions.
  class UnrankedQueryResult < QueryResult

    # Creates a new UnrankedQueryResult. One has to specify the query string
    # and can optionally pass a Document array too.
    # Called by:
    #   QueryResult.new :documents => array_of_document_ids, :query => "my query"
    #   QueryResult.new :query => "my query"
    def initialize(args)
      super(args)
    end

    # Calculates precision and recall and returns them in a Hash.
    # Previously computed results are returned without recalculating.
    def statistics
      return @results if @calculated
      calculate
    end

  end
end
|
495
|
+
|
496
|
+
module Retreval

  # A ResultDocument, in contrast to a Document, can also have a
  # score that was determined to compute its rank in an information need.
  # The score will only be output for informational purposes.
  class ResultDocument < Document

    attr_reader :score

    # Creates a new ResultDocument; :id is handled by Document,
    # :score is stored here.
    def initialize(args)
      super(args)
      @score = args[:score]
    end

  end

end