sdsykes-ferret 0.11.6.19
Sign up to get free protection for your applications and to get access to all the features.
- data/CHANGELOG +24 -0
- data/MIT-LICENSE +20 -0
- data/README +102 -0
- data/Rakefile +338 -0
- data/TODO +17 -0
- data/TUTORIAL +231 -0
- data/bin/ferret-browser +79 -0
- data/ext/analysis.c +1555 -0
- data/ext/analysis.h +219 -0
- data/ext/api.c +69 -0
- data/ext/api.h +27 -0
- data/ext/array.c +123 -0
- data/ext/array.h +53 -0
- data/ext/bitvector.c +540 -0
- data/ext/bitvector.h +272 -0
- data/ext/compound_io.c +383 -0
- data/ext/config.h +42 -0
- data/ext/document.c +156 -0
- data/ext/document.h +53 -0
- data/ext/except.c +120 -0
- data/ext/except.h +168 -0
- data/ext/extconf.rb +14 -0
- data/ext/ferret.c +402 -0
- data/ext/ferret.h +91 -0
- data/ext/filter.c +156 -0
- data/ext/fs_store.c +483 -0
- data/ext/global.c +418 -0
- data/ext/global.h +117 -0
- data/ext/hash.c +567 -0
- data/ext/hash.h +473 -0
- data/ext/hashset.c +170 -0
- data/ext/hashset.h +187 -0
- data/ext/header.h +58 -0
- data/ext/helper.c +62 -0
- data/ext/helper.h +13 -0
- data/ext/inc/lang.h +48 -0
- data/ext/inc/threading.h +31 -0
- data/ext/index.c +6425 -0
- data/ext/index.h +961 -0
- data/ext/lang.h +66 -0
- data/ext/libstemmer.c +92 -0
- data/ext/libstemmer.h +79 -0
- data/ext/mempool.c +87 -0
- data/ext/mempool.h +35 -0
- data/ext/modules.h +162 -0
- data/ext/multimapper.c +310 -0
- data/ext/multimapper.h +51 -0
- data/ext/posh.c +1006 -0
- data/ext/posh.h +1007 -0
- data/ext/priorityqueue.c +151 -0
- data/ext/priorityqueue.h +143 -0
- data/ext/q_boolean.c +1608 -0
- data/ext/q_const_score.c +161 -0
- data/ext/q_filtered_query.c +209 -0
- data/ext/q_fuzzy.c +268 -0
- data/ext/q_match_all.c +148 -0
- data/ext/q_multi_term.c +677 -0
- data/ext/q_parser.c +2825 -0
- data/ext/q_phrase.c +1126 -0
- data/ext/q_prefix.c +100 -0
- data/ext/q_range.c +350 -0
- data/ext/q_span.c +2402 -0
- data/ext/q_term.c +337 -0
- data/ext/q_wildcard.c +171 -0
- data/ext/r_analysis.c +2575 -0
- data/ext/r_index.c +3472 -0
- data/ext/r_qparser.c +585 -0
- data/ext/r_search.c +4105 -0
- data/ext/r_store.c +513 -0
- data/ext/r_utils.c +963 -0
- data/ext/ram_store.c +471 -0
- data/ext/search.c +1741 -0
- data/ext/search.h +885 -0
- data/ext/similarity.c +150 -0
- data/ext/similarity.h +82 -0
- data/ext/sort.c +983 -0
- data/ext/stem_ISO_8859_1_danish.c +338 -0
- data/ext/stem_ISO_8859_1_danish.h +16 -0
- data/ext/stem_ISO_8859_1_dutch.c +635 -0
- data/ext/stem_ISO_8859_1_dutch.h +16 -0
- data/ext/stem_ISO_8859_1_english.c +1156 -0
- data/ext/stem_ISO_8859_1_english.h +16 -0
- data/ext/stem_ISO_8859_1_finnish.c +792 -0
- data/ext/stem_ISO_8859_1_finnish.h +16 -0
- data/ext/stem_ISO_8859_1_french.c +1276 -0
- data/ext/stem_ISO_8859_1_french.h +16 -0
- data/ext/stem_ISO_8859_1_german.c +512 -0
- data/ext/stem_ISO_8859_1_german.h +16 -0
- data/ext/stem_ISO_8859_1_italian.c +1091 -0
- data/ext/stem_ISO_8859_1_italian.h +16 -0
- data/ext/stem_ISO_8859_1_norwegian.c +296 -0
- data/ext/stem_ISO_8859_1_norwegian.h +16 -0
- data/ext/stem_ISO_8859_1_porter.c +776 -0
- data/ext/stem_ISO_8859_1_porter.h +16 -0
- data/ext/stem_ISO_8859_1_portuguese.c +1035 -0
- data/ext/stem_ISO_8859_1_portuguese.h +16 -0
- data/ext/stem_ISO_8859_1_spanish.c +1119 -0
- data/ext/stem_ISO_8859_1_spanish.h +16 -0
- data/ext/stem_ISO_8859_1_swedish.c +307 -0
- data/ext/stem_ISO_8859_1_swedish.h +16 -0
- data/ext/stem_KOI8_R_russian.c +701 -0
- data/ext/stem_KOI8_R_russian.h +16 -0
- data/ext/stem_UTF_8_danish.c +344 -0
- data/ext/stem_UTF_8_danish.h +16 -0
- data/ext/stem_UTF_8_dutch.c +653 -0
- data/ext/stem_UTF_8_dutch.h +16 -0
- data/ext/stem_UTF_8_english.c +1176 -0
- data/ext/stem_UTF_8_english.h +16 -0
- data/ext/stem_UTF_8_finnish.c +808 -0
- data/ext/stem_UTF_8_finnish.h +16 -0
- data/ext/stem_UTF_8_french.c +1296 -0
- data/ext/stem_UTF_8_french.h +16 -0
- data/ext/stem_UTF_8_german.c +526 -0
- data/ext/stem_UTF_8_german.h +16 -0
- data/ext/stem_UTF_8_italian.c +1113 -0
- data/ext/stem_UTF_8_italian.h +16 -0
- data/ext/stem_UTF_8_norwegian.c +302 -0
- data/ext/stem_UTF_8_norwegian.h +16 -0
- data/ext/stem_UTF_8_porter.c +794 -0
- data/ext/stem_UTF_8_porter.h +16 -0
- data/ext/stem_UTF_8_portuguese.c +1055 -0
- data/ext/stem_UTF_8_portuguese.h +16 -0
- data/ext/stem_UTF_8_russian.c +709 -0
- data/ext/stem_UTF_8_russian.h +16 -0
- data/ext/stem_UTF_8_spanish.c +1137 -0
- data/ext/stem_UTF_8_spanish.h +16 -0
- data/ext/stem_UTF_8_swedish.c +313 -0
- data/ext/stem_UTF_8_swedish.h +16 -0
- data/ext/stopwords.c +401 -0
- data/ext/store.c +692 -0
- data/ext/store.h +777 -0
- data/ext/term_vectors.c +352 -0
- data/ext/threading.h +31 -0
- data/ext/utilities.c +446 -0
- data/ext/win32.h +54 -0
- data/lib/ferret.rb +29 -0
- data/lib/ferret/browser.rb +246 -0
- data/lib/ferret/browser/s/global.js +192 -0
- data/lib/ferret/browser/s/style.css +148 -0
- data/lib/ferret/browser/views/document/list.rhtml +49 -0
- data/lib/ferret/browser/views/document/show.rhtml +27 -0
- data/lib/ferret/browser/views/error/index.rhtml +7 -0
- data/lib/ferret/browser/views/help/index.rhtml +8 -0
- data/lib/ferret/browser/views/home/index.rhtml +29 -0
- data/lib/ferret/browser/views/layout.rhtml +22 -0
- data/lib/ferret/browser/views/term-vector/index.rhtml +4 -0
- data/lib/ferret/browser/views/term/index.rhtml +199 -0
- data/lib/ferret/browser/views/term/termdocs.rhtml +1 -0
- data/lib/ferret/browser/webrick.rb +14 -0
- data/lib/ferret/document.rb +130 -0
- data/lib/ferret/field_infos.rb +44 -0
- data/lib/ferret/index.rb +786 -0
- data/lib/ferret/number_tools.rb +157 -0
- data/lib/ferret_version.rb +3 -0
- data/setup.rb +1555 -0
- data/test/test_all.rb +5 -0
- data/test/test_helper.rb +24 -0
- data/test/threading/number_to_spoken.rb +132 -0
- data/test/threading/thread_safety_index_test.rb +79 -0
- data/test/threading/thread_safety_read_write_test.rb +76 -0
- data/test/threading/thread_safety_test.rb +133 -0
- data/test/unit/analysis/tc_analyzer.rb +548 -0
- data/test/unit/analysis/tc_token_stream.rb +646 -0
- data/test/unit/index/tc_index.rb +762 -0
- data/test/unit/index/tc_index_reader.rb +699 -0
- data/test/unit/index/tc_index_writer.rb +437 -0
- data/test/unit/index/th_doc.rb +315 -0
- data/test/unit/largefile/tc_largefile.rb +46 -0
- data/test/unit/query_parser/tc_query_parser.rb +238 -0
- data/test/unit/search/tc_filter.rb +135 -0
- data/test/unit/search/tc_fuzzy_query.rb +147 -0
- data/test/unit/search/tc_index_searcher.rb +61 -0
- data/test/unit/search/tc_multi_searcher.rb +128 -0
- data/test/unit/search/tc_multiple_search_requests.rb +58 -0
- data/test/unit/search/tc_search_and_sort.rb +179 -0
- data/test/unit/search/tc_sort.rb +49 -0
- data/test/unit/search/tc_sort_field.rb +27 -0
- data/test/unit/search/tc_spans.rb +190 -0
- data/test/unit/search/tm_searcher.rb +384 -0
- data/test/unit/store/tc_fs_store.rb +77 -0
- data/test/unit/store/tc_ram_store.rb +35 -0
- data/test/unit/store/tm_store.rb +34 -0
- data/test/unit/store/tm_store_lock.rb +68 -0
- data/test/unit/tc_document.rb +81 -0
- data/test/unit/ts_analysis.rb +2 -0
- data/test/unit/ts_index.rb +2 -0
- data/test/unit/ts_largefile.rb +4 -0
- data/test/unit/ts_query_parser.rb +2 -0
- data/test/unit/ts_search.rb +2 -0
- data/test/unit/ts_store.rb +2 -0
- data/test/unit/ts_utils.rb +2 -0
- data/test/unit/utils/tc_bit_vector.rb +295 -0
- data/test/unit/utils/tc_number_tools.rb +117 -0
- data/test/unit/utils/tc_priority_queue.rb +106 -0
- metadata +285 -0
require File.dirname(__FILE__) + "/../../test_helper"

# Builds a large index (750 records of ~1MB each, i.e. > 2GB on disk) to
# verify that file offsets beyond the 32-bit boundary are handled correctly.
class SampleLargeTest < Test::Unit::TestCase
  include Ferret::Index
  include Ferret::Search
  include Ferret::Store
  include Ferret::Utils

  INDEX_DIR = File.dirname(__FILE__) + "/../../temp/largefile"
  RECORDS = 750
  # Repetition count for String#*; was `10e5`, which is a Float
  # (1000000.0) — use an explicit Integer instead.
  RECORD_SIZE = 1_000_000

  def setup
    @index = Index.new(:path => INDEX_DIR, :create_if_missing => true, :key => :id)
    # Building the index is very slow, so only do it when it is missing or
    # a rebuild is explicitly requested via the environment.
    create_index! if @index.size == 0 || ENV["RELOAD_LARGE_INDEX"]
  end

  def test_file_index_created
    assert_equal(RECORDS, @index.size,
                 "Index size should be #{RECORDS}, is #{@index.size}")
  end

  def test_keys_work
    # Re-adding a document with an existing :id key must replace the old
    # document rather than grow the index.
    @index << {:content => "foo", :id => RECORDS - 4}
    assert_equal(RECORDS, @index.size,
                 "Index size should be #{RECORDS}, is #{@index.size}")
  end

  def test_read_file_after_two_gigs
    # A document stored past the 2GB mark must still load.
    # NOTE(review): assumes IndexReader#[] returns a lazy doc whose #load
    # yields a Hash — confirm against the reader API.
    assert @index.reader[RECORDS - 5].load.is_a?(Hash)
  end

  def create_index!
    # Class variable guards against rebuilding across multiple test
    # instances within one process.
    @@already_built_large_index ||= false
    return if @@already_built_large_index
    @@already_built_large_index = true
    a = "a"
    RECORDS.times do |i|
      seq = (a.succ! + " ") * RECORD_SIZE
      @index << {:id => i, :content => seq}
      print "i"    # progress indicator, one char per record
      STDOUT.flush
    end
    puts "o"
    @index.optimize
  end
end
require File.dirname(__FILE__) + "/../../test_helper"

# Tests for Ferret::QueryParser: query-string round-tripping, analyzer
# interaction, multi-field expansion, error handling and keyword switches.
class QueryParserTest < Test::Unit::TestCase
  include Ferret::Analysis

  # Parses each input string and asserts its rendered form matches the
  # expected query string.
  def check_parses(parser, cases, field = "xxx")
    cases.each do |input, expected|
      assert_equal(expected, parser.parse(input).to_s(field))
    end
  end

  def test_strings
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["xxx", "field", "f1", "f2"],
                                     :tokenized_fields => ["xxx", "f1", "f2"])
    check_parses(parser, [
      ['', ''],
      ['*:word', 'word field:word f1:word f2:word'],
      ['word', 'word'],
      ['field:word', 'field:word'],
      ['"word1 word2 word#"', '"word1 word2 word"'],
      ['"word1 %%% word3"', '"word1 <> word3"~1'],
      ['field:"one two three"', 'field:"one two three"'],
      ['field:"one %%% three"', 'field:"one %%% three"'],
      ['f1:"one %%% three"', 'f1:"one <> three"~1'],
      ['field:"one <> three"', 'field:"one <> three"'],
      ['field:"one <> three <>"', 'field:"one <> three"'],
      ['field:"one <> <> <> three <>"', 'field:"one <> <> <> three"'],
      ['field:"one <> 222 <> three|four|five <>"', 'field:"one <> 222 <> three|four|five"'],
      ['field:"on1|tw2 THREE|four|five six|seven"', 'field:"on1|tw2 THREE|four|five six|seven"'],
      ['field:"testing|trucks"', 'field:"testing|trucks"'],
      ['[aaa bbb]', '[aaa bbb]'],
      ['{aaa bbb]', '{aaa bbb]'],
      ['field:[aaa bbb}', 'field:[aaa bbb}'],
      ['{aaa bbb}', '{aaa bbb}'],
      ['{aaa>', '{aaa>'],
      ['[aaa>', '[aaa>'],
      ['field:<a\ aa}', 'field:<a aa}'],
      ['<aaa]', '<aaa]'],
      ['>aaa', '{aaa>'],
      ['>=aaa', '[aaa>'],
      ['<aaa', '<aaa}'],
      ['[A>', '[a>'],
      ['field:<=aaa', 'field:<aaa]'],
      ['REQ one REQ two', '+one +two'],
      ['REQ one two', '+one two'],
      ['one REQ two', 'one +two'],
      ['+one +two', '+one +two'],
      ['+one two', '+one two'],
      ['one +two', 'one +two'],
      ['-one -two', '-one -two'],
      ['-one two', '-one two'],
      ['one -two', 'one -two'],
      ['!one !two', '-one -two'],
      ['!one two', '-one two'],
      ['one !two', 'one -two'],
      ['NOT one NOT two', '-one -two'],
      ['NOT one two', '-one two'],
      ['one NOT two', 'one -two'],
      ['NOT two', '-two +*'],
      ['one two', 'one two'],
      ['one OR two', 'one two'],
      ['one AND two', '+one +two'],
      ['one two AND three', 'one two +three'],
      ['one two OR three', 'one two three'],
      ['one (two AND three)', 'one (+two +three)'],
      ['one AND (two OR three)', '+one +(two three)'],
      ['field:(one AND (two OR three))', '+field:one +(field:two field:three)'],
      ['one AND (two OR [aaa vvv})', '+one +(two [aaa vvv})'],
      ['one AND (f1:two OR f2:three) AND four', '+one +(f1:two f2:three) +four'],
      ['one^1.23', 'one^1.23'],
      ['(one AND two)^100.23', '(+one +two)^100.23'],
      ['field:(one AND two)^100.23', '(+field:one +field:two)^100.23'],
      ['field:(one AND [aaa bbb]^23.3)^100.23', '(+field:one +field:[aaa bbb]^23.3)^100.23'],
      ['(REQ field:"one two three")^23', 'field:"one two three"^23.0'],
      ['asdf~0.2', 'asdf~0.2'],
      ['field:asdf~0.2', 'field:asdf~0.2'],
      ['asdf~0.2^100.0', 'asdf~0.2^100.0'],
      ['field:asdf~0.2^0.1', 'field:asdf~0.2^0.1'],
      ['field:"asdf <> asdf|asdf"~4', 'field:"asdf <> asdf|asdf"~4'],
      ['"one two three four five"~5', '"one two three four five"~5'],
      ['ab?de', 'ab?de'],
      ['ab*de', 'ab*de'],
      ['asdf?*?asd*dsf?asfd*asdf?', 'asdf?*?asd*dsf?asfd*asdf?'],
      ['field:a* AND field:(b*)', '+field:a* +field:b*'],
      ['field:abc~ AND field:(b*)', '+field:abc~ +field:b*'],
      ['asdf?*?asd*dsf?asfd*asdf?^20.0', 'asdf?*?asd*dsf?asfd*asdf?^20.0'],

      # '*' expands to every known field; 'f1|f2' to the listed subset.
      ['*:xxx', 'xxx field:xxx f1:xxx f2:xxx'],
      ['f1|f2:xxx', 'f1:xxx f2:xxx'],

      ['*:asd~0.2', 'asd~0.2 field:asd~0.2 f1:asd~0.2 f2:asd~0.2'],
      ['f1|f2:asd~0.2', 'f1:asd~0.2 f2:asd~0.2'],

      ['*:a?d*^20.0', '(a?d* field:a?d* f1:a?d* f2:a?d*)^20.0'],
      ['f1|f2:a?d*^20.0', '(f1:a?d* f2:a?d*)^20.0'],

      ['*:"asdf <> xxx|yyy"', '"asdf <> xxx|yyy" field:"asdf <> xxx|yyy" f1:"asdf <> xxx|yyy" f2:"asdf <> xxx|yyy"'],
      ['f1|f2:"asdf <> xxx|yyy"', 'f1:"asdf <> xxx|yyy" f2:"asdf <> xxx|yyy"'],
      ['f1|f2:"asdf <> do|yyy"', 'f1:"asdf <> yyy" f2:"asdf <> yyy"'],
      ['f1|f2:"do|cat"', 'f1:cat f2:cat'],

      ['*:[bbb xxx]', '[bbb xxx] field:[bbb xxx] f1:[bbb xxx] f2:[bbb xxx]'],
      ['f1|f2:[bbb xxx]', 'f1:[bbb xxx] f2:[bbb xxx]'],

      ['*:(xxx AND bbb)', '+(xxx field:xxx f1:xxx f2:xxx) +(bbb field:bbb f1:bbb f2:bbb)'],
      ['f1|f2:(xxx AND bbb)', '+(f1:xxx f2:xxx) +(f1:bbb f2:bbb)'],
      ['asdf?*?asd*dsf?asfd*asdf?^20.0', 'asdf?*?asd*dsf?asfd*asdf?^20.0'],
      ['"onewordphrase"', 'onewordphrase'],
      ["who'd", "who'd"]
    ])
  end

  def test_qp_with_standard_analyzer
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["xxx", "key"],
                                     :analyzer => StandardAnalyzer.new)
    # StandardAnalyzer downcases terms and drops stop words.
    check_parses(parser, [
      ['key:1234', 'key:1234'],
      ['key:(1234 and Dave)', 'key:1234 key:dave'],
      ['key:(1234)', 'key:1234'],
      ['and the but they with', '']
    ])
  end

  def test_qp_changing_fields
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["xxx", "key"],
                                     :analyzer => WhiteSpaceAnalyzer.new)
    assert_equal('word key:word', parser.parse("*:word").to_s("xxx"))

    # Reassigning parser.fields must change what '*' expands to.
    parser.fields = ["xxx", "one", "two", "three"]
    assert_equal('word one:word two:word three:word',
                 parser.parse("*:word").to_s("xxx"))
    assert_equal('three:word four:word',
                 parser.parse("three:word four:word").to_s("xxx"))
  end

  def test_qp_allow_any_field
    # With :validate_fields, unknown fields are silently dropped.
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["xxx", "key"],
                                     :analyzer => WhiteSpaceAnalyzer.new,
                                     :validate_fields => true)

    assert_equal('key:word',
                 parser.parse("key:word song:word").to_s("xxx"))
    assert_equal('word key:word', parser.parse("*:word").to_s("xxx"))


    # Without validation, unknown fields pass through untouched.
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["xxx", "key"],
                                     :analyzer => WhiteSpaceAnalyzer.new)

    assert_equal('key:word song:word',
                 parser.parse("key:word song:word").to_s("xxx"))
    assert_equal('word key:word', parser.parse("*:word").to_s("xxx"))
  end

  # With :handle_parse_errors disabled, a malformed query must raise.
  def do_test_query_parse_exception_raised(str)
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["f1", "f2", "f3"],
                                     :handle_parse_errors => false)
    assert_raise(Ferret::QueryParser::QueryParseException,
                 str + " should have failed") do
      parser.parse(str)
    end
  end

  def test_or_default
    parser = Ferret::QueryParser.new(:default_field => :*,
                                     :fields => [:x, :y],
                                     :or_default => false,
                                     :analyzer => StandardAnalyzer.new)
    # :or_default => false makes bare terms combine with AND.
    check_parses(parser, [
      ['word', 'x:word y:word'],
      ['word1 word2', '+(x:word1 y:word1) +(x:word2 y:word2)']
    ], "")
  end

  def test_prefix_query
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["xxx"],
                                     :analyzer => StandardAnalyzer.new)
    # Only a single trailing '*' yields the cheaper PrefixQuery; anything
    # else produces a WildcardQuery.
    assert_equal(Ferret::Search::PrefixQuery, parser.parse("asdg*").class)
    assert_equal(Ferret::Search::WildcardQuery, parser.parse("a?dg*").class)
    assert_equal(Ferret::Search::WildcardQuery, parser.parse("a*dg*").class)
    assert_equal(Ferret::Search::WildcardQuery, parser.parse("adg*c").class)
  end

  def test_bad_queries
    parser = Ferret::QueryParser.new(:default_field => "xxx",
                                     :fields => ["f1", "f2"])

    bad_cases = [
      ['::*word', 'word'],
      ['::*&)(*^&*(', ''],
      ['::*&one)(*two(*&"', '"one two"~1'],
      [':', ''],
      ['[, ]', ''],
      ['{, }', ''],
      ['!', ''],
      ['+', ''],
      ['~', ''],
      ['^', ''],
      ['-', ''],
      ['|', ''],
      ['<, >', ''],
      ['=', ''],
      ['<script>', 'script']
    ]

    bad_cases.each do |input, expected|
      # Strict parser must raise; lenient parser must recover gracefully.
      do_test_query_parse_exception_raised(input)
      assert_equal(expected, parser.parse(input).to_s("xxx"))
    end
  end

  def test_use_keywords_switch
    analyzer = LetterAnalyzer.new
    parser = Ferret::QueryParser.new(:analyzer => analyzer,
                                     :default_field => "xxx")
    assert_equal("+www (+xxx +yyy) -zzz",
                 parser.parse("REQ www (xxx AND yyy) OR NOT zzz").to_s("xxx"))

    # With :use_keywords disabled, REQ/AND/OR/NOT become plain terms.
    parser = Ferret::QueryParser.new(:analyzer => analyzer,
                                     :default_field => "xxx",
                                     :use_keywords => false)
    assert_equal("req www (xxx and yyy) or not zzz",
                 parser.parse("REQ www (xxx AND yyy) OR NOT zzz").to_s("xxx"))
  end
end
require File.dirname(__FILE__) + "/../../test_helper"


# Tests for search filters: filter procs, RangeFilter, QueryFilter,
# FilteredQuery and user-supplied custom filters.
class FilterTest < Test::Unit::TestCase
  include Ferret::Search
  include Ferret::Analysis
  include Ferret::Index

  def setup
    @dir = Ferret::Store::RAMDirectory.new
    writer = IndexWriter.new(:dir => @dir,
                             :analyzer => WhiteSpaceAnalyzer.new,
                             :create => true)
    documents = [
      {:int => "0", :date => "20040601", :switch => "on"},
      {:int => "1", :date => "20041001", :switch => "off"},
      {:int => "2", :date => "20051101", :switch => "on"},
      {:int => "3", :date => "20041201", :switch => "off"},
      {:int => "4", :date => "20051101", :switch => "on"},
      {:int => "5", :date => "20041201", :switch => "off"},
      {:int => "6", :date => "20050101", :switch => "on"},
      {:int => "7", :date => "20040701", :switch => "off"},
      {:int => "8", :date => "20050301", :switch => "on"},
      {:int => "9", :date => "20050401", :switch => "off"}
    ]
    documents.each { |doc| writer << doc }
    writer.close
  end

  def teardown
    @dir.close
  end

  # Searches with the given filter and asserts the hit doc-ids equal
  # +expected_ids+ in order.
  def do_test_top_docs(searcher, query, expected_ids, filter)
    result = searcher.search(query, {:filter => filter})
    assert_equal(expected_ids.size, result.hits.size)
    result.total_hits.times do |i|
      assert_equal(expected_ids[i], result.hits[i].doc)
    end
  end

  def test_filter_proc
    searcher = Searcher.new(@dir)
    query = MatchAllQuery.new
    filter_proc = lambda {|doc, score, s| (s[doc][:int] % 2) == 0}
    result = searcher.search(query, :filter_proc => filter_proc)
    result.hits.each do |hit|
      assert_equal(0, searcher[hit.doc][:int] % 2)
    end
  end

  def test_range_filter
    searcher = Searcher.new(@dir)
    query = MatchAllQuery.new
    # Inclusive and exclusive bounds in every combination.
    range_filter = RangeFilter.new(:int, :>= => "2", :<= => "6")
    do_test_top_docs(searcher, query, [2,3,4,5,6], range_filter)
    range_filter = RangeFilter.new(:int, :>= => "2", :< => "6")
    do_test_top_docs(searcher, query, [2,3,4,5], range_filter)
    range_filter = RangeFilter.new(:int, :> => "2", :<= => "6")
    do_test_top_docs(searcher, query, [3,4,5,6], range_filter)
    range_filter = RangeFilter.new(:int, :> => "2", :< => "6")
    do_test_top_docs(searcher, query, [3,4,5], range_filter)
    # Half-open ranges.
    range_filter = RangeFilter.new(:int, :>= => "6")
    do_test_top_docs(searcher, query, [6,7,8,9], range_filter)
    range_filter = RangeFilter.new(:int, :> => "6")
    do_test_top_docs(searcher, query, [7,8,9], range_filter)
    range_filter = RangeFilter.new(:int, :<= => "2")
    do_test_top_docs(searcher, query, [0,1,2], range_filter)
    range_filter = RangeFilter.new(:int, :< => "2")
    do_test_top_docs(searcher, query, [0,1], range_filter)

    # The last filter (:int < "2") should set exactly bits 0 and 1.
    bits = range_filter.bits(searcher.reader)
    assert(bits[0])
    assert(bits[1])
    assert(!bits[2])
    assert(!bits[3])
    assert(!bits[4])
  end

  def test_range_filter_errors
    # Inverted bounds and dangling include flags are invalid.
    assert_raise(ArgumentError) {f = RangeFilter.new(:f, :> => "b", :< => "a")}
    assert_raise(ArgumentError) {f = RangeFilter.new(:f, :include_lower => true)}
    assert_raise(ArgumentError) {f = RangeFilter.new(:f, :include_upper => true)}
  end

  def test_query_filter
    searcher = Searcher.new(@dir)
    query = MatchAllQuery.new
    on_filter = QueryFilter.new(TermQuery.new(:switch, "on"))
    do_test_top_docs(searcher, query, [0,2,4,6,8], on_filter)
    # test again to test caching doesn't break it
    do_test_top_docs(searcher, query, [0,2,4,6,8], on_filter)
    off_filter = QueryFilter.new(TermQuery.new(:switch, "off"))
    do_test_top_docs(searcher, query, [1,3,5,7,9], off_filter)

    bits = off_filter.bits(searcher.reader)
    [1, 3, 5, 7, 9].each { |doc_id| assert(bits[doc_id]) }
    [0, 2, 4, 6, 8].each { |doc_id| assert(!bits[doc_id]) }
  end

  def test_filtered_query
    searcher = Searcher.new(@dir)
    base_query = MatchAllQuery.new
    range_filter = RangeFilter.new(:int, :>= => "2", :<= => "6")
    filtered = FilteredQuery.new(base_query, range_filter)
    on_filter = QueryFilter.new(TermQuery.new(:switch, "on"))
    do_test_top_docs(searcher, filtered, [2,4,6], on_filter)
    # Filters can be stacked by wrapping an already-filtered query.
    stacked = FilteredQuery.new(filtered, on_filter)
    lower_bound = RangeFilter.new(:int, :>= => "3")
    do_test_top_docs(searcher, stacked, [4,6], lower_bound)
  end

  # Minimal hand-rolled filter: anything responding to #bits works.
  class CustomFilter
    def bits(ir)
      bit_vector = Ferret::Utils::BitVector.new
      [0, 2, 4].each { |doc_id| bit_vector[doc_id] = true }
      bit_vector
    end
  end

  def test_custom_filter
    searcher = Searcher.new(@dir)
    query = MatchAllQuery.new
    custom = CustomFilter.new
    do_test_top_docs(searcher, query, [0, 2, 4], custom)
  end
end
require File.dirname(__FILE__) + "/../../test_helper"

# Tests for FuzzyQuery: edit-distance matching with varying prefix
# lengths and minimum-similarity thresholds.
class FuzzyQueryTest < Test::Unit::TestCase
  include Ferret::Search
  include Ferret::Store
  include Ferret::Analysis
  include Ferret::Index

  def add_doc(text, writer)
    writer << {:field => text}
  end

  def setup
    @dir = RAMDirectory.new
  end

  def teardown
    @dir.close
  end

  # Runs +query+ and asserts the hit doc-ids equal +expected+ in order.
  def do_test_top_docs(is, query, expected)
    top_docs = is.search(query)
    assert_equal(expected.length, top_docs.total_hits,
                 "expected #{expected.length} hits but got #{top_docs.total_hits}")
    assert_equal(expected.length, top_docs.hits.size)
    top_docs.total_hits.times do |i|
      assert_equal(expected[i], top_docs.hits[i].doc)
    end
  end

  # Fuzzy-searches :field for +text+ with the given prefix length and
  # asserts the resulting doc-ids.
  def do_prefix_test(is, text, prefix, expected)
    fq = FuzzyQuery.new(:field, text, :prefix_length => prefix)
    do_test_top_docs(is, fq, expected)
  end

  def test_fuzziness
    iw = IndexWriter.new(:dir => @dir,
                         :analyzer => WhiteSpaceAnalyzer.new,
                         :create => true)
    add_doc("aaaaa", iw)
    add_doc("aaaab", iw)
    add_doc("aaabb", iw)
    add_doc("aabbb", iw)
    add_doc("abbbb", iw)
    add_doc("bbbbb", iw)
    add_doc("ddddd", iw)
    add_doc("ddddddddddddddddddddd", iw) # test max_distances problem
    add_doc("aaaaaaaaaaaaaaaaaaaaaaa", iw) # test max_distances problem
    iw.close

    is = Searcher.new(@dir)

    # (Removed a dead `fq = FuzzyQuery.new(:field, "aaaaa",
    # :prefix_length => 5)` assignment here — the value was never used.)
    do_prefix_test(is, "aaaaaaaaaaaaaaaaaaaaaa", 1, [8])
    do_prefix_test(is, "aaaaa", 0, [0,1,2])
    do_prefix_test(is, "aaaaa", 1, [0,1,2])
    do_prefix_test(is, "aaaaa", 2, [0,1,2])
    do_prefix_test(is, "aaaaa", 3, [0,1,2])
    do_prefix_test(is, "aaaaa", 4, [0,1])
    do_prefix_test(is, "aaaaa", 5, [0])
    do_prefix_test(is, "aaaaa", 6, [0])

    do_prefix_test(is, "xxxxx", 0, [])

    do_prefix_test(is, "aaccc", 0, [])

    # A longer required prefix progressively excludes near matches.
    do_prefix_test(is, "aaaac", 0, [0,1,2])
    do_prefix_test(is, "aaaac", 1, [0,1,2])
    do_prefix_test(is, "aaaac", 2, [0,1,2])
    do_prefix_test(is, "aaaac", 3, [0,1,2])
    do_prefix_test(is, "aaaac", 4, [0,1])
    do_prefix_test(is, "aaaac", 5, [])

    do_prefix_test(is, "ddddX", 0, [6])
    do_prefix_test(is, "ddddX", 1, [6])
    do_prefix_test(is, "ddddX", 2, [6])
    do_prefix_test(is, "ddddX", 3, [6])
    do_prefix_test(is, "ddddX", 4, [6])
    do_prefix_test(is, "ddddX", 5, [])

    # A fuzzy query on a field with no terms must match nothing.
    fq = FuzzyQuery.new(:anotherfield, "ddddX", :prefix_length => 0)
    top_docs = is.search(fq)
    assert_equal(0, top_docs.total_hits)

    is.close
  end

  def test_fuzziness_long
    iw = IndexWriter.new(:dir => @dir,
                         :analyzer => WhiteSpaceAnalyzer.new,
                         :create => true)
    add_doc("aaaaaaa", iw)
    add_doc("segment", iw)
    iw.optimize
    iw.close
    is = Searcher.new(@dir)

    # not similar enough:
    do_prefix_test(is, "xxxxx", 0, [])

    # edit distance to "aaaaaaa" = 3, this matches because the string is longer than
    # in testDefaultFuzziness so a bigger difference is allowed:
    do_prefix_test(is, "aaaaccc", 0, [0])

    # now with prefix
    do_prefix_test(is, "aaaaccc", 1, [0])
    do_prefix_test(is, "aaaaccc", 4, [0])
    do_prefix_test(is, "aaaaccc", 5, [])

    # no match, more than half of the characters is wrong:
    do_prefix_test(is, "aaacccc", 0, [])

    # now with prefix
    do_prefix_test(is, "aaacccc", 1, [])

    # "student" and "stellent" are indeed similar to "segment" by default:
    do_prefix_test(is, "student", 0, [1])
    do_prefix_test(is, "stellent", 0, [1])

    # now with prefix
    do_prefix_test(is, "student", 2, [])
    do_prefix_test(is, "stellent", 2, [])

    # "student" doesn't match anymore thanks to increased minimum similarity:
    fq = FuzzyQuery.new(:field, "student",
                        :min_similarity => 0.6,
                        :prefix_length => 0)

    top_docs = is.search(fq)
    assert_equal(0, top_docs.total_hits)

    # min_similarity must lie within [0, 1).  The constructor return
    # values were previously captured into an unused local; dropped.
    assert_raise(ArgumentError) do
      FuzzyQuery.new(:f, "s", :min_similarity => 1.1)
    end
    assert_raise(ArgumentError) do
      FuzzyQuery.new(:f, "s", :min_similarity => -0.1)
    end

    is.close
  end

end