ferret 0.9.6 → 0.10.0

Files changed (295)
  1. data/MIT-LICENSE +1 -1
  2. data/README +12 -24
  3. data/Rakefile +38 -54
  4. data/TODO +14 -17
  5. data/ext/analysis.c +982 -823
  6. data/ext/analysis.h +133 -76
  7. data/ext/array.c +96 -58
  8. data/ext/array.h +40 -13
  9. data/ext/bitvector.c +476 -118
  10. data/ext/bitvector.h +264 -22
  11. data/ext/compound_io.c +217 -229
  12. data/ext/defines.h +49 -0
  13. data/ext/document.c +107 -317
  14. data/ext/document.h +31 -65
  15. data/ext/except.c +81 -36
  16. data/ext/except.h +117 -55
  17. data/ext/extconf.rb +2 -9
  18. data/ext/ferret.c +211 -104
  19. data/ext/ferret.h +22 -11
  20. data/ext/filter.c +97 -82
  21. data/ext/fs_store.c +348 -367
  22. data/ext/global.c +226 -188
  23. data/ext/global.h +44 -26
  24. data/ext/hash.c +474 -391
  25. data/ext/hash.h +441 -68
  26. data/ext/hashset.c +124 -96
  27. data/ext/hashset.h +169 -20
  28. data/ext/helper.c +56 -5
  29. data/ext/helper.h +7 -0
  30. data/ext/inc/lang.h +29 -49
  31. data/ext/inc/threading.h +31 -0
  32. data/ext/ind.c +288 -278
  33. data/ext/ind.h +68 -0
  34. data/ext/index.c +5688 -0
  35. data/ext/index.h +663 -616
  36. data/ext/lang.h +29 -49
  37. data/ext/libstemmer.c +3 -3
  38. data/ext/mem_pool.c +84 -0
  39. data/ext/mem_pool.h +35 -0
  40. data/ext/posh.c +1006 -0
  41. data/ext/posh.h +1007 -0
  42. data/ext/priorityqueue.c +117 -194
  43. data/ext/priorityqueue.h +135 -39
  44. data/ext/q_boolean.c +1305 -1108
  45. data/ext/q_const_score.c +106 -93
  46. data/ext/q_filtered_query.c +138 -135
  47. data/ext/q_fuzzy.c +206 -242
  48. data/ext/q_match_all.c +94 -80
  49. data/ext/q_multi_term.c +663 -0
  50. data/ext/q_parser.c +667 -593
  51. data/ext/q_phrase.c +992 -555
  52. data/ext/q_prefix.c +72 -61
  53. data/ext/q_range.c +235 -210
  54. data/ext/q_span.c +1480 -1166
  55. data/ext/q_term.c +273 -246
  56. data/ext/q_wildcard.c +127 -114
  57. data/ext/r_analysis.c +1720 -711
  58. data/ext/r_index.c +3049 -0
  59. data/ext/r_qparser.c +433 -146
  60. data/ext/r_search.c +2934 -1993
  61. data/ext/r_store.c +372 -143
  62. data/ext/r_utils.c +941 -0
  63. data/ext/ram_store.c +330 -326
  64. data/ext/search.c +1291 -668
  65. data/ext/search.h +403 -702
  66. data/ext/similarity.c +91 -113
  67. data/ext/similarity.h +45 -30
  68. data/ext/sort.c +721 -484
  69. data/ext/stopwords.c +361 -273
  70. data/ext/store.c +556 -58
  71. data/ext/store.h +706 -126
  72. data/ext/tags +3578 -2780
  73. data/ext/term_vectors.c +352 -0
  74. data/ext/threading.h +31 -0
  75. data/ext/win32.h +54 -0
  76. data/lib/ferret.rb +5 -17
  77. data/lib/ferret/document.rb +130 -2
  78. data/lib/ferret/index.rb +577 -26
  79. data/lib/ferret/number_tools.rb +157 -0
  80. data/lib/ferret_version.rb +3 -0
  81. data/test/test_helper.rb +5 -13
  82. data/test/unit/analysis/tc_analyzer.rb +513 -1
  83. data/test/unit/analysis/{ctc_tokenstream.rb → tc_token_stream.rb} +23 -0
  84. data/test/unit/index/tc_index.rb +183 -240
  85. data/test/unit/index/tc_index_reader.rb +312 -479
  86. data/test/unit/index/tc_index_writer.rb +397 -13
  87. data/test/unit/index/th_doc.rb +269 -206
  88. data/test/unit/query_parser/tc_query_parser.rb +40 -33
  89. data/test/unit/search/tc_filter.rb +59 -71
  90. data/test/unit/search/tc_fuzzy_query.rb +24 -16
  91. data/test/unit/search/tc_index_searcher.rb +23 -201
  92. data/test/unit/search/tc_multi_searcher.rb +78 -226
  93. data/test/unit/search/tc_search_and_sort.rb +93 -81
  94. data/test/unit/search/tc_sort.rb +23 -23
  95. data/test/unit/search/tc_sort_field.rb +7 -7
  96. data/test/unit/search/tc_spans.rb +51 -47
  97. data/test/unit/search/tm_searcher.rb +339 -0
  98. data/test/unit/store/tc_fs_store.rb +1 -1
  99. data/test/unit/store/tm_store_lock.rb +3 -3
  100. data/test/unit/tc_document.rb +81 -0
  101. data/test/unit/ts_analysis.rb +1 -1
  102. data/test/unit/ts_utils.rb +1 -1
  103. data/test/unit/utils/tc_bit_vector.rb +288 -0
  104. data/test/unit/utils/tc_number_tools.rb +117 -0
  105. data/test/unit/utils/tc_priority_queue.rb +106 -0
  106. metadata +140 -301
  107. data/CHANGELOG +0 -9
  108. data/ext/dummy.exe +0 -0
  109. data/ext/field.c +0 -408
  110. data/ext/frtio.h +0 -13
  111. data/ext/inc/except.h +0 -90
  112. data/ext/index_io.c +0 -382
  113. data/ext/index_rw.c +0 -2658
  114. data/ext/lang.c +0 -41
  115. data/ext/nix_io.c +0 -134
  116. data/ext/q_multi_phrase.c +0 -380
  117. data/ext/r_doc.c +0 -582
  118. data/ext/r_index_io.c +0 -1021
  119. data/ext/r_term.c +0 -219
  120. data/ext/term.c +0 -820
  121. data/ext/termdocs.c +0 -611
  122. data/ext/vector.c +0 -637
  123. data/ext/w32_io.c +0 -150
  124. data/lib/ferret/analysis.rb +0 -11
  125. data/lib/ferret/analysis/analyzers.rb +0 -112
  126. data/lib/ferret/analysis/standard_tokenizer.rb +0 -71
  127. data/lib/ferret/analysis/token.rb +0 -100
  128. data/lib/ferret/analysis/token_filters.rb +0 -86
  129. data/lib/ferret/analysis/token_stream.rb +0 -26
  130. data/lib/ferret/analysis/tokenizers.rb +0 -112
  131. data/lib/ferret/analysis/word_list_loader.rb +0 -27
  132. data/lib/ferret/document/document.rb +0 -152
  133. data/lib/ferret/document/field.rb +0 -312
  134. data/lib/ferret/index/compound_file_io.rb +0 -338
  135. data/lib/ferret/index/document_writer.rb +0 -289
  136. data/lib/ferret/index/field_infos.rb +0 -279
  137. data/lib/ferret/index/fields_io.rb +0 -181
  138. data/lib/ferret/index/index.rb +0 -675
  139. data/lib/ferret/index/index_file_names.rb +0 -33
  140. data/lib/ferret/index/index_reader.rb +0 -503
  141. data/lib/ferret/index/index_writer.rb +0 -534
  142. data/lib/ferret/index/multi_reader.rb +0 -377
  143. data/lib/ferret/index/multiple_term_doc_pos_enum.rb +0 -98
  144. data/lib/ferret/index/segment_infos.rb +0 -130
  145. data/lib/ferret/index/segment_merge_info.rb +0 -49
  146. data/lib/ferret/index/segment_merge_queue.rb +0 -16
  147. data/lib/ferret/index/segment_merger.rb +0 -358
  148. data/lib/ferret/index/segment_reader.rb +0 -412
  149. data/lib/ferret/index/segment_term_enum.rb +0 -169
  150. data/lib/ferret/index/segment_term_vector.rb +0 -58
  151. data/lib/ferret/index/term.rb +0 -53
  152. data/lib/ferret/index/term_buffer.rb +0 -83
  153. data/lib/ferret/index/term_doc_enum.rb +0 -291
  154. data/lib/ferret/index/term_enum.rb +0 -52
  155. data/lib/ferret/index/term_info.rb +0 -37
  156. data/lib/ferret/index/term_infos_io.rb +0 -321
  157. data/lib/ferret/index/term_vector_offset_info.rb +0 -20
  158. data/lib/ferret/index/term_vectors_io.rb +0 -553
  159. data/lib/ferret/query_parser.rb +0 -312
  160. data/lib/ferret/query_parser/query_parser.tab.rb +0 -928
  161. data/lib/ferret/search.rb +0 -50
  162. data/lib/ferret/search/boolean_clause.rb +0 -100
  163. data/lib/ferret/search/boolean_query.rb +0 -299
  164. data/lib/ferret/search/boolean_scorer.rb +0 -294
  165. data/lib/ferret/search/caching_wrapper_filter.rb +0 -40
  166. data/lib/ferret/search/conjunction_scorer.rb +0 -99
  167. data/lib/ferret/search/disjunction_sum_scorer.rb +0 -205
  168. data/lib/ferret/search/exact_phrase_scorer.rb +0 -32
  169. data/lib/ferret/search/explanation.rb +0 -41
  170. data/lib/ferret/search/field_cache.rb +0 -215
  171. data/lib/ferret/search/field_doc.rb +0 -31
  172. data/lib/ferret/search/field_sorted_hit_queue.rb +0 -184
  173. data/lib/ferret/search/filter.rb +0 -11
  174. data/lib/ferret/search/filtered_query.rb +0 -130
  175. data/lib/ferret/search/filtered_term_enum.rb +0 -79
  176. data/lib/ferret/search/fuzzy_query.rb +0 -154
  177. data/lib/ferret/search/fuzzy_term_enum.rb +0 -247
  178. data/lib/ferret/search/hit_collector.rb +0 -34
  179. data/lib/ferret/search/hit_queue.rb +0 -11
  180. data/lib/ferret/search/index_searcher.rb +0 -200
  181. data/lib/ferret/search/match_all_query.rb +0 -104
  182. data/lib/ferret/search/multi_phrase_query.rb +0 -216
  183. data/lib/ferret/search/multi_searcher.rb +0 -261
  184. data/lib/ferret/search/multi_term_query.rb +0 -65
  185. data/lib/ferret/search/non_matching_scorer.rb +0 -22
  186. data/lib/ferret/search/phrase_positions.rb +0 -55
  187. data/lib/ferret/search/phrase_query.rb +0 -214
  188. data/lib/ferret/search/phrase_scorer.rb +0 -152
  189. data/lib/ferret/search/prefix_query.rb +0 -54
  190. data/lib/ferret/search/query.rb +0 -140
  191. data/lib/ferret/search/query_filter.rb +0 -51
  192. data/lib/ferret/search/range_filter.rb +0 -103
  193. data/lib/ferret/search/range_query.rb +0 -139
  194. data/lib/ferret/search/req_excl_scorer.rb +0 -125
  195. data/lib/ferret/search/req_opt_sum_scorer.rb +0 -70
  196. data/lib/ferret/search/score_doc.rb +0 -38
  197. data/lib/ferret/search/score_doc_comparator.rb +0 -114
  198. data/lib/ferret/search/scorer.rb +0 -91
  199. data/lib/ferret/search/similarity.rb +0 -278
  200. data/lib/ferret/search/sloppy_phrase_scorer.rb +0 -47
  201. data/lib/ferret/search/sort.rb +0 -112
  202. data/lib/ferret/search/sort_comparator.rb +0 -60
  203. data/lib/ferret/search/sort_field.rb +0 -91
  204. data/lib/ferret/search/spans.rb +0 -12
  205. data/lib/ferret/search/spans/near_spans_enum.rb +0 -304
  206. data/lib/ferret/search/spans/span_first_query.rb +0 -79
  207. data/lib/ferret/search/spans/span_near_query.rb +0 -108
  208. data/lib/ferret/search/spans/span_not_query.rb +0 -130
  209. data/lib/ferret/search/spans/span_or_query.rb +0 -176
  210. data/lib/ferret/search/spans/span_query.rb +0 -25
  211. data/lib/ferret/search/spans/span_scorer.rb +0 -74
  212. data/lib/ferret/search/spans/span_term_query.rb +0 -105
  213. data/lib/ferret/search/spans/span_weight.rb +0 -84
  214. data/lib/ferret/search/spans/spans_enum.rb +0 -44
  215. data/lib/ferret/search/term_query.rb +0 -128
  216. data/lib/ferret/search/term_scorer.rb +0 -183
  217. data/lib/ferret/search/top_docs.rb +0 -36
  218. data/lib/ferret/search/top_field_docs.rb +0 -17
  219. data/lib/ferret/search/weight.rb +0 -54
  220. data/lib/ferret/search/wildcard_query.rb +0 -26
  221. data/lib/ferret/search/wildcard_term_enum.rb +0 -61
  222. data/lib/ferret/stemmers.rb +0 -1
  223. data/lib/ferret/stemmers/porter_stemmer.rb +0 -218
  224. data/lib/ferret/store.rb +0 -5
  225. data/lib/ferret/store/buffered_index_io.rb +0 -190
  226. data/lib/ferret/store/directory.rb +0 -141
  227. data/lib/ferret/store/fs_store.rb +0 -381
  228. data/lib/ferret/store/index_io.rb +0 -245
  229. data/lib/ferret/store/ram_store.rb +0 -286
  230. data/lib/ferret/utils.rb +0 -8
  231. data/lib/ferret/utils/bit_vector.rb +0 -123
  232. data/lib/ferret/utils/date_tools.rb +0 -138
  233. data/lib/ferret/utils/number_tools.rb +0 -91
  234. data/lib/ferret/utils/parameter.rb +0 -41
  235. data/lib/ferret/utils/priority_queue.rb +0 -120
  236. data/lib/ferret/utils/string_helper.rb +0 -47
  237. data/lib/ferret/utils/thread_local.rb +0 -28
  238. data/lib/ferret/utils/weak_key_hash.rb +0 -60
  239. data/lib/rferret.rb +0 -37
  240. data/rake_utils/code_statistics.rb +0 -106
  241. data/test/benchmark/tb_ram_store.rb +0 -76
  242. data/test/benchmark/tb_rw_vint.rb +0 -26
  243. data/test/functional/thread_safety_index_test.rb +0 -81
  244. data/test/functional/thread_safety_test.rb +0 -137
  245. data/test/longrunning/tc_numbertools.rb +0 -60
  246. data/test/longrunning/tm_store.rb +0 -19
  247. data/test/unit/analysis/ctc_analyzer.rb +0 -532
  248. data/test/unit/analysis/data/wordfile +0 -6
  249. data/test/unit/analysis/rtc_letter_tokenizer.rb +0 -20
  250. data/test/unit/analysis/rtc_lower_case_filter.rb +0 -20
  251. data/test/unit/analysis/rtc_lower_case_tokenizer.rb +0 -27
  252. data/test/unit/analysis/rtc_per_field_analyzer_wrapper.rb +0 -39
  253. data/test/unit/analysis/rtc_porter_stem_filter.rb +0 -16
  254. data/test/unit/analysis/rtc_standard_analyzer.rb +0 -20
  255. data/test/unit/analysis/rtc_standard_tokenizer.rb +0 -20
  256. data/test/unit/analysis/rtc_stop_analyzer.rb +0 -20
  257. data/test/unit/analysis/rtc_stop_filter.rb +0 -14
  258. data/test/unit/analysis/rtc_white_space_analyzer.rb +0 -21
  259. data/test/unit/analysis/rtc_white_space_tokenizer.rb +0 -20
  260. data/test/unit/analysis/rtc_word_list_loader.rb +0 -32
  261. data/test/unit/analysis/tc_token.rb +0 -25
  262. data/test/unit/document/rtc_field.rb +0 -28
  263. data/test/unit/document/tc_document.rb +0 -47
  264. data/test/unit/document/tc_field.rb +0 -98
  265. data/test/unit/index/rtc_compound_file_io.rb +0 -107
  266. data/test/unit/index/rtc_field_infos.rb +0 -127
  267. data/test/unit/index/rtc_fields_io.rb +0 -167
  268. data/test/unit/index/rtc_multiple_term_doc_pos_enum.rb +0 -83
  269. data/test/unit/index/rtc_segment_infos.rb +0 -74
  270. data/test/unit/index/rtc_segment_term_docs.rb +0 -17
  271. data/test/unit/index/rtc_segment_term_enum.rb +0 -60
  272. data/test/unit/index/rtc_segment_term_vector.rb +0 -71
  273. data/test/unit/index/rtc_term_buffer.rb +0 -57
  274. data/test/unit/index/rtc_term_info.rb +0 -19
  275. data/test/unit/index/rtc_term_infos_io.rb +0 -192
  276. data/test/unit/index/rtc_term_vectors_io.rb +0 -108
  277. data/test/unit/index/tc_term.rb +0 -27
  278. data/test/unit/index/tc_term_voi.rb +0 -18
  279. data/test/unit/search/rtc_similarity.rb +0 -37
  280. data/test/unit/search/rtc_sort_field.rb +0 -14
  281. data/test/unit/search/tc_multi_searcher2.rb +0 -126
  282. data/test/unit/store/rtc_fs_store.rb +0 -62
  283. data/test/unit/store/rtc_ram_store.rb +0 -15
  284. data/test/unit/store/rtm_store.rb +0 -150
  285. data/test/unit/store/rtm_store_lock.rb +0 -2
  286. data/test/unit/ts_document.rb +0 -2
  287. data/test/unit/utils/rtc_bit_vector.rb +0 -73
  288. data/test/unit/utils/rtc_date_tools.rb +0 -50
  289. data/test/unit/utils/rtc_number_tools.rb +0 -59
  290. data/test/unit/utils/rtc_parameter.rb +0 -40
  291. data/test/unit/utils/rtc_priority_queue.rb +0 -62
  292. data/test/unit/utils/rtc_string_helper.rb +0 -21
  293. data/test/unit/utils/rtc_thread.rb +0 -61
  294. data/test/unit/utils/rtc_weak_key_hash.rb +0 -25
  295. data/test/utils/number_to_spoken.rb +0 -132
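The headline change is visible in the file list above: the pure-Ruby implementation under lib/ferret/ (analysis, document, index, search, store, utils) is deleted wholesale and replaced by C sources under ext/ (index.c, r_index.c, r_search.c and friends), with lib/ferret/index.rb and lib/ferret/document.rb rewritten as thin wrappers. As a rough illustration of the Hash-based API this release moves to, here is a minimal sketch assuming the 0.10-era interface (option and method names recalled from that era's documentation, not taken from this diff):

```ruby
require 'ferret'

# Open (or create) an index on disk.
index = Ferret::Index::Index.new(:path => '/tmp/ferret_test_index')

# In 0.10, documents are plain Ruby Hashes rather than
# Ferret::Document::Document objects built from Field instances.
index << {:title => "Programming Ruby", :content => "the pickaxe book"}
index << {:title => "Ferret",           :content => "a Ruby port of Lucene"}

# Iterate over matching documents with their scores.
index.search_each('content:"lucene"') do |doc_id, score|
  puts "#{index[doc_id][:title]} (score: #{score})"
end
```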
data/test/unit/analysis/data/wordfile
@@ -1,6 +0,0 @@
- and
- to
- it
- the
- there
- their
data/test/unit/analysis/rtc_letter_tokenizer.rb
@@ -1,20 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class LetterTokenizerTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_lettertokenizer()
-     input = StringReader.new('DBalmain@gmail.com is My e-mail 523@#$ address. 23#@$')
-     t = LetterTokenizer.new(input)
-     assert_equal(Token.new("DBalmain", 0, 8), t.next())
-     assert_equal(Token.new("gmail", 9, 14), t.next())
-     assert_equal(Token.new("com", 15, 18), t.next())
-     assert_equal(Token.new("is", 19, 21), t.next())
-     assert_equal(Token.new("My", 22, 24), t.next())
-     assert_equal(Token.new("e", 25, 26), t.next())
-     assert_equal(Token.new("mail", 27, 31), t.next())
-     assert_equal(Token.new("address", 39, 46), t.next())
-     assert(! t.next())
-   end
- end
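A note on the numbers asserted throughout these analysis tests: each Token carries half-open character offsets [start, end) into the input string, so the pairs can be checked with plain Ruby slicing:

```ruby
# The input from the LetterTokenizer test; offsets are [start, end) ranges.
s = 'DBalmain@gmail.com is My e-mail 523@#$ address. 23#@$'
puts s[0...8]    # => "DBalmain"  (Token.new("DBalmain", 0, 8))
puts s[27...31]  # => "mail"      (Token.new("mail", 27, 31))
puts s[39...46]  # => "address"   (Token.new("address", 39, 46))
```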
data/test/unit/analysis/rtc_lower_case_filter.rb
@@ -1,20 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class LowerCaseFilterTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_lowercasefilter()
-     input = StringReader.new('DBalmain@gmail.com is My E-Mail 52   #$ ADDRESS. 23#@$')
-     t = LowerCaseFilter.new(WhiteSpaceTokenizer.new(input))
-     assert_equal(Token.new('dbalmain@gmail.com', 0, 18), t.next())
-     assert_equal(Token.new('is', 19, 21), t.next())
-     assert_equal(Token.new('my', 22, 24), t.next())
-     assert_equal(Token.new('e-mail', 25, 31), t.next())
-     assert_equal(Token.new('52', 32, 34), t.next())
-     assert_equal(Token.new('#$', 37, 39), t.next())
-     assert_equal(Token.new('address.', 40, 48), t.next())
-     assert_equal(Token.new('23#@$', 49, 54), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_lower_case_tokenizer.rb
@@ -1,27 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class LowerCaseTokenizerTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_normalize()
-     lt = LowerCaseTokenizer.new(StringReader.new(""))
-     assert_equal('!', lt.__send__(:normalize,"!"))
-     assert_equal('r', lt.__send__(:normalize,"r"))
-     assert_equal('r', lt.__send__(:normalize,"R"))
-   end
-
-   def test_lowercase_tokenizer()
-     input = StringReader.new('DBalmain@gmail.com is My E-Mail 523@#$ ADDRESS. 23#@$')
-     t = LowerCaseTokenizer.new(input)
-     assert_equal(Token.new("dbalmain", 0, 8), t.next())
-     assert_equal(Token.new("gmail", 9, 14), t.next())
-     assert_equal(Token.new("com", 15, 18), t.next())
-     assert_equal(Token.new("is", 19, 21), t.next())
-     assert_equal(Token.new("my", 22, 24), t.next())
-     assert_equal(Token.new("e", 25, 26), t.next())
-     assert_equal(Token.new("mail", 27, 31), t.next())
-     assert_equal(Token.new("address", 39, 46), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_per_field_analyzer_wrapper.rb
@@ -1,39 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class PerFieldAnalyzerWrapperTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-   def test_perfieldanalyzerwrapper()
-     aw = PerFieldAnalyzerWrapper.new(Analyzer.new())
-     aw.add_analyzer("abstract", WhiteSpaceAnalyzer.new())
-     aw.add_analyzer("body", StopAnalyzer.new(['is', 'my', 'address']))
-     input = StringReader.new('DBalmain@gmail.com is My e-mail ADDRESS')
-     t = aw.token_stream("title", input)
-     assert_equal(Token.new("dbalmain", 0, 8), t.next())
-     assert_equal(Token.new("gmail", 9, 14), t.next())
-     assert_equal(Token.new("com", 15, 18), t.next())
-     assert_equal(Token.new("is", 19, 21), t.next())
-     assert_equal(Token.new("my", 22, 24), t.next())
-     assert_equal(Token.new("e", 25, 26), t.next())
-     assert_equal(Token.new("mail", 27, 31), t.next())
-     assert_equal(Token.new("address", 32, 39), t.next())
-     assert(! t.next())
-     input.reset()
-     t = aw.token_stream("abstract", input)
-     assert_equal(Token.new('DBalmain@gmail.com', 0, 18), t.next())
-     assert_equal(Token.new('is', 19, 21), t.next())
-     assert_equal(Token.new('My', 22, 24), t.next())
-     assert_equal(Token.new('e-mail', 25, 31), t.next())
-     assert_equal(Token.new("ADDRESS", 32, 39), t.next())
-     if ( token = t.next()): puts token.text end
-     assert(! t.next())
-     input.reset()
-     t = aw.token_stream("body", input)
-     assert_equal(Token.new("dbalmain", 0, 8), t.next())
-     assert_equal(Token.new("gmail", 9, 14), t.next())
-     assert_equal(Token.new("com", 15, 18), t.next())
-     assert_equal(Token.new("e", 25, 26), t.next())
-     assert_equal(Token.new("mail", 27, 31), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_porter_stem_filter.rb
@@ -1,16 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class PorterStemFilterTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_porterstempfilter()
-     input = StringReader.new('breath Breathes BreatHed BREATHING')
-     t = PorterStemFilter.new(LowerCaseFilter.new(WhiteSpaceTokenizer.new(input)))
-     assert_equal(Token.new('breath', 0, 6), t.next())
-     assert_equal(Token.new('breath', 7, 15), t.next())
-     assert_equal(Token.new('breath', 16, 24), t.next())
-     assert_equal(Token.new('breath', 25, 34), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_standard_analyzer.rb
@@ -1,20 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class StandardAnalyzerTest < Test::Unit::TestCase
-   include Ferret::Utils::StringHelper
-   include Ferret::Analysis
-
-   def test_standard_analyzer()
-     input = StringReader.new('D.Ba_l-n@gma-l.com AB&Sons Toys\'r\'us you\'re she\'s, #$%^$%*& job@dot I.B.M. the an AnD THEIR')
-     sa = StandardAnalyzer.new()
-     t = sa.token_stream("field", input)
-     assert_equal(Token.new("d.ba_l-n@gma-l.com", 0, 18), t.next())
-     assert_equal(Token.new("ab&sons", 19, 26), t.next())
-     assert_equal(Token.new("toys'r'us", 27, 36), t.next())
-     assert_equal(Token.new("you're", 37, 43), t.next())
-     assert_equal(Token.new("she", 44, 49), t.next())
-     assert_equal(Token.new("job@dot", 60, 67), t.next())
-     assert_equal(Token.new("ibm", 68, 74), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_standard_tokenizer.rb
@@ -1,20 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class StandardTokenizerTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_lettertokenizer()
-     input = StringReader.new('DBalmain@gmail.com is My e-mail 523@#$ address. 23#@$')
-     t = StandardTokenizer.new(input)
-     assert_equal(Token.new("DBalmain@gmail.com", 0, 18), t.next())
-     assert_equal(Token.new("is", 19, 21), t.next())
-     assert_equal(Token.new("My", 22, 24), t.next())
-     assert_equal(Token.new("e", 25, 26), t.next())
-     assert_equal(Token.new("mail", 27, 31), t.next())
-     assert_equal(Token.new("523", 32, 35), t.next())
-     assert_equal(Token.new("address", 39, 46), t.next())
-     assert_equal(Token.new("23", 48, 50), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_stop_analyzer.rb
@@ -1,20 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class StopAnalyzerTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_stopanalyzer()
-     input = StringReader.new('The Quick AND the DEAD the and to it there their')
-     a = StopAnalyzer.new()
-     t = a.token_stream("field name", input)
-     assert_equal(Token.new('quick', 4, 9), t.next())
-     assert_equal(Token.new('dead', 18, 22), t.next())
-     assert(! t.next())
-     input = StringReader.new("David Balmain")
-     a = StopAnalyzer.new(["david"])
-     t = a.token_stream("field name", input)
-     assert_equal(Token.new('balmain', 6, 13), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_stop_filter.rb
@@ -1,14 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class StopFilterTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_stopfilter()
-     input = StringReader.new('The Quick AND the DEAD the and to it there their')
-     t = StopFilter.new_with_file(LowerCaseTokenizer.new(input), File.dirname(__FILE__) + '/data/wordfile')
-     assert_equal(Token.new('quick', 4, 9), t.next())
-     assert_equal(Token.new('dead', 18, 22), t.next())
-     assert(! t.next())
-   end
- end
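The wordfile loaded here is the six-line stop list shown in the first hunk above (and, to, it, the, there, their). A plain-Ruby sketch of the same file-based stop filtering; filter_stop_words is a hypothetical stand-in, not Ferret's StopFilter:

```ruby
require 'set'

# Hypothetical stand-in for StopFilter.new_with_file: read one stop word
# per line from a file, then reject matching tokens.
def filter_stop_words(tokens, wordfile)
  stop_words = File.readlines(wordfile).map { |line| line.strip.downcase }.to_set
  tokens.reject { |token| stop_words.include?(token.downcase) }
end

tokens = %w[the quick and the dead the and to it there their]
# Assuming a wordfile containing: and to it the there their
p filter_stop_words(tokens, 'data/wordfile')  # => ["quick", "dead"]
```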
data/test/unit/analysis/rtc_white_space_analyzer.rb
@@ -1,21 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class WhiteSpaceAnalyzerTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_whitespaceanalyzer()
-     input = StringReader.new('DBalmain@gmail.com is My e-mail 52   #$ address. 23#@$')
-     a = WhiteSpaceAnalyzer.new()
-     t = a.token_stream("field", input)
-     assert_equal(Token.new('DBalmain@gmail.com', 0, 18), t.next())
-     assert_equal(Token.new('is', 19, 21), t.next())
-     assert_equal(Token.new('My', 22, 24), t.next())
-     assert_equal(Token.new('e-mail', 25, 31), t.next())
-     assert_equal(Token.new('52', 32, 34), t.next())
-     assert_equal(Token.new('#$', 37, 39), t.next())
-     assert_equal(Token.new('address.', 40, 48), t.next())
-     assert_equal(Token.new('23#@$', 49, 54), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_white_space_tokenizer.rb
@@ -1,20 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class WhiteSpaceTokenizerTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   include Ferret::Utils::StringHelper
-
-   def test_whitespacetokenizer()
-     input = StringReader.new('DBalmain@gmail.com is My e-mail 52   #$ address. 23#@$')
-     t = WhiteSpaceTokenizer.new(input)
-     assert_equal(Token.new('DBalmain@gmail.com', 0, 18), t.next())
-     assert_equal(Token.new('is', 19, 21), t.next())
-     assert_equal(Token.new('My', 22, 24), t.next())
-     assert_equal(Token.new('e-mail', 25, 31), t.next())
-     assert_equal(Token.new('52', 32, 34), t.next())
-     assert_equal(Token.new('#$', 37, 39), t.next())
-     assert_equal(Token.new('address.', 40, 48), t.next())
-     assert_equal(Token.new('23#@$', 49, 54), t.next())
-     assert(! t.next())
-   end
- end
data/test/unit/analysis/rtc_word_list_loader.rb
@@ -1,32 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class WordListLoaderTest < Test::Unit::TestCase
-   include Ferret::Analysis
-   def test_word_set_from_file()
-     wl = WordListLoader.word_set_from_file(File.dirname(__FILE__) + '/data/wordfile')
-     assert_equal(6, wl.size())
-     assert(wl.member?('and'))
-     assert(wl.member?('to'))
-     assert(wl.member?('it'))
-     assert(wl.member?('the'))
-     assert(wl.member?('there'))
-     assert(wl.member?('their'))
-     assert(!wl.member?('horse'))
-     assert(!wl.member?('judo'))
-     assert(!wl.member?('dairy'))
-   end
-
-   def test_word_set_from_array()
-     wl = WordListLoader.word_set_from_array(['and','to','it','the','there','their'])
-     assert_equal(6, wl.size())
-     assert(wl.member?('and'))
-     assert(wl.member?('to'))
-     assert(wl.member?('it'))
-     assert(wl.member?('the'))
-     assert(wl.member?('there'))
-     assert(wl.member?('their'))
-     assert(!wl.member?('horse'))
-     assert(!wl.member?('judo'))
-     assert(!wl.member?('dairy'))
-   end
- end
data/test/unit/analysis/tc_token.rb
@@ -1,25 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class TokenTest < Test::Unit::TestCase
-   include Ferret::Analysis
-
-   def test_token()
-     tk1 = Token.new("DBalmain", 1, 8, 5, "token")
-     assert_equal(tk1, Token.new("DBalmain", 1, 8))
-     assert_not_equal(tk1, Token.new("DBalmain", 0, 8))
-     assert_not_equal(tk1, Token.new("DBalmain", 1, 9))
-     assert_not_equal(tk1, Token.new("Dbalmain", 1, 8))
-     assert(tk1 < Token.new("CBalmain", 2, 7))
-     assert(tk1 > Token.new("EBalmain", 0, 9))
-     assert(tk1 < Token.new("CBalmain", 1, 9))
-     assert(tk1 > Token.new("EBalmain", 1, 7))
-     assert(tk1 < Token.new("EBalmain", 1, 8))
-     assert(tk1 > Token.new("CBalmain", 1, 8))
-     assert_equal("DBalmain", tk1.text)
-     tk1.text = "Hello"
-     assert_equal("Hello", tk1.text)
-     assert_equal(1, tk1.start_offset)
-     assert_equal(8, tk1.end_offset)
-     assert_equal(5, tk1.pos_inc)
-   end
- end
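The comparison assertions pin down Token ordering: tokens sort by start_offset, then end_offset, then text, while equality ignores pos_inc and type (tk1 is built with five arguments yet equals a three-argument token). A plain-Ruby model of that ordering, not Ferret's own class:

```ruby
# Ordering inferred from the assertions above: start_offset, then
# end_offset, then text break ties in that order.
Tok = Struct.new(:text, :start_offset, :end_offset) do
  include Comparable
  def <=>(other)
    [start_offset, end_offset, text] <=>
      [other.start_offset, other.end_offset, other.text]
  end
end

tk1 = Tok.new("DBalmain", 1, 8)
p tk1 < Tok.new("CBalmain", 2, 7)  # => true (earlier start wins over text)
p tk1 > Tok.new("EBalmain", 1, 7)  # => true (same start; smaller end sorts first)
p tk1 < Tok.new("EBalmain", 1, 8)  # => true (same offsets; text decides)
```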
data/test/unit/document/rtc_field.rb
@@ -1,28 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
-
- class FieldTest < Test::Unit::TestCase
-   include Ferret::Document
-   include Ferret::Utils
-
-   def test_store()
-     assert_equal("COMPRESS", Field::Store::COMPRESS.to_s)
-     assert_equal("YES", Field::Store::YES.to_s)
-     assert_equal("NO", Field::Store::NO.to_s)
-   end
-
-   def test_index()
-     assert_equal("TOKENIZED", Field::Index::TOKENIZED.to_s)
-     assert_equal("UNTOKENIZED", Field::Index::UNTOKENIZED.to_s)
-     assert_equal("NO", Field::Index::NO.to_s)
-     assert_equal("NO_NORMS", Field::Index::NO_NORMS.to_s)
-   end
-
-   def test_term_vector()
-     assert_equal("YES", Field::TermVector::YES.to_s)
-     assert_equal("NO", Field::TermVector::NO.to_s)
-     assert_equal("WITH_POSITIONS", Field::TermVector::WITH_POSITIONS.to_s)
-     assert_equal("WITH_OFFSETS", Field::TermVector::WITH_OFFSETS.to_s)
-     assert_equal("WITH_POSITIONS_OFFSETS", Field::TermVector::WITH_POSITIONS_OFFSETS.to_s)
-   end
- end
data/test/unit/document/tc_document.rb
@@ -1,47 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
- class DocumentTest < Test::Unit::TestCase
-   include Ferret::Document
-   def test_document()
-     doc = Document.new()
-     f11 = Field.new("field1", "value1", Field::Store::YES, Field::Index::NO)
-     f12 = Field.new("field1", "value2", Field::Store::YES, Field::Index::NO)
-     f13 = Field.new("field1", "value3", Field::Store::YES, Field::Index::NO)
-     f21 = Field.new("field2", "value1", Field::Store::YES, Field::Index::NO)
-     doc.add_field(f11)
-     doc.add_field(f12)
-     doc.add_field(f13)
-     doc.add_field(f21)
-     assert_equal(3, doc.fields("field1").size)
-     assert_equal(1, doc.fields("field2").size)
-     field = doc.remove_field("field1")
-     assert_equal(2, doc.fields("field1").size)
-     assert_equal(f11, field)
-     assert_equal("value2 value3", doc.values("field1"))
-     doc.remove_fields("field1")
-     assert_equal(nil, doc.field("field1"))
-   end
-
-   def test_binary_string()
-     tmp = []
-     256.times {|i| tmp[i] = i}
-     bin1 = tmp.pack("c*")
-     tmp = []
-     56.times {|i| tmp[i] = i}
-     bin2 = tmp.pack("c*")
-     doc = Document.new()
-     fs1 = Field.new("field1", "value1", Field::Store::YES, Field::Index::NO)
-     fs2 = Field.new("field1", "value2", Field::Store::YES, Field::Index::NO)
-     fb1 = Field.new_binary_field("field1", bin1, Field::Store::YES)
-     fb2 = Field.new_binary_field("field1", bin2, Field::Store::YES)
-
-     doc.add_field(fs1)
-     doc.add_field(fs2)
-     doc.add_field(fb1)
-     doc.add_field(fb2)
-
-     assert_equal(4, doc.fields("field1").size)
-     assert_equal("value1 value2", doc.values("field1").strip)
-     assert_equal([bin1, bin2], doc.binaries("field1"))
-   end
- end
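These assertions capture Document's multi-valued field semantics: same-named fields accumulate in insertion order, remove_field pops the first one, values joins the stored strings with spaces, and binary data is reported separately via binaries. A hash-of-arrays model of those rules (a hypothetical illustration, not Ferret's implementation):

```ruby
# Plain-Ruby model of the multi-valued semantics asserted above.
class MiniDoc
  def initialize
    @fields = Hash.new { |hash, key| hash[key] = [] }
  end

  def add_field(name, value); @fields[name] << value;   end
  def fields(name);           @fields[name];            end
  def remove_field(name);     @fields[name].shift;      end # first value wins
  def values(name);           @fields[name].join(" ");  end
end

doc = MiniDoc.new
%w[value1 value2 value3].each { |v| doc.add_field("field1", v) }
doc.remove_field("field1")   # => "value1"
puts doc.values("field1")    # => "value2 value3"
```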
data/test/unit/document/tc_field.rb
@@ -1,98 +0,0 @@
- require File.dirname(__FILE__) + "/../../test_helper"
-
-
- class FieldTest < Test::Unit::TestCase
-   include Ferret::Document
-   include Ferret::Utils
-
-   def test_store()
-     assert_not_nil(Field::Store::COMPRESS)
-     assert_not_nil(Field::Store::YES)
-     assert_not_nil(Field::Store::NO)
-   end
-
-   def test_index()
-     assert_not_nil(Field::Index::TOKENIZED)
-     assert_not_nil(Field::Index::UNTOKENIZED)
-     assert_not_nil(Field::Index::NO)
-     assert_not_nil(Field::Index::NO_NORMS)
-   end
-
-   def test_term_vector()
-     assert_not_nil(Field::TermVector::YES)
-     assert_not_nil(Field::TermVector::NO)
-     assert_not_nil(Field::TermVector::WITH_POSITIONS)
-     assert_not_nil(Field::TermVector::WITH_OFFSETS)
-     assert_not_nil(Field::TermVector::WITH_POSITIONS_OFFSETS)
-   end
-
-   def test_standard_field()
-     f = Field.new("name", "value", Field::Store::COMPRESS, Field::Index::TOKENIZED)
-     assert_equal("name", f.name)
-     assert_equal("value", f.data)
-     assert_equal(true, f.stored?)
-     assert_equal(true, f.compressed?)
-     assert_equal(true, f.indexed?)
-     assert_equal(true, f.tokenized?)
-     assert_equal(false, f.store_term_vector?)
-     assert_equal(false, f.store_offsets?)
-     assert_equal(false, f.store_positions?)
-     assert_equal(false, f.omit_norms?)
-     assert_equal(false, f.binary?)
-     assert_equal("stored/compressed,indexed,tokenized,<name:value>", f.to_s)
-     f.data = "183"
-     f.boost = 0.001
-     assert_equal("183", f.data)
-     assert(0.001 =~ f.boost)
-   end
-
-   def test_set_store()
-     f = Field.new("name", "", Field::Store::COMPRESS, Field::Index::TOKENIZED)
-     f.store = Field::Store::NO
-     assert_equal(false, f.stored?)
-     assert_equal(false, f.compressed?)
-     assert_equal("indexed,tokenized,<name:>", f.to_s)
-   end
-
-   def test_set_index()
-     f = Field.new("name", "value", Field::Store::COMPRESS, Field::Index::TOKENIZED)
-     f.index = Field::Index::NO
-     assert_equal(false, f.indexed?)
-     assert_equal(false, f.tokenized?)
-     assert_equal(false, f.omit_norms?)
-     assert_equal("stored/compressed,<name:value>", f.to_s)
-     f.index = Field::Index::NO_NORMS
-     assert_equal(true, f.indexed?)
-     assert_equal(false, f.tokenized?)
-     assert_equal(true, f.omit_norms?)
-     assert_equal("stored/compressed,indexed,omit_norms,<name:value>", f.to_s)
-   end
-
-   def test_set_term_vector()
-     f = Field.new("name", "value", Field::Store::COMPRESS, Field::Index::TOKENIZED)
-     f.term_vector = Field::TermVector::WITH_POSITIONS_OFFSETS
-     assert_equal(true, f.store_term_vector?)
-     assert_equal(true, f.store_offsets?)
-     assert_equal(true, f.store_positions?)
-     assert_equal("stored/compressed,indexed,tokenized,store_term_vector,store_offsets,store_positions,<name:value>", f.to_s)
-   end
-
-   def test_new_binary_field()
-     tmp = []
-     256.times {|i| tmp[i] = i}
-     bin = tmp.pack("c*")
-     f = Field.new_binary_field("name", bin, Field::Store::YES)
-     assert_equal("name", f.name)
-     assert_equal(bin, f.data)
-     assert_equal(true, f.stored?)
-     assert_equal(false, f.compressed?)
-     assert_equal(false, f.indexed?)
-     assert_equal(false, f.tokenized?)
-     assert_equal(false, f.store_term_vector?)
-     assert_equal(false, f.store_offsets?)
-     assert_equal(false, f.store_positions?)
-     assert_equal(false, f.omit_norms?)
-     assert_equal(true, f.binary?)
-     assert_equal("stored/uncompressed,binary,<name:=bin_data=>", f.to_s)
-   end
- end
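Taken together, the assertions in this file spell out how the three constructor parameters expand into Field's boolean properties. A plain-Ruby rendering of that mapping, with illustrative symbols standing in for the Field::Store::*, Field::Index::* and Field::TermVector::* constants:

```ruby
# How Store/Index/TermVector choices expand into the flags the test checks.
def field_flags(store, index, term_vector)
  {
    :stored?            => store != :no,
    :compressed?        => store == :compress,
    :indexed?           => index != :no,
    :tokenized?         => index == :tokenized,
    :omit_norms?        => index == :no_norms,
    :store_term_vector? => term_vector != :no,
    :store_positions?   => [:with_positions, :with_positions_offsets].include?(term_vector),
    :store_offsets?     => [:with_offsets,   :with_positions_offsets].include?(term_vector)
  }
end

# Matches test_standard_field: COMPRESS + TOKENIZED, no term vector.
p field_flags(:compress, :tokenized, :no)
```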