ferret 0.11.4 → 0.11.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (50)
  1. data/Rakefile +1 -0
  2. data/TUTORIAL +3 -3
  3. data/ext/analysis.c +12 -9
  4. data/ext/array.c +10 -10
  5. data/ext/array.h +8 -1
  6. data/ext/bitvector.c +2 -2
  7. data/ext/except.c +1 -1
  8. data/ext/ferret.c +2 -2
  9. data/ext/ferret.h +1 -1
  10. data/ext/fs_store.c +13 -2
  11. data/ext/global.c +4 -4
  12. data/ext/global.h +6 -0
  13. data/ext/hash.c +1 -1
  14. data/ext/helper.c +1 -1
  15. data/ext/helper.h +1 -1
  16. data/ext/index.c +48 -22
  17. data/ext/index.h +17 -16
  18. data/ext/mempool.c +4 -1
  19. data/ext/mempool.h +1 -1
  20. data/ext/multimapper.c +2 -2
  21. data/ext/q_fuzzy.c +2 -2
  22. data/ext/q_multi_term.c +2 -2
  23. data/ext/q_parser.c +39 -8
  24. data/ext/q_range.c +32 -1
  25. data/ext/r_analysis.c +66 -28
  26. data/ext/r_index.c +18 -19
  27. data/ext/r_qparser.c +21 -6
  28. data/ext/r_search.c +74 -49
  29. data/ext/r_store.c +1 -1
  30. data/ext/r_utils.c +17 -17
  31. data/ext/search.c +10 -5
  32. data/ext/search.h +3 -1
  33. data/ext/sort.c +2 -2
  34. data/ext/stopwords.c +23 -34
  35. data/ext/store.c +9 -9
  36. data/ext/store.h +5 -4
  37. data/lib/ferret/document.rb +2 -2
  38. data/lib/ferret/field_infos.rb +37 -35
  39. data/lib/ferret/index.rb +16 -6
  40. data/lib/ferret/number_tools.rb +2 -2
  41. data/lib/ferret_version.rb +1 -1
  42. data/test/unit/analysis/tc_token_stream.rb +40 -0
  43. data/test/unit/index/tc_index.rb +64 -101
  44. data/test/unit/index/tc_index_reader.rb +13 -0
  45. data/test/unit/largefile/tc_largefile.rb +46 -0
  46. data/test/unit/query_parser/tc_query_parser.rb +17 -1
  47. data/test/unit/search/tc_multiple_search_requests.rb +58 -0
  48. data/test/unit/search/tm_searcher.rb +27 -1
  49. data/test/unit/ts_largefile.rb +4 -0
  50. metadata +147 -144
data/lib/ferret/index.rb CHANGED
@@ -322,8 +322,13 @@ module Ferret::Index
322
322
  # sort:: A Sort object or sort string describing how the field
323
323
  # should be sorted. A sort string is made up of field names
324
324
  # which cannot contain spaces and the word "DESC" if you
325
- # want the field reversed, all seperated by commas. For
326
- # example; "rating DESC, author, title"
325
+ # want the field reversed, all separated by commas. For
326
+ # example; "rating DESC, author, title". Note that Ferret
327
+ # will try to determine a field's type by looking at the
328
+ # first term in the index and seeing if it can be parsed as
329
+ # an integer or a float. Keep this in mind as you may need
330
+ # to specify a fields type to sort it correctly. For more
331
+ # on this, see the documentation for SortField
327
332
  # filter:: a Filter object to filter the search results with
328
333
  # filter_proc:: a filter Proc is a Proc which takes the doc_id, the score
329
334
  # and the Searcher object as its parameters and returns a
@@ -360,8 +365,13 @@ module Ferret::Index
360
365
  # sort:: A Sort object or sort string describing how the field
361
366
  # should be sorted. A sort string is made up of field names
362
367
  # which cannot contain spaces and the word "DESC" if you
363
- # want the field reversed, all seperated by commas. For
364
- # example; "rating DESC, author, title"
368
+ # want the field reversed, all separated by commas. For
369
+ # example; "rating DESC, author, title". Note that Ferret
370
+ # will try to determine a field's type by looking at the
371
+ # first term in the index and seeing if it can be parsed as
372
+ # an integer or a float. Keep this in mind as you may need
373
+ # to specify a fields type to sort it correctly. For more
374
+ # on this, see the documentation for SortField
365
375
  # filter:: a Filter object to filter the search results with
366
376
  # filter_proc:: a filter Proc is a Proc which takes the doc_id, the score
367
377
  # and the Searcher object as its parameters and returns a
@@ -451,7 +461,7 @@ module Ferret::Index
451
461
  ensure_writer_open()
452
462
  ensure_searcher_open()
453
463
  query = do_process_query(query)
454
- @searcher.search_each(query) do |doc, score|
464
+ @searcher.search_each(query, :limit => :all) do |doc, score|
455
465
  @reader.delete(doc)
456
466
  end
457
467
  flush() if @auto_flush
@@ -623,7 +633,7 @@ module Ferret::Index
623
633
  #
624
634
  # directory:: This can either be a Store::Directory object or a String
625
635
  # representing the path to the directory where you would
626
- # like to store the the index.
636
+ # like to store the index.
627
637
  #
628
638
  # create:: True if you'd like to create the directory if it doesn't
629
639
  # exist or copy over an existing directory. False if you'd
@@ -3,7 +3,7 @@ require 'time'
3
3
 
4
4
  class Float
5
5
  # Return true if the float is within +precision+ of the other value +o+. This
6
- # is used to accomodate for floating point errors.
6
+ # is used to accommodate for floating point errors.
7
7
  #
8
8
  # o:: value to compare with
9
9
  # precision:: the precision to use in the comparison.
@@ -49,7 +49,7 @@ class Integer
49
49
 
50
50
  # Convert the number to a lexicographically sortable string by padding with
51
51
  # 0s. You should make sure that you set the width to a number large enough to
52
- # accomodate all possible values. Also note that this method will not work
52
+ # accommodate all possible values. Also note that this method will not work
53
53
  # with negative numbers. That is negative numbers will sort in the opposite
54
54
  # direction as positive numbers. If you have very large numbers or a mix of
55
55
  # positive and negative numbers you should use the Integer#to_s_lex method
@@ -1,3 +1,3 @@
1
1
  module Ferret
2
- VERSION = '0.11.4'
2
+ VERSION = '0.11.5'
3
3
  end
@@ -508,6 +508,11 @@ module Ferret::Analysis
508
508
  return Token.new(normalize(term), term_start, term_end)
509
509
  end
510
510
 
511
+ def text=(text)
512
+ @ss = StringScanner.new(text)
513
+ end
514
+
515
+
511
516
  protected
512
517
  # returns the regular expression used to find the next token
513
518
  TOKEN_RE = /[[:alpha:]]+/
@@ -521,6 +526,23 @@ module Ferret::Analysis
521
526
  def normalize(str) return str end
522
527
  end
523
528
 
529
+ class MyReverseTokenFilter < TokenStream
530
+ def initialize(token_stream)
531
+ @token_stream = token_stream
532
+ end
533
+
534
+ def text=(text)
535
+ @token_stream.text = text
536
+ end
537
+
538
+ def next()
539
+ if token = @token_stream.next
540
+ token.text = token.text.reverse
541
+ end
542
+ token
543
+ end
544
+ end
545
+
524
546
  class MyCSVTokenizer < MyRegExpTokenizer
525
547
  protected
526
548
  # returns the regular expression used to find the next token
@@ -551,6 +573,24 @@ class CustomTokenizerTest < Test::Unit::TestCase
551
573
  assert_equal(Token.new("2nd field", 12, 21), t.next)
552
574
  assert_equal(Token.new(" p a d d e d f i e l d ", 22, 48), t.next)
553
575
  assert(! t.next())
576
+ t = MyReverseTokenFilter.new(
577
+ AsciiLowerCaseFilter.new(MyCSVTokenizer.new(input)))
578
+ assert_equal(Token.new("dleif tsrif", 0, 11), t.next)
579
+ assert_equal(Token.new("dleif dn2", 12, 21), t.next)
580
+ assert_equal(Token.new(" d l e i f d e d d a p ", 22, 48), t.next)
581
+ t.text = "one,TWO,three"
582
+ assert_equal(Token.new("eno", 0, 3), t.next)
583
+ assert_equal(Token.new("owt", 4, 7), t.next)
584
+ assert_equal(Token.new("eerht", 8, 13), t.next)
585
+ t = AsciiLowerCaseFilter.new(
586
+ MyReverseTokenFilter.new(MyCSVTokenizer.new(input)))
587
+ assert_equal(Token.new("dleif tsrif", 0, 11), t.next)
588
+ assert_equal(Token.new("dleif dn2", 12, 21), t.next)
589
+ assert_equal(Token.new(" d l e i f d e d d a p ", 22, 48), t.next)
590
+ t.text = "one,TWO,three"
591
+ assert_equal(Token.new("eno", 0, 3), t.next)
592
+ assert_equal(Token.new("owt", 4, 7), t.next)
593
+ assert_equal(Token.new("eerht", 8, 13), t.next)
554
594
  end
555
595
  end
556
596
 
@@ -658,6 +658,19 @@ class IndexTest < Test::Unit::TestCase
658
658
  assert_raise(StandardError) {i.close}
659
659
  end
660
660
 
661
+ def check_highlight(index, q, excerpt_length, num_excerpts, expected, field = :field)
662
+ highlights = index.highlight(q, 0,
663
+ :excerpt_length => excerpt_length,
664
+ :num_excerpts => num_excerpts,
665
+ :field => field)
666
+ assert_equal(expected, highlights)
667
+ highlights = index.highlight(q, 1,
668
+ :excerpt_length => excerpt_length,
669
+ :num_excerpts => num_excerpts,
670
+ :field => field)
671
+ assert_equal(expected, highlights)
672
+ end
673
+
661
674
  def test_highlighter()
662
675
  index = Ferret::I.new(:default_field => :field,
663
676
  :default_input_field => :field,
@@ -665,109 +678,49 @@ class IndexTest < Test::Unit::TestCase
665
678
  [
666
679
  "the words we are searching for are one and two also " +
667
680
  "sometimes looking for them as a phrase like this; one " +
668
- "two lets see how it goes"
681
+ "two lets see how it goes",
682
+ [
683
+ "the words we",
684
+ "are searching",
685
+ "for are one",
686
+ "and two also",
687
+ "sometimes looking",
688
+ "for them as a",
689
+ "phrase like this;",
690
+ "one two lets see",
691
+ "how it goes"
692
+ ]
669
693
  ].each {|doc| index << doc }
670
694
 
671
- highlights = index.highlight("one", 0,
672
- :excerpt_length => 10,
673
- :num_excerpts => 1)
674
-
675
- assert_equal(1, highlights.size)
676
- assert_equal("...are <b>one</b>...", highlights[0])
677
-
678
- highlights = index.highlight("one", 0,
679
- :excerpt_length => 10,
680
- :num_excerpts => 2)
681
- assert_equal(2, highlights.size)
682
- assert_equal("...are <b>one</b>...", highlights[0])
683
- assert_equal("...this; <b>one</b>...", highlights[1])
684
-
685
- highlights = index.highlight("one", 0,
686
- :excerpt_length => 10,
687
- :num_excerpts => 3)
688
- assert_equal(3, highlights.size)
689
- assert_equal("the words...", highlights[0])
690
- assert_equal("...are <b>one</b>...", highlights[1])
691
- assert_equal("...this; <b>one</b>...", highlights[2])
692
-
693
- highlights = index.highlight("one", 0,
694
- :excerpt_length => 10,
695
- :num_excerpts => 4)
696
- assert_equal(3, highlights.size)
697
- assert_equal("the words we are...", highlights[0])
698
- assert_equal("...are <b>one</b>...", highlights[1])
699
- assert_equal("...this; <b>one</b>...", highlights[2])
700
-
701
- highlights = index.highlight("one", 0,
702
- :excerpt_length => 10,
703
- :num_excerpts => 5)
704
- assert_equal(2, highlights.size)
705
- assert_equal("the words we are searching for are <b>one</b>...", highlights[0])
706
- assert_equal("...this; <b>one</b>...", highlights[1])
707
-
708
- highlights = index.highlight("one", 0,
709
- :excerpt_length => 10,
710
- :num_excerpts => 20)
711
- assert_equal(1, highlights.size)
712
- assert_equal("the words we are searching for are <b>one</b> and two also " +
713
- "sometimes looking for them as a phrase like this; <b>one</b> " +
714
- "two lets see how it goes", highlights[0])
715
-
716
- highlights = index.highlight("one", 0,
717
- :excerpt_length => 1000,
718
- :num_excerpts => 1)
719
- assert_equal(1, highlights.size)
720
- assert_equal("the words we are searching for are <b>one</b> and two also " +
721
- "sometimes looking for them as a phrase like this; <b>one</b> " +
722
- "two lets see how it goes", highlights[0])
723
-
724
- highlights = index.highlight("(one two)", 0,
725
- :excerpt_length => 15,
726
- :num_excerpts => 2)
727
- assert_equal(2, highlights.size)
728
- assert_equal("...<b>one</b> and <b>two</b>...", highlights[0])
729
- assert_equal("...this; <b>one</b> <b>two</b>...", highlights[1])
730
-
731
- highlights = index.highlight('one two "one two"', 0,
732
- :excerpt_length => 15,
733
- :num_excerpts => 2)
734
- assert_equal(2, highlights.size)
735
- assert_equal("...<b>one</b> and <b>two</b>...", highlights[0])
736
- assert_equal("...this; <b>one two</b>...", highlights[1])
737
-
738
- highlights = index.highlight('"one two"', 0,
739
- :excerpt_length => 15,
740
- :num_excerpts => 1)
741
- assert_equal(1, highlights.size)
742
- # should have a higher priority since it the merger of three matches
743
- assert_equal("...this; <b>one two</b>...", highlights[0])
744
-
745
- highlights = index.highlight('"one two"', 0, :field => :not_a_field,
746
- :excerpt_length => 15,
747
- :num_excerpts => 1)
748
- assert_nil(highlights)
749
-
750
- highlights = index.highlight("wrong_field:one", 0, :field => :wrong_field,
751
- :excerpt_length => 15,
752
- :num_excerpts => 1)
753
- assert_nil(highlights)
754
-
755
- highlights = index.highlight('"the words" "for are one and two" ' +
756
- 'words one two', 0,
757
- :excerpt_length => 10,
758
- :num_excerpts => 1)
759
- assert_equal(1, highlights.size)
760
- assert_equal("<b>the words</b>...", highlights[0])
761
-
762
- highlights = index.highlight('"the words" "for are one and two" ' +
763
- 'words one two', 0,
764
- :excerpt_length => 20,
765
- :num_excerpts => 2)
766
- assert_equal(2, highlights.size)
767
- assert_equal("<b>the words</b> we are...", highlights[0])
768
- assert_equal("...<b>for are one and two</b>...", highlights[1])
769
-
770
-
695
+ check_highlight(index, "one", 10, 1, ["...are <b>one</b>..."])
696
+ check_highlight(index, "one", 10, 2,
697
+ ["...are <b>one</b>...","...this; <b>one</b>..."])
698
+ check_highlight(index, "one", 10, 3,
699
+ ["the words...","...are <b>one</b>...","...this; <b>one</b>..."])
700
+ check_highlight(index, "one", 10, 4,
701
+ ["the words we are...","...are <b>one</b>...","...this; <b>one</b>..."])
702
+ check_highlight(index, "one", 10, 5,
703
+ ["the words we are searching for are <b>one</b>...","...this; <b>one</b>..."])
704
+ check_highlight(index, "one", 10, 20,
705
+ ["the words we are searching for are <b>one</b> and two also " +
706
+ "sometimes looking for them as a phrase like this; <b>one</b> " +
707
+ "two lets see how it goes"])
708
+ check_highlight(index, "one", 200, 1,
709
+ ["the words we are searching for are <b>one</b> and two also " +
710
+ "sometimes looking for them as a phrase like this; <b>one</b> " +
711
+ "two lets see how it goes"])
712
+ check_highlight(index, "(one two)", 15, 2,
713
+ ["...<b>one</b> and <b>two</b>...","...this; <b>one</b> <b>two</b>..."])
714
+ check_highlight(index, 'one two "one two"', 15, 2,
715
+ ["...<b>one</b> and <b>two</b>...","...this; <b>one two</b>..."])
716
+ check_highlight(index, 'one two "one two"', 15, 1,
717
+ ["...this; <b>one two</b>..."])
718
+ check_highlight(index, '"one two"', 15, 1, nil, :not_a_field)
719
+ check_highlight(index, 'wrong_field:one', 15, 1, nil, :wrong_field)
720
+ check_highlight(index, '"the words" "for are one and two" words one two', 10, 1,
721
+ ["<b>the words</b>..."])
722
+ check_highlight(index, '"the words" "for are one and two" words one two', 20, 2,
723
+ ["<b>the words</b> we are...","...<b>for are one and two</b>..."])
771
724
  index.close
772
725
  end
773
726
 
@@ -796,4 +749,14 @@ class IndexTest < Test::Unit::TestCase
796
749
  assert_equal('[]', index.search("xxx").to_json)
797
750
  index.close
798
751
  end
752
+
753
+ def test_large_query_delete
754
+ index = Ferret::I.new
755
+ 20.times do
756
+ index << {:id => 'one'}
757
+ index << {:id => 'two'}
758
+ end
759
+ index.query_delete('id:one')
760
+ assert_equal(20, index.size)
761
+ end
799
762
  end
@@ -378,6 +378,19 @@ module IndexReaderCommon
378
378
  ir2.close()
379
379
  ir3.close()
380
380
  end
381
+
382
+ def test_latest
383
+ assert(@ir.latest?)
384
+ ir2 = ir_new()
385
+ assert(ir2.latest?)
386
+
387
+ ir2.delete(0)
388
+ ir2.commit()
389
+ assert(ir2.latest?)
390
+ assert(!@ir.latest?)
391
+
392
+ ir2.close()
393
+ end
381
394
  end
382
395
 
383
396
  class MultiReaderTest < Test::Unit::TestCase
@@ -0,0 +1,46 @@
1
+ require File.dirname(__FILE__) + "/../../test_helper"
2
+
3
+ class SampleLargeTest < Test::Unit::TestCase
4
+ include Ferret::Index
5
+ include Ferret::Search
6
+ include Ferret::Store
7
+ include Ferret::Utils
8
+
9
+ INDEX_DIR = File.dirname(__FILE__) + "/../../temp/largefile"
10
+ RECORDS = 750
11
+ RECORD_SIZE = 10e5
12
+
13
+ def setup
14
+ @index = Index.new(:path => INDEX_DIR, :create_if_missing => true, :key => :id)
15
+ create_index! if @index.size == 0 or ENV["RELOAD_LARGE_INDEX"]
16
+ end
17
+
18
+ def test_file_index_created
19
+ assert @index.size == RECORDS, "Index size should be #{RECORDS}, is #{@index.size}"
20
+ end
21
+
22
+ def test_keys_work
23
+ @index << {:content => "foo", :id => RECORDS - 4}
24
+ assert @index.size == RECORDS, "Index size should be #{RECORDS}, is #{@index.size}"
25
+ end
26
+
27
+ def test_read_file_after_two_gigs
28
+ assert @index.reader[RECORDS - 5].load.is_a?Hash
29
+ end
30
+
31
+ def create_index!
32
+ @@already_built_large_index ||= false
33
+ return if @@already_built_large_index
34
+ @@already_built_large_index = true
35
+ a = "a"
36
+ RECORDS.times { |i|
37
+ seq = (a.succ! + " ") * RECORD_SIZE
38
+ record = {:id => i, :content => seq}
39
+ @index << record
40
+ print "i"
41
+ STDOUT.flush
42
+ }
43
+ puts "o"
44
+ @index.optimize
45
+ end
46
+ end
@@ -22,7 +22,7 @@ class QueryParserTest < Test::Unit::TestCase
22
22
  ['field:"one <> <> <> three <>"', 'field:"one <> <> <> three"'],
23
23
  ['field:"one <> 222 <> three|four|five <>"', 'field:"one <> 222 <> three|four|five"'],
24
24
  ['field:"on1|tw2 THREE|four|five six|seven"', 'field:"on1|tw2 THREE|four|five six|seven"'],
25
- ['field:"testing|trucks"', 'field:testing field:trucks'],
25
+ ['field:"testing|trucks"', 'field:"testing|trucks"'],
26
26
  ['[aaa bbb]', '[aaa bbb]'],
27
27
  ['{aaa bbb]', '{aaa bbb]'],
28
28
  ['field:[aaa bbb}', 'field:[aaa bbb}'],
@@ -91,6 +91,8 @@ class QueryParserTest < Test::Unit::TestCase
91
91
 
92
92
  ['*:"asdf <> xxx|yyy"', '"asdf <> xxx|yyy" field:"asdf <> xxx|yyy" f1:"asdf <> xxx|yyy" f2:"asdf <> xxx|yyy"'],
93
93
  ['f1|f2:"asdf <> xxx|yyy"', 'f1:"asdf <> xxx|yyy" f2:"asdf <> xxx|yyy"'],
94
+ ['f1|f2:"asdf <> do|yyy"', 'f1:"asdf <> yyy" f2:"asdf <> yyy"'],
95
+ ['f1|f2:"do|cat"', 'f1:cat f2:cat'],
94
96
 
95
97
  ['*:[bbb xxx]', '[bbb xxx] field:[bbb xxx] f1:[bbb xxx] f2:[bbb xxx]'],
96
98
  ['f1|f2:[bbb xxx]', 'f1:[bbb xxx] f2:[bbb xxx]'],
@@ -219,4 +221,18 @@ class QueryParserTest < Test::Unit::TestCase
219
221
  assert_equal(expected, parser.parse(query_str).to_s("xxx"))
220
222
  end
221
223
  end
224
+
225
+ def test_use_keywords_switch
226
+ analyzer = LetterAnalyzer.new
227
+ parser = Ferret::QueryParser.new(:analyzer => analyzer,
228
+ :default_field => "xxx")
229
+ assert_equal("+www (+xxx +yyy) -zzz",
230
+ parser.parse("REQ www (xxx AND yyy) OR NOT zzz").to_s("xxx"))
231
+
232
+ parser = Ferret::QueryParser.new(:analyzer => analyzer,
233
+ :default_field => "xxx",
234
+ :use_keywords => false)
235
+ assert_equal("req www (xxx and yyy) or not zzz",
236
+ parser.parse("REQ www (xxx AND yyy) OR NOT zzz").to_s("xxx"))
237
+ end
222
238
  end
@@ -0,0 +1,58 @@
1
+ require File.dirname(__FILE__) + "/../../test_helper"
2
+
3
+ class MultipleSearchRequestsTest < Test::Unit::TestCase
4
+ include Ferret::Search
5
+ include Ferret::Store
6
+ include Ferret::Analysis
7
+ include Ferret::Index
8
+
9
+ def setup()
10
+ dpath = File.expand_path(File.join(File.dirname(__FILE__),
11
+ '../../temp/fsdir'))
12
+ fs_dir = Ferret::Store::FSDirectory.new(dpath, true)
13
+
14
+ iw = IndexWriter.new(:dir => fs_dir, :create => true, :key => [:id])
15
+ 1000.times do |x|
16
+ doc = {:id => x}
17
+ iw << doc
18
+ end
19
+ iw.close()
20
+ fs_dir.close()
21
+
22
+ @ix = Index.new(:path => dpath, :create => true, :key => [:id])
23
+ end
24
+
25
+ def tear_down()
26
+ @ix.close
27
+ end
28
+
29
+ def test_repeated_queries_segmentation_fault
30
+ 1000.times do |x|
31
+ bq = BooleanQuery.new()
32
+ tq1 = TermQuery.new(:id, 1)
33
+ tq2 = TermQuery.new(:another_id, 1)
34
+ bq.add_query(tq1, :must)
35
+ bq.add_query(tq2, :must)
36
+ top_docs = @ix.search(bq)
37
+ end
38
+ end
39
+
40
+ def test_repeated_queries_bus_error
41
+ 1000.times do |x|
42
+ bq = BooleanQuery.new()
43
+ tq1 = TermQuery.new(:id, '1')
44
+ tq2 = TermQuery.new(:another_id, '1')
45
+ tq3 = TermQuery.new(:yet_another_id, '1')
46
+ tq4 = TermQuery.new(:still_another_id, '1')
47
+ tq5 = TermQuery.new(:one_more_id, '1')
48
+ tq6 = TermQuery.new(:and_another_id, '1')
49
+ bq.add_query(tq1, :must)
50
+ bq.add_query(tq2, :must)
51
+ bq.add_query(tq3, :must)
52
+ bq.add_query(tq4, :must)
53
+ bq.add_query(tq5, :must)
54
+ bq.add_query(tq6, :must)
55
+ top_docs = @ix.search(bq)
56
+ end
57
+ end
58
+ end