picky 0.3.0 → 0.9.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/lib/picky/application.rb +2 -2
- data/lib/picky/cacher/partial/default.rb +1 -1
- data/lib/picky/configuration/field.rb +8 -10
- data/lib/picky/configuration/indexes.rb +6 -6
- data/lib/picky/configuration/queries.rb +4 -3
- data/lib/picky/cores.rb +2 -2
- data/lib/picky/extensions/array.rb +2 -12
- data/lib/picky/generator.rb +27 -4
- data/lib/picky/index/bundle.rb +5 -41
- data/lib/picky/index/bundle_checker.rb +58 -0
- data/lib/picky/index/type.rb +4 -1
- data/lib/picky/index/wrappers/exact_first.rb +57 -0
- data/lib/picky/indexes.rb +12 -19
- data/lib/picky/loader.rb +7 -8
- data/lib/picky/query/allocation.rb +1 -1
- data/lib/picky/query/combinations.rb +9 -6
- data/lib/picky/query/combinator.rb +11 -5
- data/lib/picky/rack/harakiri.rb +1 -1
- data/lib/picky/results/base.rb +4 -12
- data/lib/picky/results/live.rb +0 -6
- data/lib/picky/routing.rb +17 -17
- data/lib/picky/sources/csv.rb +1 -2
- data/lib/picky/sources/db.rb +0 -1
- data/lib/picky/sources/delicious.rb +41 -0
- data/lib/picky/tokenizers/base.rb +52 -43
- data/lib/picky/tokenizers/default/index.rb +7 -0
- data/lib/picky/tokenizers/default/query.rb +7 -0
- data/lib/picky/tokenizers/index.rb +0 -9
- data/lib/picky/tokenizers/query.rb +0 -9
- data/lib/tasks/application.rake +1 -1
- data/lib/tasks/cache.rake +41 -48
- data/lib/tasks/framework.rake +1 -1
- data/lib/tasks/index.rake +22 -12
- data/lib/tasks/server.rake +3 -3
- data/lib/tasks/shortcuts.rake +9 -2
- data/lib/tasks/statistics.rake +8 -8
- data/lib/tasks/try.rake +4 -2
- data/project_prototype/Gemfile +1 -1
- data/project_prototype/app/application.rb +7 -3
- data/spec/lib/cacher/partial/default_spec.rb +1 -1
- data/spec/lib/cacher/partial/none_spec.rb +12 -0
- data/spec/lib/cacher/partial/subtoken_spec.rb +29 -1
- data/spec/lib/configuration/field_spec.rb +162 -3
- data/spec/lib/configuration/indexes_spec.rb +150 -0
- data/spec/lib/cores_spec.rb +43 -0
- data/spec/lib/extensions/module_spec.rb +27 -16
- data/spec/lib/generator_spec.rb +3 -3
- data/spec/lib/index/bundle_checker_spec.rb +67 -0
- data/spec/lib/index/bundle_spec.rb +0 -50
- data/spec/lib/index/type_spec.rb +47 -0
- data/spec/lib/index/wrappers/exact_first_spec.rb +95 -0
- data/spec/lib/indexers/base_spec.rb +18 -2
- data/spec/lib/loader_spec.rb +21 -1
- data/spec/lib/query/allocation_spec.rb +25 -0
- data/spec/lib/query/base_spec.rb +37 -0
- data/spec/lib/query/combination_spec.rb +10 -1
- data/spec/lib/query/combinations_spec.rb +82 -3
- data/spec/lib/query/combinator_spec.rb +45 -0
- data/spec/lib/query/token_spec.rb +24 -0
- data/spec/lib/rack/harakiri_spec.rb +28 -0
- data/spec/lib/results/base_spec.rb +24 -0
- data/spec/lib/results/live_spec.rb +15 -0
- data/spec/lib/routing_spec.rb +5 -0
- data/spec/lib/sources/db_spec.rb +31 -1
- data/spec/lib/sources/delicious_spec.rb +75 -0
- data/spec/lib/tokenizers/base_spec.rb +160 -49
- data/spec/lib/tokenizers/default/index_spec.rb +11 -0
- data/spec/lib/tokenizers/default/query_spec.rb +11 -0
- metadata +26 -5
- data/lib/picky/index/combined.rb +0 -45
- data/lib/picky/tokenizers/default.rb +0 -3
data/lib/picky/sources/csv.rb
CHANGED
@@ -1,5 +1,3 @@
-require 'csv'
-
 module Sources
 
   # Describes a CSV source, a file with csv in it.
@@ -12,6 +10,7 @@ module Sources
     attr_reader :file_name, :field_names
 
     def initialize *field_names, options
+      require 'csv'
      @field_names = field_names
      @file_name = Hash === options && options[:file] || raise_no_file_given(field_names)
    end
data/lib/picky/sources/db.rb
CHANGED
data/lib/picky/sources/delicious.rb
ADDED
@@ -0,0 +1,41 @@
+module Sources
+
+  class Delicious < Base
+
+    def initialize username, password
+      require 'www/delicious'
+      @username = username
+      @password = password
+    end
+
+    # Harvests the data to index.
+    #
+    def harvest _, field
+      get_data do |uid, data|
+        indexed_id = uid
+        text = data[field.name]
+        next unless text
+        text.force_encoding 'utf-8' # TODO Still needed?
+        yield indexed_id, text
+      end
+    end
+
+    #
+    #
+    def get_data
+      @generated_id ||= 0
+      @posts ||= WWW::Delicious.new(@username, @password).posts_recent(:count => 100)
+      @posts.each do |post|
+        data = {
+          :title => post.title,
+          :tags => post.tags.join(' '),
+          :url => post.url.to_s
+        }
+        @generated_id += 1
+        yield @generated_id, data
+      end
+    end
+
+  end
+
+end
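The new Delicious source (listed as data/lib/picky/sources/delicious.rb above) pulls the 100 most recent bookmarks through WWW::Delicious and hands them to the indexer via the same harvest contract the other sources use. A rough usage sketch follows; the credentials and the FieldStub are placeholders of mine, not part of the gem:

# Sketch only. harvest ignores its first argument and yields an id/text pair
# per bookmark for the requested field (:title, :tags or :url), as in the
# diff above.
source = Sources::Delicious.new 'a_username', 'a_password'

FieldStub = Struct.new(:name)

source.harvest nil, FieldStub.new(:title) do |id, title|
  puts "#{id}: #{title}"
end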
data/lib/picky/tokenizers/base.rb
CHANGED
@@ -4,75 +4,80 @@ module Tokenizers
   #
   class Base
 
+    # TODO use frozen EMPTY_STRING for ''
+    #
+
     # Stopwords.
     #
-    def
-
-
-
-
-
-
-
-
-
-
-    end
+    def stopwords regexp
+      @remove_stopwords_regexp = regexp
+    end
+    def remove_stopwords text
+      text.gsub! @remove_stopwords_regexp, '' if @remove_stopwords_regexp
+      text
+    end
+    @@non_single_stopword_regexp = /^\b[\w:]+?\b[\.\*\~]?\s?$/
+    def remove_non_single_stopwords text
+      return text if text.match @@non_single_stopword_regexp
+      remove_stopwords text
     end
-    def remove_stopwords text; end
 
     # Contraction.
     #
-    def
-
-
-
+    def contracts_expressions what, to_what
+      @contract_what = what
+      @contract_to_what = to_what
+    end
+    def contract text
+      text.gsub! @contract_what, @contract_to_what if @contract_what
     end
-    def contract text; end
 
     # Illegals.
     #
     # TODO Should there be a legal?
     #
-    def
-
-
-
+    def removes_characters regexp
+      @removes_characters_regexp = regexp
+    end
+    def remove_illegals text
+      text.gsub! @removes_characters_regexp, '' if @removes_characters_regexp
+      text
     end
-    def remove_illegals text; end
 
     # Splitting.
     #
-    def
-
-
-
+    def splits_text_on regexp
+      @splits_text_on_regexp = regexp
+    end
+    def split text
+      text.split @splits_text_on_regexp
     end
-    def split text; end
 
     # Normalizing.
     #
-    def
-
-
-
-
-
-
-
-
+    def normalizes_words regexp_replaces
+      @normalizes_words_regexp_replaces = regexp_replaces
+    end
+    def normalize_with_patterns text
+      return text unless @normalizes_words_regexp_replaces
+
+      @normalizes_words_regexp_replaces.each do |regex, replace|
+        # This should be sufficient
+        #
+        text.gsub!(regex, replace) and break
       end
+      remove_after_normalizing_illegals text
+      text
     end
-    def normalize_with_patterns text; end
 
     # Illegal after normalizing.
     #
-    def
-
-
-
+    def removes_characters_after_splitting regexp
+      @removes_characters_after_splitting_regexp = regexp
+    end
+    def remove_after_normalizing_illegals text
+      text.gsub! @removes_characters_after_splitting_regexp, '' if @removes_characters_after_splitting_regexp
     end
-    def remove_after_normalizing_illegals text; end
 
     # Returns a number of tokens, generated from the given text.
     #
@@ -93,6 +98,10 @@ module Tokenizers
 
     def initialize substituter = UmlautSubstituter.new
       @substituter = substituter
+
+      # TODO Default handling.
+      #
+      splits_text_on(/\s/)
     end
 
     # Hooks.
data/lib/picky/tokenizers/index.rb
CHANGED
@@ -5,15 +5,6 @@ module Tokenizers
   #
   class Index < Base
 
-    # Default handling definitions. Override in config.
-    #
-    removes_characters(//)
-    stopwords(//)
-    contracts_expressions(//, '')
-    splits_text_on(/\s/)
-    normalizes_words([])
-    removes_characters_after_splitting(//)
-
     # Default indexing preprocessing hook.
     #
     # Does:
data/lib/picky/tokenizers/query.rb
CHANGED
@@ -13,15 +13,6 @@ module Tokenizers
   #
   class Query < Base
 
-    # Default query tokenizer behaviour. Override in config.
-    #
-    removes_characters(//)
-    stopwords(//)
-    contracts_expressions(//, '')
-    splits_text_on(/\s/)
-    normalizes_words([])
-    removes_characters_after_splitting(//)
-
     def preprocess text
       remove_illegals text # Remove illegal characters
       remove_non_single_stopwords text # remove stop words
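Taken together, these three files replace the class-level defaults that used to sit in Index and Query (removes_characters(//), stopwords(//), splits_text_on(/\s/), ...) with instance-level configuration methods on Base; the defaults themselves now appear to live in the new Tokenizers::Default::Index and Tokenizers::Default::Query referenced by try.rake further down. A configuration sketch under that reading, with illustrative regexps rather than picky's actual defaults:

# Sketch: configuring a tokenizer instance through the methods added to Base
# above. Tokenizers::Index.new without arguments relies on the default
# UmlautSubstituter parameter of Base#initialize.
tokenizer = Tokenizers::Index.new
tokenizer.removes_characters(/[^a-z0-9\s]/i)      # strip unwanted characters
tokenizer.stopwords(/\b(and|the|of|it)\b/i)       # drop some stopwords
tokenizer.splits_text_on(/[\s,]+/)                # split on whitespace and commas
tokenizer.contracts_expressions(/mr\.\s*/i, 'mr') # contract "Mr. " to "mr"

tokenizer.tokenize 'The Art of Ruby, by Mr. X'    # returns the tokens (see try.rake below: tokenize(text).to_a)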
data/lib/tasks/application.rake
CHANGED
data/lib/tasks/cache.rake
CHANGED
@@ -1,53 +1,46 @@
 namespace :cache do
 
-
-
-
-
-
-
-
-
-  end
-
-  desc "Generates the index cache files."
-  task :generate => :application do
-    Indexes.generate_caches
-    puts "Caches generated."
-  end
+  # Move to index namespace.
+  #
+
+  # desc "Generates the index cache files."
+  # task :generate => :application do
+  #   Indexes.generate_caches
+  #   puts "Caches generated."
+  # end
 
-  desc "Generates a specific index cache file like field=books:title. Note: Index tables need to be there. Will generate just the cache."
-  task :only => :application do
-
-
-
-  end
-
-
-  desc 'Checks the index cache files'
-  task :check => :application do
-
-
-  end
-
-
-  desc "Removes the index cache files."
-  task :clear => :application do
-
-
-  end
-
-
-  desc 'Backup the index cache files'
-  task :backup => :application do
-
-
-  end
-
-  desc 'Restore the index cache files'
-  task :restore => :application do
-
-
-  end
+  # desc "Generates a specific index cache file like field=books:title. Note: Index tables need to be there. Will generate just the cache."
+  # task :only => :application do
+  #   type_and_field = ENV['FIELD'] || ENV['field']
+  #   type, field = type_and_field.split ':'
+  #   Indexes.generate_cache_only type.to_sym, field.to_sym
+  # end
+
+
+  # desc 'Checks the index cache files'
+  # task :check => :application do
+  #   Indexes.check_caches
+  #   puts "All caches look ok."
+  # end
+
+
+  # desc "Removes the index cache files."
+  # task :clear => :application do
+  #   Indexes.clear_caches
+  #   puts "All index cache files removed."
+  # end
+
+
+  # desc 'Backup the index cache files'
+  # task :backup => :application do
+  #   Indexes.backup_caches
+  #   puts "Index cache files moved to the backup directory"
+  # end
+
+  # desc 'Restore the index cache files'
+  # task :restore => :application do
+  #   Indexes.restore_caches
+  #   puts "Index cache files restored from the backup directory"
+  # end
 
 end
data/lib/tasks/framework.rake
CHANGED
data/lib/tasks/index.rake
CHANGED
@@ -2,24 +2,34 @@
 #
 namespace :index do
 
-
-
-
+  # Not necessary anymore.
+  #
+  # namespace :directories do
+  #   desc 'Creates the directory structure for the indexes.'
+  #   task :make => :application do
+  #     Indexes.create_directory_structure
+  #     puts "Directory structure generated."
+  #   end
+  # end
+
+  desc "Takes a snapshot, indexes, and caches."
+  task :generate, [:order] => :application do |_, options|
+    randomly = (options.order == 'ordered') ? false : true
+    Indexes.index randomly
   end
 
-  desc "
-  task :
+  desc "Generates the index snapshots."
+  task :generate_snapshots => :application do
     Indexes.take_snapshot
-    Indexes.configuration.index
   end
 
-  desc "E.g. Generates a specific index table. Note:
-  task :only, [:type, :field] => :application do |_, options|
-
-
-  end
+  # desc "E.g. Generates a specific index table. Note: intermediate indexes need to be there."
+  # task :only, [:type, :field] => :application do |_, options|
+  #   type, field = options.type, options.field
+  #   Indexes.generate_index_only type.to_sym, field.to_sym
+  # end
 
-  desc "
+  desc "Generates a specific index from index snapshots."
   task :specific, [:type, :field] => :application do |_, options|
     type, field = options.type, options.field
     Indexes.generate_index_only type.to_sym, field.to_sym
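The reworked :generate task takes an optional :order argument and feeds Indexes.index a randomly flag. A small sketch of that mapping (the helper name is mine, not part of the rake file): only an explicit 'ordered' argument switches randomized indexing off.

def randomly_from order
  order == 'ordered' ? false : true
end

randomly_from nil       # => true  (rake index:generate)
randomly_from 'ordered' # => false (rake "index:generate[ordered]")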
data/lib/tasks/server.rake
CHANGED
@@ -11,7 +11,7 @@ namespace :server do
     pid.blank? ? nil : pid.chomp
   end
 
-  desc "Start the unicorns.
+  desc "Start the unicorns. (Wehee!)"
   task :start => :framework do
     chdir_to_root
     # Rake::Task[:"solr:start"].invoke # TODO Move to better place.
@@ -21,13 +21,13 @@ namespace :server do
     exec command
   end
 
-  desc "Stop the unicorns. Blam!"
+  desc "Stop the unicorns. (Blam!)"
   task :stop => :framework do
     `kill -QUIT #{current_pid}` if current_pid
     # Rake::Task[:"solr:stop"].invoke # TODO Move to better place.
   end
 
-  desc "Restart the unicorns
+  desc "Restart the unicorns."
   task :restart do
     Rake::Task[:"server:stop"].invoke
     sleep 5
data/lib/tasks/shortcuts.rake
CHANGED
@@ -1,6 +1,13 @@
-desc "Shortcut for
+desc "Shortcut for index:generate."
 task :index => :application do
-
+  Rake::Task[:'index:generate'].invoke
+end
+
+desc "Shortcut for try:both"
+task :try, [:text, :type_and_field] => :application do |_, options|
+  text, type_and_field = options.text, options.type_and_field
+
+  Rake::Task[:'try:both'].invoke text, type_and_field
 end
 
 desc "shortcut for server:start"
data/lib/tasks/statistics.rake
CHANGED
@@ -1,13 +1,13 @@
 namespace :statistics do
 
-  desc "start the server"
-  task :start => :application do
-
-  end
+  # desc "start the server"
+  # task :start => :application do
+  #   Statistics.start unless PICKY_ENVIRONMENT == 'test'
+  # end
 
-  desc "stop the server"
-  task :stop => :application do
-
-  end
+  # desc "stop the server"
+  # task :stop => :application do
+  #   Statistics.stop unless PICKY_ENVIRONMENT == 'test'
+  # end
 
 end
data/lib/tasks/try.rake
CHANGED
@@ -6,7 +6,7 @@ namespace :try do
   task :index, [:text, :type_and_field] => :application do |_, options|
     text, type_and_field = options.text, options.type_and_field
 
-    tokenizer = type_and_field ? Indexes.find(*type_and_field.split(':')).tokenizer : Tokenizers::Index
+    tokenizer = type_and_field ? Indexes.find(*type_and_field.split(':')).tokenizer : Tokenizers::Default::Index
 
     puts "\"#{text}\" is index tokenized as #{tokenizer.tokenize(text).to_a}"
   end
@@ -17,7 +17,9 @@ namespace :try do
 
     # TODO tokenize destroys the original text...
     #
-
+    # TODO Use the Query Tokenizer.
+    #
+    puts "\"#{text}\" is query tokenized as #{Tokenizers::Default::Query.tokenize(text.dup).to_a.map(&:to_s).map(&:to_sym)}"
   end
 
   desc "Try the given text with both the index and the query (type:field optional)."
data/project_prototype/Gemfile
CHANGED
data/project_prototype/app/application.rb
CHANGED
@@ -18,9 +18,13 @@ class PickySearch < Application
     Sources::CSV.new(:title, :author, :isbn, :year, :publisher, :subjects, :file => 'app/library.csv'),
     # Use a database as source:
     # Sources::DB.new('SELECT id, title, author, isbn13 as isbn FROM books', :file => 'app/db.yml'),
-    field(:title,
-
-
+    field(:title,
+          :partial => Partial::Subtoken.new(:down_to => 1), # Index partial down to character 1 (default: -3),
+                                                            # e.g. florian -> floria, flori, flor, flo, fl, f
+                                                            # Like this, you'll find florian even when entering just an "f".
+          :similarity => Similarity::DoubleLevenshtone.new(3)), # Up to three similar title word indexed.
+    field(:author, :partial => Partial::Subtoken.new(:down_to => 1)),
+    field(:isbn, :partial => Partial::None.new) # Partially searching on an ISBN makes not much sense, neither does similarity.
 
     # Defines the maximum tokens (words) that pass through to the engine.
     #
data/spec/lib/cacher/partial/default_spec.rb
CHANGED
@@ -6,7 +6,7 @@ describe Cacher::Partial::Default do
     Cacher::Partial::Default.should be_kind_of(Cacher::Partial::Subtoken)
   end
   it "should be a the right down to" do
-    Cacher::Partial::Default.down_to.should ==
+    Cacher::Partial::Default.down_to.should == -3
   end
   it "should be a the right starting at" do
     Cacher::Partial::Default.starting_at.should == -1
data/spec/lib/cacher/partial/none_spec.rb
ADDED
@@ -0,0 +1,12 @@
+require 'spec_helper'
+
+describe Cacher::Partial::None do
+
+  it "has the right superclass" do
+    Cacher::Partial::None.should < Cacher::Partial::Strategy
+  end
+  it "returns an empty index" do
+    Cacher::Partial::None.new.generate_from(:unimportant).should == {}
+  end
+
+end
data/spec/lib/cacher/partial/subtoken_spec.rb
CHANGED
@@ -122,7 +122,7 @@ describe Cacher::Partial::Subtoken do
       end
     end
   end
-  context 'starting_at
+  context 'starting_at set' do
     before(:each) do
       @cacher = Cacher::Partial::Subtoken.new :down_to => 4, :starting_at => -2
     end
@@ -148,6 +148,34 @@ describe Cacher::Partial::Subtoken do
       end
     end
   end
+  context 'starting_at set' do
+    before(:each) do
+      @cacher = Cacher::Partial::Subtoken.new :down_to => 4, :starting_at => 0
+    end
+    describe 'starting_at' do
+      it 'should return the right value' do
+        @cacher.starting_at.should == 0
+      end
+    end
+    describe 'down_to' do
+      it 'should return the right value' do
+        @cacher.down_to.should == 4
+      end
+    end
+    describe 'generate_from' do
+      it 'should generate the right index' do
+        @cacher.generate_from( :florian => [1], :flavia => [2] ).should == {
+          :florian => [1],
+          :floria => [1],
+          :flori => [1],
+          :flor => [1],
+          :flavia => [2],
+          :flavi => [2],
+          :flav => [2]
+        }
+      end
+    end
+  end
 end
 
 end