twitter_cldr 1.0.0
- data/LICENSE +177 -0
- data/NOTICE +26 -0
- data/README.md +83 -0
- data/Rakefile +36 -0
- data/lib/ext/calendars/date.rb +20 -0
- data/lib/ext/calendars/datetime.rb +36 -0
- data/lib/ext/calendars/time.rb +20 -0
- data/lib/ext/localized_object.rb +15 -0
- data/lib/ext/numbers/bignum.rb +3 -0
- data/lib/ext/numbers/fixnum.rb +3 -0
- data/lib/ext/numbers/float.rb +3 -0
- data/lib/ext/numbers/localized_number.rb +53 -0
- data/lib/ext/strings/symbol.rb +17 -0
- data/lib/formatters/base.rb +36 -0
- data/lib/formatters/calendars/date_formatter.rb +9 -0
- data/lib/formatters/calendars/datetime_formatter.rb +217 -0
- data/lib/formatters/calendars/time_formatter.rb +9 -0
- data/lib/formatters/numbers/currency_formatter.rb +25 -0
- data/lib/formatters/numbers/decimal_formatter.rb +22 -0
- data/lib/formatters/numbers/helpers/base.rb +18 -0
- data/lib/formatters/numbers/helpers/fraction.rb +29 -0
- data/lib/formatters/numbers/helpers/integer.rb +46 -0
- data/lib/formatters/numbers/number_formatter.rb +48 -0
- data/lib/formatters/numbers/percent_formatter.rb +18 -0
- data/lib/formatters/plurals/rules.rb +34 -0
- data/lib/shared/currencies.rb +33 -0
- data/lib/shared/languages.rb +48 -0
- data/lib/shared/resources.rb +45 -0
- data/lib/shared/timezones.rb +1 -0
- data/lib/tokenizers/base.rb +112 -0
- data/lib/tokenizers/calendars/date_tokenizer.rb +24 -0
- data/lib/tokenizers/calendars/datetime_tokenizer.rb +46 -0
- data/lib/tokenizers/calendars/time_tokenizer.rb +24 -0
- data/lib/tokenizers/key_path.rb +30 -0
- data/lib/tokenizers/numbers/number_tokenizer.rb +50 -0
- data/lib/tokenizers/token.rb +17 -0
- data/lib/twitter_cldr.rb +104 -0
- data/lib/version.rb +3 -0
- data/resources/ar/calendars.yml +145 -0
- data/resources/ar/languages.yml +498 -0
- data/resources/ar/numbers.yml +35 -0
- data/resources/ar/plurals.yml +1 -0
- data/resources/da/calendars.yml +157 -0
- data/resources/da/languages.yml +508 -0
- data/resources/da/numbers.yml +31 -0
- data/resources/da/plurals.yml +1 -0
- data/resources/de/calendars.yml +152 -0
- data/resources/de/languages.yml +508 -0
- data/resources/de/numbers.yml +31 -0
- data/resources/de/plurals.yml +1 -0
- data/resources/en/calendars.yml +145 -0
- data/resources/en/languages.yml +510 -0
- data/resources/en/numbers.yml +31 -0
- data/resources/en/plurals.yml +1 -0
- data/resources/es/calendars.yml +145 -0
- data/resources/es/languages.yml +508 -0
- data/resources/es/numbers.yml +30 -0
- data/resources/es/plurals.yml +1 -0
- data/resources/fa/calendars.yml +150 -0
- data/resources/fa/languages.yml +484 -0
- data/resources/fa/numbers.yml +30 -0
- data/resources/fa/plurals.yml +1 -0
- data/resources/fi/calendars.yml +176 -0
- data/resources/fi/languages.yml +508 -0
- data/resources/fi/numbers.yml +31 -0
- data/resources/fi/plurals.yml +1 -0
- data/resources/fil/calendars.yml +159 -0
- data/resources/fil/languages.yml +115 -0
- data/resources/fil/numbers.yml +31 -0
- data/resources/fil/plurals.yml +1 -0
- data/resources/fr/calendars.yml +149 -0
- data/resources/fr/languages.yml +508 -0
- data/resources/fr/numbers.yml +31 -0
- data/resources/fr/plurals.yml +1 -0
- data/resources/he/calendars.yml +145 -0
- data/resources/he/languages.yml +266 -0
- data/resources/he/numbers.yml +31 -0
- data/resources/he/plurals.yml +1 -0
- data/resources/hi/calendars.yml +144 -0
- data/resources/hi/languages.yml +505 -0
- data/resources/hi/numbers.yml +31 -0
- data/resources/hi/plurals.yml +1 -0
- data/resources/hu/calendars.yml +145 -0
- data/resources/hu/languages.yml +508 -0
- data/resources/hu/numbers.yml +30 -0
- data/resources/hu/plurals.yml +1 -0
- data/resources/id/calendars.yml +145 -0
- data/resources/id/languages.yml +506 -0
- data/resources/id/numbers.yml +30 -0
- data/resources/id/plurals.yml +1 -0
- data/resources/it/calendars.yml +164 -0
- data/resources/it/languages.yml +503 -0
- data/resources/it/numbers.yml +30 -0
- data/resources/it/plurals.yml +1 -0
- data/resources/ja/calendars.yml +157 -0
- data/resources/ja/languages.yml +502 -0
- data/resources/ja/numbers.yml +30 -0
- data/resources/ja/plurals.yml +1 -0
- data/resources/ko/calendars.yml +133 -0
- data/resources/ko/languages.yml +505 -0
- data/resources/ko/numbers.yml +30 -0
- data/resources/ko/plurals.yml +1 -0
- data/resources/ms/calendars.yml +145 -0
- data/resources/ms/languages.yml +54 -0
- data/resources/ms/numbers.yml +30 -0
- data/resources/ms/plurals.yml +1 -0
- data/resources/nl/calendars.yml +145 -0
- data/resources/nl/languages.yml +508 -0
- data/resources/nl/numbers.yml +31 -0
- data/resources/nl/plurals.yml +1 -0
- data/resources/no/calendars.yml +122 -0
- data/resources/no/languages.yml +508 -0
- data/resources/no/numbers.yml +30 -0
- data/resources/no/plurals.yml +1 -0
- data/resources/pl/calendars.yml +161 -0
- data/resources/pl/languages.yml +504 -0
- data/resources/pl/numbers.yml +31 -0
- data/resources/pl/plurals.yml +1 -0
- data/resources/pt/calendars.yml +145 -0
- data/resources/pt/languages.yml +508 -0
- data/resources/pt/numbers.yml +31 -0
- data/resources/pt/plurals.yml +1 -0
- data/resources/ru/calendars.yml +176 -0
- data/resources/ru/languages.yml +508 -0
- data/resources/ru/numbers.yml +30 -0
- data/resources/ru/plurals.yml +1 -0
- data/resources/shared/currencies.yml +451 -0
- data/resources/sv/calendars.yml +145 -0
- data/resources/sv/languages.yml +508 -0
- data/resources/sv/numbers.yml +31 -0
- data/resources/sv/plurals.yml +1 -0
- data/resources/th/calendars.yml +145 -0
- data/resources/th/languages.yml +507 -0
- data/resources/th/numbers.yml +30 -0
- data/resources/th/plurals.yml +1 -0
- data/resources/tr/calendars.yml +145 -0
- data/resources/tr/languages.yml +508 -0
- data/resources/tr/numbers.yml +30 -0
- data/resources/tr/plurals.yml +1 -0
- data/resources/ur/calendars.yml +133 -0
- data/resources/ur/languages.yml +81 -0
- data/resources/ur/numbers.yml +31 -0
- data/resources/ur/plurals.yml +1 -0
- data/resources/zh/calendars.yml +169 -0
- data/resources/zh/languages.yml +506 -0
- data/resources/zh/numbers.yml +30 -0
- data/resources/zh/plurals.yml +1 -0
- data/resources/zh-Hant/calendars.yml +141 -0
- data/resources/zh-Hant/languages.yml +409 -0
- data/resources/zh-Hant/numbers.yml +30 -0
- data/spec/ext/calendars/date_spec.rb +45 -0
- data/spec/ext/calendars/datetime_spec.rb +43 -0
- data/spec/ext/calendars/time_spec.rb +45 -0
- data/spec/ext/numbers/bignum_spec.rb +19 -0
- data/spec/ext/numbers/fixnum_spec.rb +19 -0
- data/spec/ext/numbers/float_spec.rb +19 -0
- data/spec/ext/numbers/localized_number_spec.rb +53 -0
- data/spec/ext/strings/symbol_spec.rb +23 -0
- data/spec/formatters/base_spec.rb +12 -0
- data/spec/formatters/calendars/datetime_formatter_spec.rb +324 -0
- data/spec/formatters/numbers/currency_formatter_spec.rb +27 -0
- data/spec/formatters/numbers/decimal_formatter_spec.rb +30 -0
- data/spec/formatters/numbers/helpers/fraction_spec.rb +22 -0
- data/spec/formatters/numbers/helpers/integer_spec.rb +99 -0
- data/spec/formatters/numbers/number_formatter_spec.rb +79 -0
- data/spec/formatters/numbers/percent_formatter_spec.rb +12 -0
- data/spec/formatters/plurals/rules_spec.rb +73 -0
- data/spec/shared/currencies_spec.rb +55 -0
- data/spec/shared/languages_spec.rb +82 -0
- data/spec/shared/resources_spec.rb +44 -0
- data/spec/spec_helper.rb +31 -0
- data/spec/tokenizers/base_spec.rb +134 -0
- data/spec/tokenizers/calendars/date_tokenizer_spec.rb +36 -0
- data/spec/tokenizers/calendars/datetime_tokenizer_spec.rb +52 -0
- data/spec/tokenizers/calendars/time_tokenizer_spec.rb +34 -0
- data/spec/tokenizers/key_path_spec.rb +44 -0
- data/spec/tokenizers/numbers/number_tokenizer_spec.rb +60 -0
- data/spec/tokenizers/token_spec.rb +18 -0
- data/spec/twitter_cldr_spec.rb +53 -0
- metadata +293 -0
data/spec/formatters/plurals/rules_spec.rb
ADDED
@@ -0,0 +1,73 @@
require File.join(File.dirname(File.dirname(File.dirname(__FILE__))), "spec_helper")
include TwitterCldr::Formatters::Plurals

describe Rules do
  describe "#get_resource" do
    it "calls eval on the hash that gets returned, lambdas and all" do
      result = Rules.send(:get_resource, :ru)
      result.should include(:ru)
      result[:ru].should include(:i18n)
      result[:ru][:i18n].should include(:plural)
      result[:ru][:i18n][:plural].should include(:keys)
      result[:ru][:i18n][:plural][:keys].size.should == 4

      result[:ru][:i18n][:plural].should include(:rule)
      result[:ru][:i18n][:plural][:rule].should be_a(Proc)
    end
  end

  describe "#rule_for" do
    it "returns :one for English 1, :other for everything else" do
      Rules.rule_for(1, :en).should == :one
      [5, 9, 10, 20, 60, 99, 100, 103, 141].each do |num|
        Rules.rule_for(num, :en).should == :other
      end
    end

    it "returns the correct values for Russian rules" do
      Rules.rule_for(1, :ru).should == :one
      Rules.rule_for(2, :ru).should == :few
      Rules.rule_for(3, :ru).should == :few
      Rules.rule_for(4, :ru).should == :few
      Rules.rule_for(5, :ru).should == :many
      Rules.rule_for(6, :ru).should == :many
      Rules.rule_for(7, :ru).should == :many
      Rules.rule_for(8, :ru).should == :many
      Rules.rule_for(9, :ru).should == :many
      Rules.rule_for(10, :ru).should == :many
      Rules.rule_for(11, :ru).should == :many

      Rules.rule_for(101, :ru).should == :one
      Rules.rule_for(102, :ru).should == :few
      Rules.rule_for(111, :ru).should == :many
    end

    it "returns :other if there's an error" do
      stub(Rules).get_resource { lambda { raise "Jelly beans" } }
      Rules.rule_for(1, :en).should == :other
      Rules.rule_for(1, :ru).should == :other
    end
  end

  describe "#all_for" do
    it "returns a list of all applicable rules for the given locale" do
      Rules.all_for(:en).each { |rule| [:one, :other].should include(rule) }
      Rules.all_for(:ru).each { |rule| [:one, :few, :many, :other].should include(rule) }
    end

    it "should return an empty array on error" do
      stub(Rules).get_resource { lambda { raise "Jelly beans" } }
      Rules.all_for(:en).should == []
      Rules.all_for(:ru).should == []
    end
  end

  describe "#all" do
    it "gets rules for the default locale (usually supplied by FastGettext)" do
      mock(TwitterCldr).get_locale { :ru }
      rules = Rules.all
      rules.size.should == 4
      rules.each { |rule| [:one, :few, :many, :other].should include(rule) }
    end
  end
end
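The Russian expectations above (1 is :one, 2 through 4 are :few, 5 through 11 are :many, then 101 is :one, 102 is :few, 111 is :many) follow the standard CLDR cardinal rule for Russian. For reference, here is a minimal standalone lambda that satisfies those expectations; it is an illustration only, not the gem's actual resources/ru/plurals.yml.

# Illustrative Russian cardinal plural rule; consistent with the spec's
# expectations, but not taken from the gem's plural resources.
russian_rule = lambda do |n|
  mod10, mod100 = n % 10, n % 100
  if mod10 == 1 && mod100 != 11
    :one
  elsif (2..4).include?(mod10) && !(12..14).include?(mod100)
    :few
  elsif mod10 == 0 || (5..9).include?(mod10) || (11..14).include?(mod100)
    :many
  else
    :other
  end
end

russian_rule.call(3)    # => :few
russian_rule.call(101)  # => :one
russian_rule.call(111)  # => :many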
data/spec/shared/currencies_spec.rb
ADDED
@@ -0,0 +1,55 @@
require File.join(File.dirname(__FILE__), %w[.. spec_helper])
include TwitterCldr::Shared

TEST_COUNTRIES = ["Australia", "Thailand", "Russia", "China", "Japan", "Peru", "South Africa", "India", "South Korea", "United Kingdom"]
TEST_CODES = ["AUD", "THB", "RUB", "CNY", "JPY", "PEN", "ZAR", "INR", "KRW", "GBP"]

describe Currencies do
  describe "#countries" do
    it "should list all supported countries" do
      countries = Currencies.countries
      countries.size.should == 112
      TEST_COUNTRIES.each { |country| countries.should include(country) }
    end
  end

  describe "#currency_codes" do
    it "should list all supported country codes" do
      codes = Currencies.currency_codes
      codes.size.should == 112
      TEST_CODES.each { |code| codes.should include(code) }
    end
  end

  describe "#for_country" do
    it "should return all information for the given country" do
      data = Currencies.for_country("Peru")
      data.should be_a(Hash)

      data.should include(:code)
      data[:code].should == "PEN"
      data.should include(:currency)
      data[:currency].should == "Nuevo Sol"
      data.should include(:symbol)
      data[:symbol].should == "S/."

      data.should_not include(:country)
    end
  end

  describe "#for_code" do
    it "should return all information for the given currency code" do
      data = Currencies.for_code("PEN")
      data.should be_a(Hash)

      data.should include(:country)
      data[:country].should == "Peru"
      data.should include(:currency)
      data[:currency].should == "Nuevo Sol"
      data.should include(:symbol)
      data[:symbol].should == "S/."

      data.should_not include(:code)
    end
  end
end
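The two lookup directions exercised above are symmetric: for_country returns code, currency, and symbol, while for_code returns country, currency, and symbol. A rough sketch of that inversion over a country-keyed hash follows; the real resources/shared/currencies.yml layout and the gem's implementation may differ.

# Hypothetical country-keyed data; the gem's shared currencies resource may be structured differently.
CURRENCY_DATA = {
  "Peru" => { :code => "PEN", :currency => "Nuevo Sol", :symbol => "S/." }
}

def for_country(name)
  CURRENCY_DATA[name]
end

def for_code(code)
  country, info = CURRENCY_DATA.find { |_, data| data[:code] == code }
  info && { :country => country, :currency => info[:currency], :symbol => info[:symbol] }
end

for_country("Peru")  # => { :code => "PEN", :currency => "Nuevo Sol", :symbol => "S/." }
for_code("PEN")      # => { :country => "Peru", :currency => "Nuevo Sol", :symbol => "S/." }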
data/spec/shared/languages_spec.rb
ADDED
@@ -0,0 +1,82 @@
require File.join(File.dirname(__FILE__), %w[.. spec_helper])
include TwitterCldr::Shared

describe Languages do
  describe "#translate_language" do
    it "should translate a language from one locale to another" do
      Languages.translate_language("Russian", :en, :es).should == "ruso"
      Languages.translate_language("ruso", :es, :en).should == "Russian"
      Languages.translate_language("Spanish", :en, :es).should == "español"
      Languages.translate_language("ruso", :es, :ru).should == "русский"
    end

    it "should be capitalization agnostic" do
      Languages.translate_language("russian", :en, :es).should == "ruso"
      Languages.translate_language("RUSSIAN", :en, :es).should == "ruso"
    end

    it "defaults the destination language to English (or whatever FastGettext.locale is)" do
      Languages.translate_language("Ruso", :es).should == "Russian"
      Languages.translate_language("русский", :ru).should == "Russian"
    end

    it "defaults source and destination language to English if not given" do
      Languages.translate_language("Russian").should == "Russian"
      FastGettext.locale = :es
      Languages.translate_language("Russian").should == "ruso"
    end

    it "successfully translates locale codes that are and are not in the CLDR using the locale map" do
      Languages.translate_language("Russian", :en, :'zh-cn').should == "俄文"
      Languages.translate_language("Russian", :en, :'zh').should == "俄文"
    end

    it "should return nil if no translation was found" do
      Languages.translate_language("Russian", :en, :blarg).should == nil
    end
  end

  describe "#from_code_for_locale" do
    it "should return the language in the correct locale for the given locale code (i.e. es in English should be Spanish)" do
      Languages.from_code_for_locale(:es, :en).should == "Spanish"
      Languages.from_code_for_locale(:en, :es).should == "inglés"
    end
  end

  describe "#from_code" do
    it "should return the language in the default locale for the given locale code" do
      Languages.from_code(:es).should == "Spanish"
      Languages.from_code(:ru).should == "Russian"
      FastGettext.locale = :es
      Languages.from_code(:es).should == "español"
    end
  end

  describe "#all_for" do
    it "should return a hash of all languages for the given language code" do
      langs = Languages.all_for(:es)
      langs.should be_a(Hash)
      langs[:ru].should == "ruso"
    end

    it "should return an empty hash for an invalid language" do
      langs = Languages.all_for(:blarg)
      langs.should == {}
    end
  end

  describe "#all" do
    it "should use the default language to get the language hash" do
      langs = Languages.all
      langs.should be_a(Hash)
      langs[:ru].should == "Russian"
      langs[:de].should == "German"

      FastGettext.locale = :es
      langs = Languages.all
      langs.should be_a(Hash)
      langs[:ru].should == "ruso"
      langs[:de].should == "alemán"
    end
  end
end
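The translate_language behaviour being tested, name in and name out with case-insensitive matching, amounts to resolving the source-locale name to a language code and then reading that code out of the destination locale's hash. A small illustrative helper under that assumption (not the gem's implementation):

# Toy per-locale language hashes; the gem loads the real ones from resources/<locale>/languages.yml.
EN_LANGS = { :ru => "Russian", :es => "Spanish" }
ES_LANGS = { :ru => "ruso",    :es => "español" }

# Find the code whose name matches in the source hash, then look that code up in the destination hash.
def translate_language(name, source_langs, dest_langs)
  code, _ = source_langs.find { |_, lang_name| lang_name.downcase == name.downcase }
  code ? dest_langs[code] : nil
end

translate_language("RUSSIAN", EN_LANGS, ES_LANGS)  # => "ruso"
translate_language("ruso", ES_LANGS, EN_LANGS)     # => "Russian"
translate_language("Klingon", EN_LANGS, ES_LANGS)  # => nil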
data/spec/shared/resources_spec.rb
ADDED
@@ -0,0 +1,44 @@
require File.join(File.dirname(__FILE__), %w[.. spec_helper])
include TwitterCldr::Shared

describe Resources do
  before(:each) do
    @resource = Resources.new
  end

  describe "#resource_for" do
    it "loads the requested resource from disk only once" do
      # note that it should convert the string "de" into a symbol
      mock(@resource).data_for(:de, "racehorse").once { "german racehorse resource" }

      # do it twice - the second one shouldn't call data_for
      @resource.resource_for("de", "racehorse").should == "german racehorse resource"
      @resource.resource_for("de", "racehorse").should == "german racehorse resource"
    end
  end

  describe "#data_for" do
    it "loads the correct file for the given locale and resource" do
      mock(YAML).load("data") { { "key" => "value" } }
      mock(File).read(File.join(File.dirname(File.dirname(File.dirname(File.expand_path(__FILE__)))), "resources", "de", "racehorse.yml")) { "data" }
      @resource.resource_for("de", "racehorse").should == { :key => "value" }
    end
  end

  describe "#deep_symbolize_keys" do
    it "should work with a regular hash" do
      result = @resource.send(:deep_symbolize_keys, { "twitter" => "rocks", "my" => "socks" })
      result.should == { :twitter => "rocks", :my => "socks" }
    end

    it "should work with nested hashes" do
      result = @resource.send(:deep_symbolize_keys, { "twitter" => { "rocks" => "my socks" } })
      result.should == { :twitter => { :rocks => "my socks" } }
    end

    it "should work with nested hashes and arrays" do
      result = @resource.send(:deep_symbolize_keys, { "twitter" => { "rocks_my" => [{ "socks" => "and mind" }, { "hard" => "core" }] } })
      result.should == { :twitter => { :rocks_my => [{ :socks => "and mind" }, { :hard => "core" }] } }
    end
  end
end
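The three deep_symbolize_keys cases above (a flat hash, nested hashes, and arrays of hashes) can be covered by a simple recursive walk. The following sketch passes those expectations; the gem's private method may be written differently.

# Recursively symbolize hash keys, descending into nested hashes and arrays.
def deep_symbolize_keys(obj)
  case obj
  when Hash
    obj.each_with_object({}) { |(key, val), result| result[key.to_sym] = deep_symbolize_keys(val) }
  when Array
    obj.map { |item| deep_symbolize_keys(item) }
  else
    obj
  end
end

deep_symbolize_keys("twitter" => { "rocks_my" => [{ "socks" => "and mind" }] })
# => { :twitter => { :rocks_my => [{ :socks => "and mind" }] } }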
data/spec/spec_helper.rb
ADDED
@@ -0,0 +1,31 @@
require File.expand_path(File.join(File.dirname(__FILE__), %w[.. lib twitter_cldr]))
FIXTURE_DIR = File.expand_path(File.join(File.dirname(__FILE__), %w[fixtures]))

class FastGettext
  class << self
    def locale
      @@locale || :en
    end

    def locale=(value)
      @@locale = value
    end
  end
end

Spec::Runner.configure do |config|
  config.mock_with :rr

  config.before(:each) do
    FastGettext.locale = :en
  end
end

def check_token_list(got, expected)
  got.size.should == expected.size
  expected.each_with_index do |exp_hash, index|
    exp_hash.each_pair do |exp_key, exp_val|
      got[index].send(exp_key).should == exp_val
    end
  end
end
data/spec/tokenizers/base_spec.rb
ADDED
@@ -0,0 +1,134 @@
require File.join(File.dirname(File.dirname(__FILE__)), "spec_helper")
include TwitterCldr::Tokenizers

# normally, base can't be instantiated, so we have to patch it here
module TwitterCldr
  module Tokenizers
    class Base
      def init_resources; end
    end
  end
end

describe Base do
  before(:each) do
    @base = Base.new # do NOT do this in production - must use subclass
  end

  it "creating a new base without a locale should default to English, with a locale should not" do
    @base.locale.should == :en
    Base.new(:locale => :de).locale.should == :de
  end

  # tokenize_pattern is supposed to take a pattern found in the YAML resource files and break it into placeholders and plaintext.
  # Placeholders are delimited by single and double curly braces, plaintext is everything else.
  describe "#tokenize_pattern" do
    context "with double curlies (path names, essentially)" do
      it "should work with a placeholder only" do
        @base.send(:tokenize_pattern, "{{place}}").should == [{ :value => "{{place}}", :type => :placeholder }]
      end

      it "should work with two placeholders separated by a space" do
        @base.send(:tokenize_pattern, "{{first}} {{second}}").should == [{ :value => "{{first}}", :type => :placeholder },
                                                                         { :value => " ", :type => :plaintext },
                                                                         { :value => "{{second}}", :type => :placeholder }]
      end

      it "should work when surrounded by plaintext" do
        @base.send(:tokenize_pattern, "being {{totally}} awesome").should == [{ :value => "being ", :type => :plaintext },
                                                                              { :value => "{{totally}}", :type => :placeholder },
                                                                              { :value => " awesome", :type => :plaintext }]
      end
    end

    context "with single curlies (indexes)" do
      it "should work with a placeholder only" do
        @base.send(:tokenize_pattern, "{1}").should == [{ :value => "{1}", :type => :placeholder }]
      end

      it "should work with two placeholders separated by a space" do
        @base.send(:tokenize_pattern, "{0} {1}").should == [{ :value => "{0}", :type => :placeholder },
                                                            { :value => " ", :type => :plaintext },
                                                            { :value => "{1}", :type => :placeholder }]
      end

      it "should work when surrounded by plaintext" do
        @base.send(:tokenize_pattern, "only {1} dragon").should == [{ :value => "only ", :type => :plaintext },
                                                                    { :value => "{1}", :type => :placeholder },
                                                                    { :value => " dragon", :type => :plaintext }]
      end
    end
  end

  describe "#choose_placeholder" do
    before(:each) do
      @placeholders = [{ :name => "wallace", :object => "man" }, { :name => "gromit", :object => "dog" }]
    end

    it "should choose the correct named placeholder" do
      @base.send(:choose_placeholder, "{{wallace}}", @placeholders).should == "man"
      @base.send(:choose_placeholder, "{{gromit}}", @placeholders).should == "dog"
    end

    it "should choose the correct placeholder by array index" do
      @base.send(:choose_placeholder, "{0}", @placeholders).should == "man"
      @base.send(:choose_placeholder, "{1}", @placeholders).should == "dog"
    end
  end

  describe "#expand_pattern" do
    it "recursively calls expand_pattern if a symbol (keypath) is given" do
      mock(@base).traverse(:'another.path') { "found_me" }
      mock(@base).pattern_for("found_me") { "pattern_text" }
      mock.proxy(@base).expand_pattern("pattern_text", :fake_type)
      mock.proxy(@base).expand_pattern(:'another.path', :fake_type)
      @base.send(:expand_pattern, :'another.path', :fake_type).should == [{ :value => "pattern_text", :type => :plaintext }]
    end

    it "expands placeholders as necessary" do
      placeholder_obj = Object.new
      mock(placeholder_obj).tokens(:type => :man) { ["token1", "token2"] }
      @base.placeholders = [{ :name => "wallace", :object => placeholder_obj }]
      @base.send(:expand_pattern, "{{wallace}} rules", :man).should == ["token1", "token2", { :type => :plaintext, :value => " rules" }]
    end

    it "doesn't choke if the placeholder can't be found" do
      @base.placeholders = [{ :name => "gromit", :object => "dog" }]
      @base.send(:expand_pattern, "{{wallace}} rules", :man).should == [{ :type => :plaintext, :value => " rules" }]
    end
  end

  describe "#traverse" do
    before(:each) do
      @tree = { :admiral => { :captain => { :commander => { :lieutenant => "Found Me!" } } } }
    end

    it "should find the correct value in the hash" do
      @base.send(:traverse, :'admiral.captain.commander.lieutenant', @tree).should == "Found Me!"
    end

    it "shouldn't choke if the path doesn't exist" do
      @base.send(:traverse, :'admiral.captain.commander.lieutenant.ensign', @tree).should == nil
    end
  end

  # Not to be confused with tokenize_pattern, which pulls out placeholders. Tokenize_format actually splits a completely
  # expanded format string into whatever parts are defined by the subclass's token type and token splitter regexes.
  describe "#tokenize_format" do
    it "assigns the right token types to the tokens" do
      stub(@base).token_splitter_regex { /([abc])/ }
      stub(@base).token_type_regexes { [{ :type => :a, :regex => /a/ },
                                        { :type => :b, :regex => /b/ },
                                        { :type => :c, :regex => /c/ },
                                        { :type => :plaintext, :regex => // }] }
      tokens = @base.send(:tokenize_format, "a b c")
      tokens.size.should == 5

      tokens[0].value.should == "a"; tokens[0].type.should == :a
      tokens[1].value.should == " "; tokens[1].type.should == :plaintext
      tokens[2].value.should == "b"; tokens[2].type.should == :b
      tokens[3].value.should == " "; tokens[3].type.should == :plaintext
      tokens[4].value.should == "c"; tokens[4].type.should == :c
    end
  end
end
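As the comments above describe, tokenize_pattern only separates {{name}} and {index} placeholders from plaintext; the real work happens later in expand_pattern and tokenize_format. A compact illustration of that first split, written as an assumed approach rather than the gem's actual code:

# Split a resource pattern into placeholder and plaintext pieces.
# Placeholders are either double-curly names ({{date}}) or single-curly indexes ({0}).
def tokenize_pattern(pattern)
  pattern.split(/(\{\{\w+\}\}|\{\d+\})/).reject(&:empty?).map do |piece|
    { :value => piece, :type => piece.start_with?("{") ? :placeholder : :plaintext }
  end
end

tokenize_pattern("being {{totally}} awesome")
# => [{ :value => "being ",      :type => :plaintext },
#     { :value => "{{totally}}", :type => :placeholder },
#     { :value => " awesome",    :type => :plaintext }]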
data/spec/tokenizers/calendars/date_tokenizer_spec.rb
ADDED
@@ -0,0 +1,36 @@
require File.join(File.dirname(File.dirname(File.dirname(__FILE__))), "spec_helper")
include TwitterCldr::Tokenizers

describe DateTokenizer do
  describe "#tokens" do
    it "should tokenize plaintext segments correctly (i.e. Spanish)" do
      tokenizer = DateTokenizer.new(:locale => :es)
      got = tokenizer.tokens(:type => :full)
      expected = [{ :value => "EEEE", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "d", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "'de'", :type => :plaintext },
                  { :value => " ", :type => :plaintext },
                  { :value => "MMMM", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "'de'", :type => :plaintext },
                  { :value => " ", :type => :plaintext },
                  { :value => "y", :type => :pattern }]
      check_token_list(got, expected)
    end

    it "should tokenize patterns with non-latin characters correctly (i.e. Japanese)" do
      tokenizer = DateTokenizer.new(:locale => :ja)
      got = tokenizer.tokens(:type => :full)
      expected = [{ :value => "y", :type => :pattern },
                  { :value => "年", :type => :plaintext },
                  { :value => "M", :type => :pattern },
                  { :value => "月", :type => :plaintext },
                  { :value => "d", :type => :pattern },
                  { :value => "日", :type => :plaintext },
                  { :value => "EEEE", :type => :pattern }]
      check_token_list(got, expected)
    end
  end
end
data/spec/tokenizers/calendars/datetime_tokenizer_spec.rb
ADDED
@@ -0,0 +1,52 @@
require File.join(File.dirname(File.dirname(File.dirname(__FILE__))), "spec_helper")
include TwitterCldr::Tokenizers

describe DateTimeTokenizer do
  describe "#initialize" do
    it "chooses gregorian as the calendar type if none is specified" do
      DateTimeTokenizer.new.calendar_type.should == :gregorian
      DateTimeTokenizer.new(:calendar_type => :julian).calendar_type.should == :julian
    end

    it "initializes individual date and time placeholder tokenizers" do
      placeholders = DateTimeTokenizer.new.placeholders
      placeholders[0][:name].should == :date
      placeholders[0][:object].should be_a(DateTokenizer)
      placeholders[1][:name].should == :time
      placeholders[1][:object].should be_a(TimeTokenizer)
    end
  end

  describe "#tokens" do
    it "should choose the default date time path if no other type is specified" do
      tokenizer = DateTimeTokenizer.new
      mock.proxy(tokenizer.paths)[:default]
      tokenizer.tokens
    end

    it "should expand date and time placeholders and return the correct list of tokens" do
      tokenizer = DateTimeTokenizer.new(:locale => :es)
      got = tokenizer.tokens(:type => :full)
      expected = [{ :value => "HH", :type => :pattern },
                  { :value => ":", :type => :plaintext },
                  { :value => "mm", :type => :pattern },
                  { :value => ":", :type => :plaintext },
                  { :value => "ss", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "zzzz", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "EEEE", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "d", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "'de'", :type => :plaintext },
                  { :value => " ", :type => :plaintext },
                  { :value => "MMMM", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "'de'", :type => :plaintext },
                  { :value => " ", :type => :plaintext },
                  { :value => "y", :type => :pattern }]
      check_token_list(got, expected)
    end
  end
end
data/spec/tokenizers/calendars/time_tokenizer_spec.rb
ADDED
@@ -0,0 +1,34 @@
require File.join(File.dirname(File.dirname(File.dirname(__FILE__))), "spec_helper")
include TwitterCldr::Tokenizers

describe TimeTokenizer do
  describe "#tokens" do
    it "should tokenize a time string correctly (i.e. German)" do
      tokenizer = TimeTokenizer.new(:locale => :de)
      got = tokenizer.tokens(:type => :full)
      expected = [{ :value => "HH", :type => :pattern },
                  { :value => ":", :type => :plaintext },
                  { :value => "mm", :type => :pattern },
                  { :value => ":", :type => :plaintext },
                  { :value => "ss", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "zzzz", :type => :pattern }]
      check_token_list(got, expected)
    end

    it "should tokenize patterns with non-latin characters correctly (i.e. Korean)" do
      tokenizer = TimeTokenizer.new(:locale => :ko)
      got = tokenizer.tokens(:type => :full)
      expected = [{ :value => "a", :type => :pattern },
                  { :value => " ", :type => :plaintext },
                  { :value => "hh", :type => :pattern },
                  { :value => "시 ", :type => :plaintext },
                  { :value => "mm", :type => :pattern },
                  { :value => "분 ", :type => :plaintext },
                  { :value => "ss", :type => :pattern },
                  { :value => "초 ", :type => :plaintext },
                  { :value => "zzzz", :type => :pattern }]
      check_token_list(got, expected)
    end
  end
end
data/spec/tokenizers/key_path_spec.rb
ADDED
@@ -0,0 +1,44 @@
require File.join(File.dirname(File.dirname(__FILE__)), "spec_helper")
include TwitterCldr::Tokenizers

describe KeyPath do
  describe "#dirname" do
    it "should strip off the last element" do
      KeyPath.dirname("castle.in.the.sky").should == "castle.in.the"
    end

    it "shouldn't choke if given an empty string" do
      KeyPath.dirname("").should == ""
    end
  end

  describe "#join" do
    it "joins two args with two dots" do
      KeyPath.join("seahawks.", ".rule").should == "seahawks.rule"
    end

    it "joins two args with one dot at the end of the first" do
      KeyPath.join("seahawks.", "rule").should == "seahawks.rule"
    end

    it "joins two args with one dot at the beginning of the second" do
      KeyPath.join("seahawks", ".rule").should == "seahawks.rule"
    end

    it "joins two args with no dots" do
      KeyPath.join("seahawks", "rule").should == "seahawks.rule"
    end
  end

  describe "#split_path" do
    it "should split the path by dots" do
      KeyPath.split_path("rain.in.spain").should == ["rain", "in", "spain"]
    end
  end

  describe "#join_path" do
    it "should join the path with dots" do
      KeyPath.join_path(["rain", "in", "spain"]).should == "rain.in.spain"
    end
  end
end
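Taken together, the four groups of examples above pin down KeyPath's contract: dot-separated paths, a dirname that drops the last segment, and a join that tolerates dots on either argument. A minimal module satisfying exactly those examples; it is illustrative, and the shipped lib/tokenizers/key_path.rb may be written differently.

# Minimal dotted-key-path helpers consistent with the spec above.
module KeyPathSketch
  def self.split_path(path)
    path.split(".")
  end

  def self.join_path(pieces)
    pieces.join(".")
  end

  def self.dirname(path)
    join_path(split_path(path)[0..-2])
  end

  def self.join(first, second)
    "#{first.chomp('.')}.#{second.sub(/\A\./, '')}"
  end
end

KeyPathSketch.dirname("castle.in.the.sky")  # => "castle.in.the"
KeyPathSketch.join("seahawks.", ".rule")    # => "seahawks.rule"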
data/spec/tokenizers/numbers/number_tokenizer_spec.rb
ADDED
@@ -0,0 +1,60 @@
require File.join(File.dirname(File.dirname(File.dirname(__FILE__))), "spec_helper")
include TwitterCldr::Tokenizers

describe NumberTokenizer do
  describe "#initialize" do
    it "chooses decimal as the default type if no other type is specified" do
      NumberTokenizer.new.type.should == :decimal
      NumberTokenizer.new(:type => :percent).type.should == :percent
    end
  end

  describe "#full_path_for" do
    it "should fill in the type and return the full path to the requested format" do
      NumberTokenizer.new.send(:full_path_for, :default).should == "numbers.formats.decimal.patterns.default"
    end
  end

  describe "#tokens" do
    it "ensures that positive and negative entries are created (as necessary)" do
      tokenizer = NumberTokenizer.new(:locale => :tr)
      tokenizer.tokens
      root = tokenizer.resource[:numbers][:formats][:decimal][:patterns]
      root.should include(:positive)
      root.should include(:negative)
    end

    it "gets tokens for a latin language (i.e. Portuguese)" do
      tokenizer = NumberTokenizer.new(:locale => :pt)
      got = tokenizer.tokens
      expected = [{ :value => "", :type => :pattern },
                  { :value => "#,##0.###", :type => :pattern }]
      check_token_list(got, expected)
    end

    it "gets tokens for a non-latin language (i.e. Russian)" do
      tokenizer = NumberTokenizer.new(:locale => :ru)
      got = tokenizer.tokens
      expected = [{ :value => "", :type => :pattern },
                  { :value => "#,##0.###", :type => :pattern }]
      check_token_list(got, expected)
    end

    it "correctly parses suffixes (i.e. Russian currency)" do
      tokenizer = NumberTokenizer.new(:locale => :ru, :type => :currency)
      got = tokenizer.tokens
      expected = [{ :value => "", :type => :pattern },
                  { :value => "#,##0.00", :type => :pattern },
                  { :value => " ¤", :type => :pattern }]
      check_token_list(got, expected)
    end

    it "correctly parses prefixes (i.e. Italian currency)" do
      tokenizer = NumberTokenizer.new(:locale => :it, :type => :currency)
      got = tokenizer.tokens
      expected = [{ :value => "¤ ", :type => :pattern },
                  { :value => "#,##0.00", :type => :pattern }]
      check_token_list(got, expected)
    end
  end
end
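The expected token lists above all share one shape: a prefix (possibly empty), the numeric pattern body such as "#,##0.###", and a suffix that only shows up when it is non-empty. A rough sketch of that prefix/body/suffix split for CLDR number patterns, using assumed logic rather than the tokenizer's actual regexes:

# Split a CLDR number pattern into [prefix, body] plus a suffix when one is present.
def split_number_pattern(pattern)
  prefix, body, suffix = pattern.match(/\A(.*?)([#,0.]+)(.*)\z/).captures
  parts = [prefix, body]
  parts << suffix unless suffix.empty?
  parts
end

split_number_pattern("#,##0.###")   # => ["", "#,##0.###"]
split_number_pattern("¤ #,##0.00")  # => ["¤ ", "#,##0.00"]
split_number_pattern("#,##0.00 ¤")  # => ["", "#,##0.00", " ¤"]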
data/spec/tokenizers/token_spec.rb
ADDED
@@ -0,0 +1,18 @@
require File.join(File.dirname(File.dirname(__FILE__)), "spec_helper")
include TwitterCldr::Tokenizers

describe Token do
  describe "#initialize" do
    it "should set instance variables passed in the options hash" do
      token = Token.new(:type => "my_type", :value => "my_value")
      token.type.should == "my_type"
      token.value.should == "my_value"
    end
  end

  describe "#to_s" do
    it "should return the token's value" do
      Token.new(:value => "my_value").to_s.should == "my_value"
    end
  end
end
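For reference, a token object with the behaviour these two examples require is tiny. The sketch below turns every option into an instance variable, as the first example's description suggests; it is consistent with the spec but not necessarily identical to the gem's lib/tokenizers/token.rb.

# Sketch of a token value object: each option becomes an instance variable, with readers for the two used here.
class TokenSketch
  attr_reader :type, :value

  def initialize(options = {})
    options.each { |key, val| instance_variable_set(:"@#{key}", val) }
  end

  def to_s
    value
  end
end

token = TokenSketch.new(:type => "my_type", :value => "my_value")
token.type  # => "my_type"
token.to_s  # => "my_value"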