mack-localization 0.8.1 → 0.8.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/chars.rb ADDED
@@ -0,0 +1,169 @@
1
+ require File.join(File.dirname(__FILE__), "handlers", "utf8_handler")
2
+ require File.join(File.dirname(__FILE__), "handlers", "passthru_handler")
3
+
4
+ # Encapsulates all the functionality related to the Chars proxy.
5
+ module ActiveSupport::Multibyte
6
+ # Chars enables you to work transparently with multibyte encodings in the Ruby String class without having extensive
7
+ # knowledge about the encoding. A Chars object accepts a string upon initialization and proxies String methods in an
8
+ # encoding safe manner. All the normal String methods are also implemented on the proxy.
9
+ #
10
+ # String methods are proxied through the Chars object, and can be accessed through the +chars+ method. Methods
11
+ # which would normally return a String object now return a Chars object so methods can be chained.
12
+ #
13
+ # "The Perfect String ".chars.downcase.strip.normalize #=> "the perfect string"
14
+ #
15
+ # Chars objects are perfectly interchangeable with String objects as long as no explicit class checks are made.
16
+ # If certain methods do explicitly check the class, call +to_s+ before you pass chars objects to them.
17
+ #
18
+ # bad.explicit_checking_method "T".chars.downcase.to_s
19
+ #
20
+ # The actual operations on the string are delegated to handlers. Theoretically handlers can be implemented for
21
+ # any encoding, but the default handler handles UTF-8. This handler is set during initialization; if you want to
22
+ # use your own handler, you can set it on the Chars class. Look at the UTF8Handler source for an example of how to
23
+ # implement your own handler. If you want your own handler to work on anything but UTF-8, you probably also
24
+ # want to override Chars#handler.
25
+ #
26
+ # ActiveSupport::Multibyte::Chars.handler = MyHandler
27
+ #
28
+ # Note that a few methods are defined on Chars instead of the handler because they are defined on Object or Kernel
29
+ # and method_missing can't catch them.
30
+ class Chars
31
+
32
+ attr_reader :string # The contained string
33
+ alias_method :to_s, :string
34
+
35
+ include Comparable
36
+
37
+ # The magic method to make String and Chars comparable
38
+ def to_str
39
+ # Using any other ways of overriding the String itself will lead you all the way from infinite loops to
40
+ # core dumps. Don't go there.
41
+ @string
42
+ end
43
+
44
+ # Makes a Unicode string look like a regular string in the console
45
+ def inspect
46
+ @string.inspect
47
+ end
48
+
49
+ def is_a?(type)
50
+ if type == String
51
+ true
52
+ else
53
+ super
54
+ end
55
+ end
56
+
57
+ # Make [] with a single index return the character at that position (as Chars) instead of its codepoint
58
+ def [](num)
59
+ if num.is_a?(Fixnum)
60
+ self[num..num]
61
+ else
62
+ super
63
+ end
64
+ end
65
+
66
+ # Create a new Chars instance.
67
+ def initialize(str)
68
+ @string = (str.string rescue str)
69
+ end
70
+
71
+ def each &block
72
+ split(//).each(&block)
73
+ end
74
+
75
+ def inject &block
76
+ split(//).inject(&block)
77
+ end
78
+
79
+ def collect &block
80
+ split(//).collect(&block)
81
+ end
82
+
83
+ alias_method :map, :collect
84
+
85
+ def to_a
86
+ split(//)
87
+ end
88
+
89
+ # Returns -1, 0 or +1 depending on whether the Chars object is to be sorted before, at the same position as,
90
+ # or after the object on the right side of the operation. It accepts any object that implements +to_s+. See
91
+ # String#<=> for more details.
92
+ def <=>(other); @string <=> other.to_s; end
93
+
94
+ # Works just like String#split, with the exception that the items in the resulting list are Chars
95
+ # instances instead of String. This makes chaining methods easier.
96
+ def split(*args)
97
+ @string.split(*args).map { |i| i.chars }
98
+ end
99
+
100
+ # Gsub works exactly the same as gsub on a normal string.
101
+ def gsub(*a, &b); @string.gsub(*a, &b).chars; end
102
+
103
+ # Like String#=~, except that it returns the character offset (in codepoints) instead of the byte offset.
104
+ def =~(other)
105
+ handler.translate_offset(@string, @string =~ other)
106
+ end
107
+
108
+ # Tries to forward all undefined methods to the handler; when a method is not defined on the handler, it is sent
109
+ # to the contained string. method_missing is also responsible for making the bang (!) methods destructive.
110
+ def method_missing(m, *a, &b)
111
+ begin
112
+ # Simulate methods with a ! at the end because we can't touch the enclosed string from the handlers.
113
+ if m.to_s =~ /^(.*)\!$/
114
+ result = handler.send($1, @string, *a, &b)
115
+ if result == @string
116
+ result = nil
117
+ else
118
+ @string.replace result
119
+ end
120
+ else
121
+ result = handler.send(m, @string, *a, &b)
122
+ end
123
+ rescue NoMethodError
124
+ result = @string.send(m, *a, &b)
125
+ rescue Handlers::EncodingError
126
+ @string.replace handler.tidy_bytes(@string)
127
+ retry
128
+ end
129
+
130
+ if result.kind_of?(String)
131
+ result.chars
132
+ else
133
+ result
134
+ end
135
+ end
136
+
137
+ # Sets the handler class for Chars objects.
138
+ def self.handler=(klass)
139
+ @@handler = klass
140
+ end
141
+
142
+ # Returns the proper handler for the contained string depending on $KCODE and the encoding of the string. This
143
+ # method is used internally to always redirect messages to the proper classes depending on the context.
144
+ def handler
145
+ if utf8_pragma?
146
+ @@handler
147
+ else
148
+ ActiveSupport::Multibyte::Handlers::PassthruHandler
149
+ end
150
+ end
151
+
152
+ private
153
+
154
+ # +utf8_pragma+ checks if it can send this string to the handlers. It makes sure @string isn't nil and $KCODE is
155
+ # set to 'UTF8'.
156
+ def utf8_pragma?
157
+ !@string.nil? && ($KCODE == 'UTF8')
158
+ end
159
+ end
160
+ end
161
+
162
+ # When we can load the utf8proc library, override normalization with the faster methods
163
+ begin
164
+ require 'utf8proc_native'
165
+ require File.join(File.dirname(__FILE__), "handlers", "utf8_handler_proc")
166
+ ActiveSupport::Multibyte::Chars.handler = ActiveSupport::Multibyte::Handlers::UTF8HandlerProc
167
+ rescue LoadError
168
+ ActiveSupport::Multibyte::Chars.handler = ActiveSupport::Multibyte::Handlers::UTF8Handler
169
+ end
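As a rough usage sketch of the proxy defined above (not taken from the package's own code or tests), the snippet below assumes Ruby 1.8 with the vendored unicodechars library already required, so that String#chars exists and $KCODE is 'UTF8':

    # A sketch of the Chars proxy in use; illustrative only.
    str   = "The Perfect String  "
    chars = str.chars                   # proxy wrapping str

    # Non-bang methods return new Chars objects, so calls chain:
    chars.downcase.strip.to_s           # => "the perfect string"

    # Bang methods are simulated by method_missing: the wrapped string is
    # replaced in place, and nil is returned when nothing changed.
    chars.upcase!
    str                                 # => "THE PERFECT STRING  "

    # =~ reports a character (codepoint) offset rather than a byte offset:
    "én garde".chars =~ /garde/         # => 3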
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/generators/generate_tables.rb ADDED
@@ -0,0 +1,149 @@
1
+ #!/usr/bin/env ruby
2
+ #begin
3
+ # require File.dirname(__FILE__) + '/../../../active_support'
4
+ #rescue IOError
5
+ #end
6
+ require 'open-uri'
7
+ require 'tmpdir'
8
+
9
+ module ActiveSupport::Multibyte::Handlers #:nodoc:
10
+ class UnicodeDatabase #:nodoc:
11
+ def self.load
12
+ [Hash.new(Codepoint.new),[],{},{}]
13
+ end
14
+ end
15
+
16
+ class UnicodeTableGenerator #:nodoc:
17
+ BASE_URI = "http://www.unicode.org/Public/#{ActiveSupport::Multibyte::UNICODE_VERSION}/ucd/"
18
+ SOURCES = {
19
+ :codepoints => BASE_URI + 'UnicodeData.txt',
20
+ :composition_exclusion => BASE_URI + 'CompositionExclusions.txt',
21
+ :grapheme_break_property => BASE_URI + 'auxiliary/GraphemeBreakProperty.txt',
22
+ :cp1252 => 'http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT'
23
+ }
24
+
25
+ def initialize
26
+ @ucd = UnicodeDatabase.new
27
+
28
+ default = Codepoint.new
29
+ default.combining_class = 0
30
+ default.uppercase_mapping = 0
31
+ default.lowercase_mapping = 0
32
+ @ucd.codepoints = Hash.new(default)
33
+
34
+ @ucd.composition_exclusion = []
35
+ @ucd.composition_map = {}
36
+ @ucd.boundary = {}
37
+ @ucd.cp1252 = {}
38
+ end
39
+
40
+ def parse_codepoints(line)
41
+ codepoint = Codepoint.new
42
+ raise "Could not parse input." unless line =~ /^
43
+ ([0-9A-F]+); # code
44
+ ([^;]+); # name
45
+ ([A-Z]+); # general category
46
+ ([0-9]+); # canonical combining class
47
+ ([A-Z]+); # bidi class
48
+ (<([A-Z]*)>)? # decomposition type
49
+ ((\ ?[0-9A-F]+)*); # decompomposition mapping
50
+ ([0-9]*); # decimal digit
51
+ ([0-9]*); # digit
52
+ ([^;]*); # numeric
53
+ ([YN]*); # bidi mirrored
54
+ ([^;]*); # unicode 1.0 name
55
+ ([^;]*); # iso comment
56
+ ([0-9A-F]*); # simple uppercase mapping
57
+ ([0-9A-F]*); # simple lowercase mapping
58
+ ([0-9A-F]*)$/ix # simple titlecase mapping
59
+ codepoint.code = $1.hex
60
+ #codepoint.name = $2
61
+ #codepoint.category = $3
62
+ codepoint.combining_class = Integer($4)
63
+ #codepoint.bidi_class = $5
64
+ codepoint.decomp_type = $7
65
+ codepoint.decomp_mapping = ($8=='') ? nil : $8.split.collect { |element| element.hex }
66
+ #codepoint.bidi_mirrored = ($13=='Y') ? true : false
67
+ codepoint.uppercase_mapping = ($16=='') ? 0 : $16.hex
68
+ codepoint.lowercase_mapping = ($17=='') ? 0 : $17.hex
69
+ #codepoint.titlecase_mapping = ($18=='') ? nil : $18.hex
70
+ @ucd.codepoints[codepoint.code] = codepoint
71
+ end
72
+
73
+ def parse_grapheme_break_property(line)
74
+ if line =~ /^([0-9A-F\.]+)\s*;\s*([\w]+)\s*#/
75
+ type = $2.downcase.intern
76
+ @ucd.boundary[type] ||= []
77
+ if $1.include? '..'
78
+ parts = $1.split '..'
79
+ @ucd.boundary[type] << (parts[0].hex..parts[1].hex)
80
+ else
81
+ @ucd.boundary[type] << $1.hex
82
+ end
83
+ end
84
+ end
85
+
86
+ def parse_composition_exclusion(line)
87
+ if line =~ /^([0-9A-F]+)/i
88
+ @ucd.composition_exclusion << $1.hex
89
+ end
90
+ end
91
+
92
+ def parse_cp1252(line)
93
+ if line =~ /^([0-9A-Fx]+)\s([0-9A-Fx]+)/i
94
+ @ucd.cp1252[$1.hex] = $2.hex
95
+ end
96
+ end
97
+
98
+ def create_composition_map
99
+ @ucd.codepoints.each do |_, cp|
100
+ if !cp.nil? and cp.combining_class == 0 and cp.decomp_type.nil? and !cp.decomp_mapping.nil? and cp.decomp_mapping.length == 2 and @ucd[cp.decomp_mapping[0]].combining_class == 0 and !@ucd.composition_exclusion.include?(cp.code)
101
+ @ucd.composition_map[cp.decomp_mapping[0]] ||= {}
102
+ @ucd.composition_map[cp.decomp_mapping[0]][cp.decomp_mapping[1]] = cp.code
103
+ end
104
+ end
105
+ end
106
+
107
+ def normalize_boundary_map
108
+ @ucd.boundary.each do |k,v|
109
+ if [:lf, :cr].include? k
110
+ @ucd.boundary[k] = v[0]
111
+ end
112
+ end
113
+ end
114
+
115
+ def parse
116
+ SOURCES.each do |type, url|
117
+ filename = File.join(Dir.tmpdir, "#{url.split('/').last}")
118
+ unless File.exist?(filename)
119
+ $stderr.puts "Downloading #{url.split('/').last}"
120
+ File.open(filename, 'wb') do |target|
121
+ open(url) do |source|
122
+ source.each_line { |line| target.write line }
123
+ end
124
+ end
125
+ end
126
+ File.open(filename) do |file|
127
+ file.each_line { |line| send "parse_#{type}".intern, line }
128
+ end
129
+ end
130
+ create_composition_map
131
+ normalize_boundary_map
132
+ end
133
+
134
+ def dump_to(filename)
135
+ File.open(filename, 'wb') do |f|
136
+ f.write Marshal.dump([@ucd.codepoints, @ucd.composition_exclusion, @ucd.composition_map, @ucd.boundary, @ucd.cp1252])
137
+ end
138
+ end
139
+ end
140
+ end
141
+
142
+ if __FILE__ == $0
143
+ filename = ActiveSupport::Multibyte::Handlers::UnicodeDatabase.filename
144
+ generator = ActiveSupport::Multibyte::Handlers::UnicodeTableGenerator.new
145
+ generator.parse
146
+ print "Writing to: #{filename}"
147
+ generator.dump_to filename
148
+ puts " (#{File.size(filename)} bytes)"
149
+ end
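To make the parse_codepoints regex above concrete, here is an illustrative snippet (not part of the generator) that feeds it one standard fifteen-field UnicodeData.txt record; the sample values are those of U+00C5, and the snippet assumes handlers/utf8_handler.rb and this generator file are both loaded:

    # Hypothetical snippet, for illustration of the capture groups only.
    line = "00C5;LATIN CAPITAL LETTER A WITH RING ABOVE;Lu;0;L;0041 030A;;;;N;;;;00E5;"

    generator = ActiveSupport::Multibyte::Handlers::UnicodeTableGenerator.new
    generator.parse_codepoints(line)

    cp = generator.instance_variable_get(:@ucd).codepoints[0x00C5]
    cp.combining_class     # => 0
    cp.decomp_type         # => nil (canonical decomposition)
    cp.decomp_mapping      # => [0x0041, 0x030A]  ("A" plus combining ring above)
    cp.uppercase_mapping   # => 0   (already uppercase, field left empty)
    cp.lowercase_mapping   # => 0x00E5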
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers/passthru_handler.rb ADDED
@@ -0,0 +1,9 @@
1
+ # Chars uses this handler when $KCODE is not set to 'UTF8'. Because this handler doesn't define any methods, all calls
2
+ # will be forwarded to String.
3
+ class ActiveSupport::Multibyte::Handlers::PassthruHandler
4
+
5
+ # Returns the original byte offset
6
+ def self.translate_offset(string, byte_offset) #:nodoc:
7
+ byte_offset
8
+ end
9
+ end
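The file above is also the simplest template for the custom handlers mentioned in chars.rb. The hypothetical ShoutingHandler below (not part of the package) only illustrates the contract Chars relies on: class-level methods that take the raw string as their first argument, plus translate_offset:

    # Hypothetical handler, purely illustrative.
    class ShoutingHandler
      # Chars#=~ expects every handler to respond to translate_offset.
      def self.translate_offset(string, byte_offset)
        byte_offset
      end

      # Called for chars.upcase and, via the bang simulation, chars.upcase!
      def self.upcase(string)
        string.tr('a-z', 'A-Z') + '!'
      end
    end

    ActiveSupport::Multibyte::Chars.handler = ShoutingHandler
    "hello".chars.upcase.to_s   # => "HELLO!"  (when $KCODE == 'UTF8')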
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers/utf8_handler.rb ADDED
@@ -0,0 +1,442 @@
1
+ # Contains all the handlers and helper classes
2
+ module ActiveSupport::Multibyte::Handlers
3
+ class EncodingError < ArgumentError; end
4
+
5
+ class Codepoint #:nodoc:
6
+ attr_accessor :code, :combining_class, :decomp_type, :decomp_mapping, :uppercase_mapping, :lowercase_mapping
7
+ end
8
+
9
+ class UnicodeDatabase #:nodoc:
10
+ attr_accessor :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252
11
+
12
+ # Creates a new UnicodeDatabase instance and loads the database.
13
+ def initialize
14
+ begin
15
+ @codepoints, @composition_exclusion, @composition_map, @boundary, @cp1252 = self.class.load
16
+ rescue Exception => e
17
+ raise IOError.new("Couldn't load the unicode tables for UTF8Handler (#{e.message}), handler is unusable")
18
+ end
19
+ @codepoints ||= Hash.new(Codepoint.new)
20
+ @composition_exclusion ||= []
21
+ @composition_map ||= {}
22
+ @boundary ||= {}
23
+ @cp1252 ||= {}
24
+
25
+ # Redefine the === method so we can write shorter rules for grapheme cluster breaks
26
+ @boundary.each do |k,_|
27
+ @boundary[k].instance_eval do
28
+ def ===(other)
29
+ detect { |i| i === other } ? true : false
30
+ end
31
+ end if @boundary[k].kind_of?(Array)
32
+ end
33
+ end
34
+
35
+ # Shortcut to ucd.codepoints[]
36
+ def [](index); @codepoints[index]; end
37
+
38
+ # Returns the directory in which the data files are stored
39
+ def self.dirname
40
+ File.dirname(__FILE__) + '/../../values/'
41
+ end
42
+
43
+ # Returns the filename for the data file for this version
44
+ def self.filename
45
+ File.expand_path File.join(dirname, "unicode_tables.dat")
46
+ end
47
+
48
+ # Loads the unicode database and returns all the internal objects of UnicodeDatabase
49
+ def self.load
50
+ File.open(self.filename, 'rb') { |f| Marshal.load f.read }
51
+ end
52
+ end
53
+
54
+ # UTF8Handler implements Unicode-aware operations for strings; these operations are used by the Chars
55
+ # proxy when $KCODE is set to 'UTF8'.
56
+ class UTF8Handler
57
+ # Hangul character boundaries and properties
58
+ HANGUL_SBASE = 0xAC00
59
+ HANGUL_LBASE = 0x1100
60
+ HANGUL_VBASE = 0x1161
61
+ HANGUL_TBASE = 0x11A7
62
+ HANGUL_LCOUNT = 19
63
+ HANGUL_VCOUNT = 21
64
+ HANGUL_TCOUNT = 28
65
+ HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT
66
+ HANGUL_SCOUNT = 11172
67
+ HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT
68
+ HANGUL_JAMO_FIRST = 0x1100
69
+ HANGUL_JAMO_LAST = 0x11FF
70
+
71
+ # All the unicode whitespace
72
+ UNICODE_WHITESPACE = [
73
+ (0x0009..0x000D).to_a, # White_Space # Cc [5] <control-0009>..<control-000D>
74
+ 0x0020, # White_Space # Zs SPACE
75
+ 0x0085, # White_Space # Cc <control-0085>
76
+ 0x00A0, # White_Space # Zs NO-BREAK SPACE
77
+ 0x1680, # White_Space # Zs OGHAM SPACE MARK
78
+ 0x180E, # White_Space # Zs MONGOLIAN VOWEL SEPARATOR
79
+ (0x2000..0x200A).to_a, # White_Space # Zs [11] EN QUAD..HAIR SPACE
80
+ 0x2028, # White_Space # Zl LINE SEPARATOR
81
+ 0x2029, # White_Space # Zp PARAGRAPH SEPARATOR
82
+ 0x202F, # White_Space # Zs NARROW NO-BREAK SPACE
83
+ 0x205F, # White_Space # Zs MEDIUM MATHEMATICAL SPACE
84
+ 0x3000, # White_Space # Zs IDEOGRAPHIC SPACE
85
+ ].flatten.freeze
86
+
87
+ # The BOM (byte order mark) can also be seen as whitespace; it's a non-rendering character used to distinguish
88
+ # between little and big endian byte order. This is not an issue in UTF-8, so it must be ignored.
89
+ UNICODE_LEADERS_AND_TRAILERS = UNICODE_WHITESPACE + [65279] # ZERO-WIDTH NO-BREAK SPACE aka BOM
90
+
91
+ # Borrowed from the Kconv library by Shinji KONO - (also as seen on the W3C site)
92
+ UTF8_PAT = /\A(?:
93
+ [\x00-\x7f] |
94
+ [\xc2-\xdf] [\x80-\xbf] |
95
+ \xe0 [\xa0-\xbf] [\x80-\xbf] |
96
+ [\xe1-\xef] [\x80-\xbf] [\x80-\xbf] |
97
+ \xf0 [\x90-\xbf] [\x80-\xbf] [\x80-\xbf] |
98
+ [\xf1-\xf3] [\x80-\xbf] [\x80-\xbf] [\x80-\xbf] |
99
+ \xf4 [\x80-\x8f] [\x80-\xbf] [\x80-\xbf]
100
+ )*\z/xn
101
+
102
+ # Returns a regular expression pattern that matches the passed Unicode codepoints
103
+ def self.codepoints_to_pattern(array_of_codepoints) #:nodoc:
104
+ array_of_codepoints.collect{ |e| [e].pack 'U*' }.join('|')
105
+ end
106
+ UNICODE_TRAILERS_PAT = /(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+\Z/
107
+ UNICODE_LEADERS_PAT = /\A(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+/
108
+
109
+ class << self
110
+
111
+ # ///
112
+ # /// BEGIN String method overrides
113
+ # ///
114
+
115
+ # Inserts the passed fragment into the string at the specified codepoint offset
116
+ def insert(str, offset, fragment)
117
+ str.replace(
118
+ u_unpack(str).insert(
119
+ offset,
120
+ u_unpack(fragment)
121
+ ).flatten.pack('U*')
122
+ )
123
+ end
124
+
125
+ # Returns the position of the passed argument in the string, counting in codepoints
126
+ def index(str, *args)
127
+ bidx = str.index(*args)
128
+ bidx ? (u_unpack(str.slice(0...bidx)).size) : nil
129
+ end
130
+
131
+ # Does Unicode-aware rstrip
132
+ def rstrip(str)
133
+ str.gsub(UNICODE_TRAILERS_PAT, '')
134
+ end
135
+
136
+ # Does Unicode-aware lstrip
137
+ def lstrip(str)
138
+ str.gsub(UNICODE_LEADERS_PAT, '')
139
+ end
140
+
141
+ # Removes leading and trailing whitespace
142
+ def strip(str)
143
+ str.gsub(UNICODE_LEADERS_PAT, '').gsub(UNICODE_TRAILERS_PAT, '')
144
+ end
145
+
146
+ # Returns the number of codepoints in the string
147
+ def size(str)
148
+ u_unpack(str).size
149
+ end
150
+ alias_method :length, :size
151
+
152
+ # Reverses codepoints in the string.
153
+ def reverse(str)
154
+ u_unpack(str).reverse.pack('U*')
155
+ end
156
+
157
+ # Implements Unicode-aware slice with codepoints. Slicing with a single index returns the codepoint at that
158
+ # position.
159
+ def slice(str, *args)
160
+ if (args.size == 2 && args.first.is_a?(Range))
161
+ raise TypeError, 'cannot convert Range into Integer' # Do as if we were native
162
+ elsif args[0].kind_of? Range
163
+ cps = u_unpack(str).slice(*args)
164
+ cps.nil? ? nil : cps.pack('U*')
165
+ elsif args.size == 1 && args[0].kind_of?(Numeric)
166
+ u_unpack(str)[args[0]]
167
+ else
168
+ u_unpack(str).slice(*args).pack('U*')
169
+ end
170
+ end
171
+ alias_method :[], :slice
172
+
173
+ # Convert characters in the string to uppercase
174
+ def upcase(str); to_case :uppercase_mapping, str; end
175
+
176
+ # Convert characters in the string to lowercase
177
+ def downcase(str); to_case :lowercase_mapping, str; end
178
+
179
+ # Returns a copy of +str+ with the first character converted to uppercase and the remainder to lowercase
180
+ def capitalize(str)
181
+ upcase(slice(str, 0..0)) + downcase(slice(str, 1..-1) || '')
182
+ end
183
+
184
+ # ///
185
+ # /// Extra String methods for unicode operations
186
+ # ///
187
+
188
+ # Returns the KC normalization of the string by default. NFKC is considered the best normalization form for
189
+ # passing strings to databases and validations.
190
+ #
191
+ # * <tt>str</tt>: The string to perform normalization on.
192
+ # * <tt>form</tt>: The form you want to normalize in. Should be one of the following: :c, :kc, :d or :kd.
193
+ def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM)
194
+ # See http://www.unicode.org/reports/tr15, Table 1
195
+ codepoints = u_unpack(str)
196
+ case form
197
+ when :d
198
+ reorder_characters(decompose_codepoints(:canonical, codepoints))
199
+ when :c
200
+ compose_codepoints reorder_characters(decompose_codepoints(:canonical, codepoints))
201
+ when :kd
202
+ reorder_characters(decompose_codepoints(:compatability, codepoints))
203
+ when :kc
204
+ compose_codepoints reorder_characters(decompose_codepoints(:compatability, codepoints))
205
+ else
206
+ raise ArgumentError, "#{form} is not a valid normalization variant", caller
207
+ end.pack('U*')
208
+ end
209
+
210
+ # Perform decomposition on the characters in the string
211
+ def decompose(str)
212
+ decompose_codepoints(:canonical, u_unpack(str)).pack('U*')
213
+ end
214
+
215
+ # Perform composition on the characters in the string
216
+ def compose(str)
217
+ compose_codepoints(u_unpack(str)).pack('U*')
218
+ end
219
+
220
+ # ///
221
+ # /// BEGIN Helper methods for unicode operations
222
+ # ///
223
+
224
+ # Used to translate an offset from bytes to characters, for instance one received from a regular expression match
225
+ def translate_offset(str, byte_offset)
226
+ return 0 if str == ''
227
+ return nil if byte_offset.nil?
228
+ chunk = str[0..byte_offset]
229
+ begin
230
+ begin
231
+ chunk.unpack('U*').length - 1
232
+ rescue ArgumentError => e
233
+ chunk = str[0..(byte_offset+=1)]
234
+ # Stop retrying at the end of the string
235
+ raise e unless byte_offset < chunk.length
236
+ # We damaged a character, retry
237
+ retry
238
+ end
239
+ # Catch the ArgumentError so we can throw our own
240
+ rescue ArgumentError
241
+ raise EncodingError.new('malformed UTF-8 character')
242
+ end
243
+ end
244
+
245
+ # Checks if the string is valid UTF8.
246
+ def consumes?(str)
247
+ # Unpack is a little bit faster than regular expressions
248
+ begin
249
+ str.unpack('U*')
250
+ true
251
+ rescue ArgumentError
252
+ false
253
+ end
254
+ end
255
+
256
+ # Returns the number of grapheme clusters in the string. This method is very likely to be moved or renamed
257
+ # in future versions.
258
+ def g_length(str)
259
+ g_unpack(str).length
260
+ end
261
+
262
+ # Replaces all the non-UTF-8 bytes with their ISO-8859-1 or CP1252 equivalents, resulting in a valid UTF-8 string
263
+ def tidy_bytes(str)
264
+ str.split(//u).map do |c|
265
+ if !UTF8_PAT.match(c)
266
+ n = c.unpack('C')[0]
267
+ n < 128 ? n.chr :
268
+ n < 160 ? [UCD.cp1252[n] || n].pack('U') :
269
+ n < 192 ? "\xC2" + n.chr : "\xC3" + (n-64).chr
270
+ else
271
+ c
272
+ end
273
+ end.join
274
+ end
275
+
276
+ protected
277
+
278
+ # Detect whether the codepoint is in a certain character class. Primarily used by the
279
+ # grapheme cluster support.
280
+ def in_char_class?(codepoint, classes)
281
+ classes.detect { |c| UCD.boundary[c] === codepoint } ? true : false
282
+ end
283
+
284
+ # Unpacks the string at codepoint boundaries
285
+ def u_unpack(str)
286
+ begin
287
+ str.unpack 'U*'
288
+ rescue ArgumentError
289
+ raise EncodingError.new('malformed UTF-8 character')
290
+ end
291
+ end
292
+
293
+ # Unpack the string at grapheme boundaries instead of codepoint boundaries
294
+ def g_unpack(str)
295
+ codepoints = u_unpack(str)
296
+ unpacked = []
297
+ pos = 0
298
+ marker = 0
299
+ eoc = codepoints.length
300
+ while(pos < eoc)
301
+ pos += 1
302
+ previous = codepoints[pos-1]
303
+ current = codepoints[pos]
304
+ if (
305
+ # CR X LF
306
+ one = ( previous == UCD.boundary[:cr] and current == UCD.boundary[:lf] ) or
307
+ # L X (L|V|LV|LVT)
308
+ two = ( UCD.boundary[:l] === previous and in_char_class?(current, [:l,:v,:lv,:lvt]) ) or
309
+ # (LV|V) X (V|T)
310
+ three = ( in_char_class?(previous, [:lv,:v]) and in_char_class?(current, [:v,:t]) ) or
311
+ # (LVT|T) X (T)
312
+ four = ( in_char_class?(previous, [:lvt,:t]) and UCD.boundary[:t] === current ) or
313
+ # X Extend
314
+ five = (UCD.boundary[:extend] === current)
315
+ )
316
+ else
317
+ unpacked << codepoints[marker..pos-1]
318
+ marker = pos
319
+ end
320
+ end
321
+ unpacked
322
+ end
323
+
324
+ # Reverse operation of g_unpack
325
+ def g_pack(unpacked)
326
+ unpacked.flatten
327
+ end
328
+
329
+ # Convert characters to a different case
330
+ def to_case(way, str)
331
+ u_unpack(str).map do |codepoint|
332
+ cp = UCD[codepoint]
333
+ unless cp.nil?
334
+ ncp = cp.send(way)
335
+ ncp > 0 ? ncp : codepoint
336
+ else
337
+ codepoint
338
+ end
339
+ end.pack('U*')
340
+ end
341
+
342
+ # Re-order codepoints so the string becomes canonical
343
+ def reorder_characters(codepoints)
344
+ length = codepoints.length - 1
345
+ pos = 0
346
+ while pos < length do
347
+ cp1, cp2 = UCD[codepoints[pos]], UCD[codepoints[pos+1]]
348
+ if (cp1.combining_class > cp2.combining_class) && (cp2.combining_class > 0)
349
+ codepoints[pos..pos+1] = cp2.code, cp1.code
350
+ pos += (pos > 0 ? -1 : 1)
351
+ else
352
+ pos += 1
353
+ end
354
+ end
355
+ codepoints
356
+ end
357
+
358
+ # Decompose composed characters to the decomposed form
359
+ def decompose_codepoints(type, codepoints)
360
+ codepoints.inject([]) do |decomposed, cp|
361
+ # if it's a hangul syllable starter character
362
+ if HANGUL_SBASE <= cp and cp < HANGUL_SLAST
363
+ sindex = cp - HANGUL_SBASE
364
+ ncp = [] # new codepoints
365
+ ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT
366
+ ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT
367
+ tindex = sindex % HANGUL_TCOUNT
368
+ ncp << (HANGUL_TBASE + tindex) unless tindex == 0
369
+ decomposed.concat ncp
370
+ # if the codepoint is decomposable with the current decomposition type
371
+ elsif (ncp = UCD[cp].decomp_mapping) and (!UCD[cp].decomp_type || type == :compatability)
372
+ decomposed.concat decompose_codepoints(type, ncp.dup)
373
+ else
374
+ decomposed << cp
375
+ end
376
+ end
377
+ end
378
+
379
+ # Compose decomposed characters to the composed form
380
+ def compose_codepoints(codepoints)
381
+ pos = 0
382
+ eoa = codepoints.length - 1
383
+ starter_pos = 0
384
+ starter_char = codepoints[0]
385
+ previous_combining_class = -1
386
+ while pos < eoa
387
+ pos += 1
388
+ lindex = starter_char - HANGUL_LBASE
389
+ # -- Hangul
390
+ if 0 <= lindex and lindex < HANGUL_LCOUNT
391
+ vindex = codepoints[starter_pos+1] - HANGUL_VBASE rescue vindex = -1
392
+ if 0 <= vindex and vindex < HANGUL_VCOUNT
393
+ tindex = codepoints[starter_pos+2] - HANGUL_TBASE rescue tindex = -1
394
+ if 0 <= tindex and tindex < HANGUL_TCOUNT
395
+ j = starter_pos + 2
396
+ eoa -= 2
397
+ else
398
+ tindex = 0
399
+ j = starter_pos + 1
400
+ eoa -= 1
401
+ end
402
+ codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE
403
+ end
404
+ starter_pos += 1
405
+ starter_char = codepoints[starter_pos]
406
+ # -- Other characters
407
+ else
408
+ current_char = codepoints[pos]
409
+ current = UCD[current_char]
410
+ if current.combining_class > previous_combining_class
411
+ if ref = UCD.composition_map[starter_char]
412
+ composition = ref[current_char]
413
+ else
414
+ composition = nil
415
+ end
416
+ unless composition.nil?
417
+ codepoints[starter_pos] = composition
418
+ starter_char = composition
419
+ codepoints.delete_at pos
420
+ eoa -= 1
421
+ pos -= 1
422
+ previous_combining_class = -1
423
+ else
424
+ previous_combining_class = current.combining_class
425
+ end
426
+ else
427
+ previous_combining_class = current.combining_class
428
+ end
429
+ if current.combining_class == 0
430
+ starter_pos = pos
431
+ starter_char = codepoints[pos]
432
+ end
433
+ end
434
+ end
435
+ codepoints
436
+ end
437
+
438
+ # Unicode database
439
+ UCD = UnicodeDatabase.new
440
+ end
441
+ end
442
+ end
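A small sketch of the handler API above, assuming the bundled unicode_tables.dat is present so UCD can load, and that the code runs under Ruby 1.8 with $KCODE set to 'UTF8':

    # Illustrative only; UTF8Handler methods are class-level, so no instance needed.
    handler = ActiveSupport::Multibyte::Handlers::UTF8Handler

    composed = [0x00E9].pack('U')                    # "é" as one codepoint
    handler.normalize(composed, :d).unpack('U*')     # => [0x0065, 0x0301]
    handler.normalize("e\xCC\x81", :c).unpack('U*')  # => [0x00E9]

    # translate_offset turns a byte offset (e.g. from String#=~ on 1.8)
    # back into a character offset:
    s = composed + "tat"                             # "état"
    s.index('tat')                                   # => 2 (bytes)
    handler.translate_offset(s, 2)                   # => 1 (characters)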
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers/utf8_handler_proc.rb ADDED
@@ -0,0 +1,44 @@
1
+ # Methods in this handler call functions in the utf8proc ruby extension. These are significantly faster than the
2
+ # pure ruby versions. Chars automatically uses this handler when it can load the utf8proc extension. For
3
+ # documentation on handler methods see UTF8Handler.
4
+ class ActiveSupport::Multibyte::Handlers::UTF8HandlerProc < ActiveSupport::Multibyte::Handlers::UTF8Handler
5
+
6
+ class << self
7
+ def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM) #:nodoc:
8
+ codepoints = str.unpack('U*')
9
+ case form
10
+ when :d
11
+ utf8map(str, :stable)
12
+ when :c
13
+ utf8map(str, :stable, :compose)
14
+ when :kd
15
+ utf8map(str, :stable, :compat)
16
+ when :kc
17
+ utf8map(str, :stable, :compose, :compat)
18
+ else
19
+ raise ArgumentError, "#{form} is not a valid normalization variant", caller
20
+ end
21
+ end
22
+
23
+ def decompose(str) #:nodoc:
24
+ utf8map(str, :stable)
25
+ end
26
+
27
+ def downcase(str) #:nodoc:
28
+ utf8map(str, :casefold)
29
+ end
30
+
31
+ protected
32
+
33
+ def utf8map(str, *option_array) #:nodoc:
34
+ options = 0
35
+ option_array.each do |option|
36
+ flag = Utf8Proc::Options[option]
37
+ raise ArgumentError, "Unknown argument given to utf8map." unless
38
+ flag
39
+ options |= flag
40
+ end
41
+ return Utf8Proc::utf8map(str, options)
42
+ end
43
+ end
44
+ end
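Which of the two handlers ends up active depends on the begin/rescue block at the bottom of chars.rb. The hedged check below assumes $KCODE is 'UTF8', so Chars#handler returns the class-level default:

    # Illustrative check of which handler was selected at load time.
    active = "any string".chars.handler
    puts "utf8proc fast path" if active.name =~ /UTF8HandlerProc/

    # Both handlers expose the same class-level API, e.g. NFKC folding of
    # compatibility characters (U+2163 ROMAN NUMERAL FOUR):
    active.normalize("Henry\xE2\x85\xA3", :kc)   # => "HenryIV"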
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte.rb ADDED
@@ -0,0 +1,23 @@
1
+ $KCODE = "UTF8"
2
+
3
+ module ActiveSupport
4
+ module Multibyte
5
+ DEFAULT_NORMALIZATION_FORM = :kc
6
+ NORMALIZATIONS_FORMS = [:c, :kc, :d, :kd]
7
+ UNICODE_VERSION = '5.0.0'
8
+ end
9
+ end
10
+
11
+ require File.join(File.dirname(__FILE__), "multibyte", "chars")
12
+
13
+ module Kernel
14
+ def u(str)
15
+ ActiveSupport::Multibyte::Chars.new(str)
16
+ end
17
+ end
18
+
19
+ class String
20
+ def chars
21
+ u(self)
22
+ end
23
+ end
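The two entry points defined above, Kernel#u and String#chars, in use (a sketch, again assuming Ruby 1.8 byte-counting semantics for plain String#length):

    # Both helpers wrap the receiver in a Chars proxy.
    u("Überholspur").length           # => 11 (codepoints)
    "Überholspur".chars.upcase.to_s   # => "ÜBERHOLSPUR"
    "Überholspur".length              # => 12 (bytes)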
data/lib/gems/unicodechars-0.0.2/lib/unicodechars/version.rb ADDED
@@ -0,0 +1,9 @@
1
+ module Unicodechar #:nodoc:
2
+ module VERSION #:nodoc:
3
+ MAJOR = 0
4
+ MINOR = 0
5
+ TINY = 2
6
+
7
+ STRING = [MAJOR, MINOR, TINY].join('.')
8
+ end
9
+ end
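For reference, the constant assembled above resolves to the vendored gem's version string:

    Unicodechar::VERSION::STRING   # => "0.0.2"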
data/lib/gems/unicodechars-0.0.2/lib/unicodechars.rb ADDED
@@ -0,0 +1 @@
1
+ Dir[File.join(File.dirname(__FILE__), 'unicodechars/**/*.rb')].sort.each { |lib| require lib }
data/lib/gems.rb ADDED
@@ -0,0 +1,13 @@
1
+ path = File.expand_path(File.join(File.dirname(__FILE__), 'gems'))
2
+ Gem.set_paths(path)
3
+
4
+ Dir.glob(File.join(path, '*')).each do |p|
5
+ full_gem_name = File.basename(p)
6
+ version = full_gem_name.match(/([\d\.?]+)/).to_s
7
+ gem_name = full_gem_name.gsub("-#{version}", '')
8
+ $:.unshift(File.join(p, 'lib'))
9
+ begin
10
+ gem gem_name, "~> #{version}"
11
+ rescue Gem::LoadError
12
+ end
13
+ end
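A worked example of how the loop above splits a vendored directory name into gem name and version, shown for the one gem actually bundled here:

    full_gem_name = "unicodechars-0.0.2"
    version  = full_gem_name.match(/([\d\.?]+)/).to_s   # => "0.0.2"
    gem_name = full_gem_name.gsub("-#{version}", '')    # => "unicodechars"
    # lib/gems/unicodechars-0.0.2/lib then lands on $LOAD_PATH, and
    # `gem "unicodechars", "~> 0.0.2"` is attempted with Gem::LoadError swallowed.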
data/lib/mack-localization.rb CHANGED
@@ -1,3 +1,5 @@
1
+ require File.join(File.dirname(__FILE__), 'gems')
2
+
1
3
  require 'mack-caching'
2
4
  def undef_const(klass, k)
3
5
  klass.remove_const(k) if klass.const_defined?(k)
metadata CHANGED
@@ -1,7 +1,7 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: mack-localization
3
3
  version: !ruby/object:Gem::Version
4
- version: 0.8.1
4
+ version: 0.8.2
5
5
  platform: ruby
6
6
  authors:
7
7
  - Darsono Sutedja
@@ -9,19 +9,9 @@ autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
11
 
12
- date: 2008-10-26 00:00:00 -04:00
12
+ date: 2008-11-30 00:00:00 -05:00
13
13
  default_executable:
14
14
  dependencies:
15
- - !ruby/object:Gem::Dependency
16
- name: unicodechars
17
- type: :runtime
18
- version_requirement:
19
- version_requirements: !ruby/object:Gem::Requirement
20
- requirements:
21
- - - "="
22
- - !ruby/object:Gem::Version
23
- version: 0.0.2
24
- version:
25
15
  - !ruby/object:Gem::Dependency
26
16
  name: mack-caching
27
17
  type: :runtime
@@ -30,7 +20,7 @@ dependencies:
30
20
  requirements:
31
21
  - - "="
32
22
  - !ruby/object:Gem::Version
33
- version: 0.8.1
23
+ version: 0.8.2
34
24
  version:
35
25
  description: Localization support for Mack Framework
36
26
  email: Darsono.Sutedja@gmail.com
@@ -41,9 +31,34 @@ extensions: []
41
31
  extra_rdoc_files:
42
32
  - README
43
33
  files:
34
+ - lib/gems
35
+ - lib/gems/cache
36
+ - lib/gems/doc
37
+ - lib/gems/gems
38
+ - lib/gems/specifications
39
+ - lib/gems/unicodechars-0.0.2
40
+ - lib/gems/unicodechars-0.0.2/lib
41
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars
42
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte
43
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/chars.rb
44
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/generators
45
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/generators/generate_tables.rb
46
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers
47
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers/passthru_handler.rb
48
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers/utf8_handler.rb
49
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte/handlers/utf8_handler_proc.rb
50
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/multibyte.rb
51
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/values
52
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/values/unicode_tables.dat
53
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars/version.rb
54
+ - lib/gems/unicodechars-0.0.2/lib/unicodechars.rb
55
+ - lib/gems.rb
56
+ - lib/mack-localization
44
57
  - lib/mack-localization/configuration.rb
45
58
  - lib/mack-localization/content_cache.rb
46
59
  - lib/mack-localization/errors.rb
60
+ - lib/mack-localization/format_engine
61
+ - lib/mack-localization/format_engine/df_engines
47
62
  - lib/mack-localization/format_engine/df_engines/base.rb
48
63
  - lib/mack-localization/format_engine/df_engines/bp.rb
49
64
  - lib/mack-localization/format_engine/df_engines/de.rb
@@ -52,6 +67,7 @@ files:
52
67
  - lib/mack-localization/format_engine/df_engines/fr.rb
53
68
  - lib/mack-localization/format_engine/df_engines/it.rb
54
69
  - lib/mack-localization/format_engine/engine_registry.rb
70
+ - lib/mack-localization/format_engine/nc_engines
55
71
  - lib/mack-localization/format_engine/nc_engines/base.rb
56
72
  - lib/mack-localization/format_engine/nc_engines/bp.rb
57
73
  - lib/mack-localization/format_engine/nc_engines/de.rb
@@ -60,6 +76,8 @@ files:
60
76
  - lib/mack-localization/format_engine/nc_engines/fr.rb
61
77
  - lib/mack-localization/format_engine/nc_engines/it.rb
62
78
  - lib/mack-localization/formatter.rb
79
+ - lib/mack-localization/helpers
80
+ - lib/mack-localization/helpers/view_helpers
63
81
  - lib/mack-localization/helpers/view_helpers/l10n_helpers.rb
64
82
  - lib/mack-localization/translator.rb
65
83
  - lib/mack-localization.rb
@@ -67,8 +85,8 @@ files:
67
85
  has_rdoc: true
68
86
  homepage: http://www.mackframework.com
69
87
  post_install_message:
70
- rdoc_options: []
71
-
88
+ rdoc_options:
89
+ - --exclude=gems/
72
90
  require_paths:
73
91
  - lib
74
92
  - lib
@@ -87,7 +105,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
87
105
  requirements: []
88
106
 
89
107
  rubyforge_project: magrathea
90
- rubygems_version: 1.2.0
108
+ rubygems_version: 1.3.1
91
109
  signing_key:
92
110
  specification_version: 2
93
111
  summary: Localization support for Mack Framework