unicode-multibyte 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,442 @@
1
+ # Contains all the handlers and helper classes
2
+ module UnicodeMultibyte::Multibyte::Handlers
3
+ class EncodingError < ArgumentError; end
4
+
5
# Lightweight record describing a single Unicode codepoint as loaded from
# the bundled Unicode database: its value, canonical combining class,
# decomposition data and simple case mappings.
class Codepoint #:nodoc:
  attr_accessor :code
  attr_accessor :combining_class
  attr_accessor :decomp_type
  attr_accessor :decomp_mapping
  attr_accessor :uppercase_mapping
  attr_accessor :lowercase_mapping
end
8
+
9
# Loads and holds the marshalled Unicode property tables used by UTF8Handler.
class UnicodeDatabase #:nodoc:
  attr_accessor :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252

  # Creates a new UnicodeDatabase instance and loads the database from disk.
  #
  # Raises IOError when the table file cannot be read or parsed, because the
  # handler is unusable without it.
  def initialize
    begin
      @codepoints, @composition_exclusion, @composition_map, @boundary, @cp1252 = self.class.load
    rescue StandardError => e
      # Was `rescue Exception`, which also swallowed SignalException,
      # SystemExit and NoMemoryError; StandardError still covers Errno::*,
      # TypeError from Marshal, etc.
      raise IOError.new("Couldn't load the unicode tables for UTF8Handler (#{e.message}), handler is unusable")
    end
    # Fall back to empty tables so lookups degrade gracefully rather than
    # raising NoMethodError on nil.
    @codepoints ||= Hash.new(Codepoint.new)
    @composition_exclusion ||= []
    @composition_map ||= {}
    @boundary ||= {}
    @cp1252 ||= {}

    # Redefine the === method on each Array of boundary rules so we can write
    # shorter rules for grapheme cluster breaks: an Array then matches when
    # any of its members (codepoints or ranges) matches.
    @boundary.each_value do |rules|
      rules.instance_eval do
        def ===(other)
          detect { |i| i === other } ? true : false
        end
      end if rules.kind_of?(Array)
    end
  end

  # Shortcut to ucd.codepoints[]
  def [](index); @codepoints[index]; end

  # Returns the directory in which the data files are stored
  def self.dirname
    File.dirname(__FILE__) + '/../../values/'
  end

  # Returns the filename for the data file for this version
  def self.filename
    File.expand_path File.join(dirname, "unicode_tables.dat")
  end

  # Loads the unicode database and returns all the internal objects of
  # UnicodeDatabase. NOTE(review): Marshal.load is unsafe on untrusted input;
  # this file ships inside the gem, so it is presumed trusted — confirm the
  # data file cannot be replaced by an attacker in your deployment.
  def self.load
    File.open(self.filename, 'rb') { |f| Marshal.load f.read }
  end
end
53
+
54
# UTF8Handler implements Unicode aware operations for strings, these operations will be used by the Chars
# proxy when $KCODE is set to 'UTF8'.
class UTF8Handler
  # Hangul character boundaries and properties. Hangul syllables decompose
  # algorithmically into L(eading), V(owel) and T(railing) jamo instead of
  # via table lookup; these constants drive that arithmetic.
  HANGUL_SBASE = 0xAC00
  HANGUL_LBASE = 0x1100
  HANGUL_VBASE = 0x1161
  HANGUL_TBASE = 0x11A7
  HANGUL_LCOUNT = 19
  HANGUL_VCOUNT = 21
  HANGUL_TCOUNT = 28
  HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT
  HANGUL_SCOUNT = 11172
  HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT
  HANGUL_JAMO_FIRST = 0x1100
  HANGUL_JAMO_LAST = 0x11FF

  # All the unicode whitespace codepoints, flattened into one frozen Array.
  UNICODE_WHITESPACE = [
    (0x0009..0x000D).to_a, # White_Space # Cc   [5] <control-0009>..<control-000D>
    0x0020,                # White_Space # Zs       SPACE
    0x0085,                # White_Space # Cc       <control-0085>
    0x00A0,                # White_Space # Zs       NO-BREAK SPACE
    0x1680,                # White_Space # Zs       OGHAM SPACE MARK
    0x180E,                # White_Space # Zs       MONGOLIAN VOWEL SEPARATOR
    (0x2000..0x200A).to_a, # White_Space # Zs  [11] EN QUAD..HAIR SPACE
    0x2028,                # White_Space # Zl       LINE SEPARATOR
    0x2029,                # White_Space # Zp       PARAGRAPH SEPARATOR
    0x202F,                # White_Space # Zs       NARROW NO-BREAK SPACE
    0x205F,                # White_Space # Zs       MEDIUM MATHEMATICAL SPACE
    0x3000,                # White_Space # Zs       IDEOGRAPHIC SPACE
  ].flatten.freeze

  # BOM (byte order mark) can also be seen as whitespace, it's a non-rendering character used to distinguish
  # between little and big endian. This is not an issue in utf-8, so it must be ignored.
  UNICODE_LEADERS_AND_TRAILERS = UNICODE_WHITESPACE + [65279] # ZERO-WIDTH NO-BREAK SPACE aka BOM

  # Byte-level regexp that matches only well-formed UTF-8 (rejects overlong
  # encodings and codepoints above U+10FFFF via the per-lead-byte ranges).
  # Borrowed from the Kconv library by Shinji KONO - (also as seen on the W3C site)
  UTF8_PAT = /\A(?:
                [\x00-\x7f]                                     |
                [\xc2-\xdf] [\x80-\xbf]                         |
                \xe0        [\xa0-\xbf] [\x80-\xbf]             |
                [\xe1-\xef] [\x80-\xbf] [\x80-\xbf]             |
                \xf0        [\x90-\xbf] [\x80-\xbf] [\x80-\xbf] |
                [\xf1-\xf3] [\x80-\xbf] [\x80-\xbf] [\x80-\xbf] |
                \xf4        [\x80-\x8f] [\x80-\xbf] [\x80-\xbf]
               )*\z/xn
101
+
102
# Returns a regular expression pattern source that matches any one of the
# passed Unicode codepoints, each packed into its UTF-8 string form and
# joined with alternation.
def self.codepoints_to_pattern(array_of_codepoints) #:nodoc:
  array_of_codepoints.map { |codepoint| [codepoint].pack('U*') }.join('|')
end
106
+ UNICODE_TRAILERS_PAT = /(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+\Z/
107
+ UNICODE_LEADERS_PAT = /\A(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+/
108
+
109
+ class << self
110
+
111
+ # ///
112
+ # /// BEGIN String method overrides
113
+ # ///
114
+
115
# Inserts +fragment+ into +str+ at the given codepoint offset, mutating and
# returning +str+.
def insert(str, offset, fragment)
  codepoints = u_unpack(str)
  codepoints.insert(offset, u_unpack(fragment))
  str.replace(codepoints.flatten.pack('U*'))
end
124
+
125
# Returns the position of the passed argument in the string (String#index
# semantics) counted in codepoints instead of bytes, or nil when not found.
def index(str, *args)
  byte_index = str.index(*args)
  return nil unless byte_index
  u_unpack(str.slice(0...byte_index)).size
end
130
+
131
# Does Unicode-aware rstrip: removes a trailing run of Unicode whitespace
# (and BOM) characters.
def rstrip(str)
  str.gsub(UNICODE_TRAILERS_PAT, '')
end
135
+
136
# Does Unicode-aware lstrip: removes a leading run of Unicode whitespace
# (and BOM) characters.
def lstrip(str)
  str.gsub(UNICODE_LEADERS_PAT, '')
end
140
+
141
# Removes both leading and trailing Unicode whitespace (and BOM).
def strip(str)
  rstrip(lstrip(str))
end
145
+
146
# Returns the number of codepoints in the string (not the byte length).
def size(str)
  u_unpack(str).length
end
alias_method :length, :size
151
+
152
# Reverses the string codepoint-by-codepoint (so multibyte characters stay
# intact) and returns the reversed string.
def reverse(str)
  u_unpack(str).reverse!.pack('U*')
end
156
+
157
# Implements Unicode-aware slice with codepoints. Slicing on one point returns the codepoints for that
# character (an Integer, mimicking pre-1.9 String#[] behavior).
def slice(str, *args)
  # Mimic native String#slice's TypeError for the (range, length) mistake.
  if (args.size == 2 && args.first.is_a?(Range))
    raise TypeError, 'cannot convert Range into Integer' # Do as if we were native
  elsif args[0].kind_of? Range
    # Range slice: return the substring, or nil when the range is out of bounds.
    cps = u_unpack(str).slice(*args)
    cps.nil? ? nil : cps.pack('U*')
  elsif args.size == 1 && args[0].kind_of?(Numeric)
    # Single index: return the codepoint at that position (may be nil).
    u_unpack(str)[args[0]]
  else
    # (start, length) and other argument shapes: slice the codepoint array
    # and repack the result as UTF-8.
    u_unpack(str).slice(*args).pack('U*')
  end
end
alias_method :[], :slice
172
+
173
+ # Convert characters in the string to uppercase
174
+ def upcase(str); to_case :uppercase_mapping, str; end
175
+
176
+ # Convert characters in the string to lowercase
177
+ def downcase(str); to_case :lowercase_mapping, str; end
178
+
179
# Returns a copy of +str+ with the first character converted to uppercase
# and the remainder to lowercase.
def capitalize(str)
  head = slice(str, 0..0)
  tail = slice(str, 1..-1) || ''
  upcase(head) + downcase(tail)
end
183
+
184
+ # ///
185
+ # /// Extra String methods for unicode operations
186
+ # ///
187
+
188
# Returns the KC normalization of the string by default. NFKC is considered the best normalization form for
# passing strings to databases and validations.
#
# * <tt>str</tt>: The string to perform normalization on.
# * <tt>form</tt>: The form you want to normalize in. Should be one of the following: :c, :kc, :d or :kd.
def normalize(str, form=UnicodeMultibyte::Multibyte::DEFAULT_NORMALIZATION_FORM)
  # See http://www.unicode.org/reports/tr15, Table 1
  codepoints = u_unpack(str)
  normalized =
    case form
    when :d
      reorder_characters(decompose_codepoints(:canonical, codepoints))
    when :c
      compose_codepoints(reorder_characters(decompose_codepoints(:canonical, codepoints)))
    when :kd
      # :compatability (sic) is the symbol decompose_codepoints compares against.
      reorder_characters(decompose_codepoints(:compatability, codepoints))
    when :kc
      compose_codepoints(reorder_characters(decompose_codepoints(:compatability, codepoints)))
    else
      raise ArgumentError, "#{form} is not a valid normalization variant", caller
    end
  normalized.pack('U*')
end
209
+
210
# Performs canonical decomposition on the characters in the string.
def decompose(str)
  codepoints = u_unpack(str)
  decompose_codepoints(:canonical, codepoints).pack('U*')
end
214
+
215
# Performs canonical composition on the characters in the string.
#
# BUG FIX: the previous implementation was
#   compose_codepoints u_unpack(str).pack('U*')
# which — due to precedence — packed the codepoints back into a String
# *before* handing them to compose_codepoints (which operates on an Array of
# codepoints) and never packed the composed result. Parenthesizing makes
# composition happen on the codepoint array and packs the composed
# codepoints afterwards, matching decompose/normalize.
def compose(str)
  compose_codepoints(u_unpack(str)).pack('U*')
end
219
+
220
+ # ///
221
+ # /// BEGIN Helper methods for unicode operation
222
+ # ///
223
+
224
# Used to translate an offset from bytes to characters, for instance one received from a regular expression match.
# Returns 0 for the empty string and nil when +byte_offset+ is nil.
def translate_offset(str, byte_offset)
  return 0 if str == ''
  return nil if byte_offset.nil?
  chunk = str[0..byte_offset]
  begin
    begin
      chunk.unpack('U*').length - 1
    rescue ArgumentError => e
      # The byte offset may point into the middle of a multibyte character;
      # widen the chunk one byte at a time until it unpacks cleanly.
      chunk = str[0..(byte_offset+=1)]
      # Stop retrying at the end of the string
      raise e unless byte_offset < chunk.length
      # We damaged a character, retry
      retry
    end
  # Catch the ArgumentError so we can throw our own
  rescue ArgumentError
    raise EncodingError.new('malformed UTF-8 character')
  end
end
244
+
245
# Checks whether the string is valid UTF-8 by attempting to unpack it;
# unpack raises ArgumentError on malformed input.
def consumes?(str)
  # Unpack is a little bit faster than regular expressions.
  str.unpack('U*')
  true
rescue ArgumentError
  false
end
255
+
256
# Returns the number of grapheme clusters in the string. This method is very
# likely to be moved or renamed in future versions.
def g_length(str)
  g_unpack(str).size
end
261
+
262
# Replaces all the non-utf-8 bytes by their iso-8859-1 or cp1252 equivalent resulting in a valid utf-8 string
def tidy_bytes(str)
  str.split(//u).map do |c|
    if !UTF8_PAT.match(c)
      n = c.unpack('C')[0]
      # 0x00-0x7F: plain ASCII byte, keep as-is.
      # 0x80-0x9F: CP1252-specific range — map through the cp1252 table when
      #            a mapping exists, otherwise keep the raw value.
      # 0xA0-0xBF / 0xC0-0xFF: latin-1 byte re-encoded as the corresponding
      #            two-byte UTF-8 sequence (0xC2/0xC3 lead byte).
      n < 128 ? n.chr :
      n < 160 ? [UCD.cp1252[n] || n].pack('U') :
      n < 192 ? "\xC2" + n.chr : "\xC3" + (n-64).chr
    else
      c
    end
  end.join
end
275
+
276
+ protected
277
+
278
# Detects whether +codepoint+ falls in any of the named boundary character
# classes. Primarily used by the grapheme cluster support.
def in_char_class?(codepoint, classes)
  classes.any? { |char_class| UCD.boundary[char_class] === codepoint }
end
283
+
284
# Unpacks the string into an Array of codepoints; raises EncodingError when
# the string is not valid UTF-8.
def u_unpack(str)
  str.unpack 'U*'
rescue ArgumentError
  raise EncodingError.new('malformed UTF-8 character')
end
292
+
293
# Unpacks the string at grapheme-cluster boundaries instead of codepoint
# boundaries; returns an Array of Arrays of codepoints (one inner Array per
# grapheme cluster).
#
# FIX: the boundary condition previously assigned its sub-expressions to
# unused locals (`one = (...) or two = (...) ...`); the assignments had no
# effect on the result and are removed.
def g_unpack(str)
  codepoints = u_unpack(str)
  unpacked = []
  pos = 0
  marker = 0
  eoc = codepoints.length
  while pos < eoc
    pos += 1
    previous = codepoints[pos - 1]
    current = codepoints[pos]
    # There is no grapheme-cluster break between +previous+ and +current+
    # when any of these rules matches; otherwise flush the pending cluster.
    no_break =
      (previous == UCD.boundary[:cr] and current == UCD.boundary[:lf]) ||                 # CR X LF
      (UCD.boundary[:l] === previous and in_char_class?(current, [:l, :v, :lv, :lvt])) || # L X (L|V|LV|LVT)
      (in_char_class?(previous, [:lv, :v]) and in_char_class?(current, [:v, :t])) ||      # (LV|V) X (V|T)
      (in_char_class?(previous, [:lvt, :t]) and UCD.boundary[:t] === current) ||          # (LVT|T) X (T)
      (UCD.boundary[:extend] === current)                                                 # X Extend
    unless no_break
      unpacked << codepoints[marker..pos - 1]
      marker = pos
    end
  end
  unpacked
end
323
+
324
# Reverse operation of g_unpack: flattens the per-cluster codepoint Arrays
# back into a single Array of codepoints.
def g_pack(unpacked)
  unpacked.flatten
end
328
+
329
# Converts the string's characters to a different case. +way+ is the name of
# the Codepoint mapping accessor to use (:uppercase_mapping or
# :lowercase_mapping); codepoints without an entry or with a non-positive
# mapping are passed through unchanged.
def to_case(way, str)
  u_unpack(str).map do |codepoint|
    entry = UCD[codepoint]
    if entry.nil?
      codepoint
    else
      mapped = entry.send(way)
      mapped > 0 ? mapped : codepoint
    end
  end.pack('U*')
end
341
+
342
# Re-orders combining characters by canonical combining class (a bubble-style
# pass that steps back after each swap) so the codepoint sequence becomes
# canonical. Mutates and returns +codepoints+.
def reorder_characters(codepoints)
  limit = codepoints.length - 1
  pos = 0
  while pos < limit
    first, second = UCD[codepoints[pos]], UCD[codepoints[pos + 1]]
    if (first.combining_class > second.combining_class) && (second.combining_class > 0)
      codepoints[pos..pos + 1] = second.code, first.code
      # Step back one position (when possible) to re-check the swapped pair.
      pos += (pos > 0 ? -1 : 1)
    else
      pos += 1
    end
  end
  codepoints
end
357
+
358
# Decompose composed characters to the decomposed form. +type+ is :canonical
# or :compatability (sic); compatibility decompositions are a superset of
# canonical ones.
def decompose_codepoints(type, codepoints)
  codepoints.inject([]) do |decomposed, cp|
    # if it's a hangul syllable starter character
    if HANGUL_SBASE <= cp and cp < HANGUL_SLAST
      # Algorithmic Hangul decomposition: split the syllable index into
      # leading jamo, vowel jamo and (optional) trailing jamo.
      sindex = cp - HANGUL_SBASE
      ncp = [] # new codepoints
      ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT
      ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT
      tindex = sindex % HANGUL_TCOUNT
      ncp << (HANGUL_TBASE + tindex) unless tindex == 0
      decomposed.concat ncp
    # if the codepoint is decomposable in with the current decomposition type
    elsif (ncp = UCD[cp].decomp_mapping) and (!UCD[cp].decomp_type || type == :compatability)
      # Recurse: a decomposition mapping may itself contain decomposable
      # codepoints. Duplicate so the database's arrays are never mutated.
      decomposed.concat decompose_codepoints(type, ncp.dup)
    else
      decomposed << cp
    end
  end
end
378
+
379
# Compose decomposed characters to the composed form. Mutates and returns
# +codepoints+. Tracks a "starter" (the last codepoint with combining class
# 0) and tries to combine each following character with it.
def compose_codepoints(codepoints)
  pos = 0
  eoa = codepoints.length - 1
  starter_pos = 0
  starter_char = codepoints[0]
  previous_combining_class = -1
  while pos < eoa
    pos += 1
    lindex = starter_char - HANGUL_LBASE
    # -- Hangul
    if 0 <= lindex and lindex < HANGUL_LCOUNT
      # Algorithmic Hangul composition: combine a leading jamo with the
      # following vowel jamo (and optional trailing jamo) into one syllable.
      # The `rescue` handles running off the end of the array (nil - base).
      vindex = codepoints[starter_pos+1] - HANGUL_VBASE rescue vindex = -1
      if 0 <= vindex and vindex < HANGUL_VCOUNT
        tindex = codepoints[starter_pos+2] - HANGUL_TBASE rescue tindex = -1
        if 0 <= tindex and tindex < HANGUL_TCOUNT
          j = starter_pos + 2
          eoa -= 2
        else
          tindex = 0
          j = starter_pos + 1
          eoa -= 1
        end
        codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE
      end
      starter_pos += 1
      starter_char = codepoints[starter_pos]
    # -- Other characters
    else
      current_char = codepoints[pos]
      current = UCD[current_char]
      # Only try to compose when the combining class is strictly increasing;
      # an equal class means the character is "blocked" from the starter.
      if current.combining_class > previous_combining_class
        if ref = UCD.composition_map[starter_char]
          composition = ref[current_char]
        else
          composition = nil
        end
        unless composition.nil?
          # Replace the starter with the composed character and drop the
          # combining character that was just consumed.
          codepoints[starter_pos] = composition
          starter_char = composition
          codepoints.delete_at pos
          eoa -= 1
          pos -= 1
          previous_combining_class = -1
        else
          previous_combining_class = current.combining_class
        end
      else
        previous_combining_class = current.combining_class
      end
      # A combining class of 0 starts a new composition window.
      if current.combining_class == 0
        starter_pos = pos
        starter_char = codepoints[pos]
      end
    end
  end
  codepoints
end
437
+
438
+ # UniCode Database
439
+ UCD = UnicodeDatabase.new
440
+ end
441
+ end
442
+ end
@@ -0,0 +1,44 @@
1
# Methods in this handler call functions in the utf8proc ruby extension. These are significantly faster than the
# pure ruby versions. Chars automatically uses this handler when it can load the utf8proc extension. For
# documentation on handler methods see UTF8Handler.
class UnicodeMultibyte::Multibyte::Handlers::UTF8HandlerProc < UnicodeMultibyte::Multibyte::Handlers::UTF8Handler

  class << self
    # Returns the normalized form of +str+. +form+ must be :c, :kc, :d or :kd.
    def normalize(str, form=UnicodeMultibyte::Multibyte::DEFAULT_NORMALIZATION_FORM) #:nodoc:
      # Validate the input up front: unpack raises ArgumentError on malformed
      # UTF-8. (Previously the result was assigned to an unused local
      # `codepoints`; the validating side effect is the point, so the call is
      # kept without the dead assignment.)
      str.unpack('U*')
      case form
      when :d
        utf8map(str, :stable)
      when :c
        utf8map(str, :stable, :compose)
      when :kd
        utf8map(str, :stable, :compat)
      when :kc
        utf8map(str, :stable, :compose, :compat)
      else
        raise ArgumentError, "#{form} is not a valid normalization variant", caller
      end
    end

    # Stable decomposition via utf8proc.
    def decompose(str) #:nodoc:
      utf8map(str, :stable)
    end

    # Case-folding via utf8proc. (Fixed stray "c" in the original
    # `#:nodoc:c` marker.)
    def downcase(str) #:nodoc:
      utf8map(str, :casefold)
    end

    protected

    # Translates the symbolic +option_array+ into the utf8proc option bitmask
    # and delegates to Utf8Proc.utf8map; raises ArgumentError for unknown
    # option symbols.
    def utf8map(str, *option_array) #:nodoc:
      options = 0
      option_array.each do |option|
        flag = Utf8Proc::Options[option]
        raise ArgumentError, "Unknown argument given to utf8map." unless flag
        options |= flag
      end
      Utf8Proc::utf8map(str, options)
    end
  end
end
@@ -0,0 +1,23 @@
1
+ $KCODE = "UTF8"
2
+
3
module UnicodeMultibyte
  module Multibyte
    # Normalization form used when none is given explicitly; see the
    # handlers' +normalize+ methods.
    DEFAULT_NORMALIZATION_FORM = :kc
    # All supported normalization forms. Frozen so the shared constant can't
    # be mutated by callers. (Constant name kept as-is — including the
    # historical pluralization — for backward compatibility.)
    NORMALIZATIONS_FORMS = [:c, :kc, :d, :kd].freeze
    # Version of the Unicode database the bundled tables were generated from.
    UNICODE_VERSION = '5.0.0'.freeze
  end
end
10
+
11
+ require File.join(File.dirname(__FILE__), "multibyte", "chars")
12
+
13
# Monkey-patch: adds a global +u+ helper for wrapping a String in the
# Unicode-aware Chars proxy.
module Kernel
  # Returns +str+ wrapped in a UnicodeMultibyte::Multibyte::Chars proxy.
  def u(str)
    UnicodeMultibyte::Multibyte::Chars.new(str)
  end
end
18
+
19
# Monkey-patch: mirrors ActiveSupport's String#mb_chars API.
class String
  # Returns this string wrapped in the Unicode-aware Chars proxy (delegates
  # to Kernel#u).
  def mb_chars
    u(self)
  end
end
@@ -0,0 +1,9 @@
1
module Unicodechar #:nodoc:
  module VERSION #:nodoc:
    MAJOR = 0
    MINOR = 0
    TINY  = 1

    # Full dotted version string, e.g. "0.0.1". Frozen so the shared
    # constant can't be mutated in place.
    STRING = [MAJOR, MINOR, TINY].join('.').freeze
  end
end
@@ -0,0 +1 @@
1
+ Dir[File.join(File.dirname(__FILE__), 'unicodechars/**/*.rb')].sort.each { |lib| require lib }