unicodechars 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/CHANGELOG ADDED
File without changes
data/README ADDED
@@ -0,0 +1,3 @@
1
+ README for unicodechars
2
+ =======================
3
+
data/Rakefile ADDED
@@ -0,0 +1,85 @@
1
+ require 'rubygems'
2
+ require 'rake'
3
+ require 'rake/clean'
4
+ require 'rake/testtask'
5
+ require 'rake/packagetask'
6
+ require 'rake/gempackagetask'
7
+ require 'rake/rdoctask'
8
+ require 'rake/contrib/rubyforgepublisher'
9
+ require 'fileutils'
10
+ include FileUtils
11
+ require File.join(File.dirname(__FILE__), 'lib', 'unicodechars', 'version')
12
+
13
+ AUTHOR = "yehudakatz"
14
+ EMAIL = "your contact email for bug fixes and info"
15
+ DESCRIPTION = "description of gem"
16
+ RUBYFORGE_PROJECT = "unicodechars"
17
+ HOMEPATH = "http://#{RUBYFORGE_PROJECT}.rubyforge.org"
18
+ BIN_FILES = %w( )
19
+
20
+
21
+ NAME = "unicodechars"
22
+ REV = File.read(".svn/entries")[/committed-rev="(d+)"/, 1] rescue nil
23
+ VERS = ENV['VERSION'] || (Unicodechar::VERSION::STRING + (REV ? ".#{REV}" : ""))
24
+ CLEAN.include ['**/.*.sw?', '*.gem', '.config']
25
+ RDOC_OPTS = ['--quiet', '--title', "unicodechars documentation",
26
+ "--opname", "index.html",
27
+ "--line-numbers",
28
+ "--main", "README",
29
+ "--inline-source"]
30
+
31
+ desc "Packages up unicodechars gem."
32
+ task :default => [:test]
33
+ task :package => [:clean]
34
+
35
+ Rake::TestTask.new("test") { |t|
36
+ t.libs << "test"
37
+ t.pattern = "test/**/*_test.rb"
38
+ t.verbose = true
39
+ }
40
+
41
+ spec =
42
+ Gem::Specification.new do |s|
43
+ s.name = NAME
44
+ s.version = VERS
45
+ s.platform = Gem::Platform::RUBY
46
+ s.has_rdoc = true
47
+ s.extra_rdoc_files = ["README", "CHANGELOG"]
48
+ s.rdoc_options += RDOC_OPTS + ['--exclude', '^(examples|extras)/']
49
+ s.summary = DESCRIPTION
50
+ s.description = DESCRIPTION
51
+ s.author = AUTHOR
52
+ s.email = EMAIL
53
+ s.homepage = HOMEPATH
54
+ s.executables = BIN_FILES
55
+ s.rubyforge_project = RUBYFORGE_PROJECT
56
+ s.bindir = "bin"
57
+ s.require_path = "lib"
58
+ s.autorequire = "unicodechars"
59
+
60
+ #s.add_dependency('activesupport', '>=1.3.1')
61
+ #s.required_ruby_version = '>= 1.8.2'
62
+
63
+ s.files = %w(README CHANGELOG Rakefile) +
64
+ Dir.glob("{bin,doc,test,lib,templates,generator,extras,website,script}/**/*") +
65
+ Dir.glob("ext/**/*.{h,c,rb}") +
66
+ Dir.glob("examples/**/*.rb") +
67
+ Dir.glob("tools/*.rb")
68
+
69
+ # s.extensions = FileList["ext/**/extconf.rb"].to_a
70
+ end
71
+
72
+ Rake::GemPackageTask.new(spec) do |p|
73
+ p.need_tar = true
74
+ p.gem_spec = spec
75
+ end
76
+
77
+ task :install do
78
+ name = "#{NAME}-#{VERS}.gem"
79
+ sh %{rake package}
80
+ sh %{sudo gem install pkg/#{name}}
81
+ end
82
+
83
+ task :uninstall => [:clean] do
84
+ sh %{sudo gem uninstall #{NAME}}
85
+ end
data/lib/unicodechars.rb ADDED
@@ -0,0 +1 @@
1
+ Dir[File.join(File.dirname(__FILE__), 'unicodechars/**/*.rb')].sort.each { |lib| require lib }
data/lib/unicodechars/multibyte.rb ADDED
@@ -0,0 +1,23 @@
1
+ $KCODE = "UTF8"
2
+
3
+ module ActiveSupport
4
+ module Multibyte
5
+ DEFAULT_NORMALIZATION_FORM = :kc
6
+ NORMALIZATIONS_FORMS = [:c, :kc, :d, :kd]
7
+ UNICODE_VERSION = '5.0.0'
8
+ end
9
+ end
10
+
11
+ require File.join(File.dirname(__FILE__), "multibyte", "chars")
12
+
13
+ module Kernel
14
+ def u(str)
15
+ ActiveSupport::Multibyte::Chars.new(str)
16
+ end
17
+ end
18
+
19
+ class String
20
+ def chars
21
+ u(self)
22
+ end
23
+ end
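The file above wires the Kernel#u helper and String#chars onto every string. A minimal usage sketch, not part of the package, assuming Ruby 1.8 with the gem on the load path and its bundled unicode tables available:

require 'unicodechars'

"Café".chars.class        # => ActiveSupport::Multibyte::Chars
"Café".chars.length       # => 4 codepoints (5 bytes in UTF-8)
u("résumé").upcase.to_s   # => "RÉSUMÉ", via the UTF-8 handler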
data/lib/unicodechars/multibyte/chars.rb ADDED
@@ -0,0 +1,155 @@
1
+ require File.join(File.dirname(__FILE__), "handlers", "utf8_handler")
2
+ require File.join(File.dirname(__FILE__), "handlers", "passthru_handler")
3
+
4
+ # Encapsulates all the functionality related to the Chars proxy.
5
+ module ActiveSupport::Multibyte
6
+ # Chars enables you to work transparently with multibyte encodings in the Ruby String class without having extensive
7
+ # knowledge about the encoding. A Chars object accepts a string upon initialization and proxies String methods in an
8
+ # encoding safe manner. All the normal String methods are also implemented on the proxy.
9
+ #
10
+ # String methods are proxied through the Chars object, and can be accessed through the +chars+ method. Methods
11
+ # which would normally return a String object now return a Chars object so methods can be chained.
12
+ #
13
+ # "The Perfect String ".chars.downcase.strip.normalize #=> "the perfect string"
14
+ #
15
+ # Chars objects are perfectly interchangeable with String objects as long as no explicit class checks are made.
16
+ # If certain methods do explicitly check the class, call +to_s+ before you pass chars objects to them.
17
+ #
18
+ # bad.explicit_checking_method "T".chars.downcase.to_s
19
+ #
20
+ # The actual operations on the string are delegated to handlers. Theoretically handlers can be implemented for
21
+ # any encoding, but the default handler handles UTF-8. This handler is set during initialization, if you want to
22
+ # use you own handler, you can set it on the Chars class. Look at the UTF8Handler source for an example how to
23
+ # implement your own handler. If you your own handler to work on anything but UTF-8 you probably also
24
+ # want to override Chars#handler.
25
+ #
26
+ # ActiveSupport::Multibyte::Chars.handler = MyHandler
27
+ #
28
+ # Note that a few methods are defined on Chars instead of the handler because they are defined on Object or Kernel
29
+ # and method_missing can't catch them.
30
+ class Chars
31
+
32
+ attr_reader :string # The contained string
33
+ alias_method :to_s, :string
34
+
35
+ include Comparable
36
+
37
+ # The magic method to make String and Chars comparable
38
+ def to_str
39
+ # Using any other ways of overriding the String itself will lead you all the way from infinite loops to
40
+ # core dumps. Don't go there.
41
+ @string
42
+ end
43
+
44
+ # Makes unicode string look like a string in the console
45
+ def inspect
46
+ @string.inspect
47
+ end
48
+
49
+ def is_a?(type)
50
+ if type == String
51
+ true
52
+ else
53
+ super
54
+ end
55
+ end
56
+
57
+ # Fix [] for single numbers
58
+ def [](num)
59
+ if num.is_a?(Fixnum)
60
+ self[num..num]
61
+ else
62
+ super
63
+ end
64
+ end
65
+
66
+ # Create a new Chars instance.
67
+ def initialize(str)
68
+ @string = (str.string rescue str)
69
+ end
70
+
71
+ def each &block
72
+ split(//).each(&block)
73
+ end
74
+
75
+ # Returns -1, 0 or +1 depending on whether the Chars object is to be sorted before, equal or after the
76
+ # object on the right side of the operation. It accepts any object that implements +to_s+. See String.<=>
77
+ # for more details.
78
+ def <=>(other); @string <=> other.to_s; end
79
+
80
+ # Works just like String#split, with the exception that the items in the resulting list are Chars
81
+ # instances instead of String. This makes chaining methods easier.
82
+ def split(*args)
83
+ @string.split(*args).map { |i| i.chars }
84
+ end
85
+
86
+ # Gsub works exactly the same as gsub on a normal string.
87
+ def gsub(*a, &b); @string.gsub(*a, &b).chars; end
88
+
89
+ # Like String.=~ only it returns the character offset (in codepoints) instead of the byte offset.
90
+ def =~(other)
91
+ handler.translate_offset(@string, @string =~ other)
92
+ end
93
+
94
+ # Try to forward all undefined methods to the handler; when a method is not defined on the handler, send it to
95
+ # the contained string. Method_missing is also responsible for making the bang! methods destructive.
96
+ def method_missing(m, *a, &b)
97
+ begin
98
+ # Simulate methods with a ! at the end because we can't touch the enclosed string from the handlers.
99
+ if m.to_s =~ /^(.*)\!$/
100
+ result = handler.send($1, @string, *a, &b)
101
+ if result == @string
102
+ result = nil
103
+ else
104
+ @string.replace result
105
+ end
106
+ else
107
+ result = handler.send(m, @string, *a, &b)
108
+ end
109
+ rescue NoMethodError
110
+ result = @string.send(m, *a, &b)
111
+ rescue Handlers::EncodingError
112
+ @string.replace handler.tidy_bytes(@string)
113
+ retry
114
+ end
115
+
116
+ if result.kind_of?(String)
117
+ result.chars
118
+ else
119
+ result
120
+ end
121
+ end
122
+
123
+ # Set the handler class for the Char objects.
124
+ def self.handler=(klass)
125
+ @@handler = klass
126
+ end
127
+
128
+ # Returns the proper handler for the contained string depending on $KCODE and the encoding of the string. This
129
+ # method is used internally to always redirect messages to the proper classes depending on the context.
130
+ def handler
131
+ if utf8_pragma?
132
+ @@handler
133
+ else
134
+ ActiveSupport::Multibyte::Handlers::PassthruHandler
135
+ end
136
+ end
137
+
138
+ private
139
+
140
+ # +utf8_pragma+ checks if it can send this string to the handlers. It makes sure @string isn't nil and $KCODE is
141
+ # set to 'UTF8'.
142
+ def utf8_pragma?
143
+ !@string.nil? && ($KCODE == 'UTF8')
144
+ end
145
+ end
146
+ end
147
+
148
+ # When we can load the utf8proc library, override normalization with the faster methods
149
+ begin
150
+ require 'utf8proc_native'
151
+ require File.join(File.dirname(__FILE__), "handlers", "utf8_handler_proc")
152
+ ActiveSupport::Multibyte::Chars.handler = ActiveSupport::Multibyte::Handlers::UTF8HandlerProc
153
+ rescue LoadError
154
+ ActiveSupport::Multibyte::Chars.handler = ActiveSupport::Multibyte::Handlers::UTF8Handler
155
+ end
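The proxy behaviour documented at the top of this file can be exercised directly. A hedged sketch follows; method names come from this file, MyHandler is hypothetical, and the results assume Ruby 1.8 with $KCODE set to 'UTF8':

"The Perfect String  ".chars.downcase.strip.normalize.to_s   # => "the perfect string"

# Bang methods are synthesized by method_missing and mutate the wrapped string:
str = "Über"
str.chars.downcase!
str                               # => "über"

# =~ answers in codepoints rather than bytes:
"früh auf" =~ /auf/               # => 6 (byte offset on a plain String)
"früh auf".chars =~ /auf/         # => 5 (codepoint offset)

# Installing a custom handler, as described above:
# ActiveSupport::Multibyte::Chars.handler = MyHandler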
data/lib/unicodechars/multibyte/generators/generate_tables.rb ADDED
@@ -0,0 +1,149 @@
1
+ #!/usr/bin/env ruby
2
+ #begin
3
+ # require File.dirname(__FILE__) + '/../../../active_support'
4
+ #rescue IOError
5
+ #end
6
+ require 'open-uri'
7
+ require 'tmpdir'
8
+
9
+ module ActiveSupport::Multibyte::Handlers #:nodoc:
10
+ class UnicodeDatabase #:nodoc:
11
+ def self.load
12
+ [Hash.new(Codepoint.new),[],{},{}]
13
+ end
14
+ end
15
+
16
+ class UnicodeTableGenerator #:nodoc:
17
+ BASE_URI = "http://www.unicode.org/Public/#{ActiveSupport::Multibyte::UNICODE_VERSION}/ucd/"
18
+ SOURCES = {
19
+ :codepoints => BASE_URI + 'UnicodeData.txt',
20
+ :composition_exclusion => BASE_URI + 'CompositionExclusions.txt',
21
+ :grapheme_break_property => BASE_URI + 'auxiliary/GraphemeBreakProperty.txt',
22
+ :cp1252 => 'http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT'
23
+ }
24
+
25
+ def initialize
26
+ @ucd = UnicodeDatabase.new
27
+
28
+ default = Codepoint.new
29
+ default.combining_class = 0
30
+ default.uppercase_mapping = 0
31
+ default.lowercase_mapping = 0
32
+ @ucd.codepoints = Hash.new(default)
33
+
34
+ @ucd.composition_exclusion = []
35
+ @ucd.composition_map = {}
36
+ @ucd.boundary = {}
37
+ @ucd.cp1252 = {}
38
+ end
39
+
40
+ def parse_codepoints(line)
41
+ codepoint = Codepoint.new
42
+ raise "Could not parse input." unless line =~ /^
43
+ ([0-9A-F]+); # code
44
+ ([^;]+); # name
45
+ ([A-Z]+); # general category
46
+ ([0-9]+); # canonical combining class
47
+ ([A-Z]+); # bidi class
48
+ (<([A-Z]*)>)? # decomposition type
49
+ ((\ ?[0-9A-F]+)*); # decomposition mapping
50
+ ([0-9]*); # decimal digit
51
+ ([0-9]*); # digit
52
+ ([^;]*); # numeric
53
+ ([YN]*); # bidi mirrored
54
+ ([^;]*); # unicode 1.0 name
55
+ ([^;]*); # iso comment
56
+ ([0-9A-F]*); # simple uppercase mapping
57
+ ([0-9A-F]*); # simple lowercase mapping
58
+ ([0-9A-F]*)$/ix # simple titlecase mapping
59
+ codepoint.code = $1.hex
60
+ #codepoint.name = $2
61
+ #codepoint.category = $3
62
+ codepoint.combining_class = Integer($4)
63
+ #codepoint.bidi_class = $5
64
+ codepoint.decomp_type = $7
65
+ codepoint.decomp_mapping = ($8=='') ? nil : $8.split.collect { |element| element.hex }
66
+ #codepoint.bidi_mirrored = ($13=='Y') ? true : false
67
+ codepoint.uppercase_mapping = ($16=='') ? 0 : $16.hex
68
+ codepoint.lowercase_mapping = ($17=='') ? 0 : $17.hex
69
+ #codepoint.titlecase_mapping = ($18=='') ? nil : $18.hex
70
+ @ucd.codepoints[codepoint.code] = codepoint
71
+ end
72
+
73
+ def parse_grapheme_break_property(line)
74
+ if line =~ /^([0-9A-F\.]+)\s*;\s*([\w]+)\s*#/
75
+ type = $2.downcase.intern
76
+ @ucd.boundary[type] ||= []
77
+ if $1.include? '..'
78
+ parts = $1.split '..'
79
+ @ucd.boundary[type] << (parts[0].hex..parts[1].hex)
80
+ else
81
+ @ucd.boundary[type] << $1.hex
82
+ end
83
+ end
84
+ end
85
+
86
+ def parse_composition_exclusion(line)
87
+ if line =~ /^([0-9A-F]+)/i
88
+ @ucd.composition_exclusion << $1.hex
89
+ end
90
+ end
91
+
92
+ def parse_cp1252(line)
93
+ if line =~ /^([0-9A-Fx]+)\s([0-9A-Fx]+)/i
94
+ @ucd.cp1252[$1.hex] = $2.hex
95
+ end
96
+ end
97
+
98
+ def create_composition_map
99
+ @ucd.codepoints.each do |_, cp|
100
+ if !cp.nil? and cp.combining_class == 0 and cp.decomp_type.nil? and !cp.decomp_mapping.nil? and cp.decomp_mapping.length == 2 and @ucd[cp.decomp_mapping[0]].combining_class == 0 and !@ucd.composition_exclusion.include?(cp.code)
101
+ @ucd.composition_map[cp.decomp_mapping[0]] ||= {}
102
+ @ucd.composition_map[cp.decomp_mapping[0]][cp.decomp_mapping[1]] = cp.code
103
+ end
104
+ end
105
+ end
106
+
107
+ def normalize_boundary_map
108
+ @ucd.boundary.each do |k,v|
109
+ if [:lf, :cr].include? k
110
+ @ucd.boundary[k] = v[0]
111
+ end
112
+ end
113
+ end
114
+
115
+ def parse
116
+ SOURCES.each do |type, url|
117
+ filename = File.join(Dir.tmpdir, "#{url.split('/').last}")
118
+ unless File.exist?(filename)
119
+ $stderr.puts "Downloading #{url.split('/').last}"
120
+ File.open(filename, 'wb') do |target|
121
+ open(url) do |source|
122
+ source.each_line { |line| target.write line }
123
+ end
124
+ end
125
+ end
126
+ File.open(filename) do |file|
127
+ file.each_line { |line| send "parse_#{type}".intern, line }
128
+ end
129
+ end
130
+ create_composition_map
131
+ normalize_boundary_map
132
+ end
133
+
134
+ def dump_to(filename)
135
+ File.open(filename, 'wb') do |f|
136
+ f.write Marshal.dump([@ucd.codepoints, @ucd.composition_exclusion, @ucd.composition_map, @ucd.boundary, @ucd.cp1252])
137
+ end
138
+ end
139
+ end
140
+ end
141
+
142
+ if __FILE__ == $0
143
+ filename = ActiveSupport::Multibyte::Handlers::UnicodeDatabase.filename
144
+ generator = ActiveSupport::Multibyte::Handlers::UnicodeTableGenerator.new
145
+ generator.parse
146
+ print "Writing to: #{filename}"
147
+ generator.dump_to filename
148
+ puts " (#{File.size(filename)} bytes)"
149
+ end
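For reference, parse_codepoints above consumes one semicolon-delimited record per line of UnicodeData.txt. A sketch of how the fields map onto Codepoint attributes; the record shown is the standard one for U+00E9, and a full run of the generator also needs network access to unicode.org:

line = "00E9;LATIN SMALL LETTER E WITH ACUTE;Ll;0;L;0065 0301;;;;N;;;00C9;;00C9"
# Capture groups used by the regular expression in parse_codepoints:
#   $1  "00E9"      -> codepoint.code              (0x00E9)
#   $4  "0"         -> codepoint.combining_class
#   $8  "0065 0301" -> codepoint.decomp_mapping    ("e" plus combining acute)
#   $16 "00C9"      -> codepoint.uppercase_mapping ("É")
#   $17 ""          -> codepoint.lowercase_mapping (already lowercase, stored as 0)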
data/lib/unicodechars/multibyte/handlers/passthru_handler.rb ADDED
@@ -0,0 +1,9 @@
1
+ # Chars uses this handler when $KCODE is not set to 'UTF8'. Because this handler doesn't define any methods, all calls
2
+ # will be forwarded to String.
3
+ class ActiveSupport::Multibyte::Handlers::PassthruHandler
4
+
5
+ # Returns the original byte offset
6
+ def self.translate_offset(string, byte_offset) #:nodoc:
7
+ byte_offset
8
+ end
9
+ end
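A short sketch of the dispatch this enables (Ruby 1.8; byte counts assume UTF-8 source text): when $KCODE is anything other than 'UTF8', Chars#handler returns this class, the proxied call raises NoMethodError here, and Chars#method_missing falls back to the plain String method.

$KCODE = 'NONE'
"Café".chars.length   # => 5  (bytes, answered by String#length)
$KCODE = 'UTF8'
"Café".chars.length   # => 4  (codepoints, answered by the UTF-8 handler)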
data/lib/unicodechars/multibyte/handlers/utf8_handler.rb ADDED
@@ -0,0 +1,442 @@
1
+ # Contains all the handlers and helper classes
2
+ module ActiveSupport::Multibyte::Handlers
3
+ class EncodingError < ArgumentError; end
4
+
5
+ class Codepoint #:nodoc:
6
+ attr_accessor :code, :combining_class, :decomp_type, :decomp_mapping, :uppercase_mapping, :lowercase_mapping
7
+ end
8
+
9
+ class UnicodeDatabase #:nodoc:
10
+ attr_accessor :codepoints, :composition_exclusion, :composition_map, :boundary, :cp1252
11
+
12
+ # Creates a new UnicodeDatabase instance and loads the database.
13
+ def initialize
14
+ begin
15
+ @codepoints, @composition_exclusion, @composition_map, @boundary, @cp1252 = self.class.load
16
+ rescue Exception => e
17
+ raise IOError.new("Couldn't load the unicode tables for UTF8Handler (#{e.message}), handler is unusable")
18
+ end
19
+ @codepoints ||= Hash.new(Codepoint.new)
20
+ @composition_exclusion ||= []
21
+ @composition_map ||= {}
22
+ @boundary ||= {}
23
+ @cp1252 ||= {}
24
+
25
+ # Redefine the === method so we can write shorter rules for grapheme cluster breaks
26
+ @boundary.each do |k,_|
27
+ @boundary[k].instance_eval do
28
+ def ===(other)
29
+ detect { |i| i === other } ? true : false
30
+ end
31
+ end if @boundary[k].kind_of?(Array)
32
+ end
33
+ end
34
+
35
+ # Shortcut to ucd.codepoints[]
36
+ def [](index); @codepoints[index]; end
37
+
38
+ # Returns the directory in which the data files are stored
39
+ def self.dirname
40
+ File.dirname(__FILE__) + '/../../values/'
41
+ end
42
+
43
+ # Returns the filename for the data file for this version
44
+ def self.filename
45
+ File.expand_path File.join(dirname, "unicode_tables.dat")
46
+ end
47
+
48
+ # Loads the unicode database and returns all the internal objects of UnicodeDatabase
49
+ def self.load
50
+ File.open(self.filename, 'rb') { |f| Marshal.load f.read }
51
+ end
52
+ end
53
+
54
+ # UTF8Handler implements Unicode-aware operations for strings; these operations will be used by the Chars
55
+ # proxy when $KCODE is set to 'UTF8'.
56
+ class UTF8Handler
57
+ # Hangul character boundaries and properties
58
+ HANGUL_SBASE = 0xAC00
59
+ HANGUL_LBASE = 0x1100
60
+ HANGUL_VBASE = 0x1161
61
+ HANGUL_TBASE = 0x11A7
62
+ HANGUL_LCOUNT = 19
63
+ HANGUL_VCOUNT = 21
64
+ HANGUL_TCOUNT = 28
65
+ HANGUL_NCOUNT = HANGUL_VCOUNT * HANGUL_TCOUNT
66
+ HANGUL_SCOUNT = 11172
67
+ HANGUL_SLAST = HANGUL_SBASE + HANGUL_SCOUNT
68
+ HANGUL_JAMO_FIRST = 0x1100
69
+ HANGUL_JAMO_LAST = 0x11FF
70
+
71
+ # All the unicode whitespace
72
+ UNICODE_WHITESPACE = [
73
+ (0x0009..0x000D).to_a, # White_Space # Cc [5] <control-0009>..<control-000D>
74
+ 0x0020, # White_Space # Zs SPACE
75
+ 0x0085, # White_Space # Cc <control-0085>
76
+ 0x00A0, # White_Space # Zs NO-BREAK SPACE
77
+ 0x1680, # White_Space # Zs OGHAM SPACE MARK
78
+ 0x180E, # White_Space # Zs MONGOLIAN VOWEL SEPARATOR
79
+ (0x2000..0x200A).to_a, # White_Space # Zs [11] EN QUAD..HAIR SPACE
80
+ 0x2028, # White_Space # Zl LINE SEPARATOR
81
+ 0x2029, # White_Space # Zp PARAGRAPH SEPARATOR
82
+ 0x202F, # White_Space # Zs NARROW NO-BREAK SPACE
83
+ 0x205F, # White_Space # Zs MEDIUM MATHEMATICAL SPACE
84
+ 0x3000, # White_Space # Zs IDEOGRAPHIC SPACE
85
+ ].flatten.freeze
86
+
87
+ # The BOM (byte order mark) can also be seen as whitespace; it's a non-rendering character used to distinguish
88
+ # between little- and big-endian byte order. This is not an issue in UTF-8, so it must be ignored.
89
+ UNICODE_LEADERS_AND_TRAILERS = UNICODE_WHITESPACE + [65279] # ZERO-WIDTH NO-BREAK SPACE aka BOM
90
+
91
+ # Borrowed from the Kconv library by Shinji KONO - (also as seen on the W3C site)
92
+ UTF8_PAT = /\A(?:
93
+ [\x00-\x7f] |
94
+ [\xc2-\xdf] [\x80-\xbf] |
95
+ \xe0 [\xa0-\xbf] [\x80-\xbf] |
96
+ [\xe1-\xef] [\x80-\xbf] [\x80-\xbf] |
97
+ \xf0 [\x90-\xbf] [\x80-\xbf] [\x80-\xbf] |
98
+ [\xf1-\xf3] [\x80-\xbf] [\x80-\xbf] [\x80-\xbf] |
99
+ \xf4 [\x80-\x8f] [\x80-\xbf] [\x80-\xbf]
100
+ )*\z/xn
101
+
102
+ # Returns a regular expression pattern that matches the passed Unicode codepoints
103
+ def self.codepoints_to_pattern(array_of_codepoints) #:nodoc:
104
+ array_of_codepoints.collect{ |e| [e].pack 'U*' }.join('|')
105
+ end
106
+ UNICODE_TRAILERS_PAT = /(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+\Z/
107
+ UNICODE_LEADERS_PAT = /\A(#{codepoints_to_pattern(UNICODE_LEADERS_AND_TRAILERS)})+/
108
+
109
+ class << self
110
+
111
+ # ///
112
+ # /// BEGIN String method overrides
113
+ # ///
114
+
115
+ # Inserts the passed string at specified codepoint offsets
116
+ def insert(str, offset, fragment)
117
+ str.replace(
118
+ u_unpack(str).insert(
119
+ offset,
120
+ u_unpack(fragment)
121
+ ).flatten.pack('U*')
122
+ )
123
+ end
124
+
125
+ # Returns the position of the passed argument in the string, counting in codepoints
126
+ def index(str, *args)
127
+ bidx = str.index(*args)
128
+ bidx ? (u_unpack(str.slice(0...bidx)).size) : nil
129
+ end
130
+
131
+ # Does Unicode-aware rstrip
132
+ def rstrip(str)
133
+ str.gsub(UNICODE_TRAILERS_PAT, '')
134
+ end
135
+
136
+ # Does Unicode-aware lstrip
137
+ def lstrip(str)
138
+ str.gsub(UNICODE_LEADERS_PAT, '')
139
+ end
140
+
141
+ # Removes leading and trailing whitespace
142
+ def strip(str)
143
+ str.gsub(UNICODE_LEADERS_PAT, '').gsub(UNICODE_TRAILERS_PAT, '')
144
+ end
145
+
146
+ # Returns the number of codepoints in the string
147
+ def size(str)
148
+ u_unpack(str).size
149
+ end
150
+ alias_method :length, :size
151
+
152
+ # Reverses codepoints in the string.
153
+ def reverse(str)
154
+ u_unpack(str).reverse.pack('U*')
155
+ end
156
+
157
+ # Implements Unicode-aware slice with codepoints. Slicing on one point returns the codepoints for that
158
+ # character.
159
+ def slice(str, *args)
160
+ if (args.size == 2 && args.first.is_a?(Range))
161
+ raise TypeError, 'cannot convert Range into Integer' # Do as if we were native
162
+ elsif args[0].kind_of? Range
163
+ cps = u_unpack(str).slice(*args)
164
+ cps.nil? ? nil : cps.pack('U*')
165
+ elsif args.size == 1 && args[0].kind_of?(Numeric)
166
+ u_unpack(str)[args[0]]
167
+ else
168
+ u_unpack(str).slice(*args).pack('U*')
169
+ end
170
+ end
171
+ alias_method :[], :slice
172
+
173
+ # Convert characters in the string to uppercase
174
+ def upcase(str); to_case :uppercase_mapping, str; end
175
+
176
+ # Convert characters in the string to lowercase
177
+ def downcase(str); to_case :lowercase_mapping, str; end
178
+
179
+ # Returns a copy of +str+ with the first character converted to uppercase and the remainder to lowercase
180
+ def capitalize(str)
181
+ upcase(slice(str, 0..0)) + downcase(slice(str, 1..-1) || '')
182
+ end
183
+
184
+ # ///
185
+ # /// Extra String methods for unicode operations
186
+ # ///
187
+
188
+ # Returns the KC normalization of the string by default. NFKC is considered the best normalization form for
189
+ # passing strings to databases and validations.
190
+ #
191
+ # * <tt>str</tt>: The string to perform normalization on.
192
+ # * <tt>form</tt>: The form you want to normalize in. Should be one of the following: :c, :kc, :d or :kd.
193
+ def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM)
194
+ # See http://www.unicode.org/reports/tr15, Table 1
195
+ codepoints = u_unpack(str)
196
+ case form
197
+ when :d
198
+ reorder_characters(decompose_codepoints(:canonical, codepoints))
199
+ when :c
200
+ compose_codepoints reorder_characters(decompose_codepoints(:canonical, codepoints))
201
+ when :kd
202
+ reorder_characters(decompose_codepoints(:compatability, codepoints))
203
+ when :kc
204
+ compose_codepoints reorder_characters(decompose_codepoints(:compatability, codepoints))
205
+ else
206
+ raise ArgumentError, "#{form} is not a valid normalization variant", caller
207
+ end.pack('U*')
208
+ end
209
+
210
+ # Perform decomposition on the characters in the string
211
+ def decompose(str)
212
+ decompose_codepoints(:canonical, u_unpack(str)).pack('U*')
213
+ end
214
+
215
+ # Perform composition on the characters in the string
216
+ def compose(str)
217
+ compose_codepoints(u_unpack(str)).pack('U*')
218
+ end
219
+
220
+ # ///
221
+ # /// BEGIN Helper methods for unicode operation
222
+ # ///
223
+
224
+ # Used to translate an offset from bytes to characters, for instance one received from a regular expression match
225
+ def translate_offset(str, byte_offset)
226
+ return 0 if str == ''
227
+ return nil if byte_offset.nil?
228
+ chunk = str[0..byte_offset]
229
+ begin
230
+ begin
231
+ chunk.unpack('U*').length - 1
232
+ rescue ArgumentError => e
233
+ chunk = str[0..(byte_offset+=1)]
234
+ # Stop retrying at the end of the string
235
+ raise e unless byte_offset < chunk.length
236
+ # We damaged a character, retry
237
+ retry
238
+ end
239
+ # Catch the ArgumentError so we can throw our own
240
+ rescue ArgumentError
241
+ raise EncodingError.new('malformed UTF-8 character')
242
+ end
243
+ end
244
+
245
+ # Checks if the string is valid UTF-8.
246
+ def consumes?(str)
247
+ # Unpack is a little bit faster than regular expressions
248
+ begin
249
+ str.unpack('U*')
250
+ true
251
+ rescue ArgumentError
252
+ false
253
+ end
254
+ end
255
+
256
+ # Returns the number of grapheme clusters in the string. This method is very likely to be moved or renamed
257
+ # in future versions.
258
+ def g_length(str)
259
+ g_unpack(str).length
260
+ end
261
+
262
+ # Replaces all the non-UTF-8 bytes with their ISO-8859-1 or CP1252 equivalents, resulting in a valid UTF-8 string
263
+ def tidy_bytes(str)
264
+ str.split(//u).map do |c|
265
+ if !UTF8_PAT.match(c)
266
+ n = c.unpack('C')[0]
267
+ n < 128 ? n.chr :
268
+ n < 160 ? [UCD.cp1252[n] || n].pack('U') :
269
+ n < 192 ? "\xC2" + n.chr : "\xC3" + (n-64).chr
270
+ else
271
+ c
272
+ end
273
+ end.join
274
+ end
275
+
276
+ protected
277
+
278
+ # Detect whether the codepoint is in a certain character class. Primarily used by the
279
+ # grapheme cluster support.
280
+ def in_char_class?(codepoint, classes)
281
+ classes.detect { |c| UCD.boundary[c] === codepoint } ? true : false
282
+ end
283
+
284
+ # Unpack the string at codepoint boundaries
285
+ def u_unpack(str)
286
+ begin
287
+ str.unpack 'U*'
288
+ rescue ArgumentError
289
+ raise EncodingError.new('malformed UTF-8 character')
290
+ end
291
+ end
292
+
293
+ # Unpack the string at grapheme boundaries instead of codepoint boundaries
294
+ def g_unpack(str)
295
+ codepoints = u_unpack(str)
296
+ unpacked = []
297
+ pos = 0
298
+ marker = 0
299
+ eoc = codepoints.length
300
+ while(pos < eoc)
301
+ pos += 1
302
+ previous = codepoints[pos-1]
303
+ current = codepoints[pos]
304
+ if (
305
+ # CR X LF
306
+ one = ( previous == UCD.boundary[:cr] and current == UCD.boundary[:lf] ) or
307
+ # L X (L|V|LV|LVT)
308
+ two = ( UCD.boundary[:l] === previous and in_char_class?(current, [:l,:v,:lv,:lvt]) ) or
309
+ # (LV|V) X (V|T)
310
+ three = ( in_char_class?(previous, [:lv,:v]) and in_char_class?(current, [:v,:t]) ) or
311
+ # (LVT|T) X (T)
312
+ four = ( in_char_class?(previous, [:lvt,:t]) and UCD.boundary[:t] === current ) or
313
+ # X Extend
314
+ five = (UCD.boundary[:extend] === current)
315
+ )
316
+ else
317
+ unpacked << codepoints[marker..pos-1]
318
+ marker = pos
319
+ end
320
+ end
321
+ unpacked
322
+ end
323
+
324
+ # Reverse operation of g_unpack
325
+ def g_pack(unpacked)
326
+ unpacked.flatten
327
+ end
328
+
329
+ # Convert characters to a different case
330
+ def to_case(way, str)
331
+ u_unpack(str).map do |codepoint|
332
+ cp = UCD[codepoint]
333
+ unless cp.nil?
334
+ ncp = cp.send(way)
335
+ ncp > 0 ? ncp : codepoint
336
+ else
337
+ codepoint
338
+ end
339
+ end.pack('U*')
340
+ end
341
+
342
+ # Re-order codepoints so the string becomes canonical
343
+ def reorder_characters(codepoints)
344
+ length = codepoints.length - 1
345
+ pos = 0
346
+ while pos < length do
347
+ cp1, cp2 = UCD[codepoints[pos]], UCD[codepoints[pos+1]]
348
+ if (cp1.combining_class > cp2.combining_class) && (cp2.combining_class > 0)
349
+ codepoints[pos..pos+1] = cp2.code, cp1.code
350
+ pos += (pos > 0 ? -1 : 1)
351
+ else
352
+ pos += 1
353
+ end
354
+ end
355
+ codepoints
356
+ end
357
+
358
+ # Decompose composed characters to the decomposed form
359
+ def decompose_codepoints(type, codepoints)
360
+ codepoints.inject([]) do |decomposed, cp|
361
+ # if it's a hangul syllable starter character
362
+ if HANGUL_SBASE <= cp and cp < HANGUL_SLAST
363
+ sindex = cp - HANGUL_SBASE
364
+ ncp = [] # new codepoints
365
+ ncp << HANGUL_LBASE + sindex / HANGUL_NCOUNT
366
+ ncp << HANGUL_VBASE + (sindex % HANGUL_NCOUNT) / HANGUL_TCOUNT
367
+ tindex = sindex % HANGUL_TCOUNT
368
+ ncp << (HANGUL_TBASE + tindex) unless tindex == 0
369
+ decomposed.concat ncp
370
+ # if the codepoint is decomposable with the current decomposition type
371
+ elsif (ncp = UCD[cp].decomp_mapping) and (!UCD[cp].decomp_type || type == :compatability)
372
+ decomposed.concat decompose_codepoints(type, ncp.dup)
373
+ else
374
+ decomposed << cp
375
+ end
376
+ end
377
+ end
378
+
379
+ # Compose decomposed characters to the composed form
380
+ def compose_codepoints(codepoints)
381
+ pos = 0
382
+ eoa = codepoints.length - 1
383
+ starter_pos = 0
384
+ starter_char = codepoints[0]
385
+ previous_combining_class = -1
386
+ while pos < eoa
387
+ pos += 1
388
+ lindex = starter_char - HANGUL_LBASE
389
+ # -- Hangul
390
+ if 0 <= lindex and lindex < HANGUL_LCOUNT
391
+ vindex = codepoints[starter_pos+1] - HANGUL_VBASE rescue vindex = -1
392
+ if 0 <= vindex and vindex < HANGUL_VCOUNT
393
+ tindex = codepoints[starter_pos+2] - HANGUL_TBASE rescue tindex = -1
394
+ if 0 <= tindex and tindex < HANGUL_TCOUNT
395
+ j = starter_pos + 2
396
+ eoa -= 2
397
+ else
398
+ tindex = 0
399
+ j = starter_pos + 1
400
+ eoa -= 1
401
+ end
402
+ codepoints[starter_pos..j] = (lindex * HANGUL_VCOUNT + vindex) * HANGUL_TCOUNT + tindex + HANGUL_SBASE
403
+ end
404
+ starter_pos += 1
405
+ starter_char = codepoints[starter_pos]
406
+ # -- Other characters
407
+ else
408
+ current_char = codepoints[pos]
409
+ current = UCD[current_char]
410
+ if current.combining_class > previous_combining_class
411
+ if ref = UCD.composition_map[starter_char]
412
+ composition = ref[current_char]
413
+ else
414
+ composition = nil
415
+ end
416
+ unless composition.nil?
417
+ codepoints[starter_pos] = composition
418
+ starter_char = composition
419
+ codepoints.delete_at pos
420
+ eoa -= 1
421
+ pos -= 1
422
+ previous_combining_class = -1
423
+ else
424
+ previous_combining_class = current.combining_class
425
+ end
426
+ else
427
+ previous_combining_class = current.combining_class
428
+ end
429
+ if current.combining_class == 0
430
+ starter_pos = pos
431
+ starter_char = codepoints[pos]
432
+ end
433
+ end
434
+ end
435
+ codepoints
436
+ end
437
+
438
+ # Unicode Database
439
+ UCD = UnicodeDatabase.new
440
+ end
441
+ end
442
+ end
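A few illustrative calls against the handler defined above. This is a sketch, not part of the package, and assumes Ruby 1.8 with $KCODE set to 'UTF8' and the bundled unicode_tables.dat available:

handler = ActiveSupport::Multibyte::Handlers::UTF8Handler

composed   = [0x00E9].pack('U')            # "é" as one precomposed codepoint
decomposed = [0x0065, 0x0301].pack('U*')   # "e" followed by a combining acute

handler.normalize(decomposed, :c) == composed   # => true
handler.normalize(composed, :d) == decomposed   # => true
handler.size(composed)                          # => 1
handler.size(decomposed)                        # => 2

# tidy_bytes turns a stray CP1252/Latin-1 byte into valid UTF-8:
handler.tidy_bytes("caf\xE9")                   # => "café"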
data/lib/unicodechars/multibyte/handlers/utf8_handler_proc.rb ADDED
@@ -0,0 +1,44 @@
1
+ # Methods in this handler call functions in the utf8proc ruby extension. These are significantly faster than the
2
+ # pure ruby versions. Chars automatically uses this handler when it can load the utf8proc extension. For
3
+ # documentation on handler methods see UTF8Handler.
4
+ class ActiveSupport::Multibyte::Handlers::UTF8HandlerProc < ActiveSupport::Multibyte::Handlers::UTF8Handler
5
+
6
+ class << self
7
+ def normalize(str, form=ActiveSupport::Multibyte::DEFAULT_NORMALIZATION_FORM) #:nodoc:
8
+ codepoints = str.unpack('U*')
9
+ case form
10
+ when :d
11
+ utf8map(str, :stable)
12
+ when :c
13
+ utf8map(str, :stable, :compose)
14
+ when :kd
15
+ utf8map(str, :stable, :compat)
16
+ when :kc
17
+ utf8map(str, :stable, :compose, :compat)
18
+ else
19
+ raise ArgumentError, "#{form} is not a valid normalization variant", caller
20
+ end
21
+ end
22
+
23
+ def decompose(str) #:nodoc:
24
+ utf8map(str, :stable)
25
+ end
26
+
27
+ def downcase(str) #:nodoc:
28
+ utf8map(str, :casefold)
29
+ end
30
+
31
+ protected
32
+
33
+ def utf8map(str, *option_array) #:nodoc:
34
+ options = 0
35
+ option_array.each do |option|
36
+ flag = Utf8Proc::Options[option]
37
+ raise ArgumentError, "Unknown argument given to utf8map." unless
38
+ flag
39
+ options |= flag
40
+ end
41
+ return Utf8Proc::utf8map(str, options)
42
+ end
43
+ end
44
+ end
data/lib/unicodechars/version.rb ADDED
@@ -0,0 +1,9 @@
1
+ module Unicodechar #:nodoc:
2
+ module VERSION #:nodoc:
3
+ MAJOR = 0
4
+ MINOR = 0
5
+ TINY = 1
6
+
7
+ STRING = [MAJOR, MINOR, TINY].join('.')
8
+ end
9
+ end
data/test/test_helper.rb ADDED
@@ -0,0 +1,2 @@
1
+ require 'test/unit'
2
+ require File.dirname(__FILE__) + '/../lib/unicodechars'
data/test/unicodechars_test.rb ADDED
@@ -0,0 +1,11 @@
1
+ require File.dirname(__FILE__) + '/test_helper.rb'
2
+
3
+ class UnicodecharTest < Test::Unit::TestCase
4
+
5
+ def setup
6
+ end
7
+
8
+ def test_truth
9
+ assert true
10
+ end
11
+ end
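The generated test above only asserts true. A hedged sketch of a more substantive test against the chars proxy; the class and test names below are hypothetical additions, and the assertions assume the UTF-8 handler and its data file are available:

require File.dirname(__FILE__) + '/test_helper.rb'

class CharsProxyTest < Test::Unit::TestCase
  def test_counts_codepoints_not_bytes
    assert_equal 5, "hellö".chars.length
  end

  def test_composes_decomposed_input
    composed   = [0x00E9].pack('U')
    decomposed = [0x0065, 0x0301].pack('U*')
    assert_equal composed, decomposed.chars.normalize(:c).to_s
  end
end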
metadata ADDED
@@ -0,0 +1,75 @@
1
+ --- !ruby/object:Gem::Specification
2
+ rubygems_version: 0.9.0
3
+ specification_version: 1
4
+ name: unicodechars
5
+ version: !ruby/object:Gem::Version
6
+ version: 0.0.1
7
+ date: 2006-10-29 00:00:00 -04:00
8
+ summary: description of gem
9
+ require_paths:
10
+ - lib
11
+ email: your contact email for bug fixes and info
12
+ homepage: http://unicodechars.rubyforge.org
13
+ rubyforge_project: unicodechars
14
+ description: description of gem
15
+ autorequire: unicodechars
16
+ default_executable:
17
+ bindir: bin
18
+ has_rdoc: true
19
+ required_ruby_version: !ruby/object:Gem::Version::Requirement
20
+ requirements:
21
+ - - ">"
22
+ - !ruby/object:Gem::Version
23
+ version: 0.0.0
24
+ version:
25
+ platform: ruby
26
+ signing_key:
27
+ cert_chain:
28
+ post_install_message:
29
+ authors:
30
+ - yehudakatz
31
+ files:
32
+ - README
33
+ - CHANGELOG
34
+ - Rakefile
35
+ - test/test_helper.rb
36
+ - test/unicodechars_test.rb
37
+ - lib/unicodechars
38
+ - lib/unicodechars.rb
39
+ - lib/unicodechars/multibyte
40
+ - lib/unicodechars/multibyte.rb
41
+ - lib/unicodechars/values
42
+ - lib/unicodechars/version.rb
43
+ - lib/unicodechars/multibyte/chars.rb
44
+ - lib/unicodechars/multibyte/generators
45
+ - lib/unicodechars/multibyte/handlers
46
+ - lib/unicodechars/multibyte/generators/generate_tables.rb
47
+ - lib/unicodechars/multibyte/handlers/passthru_handler.rb
48
+ - lib/unicodechars/multibyte/handlers/utf8_handler.rb
49
+ - lib/unicodechars/multibyte/handlers/utf8_handler_proc.rb
50
+ - lib/unicodechars/values/unicode_tables.dat
51
+ test_files: []
52
+
53
+ rdoc_options:
54
+ - --quiet
55
+ - --title
56
+ - unicodechars documentation
57
+ - --opname
58
+ - index.html
59
+ - --line-numbers
60
+ - --main
61
+ - README
62
+ - --inline-source
63
+ - --exclude
64
+ - ^(examples|extras)/
65
+ extra_rdoc_files:
66
+ - README
67
+ - CHANGELOG
68
+ executables: []
69
+
70
+ extensions: []
71
+
72
+ requirements: []
73
+
74
+ dependencies: []
75
+