wikitext 0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,132 @@
1
+ #!/usr/bin/env ruby
2
+ # Copyright 2007-2008 Wincent Colaiuta
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+ #
8
+ # This program is distributed in the hope that it will be useful,
9
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
+ # GNU General Public License for more details.
12
+ #
13
+ # You should have received a copy of the GNU General Public License
14
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
15
+
16
+ require File.join(File.dirname(__FILE__), 'spec_helper.rb')
17
+ require 'wikitext'
18
+ require 'uri'
19
+
20
+ describe Wikitext, 'encoding a link target' do
21
+ it 'should complain if passed nil' do
22
+ lambda { Wikitext::Parser.encode_link_target(nil) }.should raise_error
23
+ end
24
+
25
+ it 'should do nothing on zero-length input' do
26
+ Wikitext::Parser.encode_link_target('').should == ''
27
+ end
28
+
29
+ it 'should convert embedded spaces into "%20"' do
30
+ Wikitext::Parser.encode_link_target('hello world').should == 'hello%20world'
31
+ end
32
+
33
+ it 'should eat leading spaces' do
34
+ Wikitext::Parser.encode_link_target(' hello world').should == 'hello%20world'
35
+ Wikitext::Parser.encode_link_target(' hello world').should == 'hello%20world'
36
+ Wikitext::Parser.encode_link_target(' hello world').should == 'hello%20world'
37
+ Wikitext::Parser.encode_link_target(' hello world').should == 'hello%20world'
38
+ Wikitext::Parser.encode_link_target(' hello world').should == 'hello%20world'
39
+ Wikitext::Parser.encode_link_target(' hello world').should == 'hello%20world'
40
+ end
41
+
42
+ it 'should eat trailing spaces' do
43
+ Wikitext::Parser.encode_link_target('hello world ').should == 'hello%20world'
44
+ Wikitext::Parser.encode_link_target('hello world ').should == 'hello%20world'
45
+ Wikitext::Parser.encode_link_target('hello world ').should == 'hello%20world'
46
+ Wikitext::Parser.encode_link_target('hello world ').should == 'hello%20world'
47
+ Wikitext::Parser.encode_link_target('hello world ').should == 'hello%20world'
48
+ Wikitext::Parser.encode_link_target('hello world ').should == 'hello%20world'
49
+ end
50
+
51
+ it 'should eat leading and trailing spaces combined' do
52
+ Wikitext::Parser.encode_link_target(' hello world ').should == 'hello%20world'
53
+ Wikitext::Parser.encode_link_target(' hello world ').should == 'hello%20world'
54
+ Wikitext::Parser.encode_link_target(' hello world ').should == 'hello%20world'
55
+ Wikitext::Parser.encode_link_target(' hello world ').should == 'hello%20world'
56
+ Wikitext::Parser.encode_link_target(' hello world ').should == 'hello%20world'
57
+ Wikitext::Parser.encode_link_target(' hello world ').should == 'hello%20world'
58
+ end
59
+
60
+ it 'should return nothing for input consisting entirely of spaces' do
61
+ Wikitext::Parser.encode_link_target(' ').should == ''
62
+ Wikitext::Parser.encode_link_target(' ').should == ''
63
+ Wikitext::Parser.encode_link_target(' ').should == ''
64
+ Wikitext::Parser.encode_link_target(' ').should == ''
65
+ Wikitext::Parser.encode_link_target(' ').should == ''
66
+ Wikitext::Parser.encode_link_target(' ').should == ''
67
+ end
68
+
69
+ it 'should convert reserved symbols into percent escapes' do
70
+ Wikitext::Parser.encode_link_target('http://www.apple.com/q?foo').should == 'http%3a%2f%2fwww.apple.com%2fq%3ffoo'
71
+ end
72
+
73
+ it 'should convert non-ASCII into UTF-8 and then apply percent escapes' do
74
+ Wikitext::Parser.encode_link_target('cañon').should == 'ca%c3%b1on'
75
+ end
76
+
77
+ it 'should handle mixed scenarios (commas, double-quotes and UTF-8)' do
78
+ Wikitext::Parser.encode_link_target('foo, "bar" & baz €').should == 'foo%2c%20%22bar%22%20%26%20baz%20%e2%82%ac'
79
+ end
80
+
81
+ it 'should get the same answer as URI.escape' do
82
+ reserved = Regexp.new("[^#{URI::PATTERN::UNRESERVED}]")
83
+ ['foo bar', 'http://www.google.com/search?q=hello&foo=bar', '€'].each do |string|
84
+ Wikitext::Parser.encode_link_target(string).should == URI.escape(string, reserved).downcase
85
+ end
86
+ end
87
+
88
+ # "special" links don't get transformed in any way
89
+ describe 'special links' do
90
+ it 'should recognize links which match /\A[a-z]+\/\d+\z/ as being special' do
91
+ string = 'foo/10'
92
+ Wikitext::Parser.encode_special_link_target(string).should == string
93
+ Wikitext::Parser.encode_link_target(string).should_not == string
94
+ end
95
+
96
+ it "should not recognize links which don't match at /\A/ as being special" do
97
+ string = ' foo/10'
98
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
99
+ string = '..foo/10'
100
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
101
+ string = '12foo/10'
102
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
103
+ end
104
+
105
+ it "should not recognize links which don't match at /\z/ as being special" do
106
+ string = 'foo/10 '
107
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
108
+ string = 'foo/10__'
109
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
110
+ string = 'foo/10##'
111
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
112
+ string = 'foo/10ab'
113
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
114
+ end
115
+
116
+ it "should not recognize links which don't match at /[a-z]/ (case differences) as being special" do
117
+ string = 'FOO/10'
118
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
119
+ end
120
+
121
+ it "should not recognize links which don't match at /[0-9]/ (digit requirement) as being special" do
122
+ string = 'foo/xx'
123
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
124
+ end
125
+
126
+ it "should not recognize links which don't match at /\// as being special" do
127
+ string = 'foo 10'
128
+ Wikitext::Parser.encode_special_link_target(string).should_not == string
129
+ end
130
+ end
131
+ end
132
+
@@ -0,0 +1,228 @@
1
+ #!/usr/bin/env ruby
2
+ # Copyright 2007-2008 Wincent Colaiuta
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+ #
8
+ # This program is distributed in the hope that it will be useful,
9
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
+ # GNU General Public License for more details.
12
+ #
13
+ # You should have received a copy of the GNU General Public License
14
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
15
+
16
+ require File.join(File.dirname(__FILE__), 'spec_helper.rb')
17
+ require 'wikitext'
18
+
19
+ describe Wikitext, 'sanitizing a link target' do
20
+ it 'should complain if passed nil' do
21
+ lambda { Wikitext::Parser.sanitize_link_target(nil) }.should raise_error
22
+ end
23
+
24
+ it 'should complain if passed <' do
25
+ lambda { Wikitext::Parser.sanitize_link_target('<') }.should raise_error(RangeError, /</)
26
+ end
27
+
28
+ it 'should complain if passed >' do
29
+ lambda { Wikitext::Parser.sanitize_link_target('>') }.should raise_error(RangeError, />/)
30
+ end
31
+
32
+ it 'should do nothing on zero-length input' do
33
+ Wikitext::Parser.sanitize_link_target('').should == ''
34
+ end
35
+
36
+ it 'should do nothing to embedded spaces' do
37
+ Wikitext::Parser.sanitize_link_target('hello world').should == 'hello world'
38
+ end
39
+
40
+ it 'should eat leading spaces' do
41
+ Wikitext::Parser.sanitize_link_target(' hello world').should == 'hello world'
42
+ Wikitext::Parser.sanitize_link_target(' hello world').should == 'hello world'
43
+ Wikitext::Parser.sanitize_link_target(' hello world').should == 'hello world'
44
+ Wikitext::Parser.sanitize_link_target(' hello world').should == 'hello world'
45
+ Wikitext::Parser.sanitize_link_target(' hello world').should == 'hello world'
46
+ Wikitext::Parser.sanitize_link_target(' hello world').should == 'hello world'
47
+ end
48
+
49
+ it 'should eat trailing spaces' do
50
+ Wikitext::Parser.sanitize_link_target('hello world ').should == 'hello world'
51
+ Wikitext::Parser.sanitize_link_target('hello world ').should == 'hello world'
52
+ Wikitext::Parser.sanitize_link_target('hello world ').should == 'hello world'
53
+ Wikitext::Parser.sanitize_link_target('hello world ').should == 'hello world'
54
+ Wikitext::Parser.sanitize_link_target('hello world ').should == 'hello world' # was a crasher
55
+ Wikitext::Parser.sanitize_link_target('hello world ').should == 'hello world' # was a crasher
56
+
57
+ # same but with lots of entities to force a reallocation (we were crashing under reallocation)
58
+ expected = '&quot;&quot;&quot;&quot;&quot;&quot;&quot;&quot;&quot;&quot;'
59
+ Wikitext::Parser.sanitize_link_target('"""""""""" ').should == expected
60
+ end
61
+
62
+ it 'should eat leading and trailing spaces combined' do
63
+ Wikitext::Parser.sanitize_link_target(' hello world ').should == 'hello world'
64
+ Wikitext::Parser.sanitize_link_target(' hello world ').should == 'hello world'
65
+ Wikitext::Parser.sanitize_link_target(' hello world ').should == 'hello world'
66
+ Wikitext::Parser.sanitize_link_target(' hello world ').should == 'hello world'
67
+ Wikitext::Parser.sanitize_link_target(' hello world ').should == 'hello world'
68
+ Wikitext::Parser.sanitize_link_target(' hello world ').should == 'hello world'
69
+ end
70
+
71
+ it 'should return nothing for input consisting entirely of spaces' do
72
+ Wikitext::Parser.sanitize_link_target(' ').should == ''
73
+ Wikitext::Parser.sanitize_link_target(' ').should == ''
74
+ Wikitext::Parser.sanitize_link_target(' ').should == ''
75
+ Wikitext::Parser.sanitize_link_target(' ').should == ''
76
+ Wikitext::Parser.sanitize_link_target(' ').should == ''
77
+ Wikitext::Parser.sanitize_link_target(' ').should == ''
78
+ end
79
+
80
+ it 'should convert double quotes into named entities' do
81
+ Wikitext::Parser.sanitize_link_target('hello "world"').should == 'hello &quot;world&quot;'
82
+ end
83
+
84
+ it 'should convert ampersands into named entities' do
85
+ Wikitext::Parser.sanitize_link_target('hello & goodbye').should == 'hello &amp; goodbye'
86
+ end
87
+
88
+ it 'should convert non-ASCII hexadecimal entities' do
89
+ Wikitext::Parser.sanitize_link_target('cañon').should == 'ca&#x00f1;on'
90
+ end
91
+
92
+ it 'should handle mixed scenarios (ampersands, double-quotes and non-ASCII)' do
93
+ Wikitext::Parser.sanitize_link_target('foo, "bar" & baz €').should == 'foo, &quot;bar&quot; &amp; baz &#x20ac;'
94
+ end
95
+
96
+ # here we're exercising the _Wikitext_utf8_to_utf32 function
97
+ describe 'with invalidly encoded input' do
98
+ it 'should raise an exception for missing second byte' do
99
+ lambda {
100
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::TWO_BYTES_MISSING_SECOND_BYTE)
101
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
102
+ lambda {
103
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::TWO_BYTES_MISSING_SECOND_BYTE)
104
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
105
+ end
106
+
107
+ it 'should raise an exception for malformed second byte' do
108
+ lambda {
109
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::TWO_BYTES_MALFORMED_SECOND_BYTE)
110
+ }.should raise_error(Wikitext::Parser::Error, /malformed/)
111
+ lambda {
112
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::TWO_BYTES_MALFORMED_SECOND_BYTE)
113
+ }.should raise_error(Wikitext::Parser::Error, /malformed/)
114
+ end
115
+
116
+ it 'should raise an exception for overlong sequence' do
117
+ lambda {
118
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::OVERLONG)
119
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
120
+ lambda {
121
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::OVERLONG)
122
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
123
+
124
+ # alternate
125
+ lambda {
126
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::OVERLONG_ALT)
127
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
128
+ lambda {
129
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::OVERLONG_ALT)
130
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
131
+ end
132
+
133
+ it 'should raise an exception for missing second byte in three-byte sequence' do
134
+ lambda {
135
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::THREE_BYTES_MISSING_SECOND_BYTE)
136
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
137
+ lambda {
138
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::THREE_BYTES_MISSING_SECOND_BYTE)
139
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
140
+ end
141
+
142
+ it 'should raise an exception for missing third byte in three-byte sequence' do
143
+ lambda {
144
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::THREE_BYTES_MISSING_THIRD_BYTE)
145
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
146
+ lambda {
147
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::THREE_BYTES_MISSING_THIRD_BYTE)
148
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
149
+ end
150
+
151
+ it 'should raise an exception for malformed second byte in three-byte sequence' do
152
+ lambda {
153
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::THREE_BYTES_MALFORMED_SECOND_BYTE)
154
+ }.should raise_error(Wikitext::Parser::Error, /malformed/)
155
+ lambda {
156
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::THREE_BYTES_MALFORMED_SECOND_BYTE)
157
+ }.should raise_error(Wikitext::Parser::Error, /malformed/)
158
+ end
159
+
160
+ it 'should raise an exception for malformed third byte in three-byte sequence' do
161
+ lambda {
162
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::THREE_BYTES_MALFORMED_THIRD_BYTE)
163
+ }.should raise_error(Wikitext::Parser::Error, /malformed/)
164
+ lambda {
165
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::THREE_BYTES_MALFORMED_THIRD_BYTE)
166
+ }.should raise_error(Wikitext::Parser::Error, /malformed/)
167
+ end
168
+
169
+ it 'should raise an exception for missing second byte in four-byte sequence' do
170
+ lambda {
171
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::FOUR_BYTES_MISSING_SECOND_BYTE)
172
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
173
+ lambda {
174
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::FOUR_BYTES_MISSING_SECOND_BYTE)
175
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
176
+ end
177
+
178
+ it 'should raise an exception for missing third byte in four-byte sequence' do
179
+ lambda {
180
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::FOUR_BYTES_MISSING_THIRD_BYTE)
181
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
182
+ lambda {
183
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::FOUR_BYTES_MISSING_THIRD_BYTE)
184
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
185
+ end
186
+
187
+ it 'should raise an exception for missing fourth byte in four-byte sequence' do
188
+ lambda {
189
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::FOUR_BYTES_MISSING_FOURTH_BYTE)
190
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
191
+ lambda {
192
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::FOUR_BYTES_MISSING_FOURTH_BYTE)
193
+ }.should raise_error(Wikitext::Parser::Error, /truncated/)
194
+ end
195
+
196
+ it 'should raise an exception for illegal first byte in four-byte sequence' do
197
+ lambda {
198
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::FOUR_BYTES_ILLEGAL_FIRST_BYTE)
199
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
200
+ lambda {
201
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::FOUR_BYTES_ILLEGAL_FIRST_BYTE)
202
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
203
+
204
+ lambda {
205
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::FOUR_BYTES_ILLEGAL_FIRST_BYTE_ALT)
206
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
207
+ lambda {
208
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::FOUR_BYTES_ILLEGAL_FIRST_BYTE_ALT)
209
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
210
+
211
+ lambda {
212
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::FOUR_BYTES_ILLEGAL_FIRST_BYTE_ALT2)
213
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
214
+ lambda {
215
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::FOUR_BYTES_ILLEGAL_FIRST_BYTE_ALT2)
216
+ }.should raise_error(Wikitext::Parser::Error, /overlong/)
217
+ end
218
+
219
+ it 'should raise an exception for unexpected bytes' do
220
+ lambda {
221
+ Wikitext::Parser.sanitize_link_target(UTF8::Invalid::UNEXPECTED_BYTE)
222
+ }.should raise_error(Wikitext::Parser::Error, /unexpected/)
223
+ lambda {
224
+ Wikitext::Parser.sanitize_link_target('good text' + UTF8::Invalid::UNEXPECTED_BYTE)
225
+ }.should raise_error(Wikitext::Parser::Error, /unexpected/)
226
+ end
227
+ end
228
+ end
@@ -0,0 +1,155 @@
1
+ #!/usr/bin/env ruby
2
+ # Copyright 2007-2008 Wincent Colaiuta
3
+ # This program is free software: you can redistribute it and/or modify
4
+ # it under the terms of the GNU General Public License as published by
5
+ # the Free Software Foundation, either version 3 of the License, or
6
+ # (at your option) any later version.
7
+ #
8
+ # This program is distributed in the hope that it will be useful,
9
+ # but WITHOUT ANY WARRANTY; without even the implied warranty of
10
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11
+ # GNU General Public License for more details.
12
+ #
13
+ # You should have received a copy of the GNU General Public License
14
+ # along with this program. If not, see <http://www.gnu.org/licenses/>.
15
+
16
+ require File.join(File.dirname(__FILE__), 'spec_helper.rb')
17
+ require 'wikitext'
18
+
19
+ describe Wikitext::Parser, 'parsing <nowiki> spans' do
20
+ before do
21
+ @parser = Wikitext::Parser.new
22
+ end
23
+
24
+ it 'should not echo paired <nowiki> and </nowiki> tags' do
25
+ @parser.parse('foo <nowiki>bar</nowiki> baz').should == "<p>foo bar baz</p>\n"
26
+ end
27
+
28
+ it 'should automatically handle missing closing tags (at end of output)' do
29
+ # note that CRLF doesn't auto-terminate a <nowiki> span, unlike other inline spans
30
+ @parser.parse('foo <nowiki>bar').should == "<p>foo bar</p>\n"
31
+ end
32
+
33
+ it 'should unconditionally echo newlines inside <nowiki> spans' do
34
+ @parser.parse("<nowiki>foo\nbar</nowiki>").should == "<p>foo\nbar</p>\n"
35
+ @parser.parse("<nowiki>foo\n\nbar</nowiki>").should == "<p>foo\n\nbar</p>\n"
36
+ @parser.parse("<nowiki>foo\n\n\nbar</nowiki>").should == "<p>foo\n\n\nbar</p>\n"
37
+ end
38
+
39
+ it 'should convert unexpected closing tags into entities' do
40
+ @parser.parse('foo </nowiki>bar').should == "<p>foo &lt;/nowiki&gt;bar</p>\n"
41
+ end
42
+
43
+ it 'should convert nested opening tags into entities' do
44
+ @parser.parse('<nowiki>foo<nowiki>bar</nowiki>baz').should == "<p>foo&lt;nowiki&gt;barbaz</p>\n"
45
+ end
46
+
47
+ it 'should have no effect inside <pre> blocks' do
48
+ @parser.parse(' <nowiki>foo</nowiki>').should == "<pre>&lt;nowiki&gt;foo&lt;/nowiki&gt;</pre>\n"
49
+ end
50
+
51
+ it 'should pass short BLOCKQUOTE tokens through without any special meaning' do
52
+ @parser.parse("<nowiki>\n></nowiki>").should == "<p>\n&gt;</p>\n"
53
+ end
54
+
55
+ it 'should pass long BLOCKQUOTE tokens through without any special meaning' do
56
+ @parser.parse("<nowiki>\n> </nowiki>").should == "<p>\n&gt; </p>\n"
57
+ end
58
+
59
+ it 'should pass <tt> and </tt> tags through without any special meaning' do
60
+ @parser.parse('<nowiki>foo <tt>bar</tt></nowiki>').should == "<p>foo &lt;tt&gt;bar&lt;/tt&gt;</p>\n"
61
+ end
62
+
63
+ it 'should pass <em> and </em> tags through without any special meaning' do
64
+ @parser.parse("<nowiki>foo ''bar''</nowiki>").should == "<p>foo ''bar''</p>\n"
65
+ end
66
+
67
+ it 'should pass <strong> and </strong> tags through without any special meaning' do
68
+ @parser.parse("<nowiki>foo '''bar'''</nowiki>").should == "<p>foo '''bar'''</p>\n"
69
+ end
70
+
71
+ it 'should pass combined <strong>/<em> and </strong>/</em> tags through without any special meaning' do
72
+ @parser.parse("<nowiki>foo '''''bar'''''</nowiki>").should == "<p>foo '''''bar'''''</p>\n"
73
+ end
74
+
75
+ it 'should pass <h1> tags through without any special meaning' do
76
+ @parser.parse("<nowiki>\n= foo</nowiki>").should == "<p>\n= foo</p>\n"
77
+ end
78
+
79
+ it 'should pass </h1> tags through without any special meaning' do
80
+ @parser.parse("<nowiki>foo =\n</nowiki>").should == "<p>foo =\n</p>\n"
81
+ end
82
+
83
+ it 'should pass <h2> tags through without any special meaning' do
84
+ @parser.parse("<nowiki>\n== foo</nowiki>").should == "<p>\n== foo</p>\n"
85
+ end
86
+
87
+ it 'should pass </h2> tags through without any special meaning' do
88
+ @parser.parse("<nowiki>foo ==\n</nowiki>").should == "<p>foo ==\n</p>\n"
89
+ end
90
+
91
+ it 'should pass <h3> tags through without any special meaning' do
92
+ @parser.parse("<nowiki>\n=== foo</nowiki>").should == "<p>\n=== foo</p>\n"
93
+ end
94
+
95
+ it 'should pass </h3> tags through without any special meaning' do
96
+ @parser.parse("<nowiki>foo ===\n</nowiki>").should == "<p>foo ===\n</p>\n"
97
+ end
98
+
99
+ it 'should pass <h4> tags through without any special meaning' do
100
+ @parser.parse("<nowiki>\n==== foo</nowiki>").should == "<p>\n==== foo</p>\n"
101
+ end
102
+
103
+ it 'should pass </h4> tags through without any special meaning' do
104
+ @parser.parse("<nowiki>foo ====\n</nowiki>").should == "<p>foo ====\n</p>\n"
105
+ end
106
+
107
+ it 'should pass <h5> tags through without any special meaning' do
108
+ @parser.parse("<nowiki>\n===== foo</nowiki>").should == "<p>\n===== foo</p>\n"
109
+ end
110
+
111
+ it 'should pass </h5> tags through without any special meaning' do
112
+ @parser.parse("<nowiki>foo =====\n</nowiki>").should == "<p>foo =====\n</p>\n"
113
+ end
114
+
115
+ it 'should pass <h6> tags through without any special meaning' do
116
+ @parser.parse("<nowiki>\n====== foo</nowiki>").should == "<p>\n====== foo</p>\n"
117
+ end
118
+
119
+ it 'should pass </h6> tags through without any special meaning' do
120
+ @parser.parse("<nowiki>foo ======\n</nowiki>").should == "<p>foo ======\n</p>\n"
121
+ end
122
+
123
+ it 'should pass link start tokens through unchanged' do
124
+ @parser.parse('<nowiki>[[</nowiki>').should == "<p>[[</p>\n"
125
+ end
126
+
127
+ it 'should pass link end tokens through unchanged' do
128
+ @parser.parse('<nowiki>]]</nowiki>').should == "<p>]]</p>\n"
129
+ end
130
+
131
+ it 'should pass external link start tokens through unchanged' do
132
+ @parser.parse('<nowiki>[</nowiki>').should == "<p>[</p>\n"
133
+ end
134
+
135
+ it 'should pass external link end tokens through unchanged' do
136
+ @parser.parse('<nowiki>]</nowiki>').should == "<p>]</p>\n"
137
+ end
138
+
139
+ it 'should pass named entities through unchanged' do
140
+ @parser.parse('<nowiki>&euro;</nowiki>').should == "<p>&euro;</p>\n"
141
+ end
142
+
143
+ it 'should pass numeric (decimal) entities through unchanged' do
144
+ @parser.parse('<nowiki>&#8364;</nowiki>').should == "<p>&#8364;</p>\n"
145
+ end
146
+
147
+ it 'should pass numeric (hexadecimal) entities through unchanged' do
148
+ @parser.parse('<nowiki>&#x20ac;</nowiki>').should == "<p>&#x20ac;</p>\n"
149
+ @parser.parse('<nowiki>&#X20Ac;</nowiki>').should == "<p>&#x20ac;</p>\n"
150
+ end
151
+
152
+ it 'should convert non-ASCII characters to numeric entities' do
153
+ @parser.parse('<nowiki>€</nowiki>').should == "<p>&#x20ac;</p>\n"
154
+ end
155
+ end