sanitize 5.0.0 → 5.2.3

Potentially problematic release.

@@ -128,13 +128,15 @@ describe 'Malicious HTML' do
 
   # libxml2 >= 2.9.2 doesn't escape comments within some attributes, in an
   # attempt to preserve server-side includes. This can result in XSS since an
-  # unescaped double quote can allow an attacker to inject a non-whitelisted
+  # unescaped double quote can allow an attacker to inject a non-allowlisted
   # attribute. Sanitize works around this by implementing its own escaping for
   # affected attributes.
   #
   # The relevant libxml2 code is here:
   # <https://github.com/GNOME/libxml2/commit/960f0e275616cadc29671a218d7fb9b69eb35588>
   describe 'unsafe libxml2 server-side includes in attributes' do
+    using_unpatched_libxml2 = Nokogiri::VersionInfo.instance.libxml2_using_system?
+
     tag_configs = [
       {
         tag_name: 'a',
@@ -166,12 +168,21 @@ describe 'Malicious HTML' do
         input = %[<#{tag_name} #{attr_name}='examp<!--" onmouseover=alert(1)>-->le.com'>foo</#{tag_name}>]
 
         it 'should escape unsafe characters in attributes' do
-          output = %[<#{tag_name} #{attr_name}="examp<!--%22%20onmouseover=alert(1)>-->le.com">foo</#{tag_name}>]
-          @s.fragment(input).must_equal(output)
+          skip "behavior should only exist in nokogiri's patched libxml" if using_unpatched_libxml2
+
+          # This uses Nokogumbo's HTML-compliant serializer rather than
+          # libxml2's.
+          @s.fragment(input).
+            must_equal(%[<#{tag_name} #{attr_name}="examp<!--%22%20onmouseover=alert(1)>-->le.com">foo</#{tag_name}>])
 
+          # This uses the not-quite-standards-compliant libxml2 serializer via
+          # Nokogiri, so the output may be a little different as of Nokogiri
+          # 1.10.2 when using Nokogiri's vendored libxml2 due to this patch:
+          # https://github.com/sparklemotion/nokogiri/commit/4852e43cb6039e26d8c51af78621e539cbf46c5d
           fragment = Nokogiri::HTML.fragment(input)
           @s.node!(fragment)
-          fragment.to_html.must_equal(output)
+          fragment.to_html.
+            must_equal(%[<#{tag_name} #{attr_name}="examp&lt;!--%22%20onmouseover=alert(1)&gt;--&gt;le.com">foo</#{tag_name}>])
         end
 
         it 'should round-trip to the same output' do
@@ -184,11 +195,21 @@ describe 'Malicious HTML' do
         input = %[<#{tag_name} #{attr_name}='examp<!--" onmouseover=alert(1)>-->le.com'>foo</#{tag_name}>]
 
         it 'should not escape characters unnecessarily' do
-          @s.fragment(input).must_equal(%[<#{tag_name} #{attr_name}="examp<!--&quot; onmouseover=alert(1)>-->le.com">foo</#{tag_name}>])
+          skip "behavior should only exist in nokogiri's patched libxml" if using_unpatched_libxml2
 
+          # This uses Nokogumbo's HTML-compliant serializer rather than
+          # libxml2's.
+          @s.fragment(input).
+            must_equal(%[<#{tag_name} #{attr_name}="examp<!--&quot; onmouseover=alert(1)>-->le.com">foo</#{tag_name}>])
+
+          # This uses the not-quite-standards-compliant libxml2 serializer via
+          # Nokogiri, so the output may be a little different as of Nokogiri
+          # 1.10.2 when using Nokogiri's vendored libxml2 due to this patch:
+          # https://github.com/sparklemotion/nokogiri/commit/4852e43cb6039e26d8c51af78621e539cbf46c5d
           fragment = Nokogiri::HTML.fragment(input)
           @s.node!(fragment)
-          fragment.to_html.must_equal(%[<#{tag_name} #{attr_name}='examp<!--" onmouseover=alert(1)>-->le.com'>foo</#{tag_name}>])
+          fragment.to_html.
+            must_equal(%[<#{tag_name} #{attr_name}='examp&lt;!--" onmouseover=alert(1)&gt;--&gt;le.com'>foo</#{tag_name}>])
         end
 
         it 'should round-trip to the same output' do
@@ -198,4 +219,17 @@ describe 'Malicious HTML' do
       end
     end
   end
+
+  # https://github.com/rgrove/sanitize/security/advisories/GHSA-p4x4-rw2p-8j8m
+  describe 'foreign content bypass in relaxed config' do
+    it 'prevents a sanitization bypass via carefully crafted foreign content' do
+      %w[iframe noembed noframes noscript plaintext script style xmp].each do |tag_name|
+        @s.fragment(%[<math><#{tag_name}>/*&lt;/#{tag_name}&gt;&lt;img src onerror=alert(1)>*/]).
+          must_equal ''
+
+        @s.fragment(%[<svg><#{tag_name}>/*&lt;/#{tag_name}&gt;&lt;img src onerror=alert(1)>*/]).
+          must_equal ''
+      end
+    end
+  end
 end
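
Note on the new foreign-content test above: a minimal sketch of what it asserts (assuming sanitize >= 5.2.1 and the built-in RELAXED config named by the describe block), showing the crafted markup being dropped entirely:

  require 'sanitize'

  # Foreign content (<math>/<svg>) wrapping a raw-text element; per
  # GHSA-p4x4-rw2p-8j8m this kind of input could previously be used to
  # sneak markup past the sanitizer.
  input = '<math><style>/*&lt;/style&gt;&lt;img src onerror=alert(1)>*/'

  Sanitize.fragment(input, Sanitize::Config::RELAXED) # => ""
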
@@ -55,7 +55,7 @@ describe 'Parser' do
         siblings << env[:node][:id]
       end
 
-      return {:node_whitelist => [env[:node]]}
+      return {:node_allowlist => [env[:node]]}
     })
 
     # All siblings should be traversed, and in the order added.
@@ -37,6 +37,44 @@ describe 'Sanitize' do
     it 'should not choke on frozen documents' do
       @s.document('<!doctype html><html><b>foo</b>'.freeze).must_equal "<html>foo</html>"
     end
+
+    it 'should normalize newlines' do
+      @s.document("a\r\n\n\r\r\r\nz").must_equal "<html>a\n\n\n\n\nz</html>"
+    end
+
+    it 'should strip control characters (except ASCII whitespace)' do
+      sample_control_chars = "\u0001\u0008\u000b\u000e\u001f\u007f\u009f"
+      whitespace = "\t\n\f\u0020"
+      @s.document("a#{sample_control_chars}#{whitespace}z").must_equal "<html>a#{whitespace}z</html>"
+    end
+
+    it 'should strip non-characters' do
+      sample_non_chars = "\ufdd0\ufdef\ufffe\uffff\u{1fffe}\u{1ffff}\u{2fffe}\u{2ffff}\u{3fffe}\u{3ffff}\u{4fffe}\u{4ffff}\u{5fffe}\u{5ffff}\u{6fffe}\u{6ffff}\u{7fffe}\u{7ffff}\u{8fffe}\u{8ffff}\u{9fffe}\u{9ffff}\u{afffe}\u{affff}\u{bfffe}\u{bffff}\u{cfffe}\u{cffff}\u{dfffe}\u{dffff}\u{efffe}\u{effff}\u{ffffe}\u{fffff}\u{10fffe}\u{10ffff}"
+      @s.document("a#{sample_non_chars}z").must_equal "<html>az</html>"
+    end
+
+    describe 'when html body exceeds Nokogumbo::DEFAULT_MAX_TREE_DEPTH' do
+      let(:content) do
+        content = nest_html_content('<b>foo</b>', Nokogumbo::DEFAULT_MAX_TREE_DEPTH)
+        "<html>#{content}</html>"
+      end
+
+      it 'raises an ArgumentError exception' do
+        assert_raises ArgumentError do
+          @s.document(content)
+        end
+      end
+
+      describe 'and :max_tree_depth of -1 is supplied in :parser_options' do
+        before do
+          @s = Sanitize.new(elements: ['html'], parser_options: { max_tree_depth: -1 })
+        end
+
+        it 'does not raise an ArgumentError exception' do
+          @s.document(content).must_equal '<html>foo</html>'
+        end
+      end
+    end
   end
 
   describe '#fragment' do
@@ -61,6 +99,44 @@ describe 'Sanitize' do
     it 'should not choke on frozen fragments' do
       @s.fragment('<b>foo</b>'.freeze).must_equal 'foo'
     end
+
+    it 'should normalize newlines' do
+      @s.fragment("a\r\n\n\r\r\r\nz").must_equal "a\n\n\n\n\nz"
+    end
+
+    it 'should strip control characters (except ASCII whitespace)' do
+      sample_control_chars = "\u0001\u0008\u000b\u000e\u001f\u007f\u009f"
+      whitespace = "\t\n\f\u0020"
+      @s.fragment("a#{sample_control_chars}#{whitespace}z").must_equal "a#{whitespace}z"
+    end
+
+    it 'should strip non-characters' do
+      sample_non_chars = "\ufdd0\ufdef\ufffe\uffff\u{1fffe}\u{1ffff}\u{2fffe}\u{2ffff}\u{3fffe}\u{3ffff}\u{4fffe}\u{4ffff}\u{5fffe}\u{5ffff}\u{6fffe}\u{6ffff}\u{7fffe}\u{7ffff}\u{8fffe}\u{8ffff}\u{9fffe}\u{9ffff}\u{afffe}\u{affff}\u{bfffe}\u{bffff}\u{cfffe}\u{cffff}\u{dfffe}\u{dffff}\u{efffe}\u{effff}\u{ffffe}\u{fffff}\u{10fffe}\u{10ffff}"
+      @s.fragment("a#{sample_non_chars}z").must_equal "az"
+    end
+
+    describe 'when html body exceeds Nokogumbo::DEFAULT_MAX_TREE_DEPTH' do
+      let(:content) do
+        content = nest_html_content('<b>foo</b>', Nokogumbo::DEFAULT_MAX_TREE_DEPTH)
+        "<body>#{content}</body>"
+      end
+
+      it 'raises an ArgumentError exception' do
+        assert_raises ArgumentError do
+          @s.fragment(content)
+        end
+      end
+
+      describe 'and :max_tree_depth of -1 is supplied in :parser_options' do
+        before do
+          @s = Sanitize.new(parser_options: { max_tree_depth: -1 })
+        end
+
+        it 'does not raise an ArgumentError exception' do
+          @s.fragment(content).must_equal 'foo'
+        end
+      end
+    end
   end
 
   describe '#node!' do
@@ -74,7 +150,7 @@ describe 'Sanitize' do
       frag.to_html.must_equal 'Lorem ipsum dolor sit amet '
     end
 
-    describe "when the given node is a document and <html> isn't whitelisted" do
+    describe "when the given node is a document and <html> isn't allowlisted" do
       it 'should raise a Sanitize::Error' do
         doc = Nokogiri::HTML5.parse('foo')
         proc { @s.node!(doc) }.must_raise Sanitize::Error
@@ -85,28 +161,37 @@ describe 'Sanitize' do
 
   describe 'class methods' do
     describe '.document' do
-      it 'should call #document' do
-        Sanitize.stub_instance(:document, proc {|html| html + ' called' }) do
-          Sanitize.document('<html>foo</html>')
-            .must_equal '<html>foo</html> called'
-        end
+      it 'should sanitize an HTML document with the given config' do
+        html = '<!doctype html><html><b>Lo<!-- comment -->rem</b> <a href="pants" title="foo">ipsum</a> <a href="http://foo.com/"><strong>dolor</strong></a> sit<br/>amet <script>alert("hello world");</script></html>'
+        Sanitize.document(html, :elements => ['html'])
+          .must_equal "<html>Lorem ipsum dolor sit amet </html>"
       end
     end
 
     describe '.fragment' do
-      it 'should call #fragment' do
-        Sanitize.stub_instance(:fragment, proc {|html| html + ' called' }) do
-          Sanitize.fragment('<b>foo</b>').must_equal '<b>foo</b> called'
-        end
+      it 'should sanitize an HTML fragment with the given config' do
+        html = '<b>Lo<!-- comment -->rem</b> <a href="pants" title="foo">ipsum</a> <a href="http://foo.com/"><strong>dolor</strong></a> sit<br/>amet <script>alert("hello world");</script>'
+        Sanitize.fragment(html, :elements => ['strong'])
+          .must_equal 'Lorem ipsum <strong>dolor</strong> sit amet '
      end
    end
 
    describe '.node!' do
-      it 'should call #node!' do
-        Sanitize.stub_instance(:node!, proc {|input| input + ' called' }) do
-          Sanitize.node!('not really a node').must_equal 'not really a node called'
-        end
+      it 'should sanitize a Nokogiri::XML::Node with the given config' do
+        doc = Nokogiri::HTML5.parse('<b>Lo<!-- comment -->rem</b> <a href="pants" title="foo">ipsum</a> <a href="http://foo.com/"><strong>dolor</strong></a> sit<br/>amet <script>alert("hello world");</script>')
+        frag = doc.fragment
+
+        doc.xpath('/html/body/node()').each {|node| frag << node }
+
+        Sanitize.node!(frag, :elements => ['strong'])
+        frag.to_html.must_equal 'Lorem ipsum <strong>dolor</strong> sit amet '
      end
    end
  end
+
+  private
+
+  def nest_html_content(html_content, depth)
+    "#{'<span>' * depth}#{html_content}#{'</span>' * depth}"
+  end
 end
@@ -21,7 +21,7 @@ describe 'Sanitize::CSS' do
       @custom.properties(css).must_equal 'background: #fff; '
     end
 
-    it 'should allow whitelisted URL protocols' do
+    it 'should allow allowlisted URL protocols' do
       [
         "background: url(relative.jpg)",
         "background: url('relative.jpg')",
@@ -36,7 +36,7 @@ describe 'Sanitize::CSS' do
       end
     end
 
-    it 'should not allow non-whitelisted URL protocols' do
+    it 'should not allow non-allowlisted URL protocols' do
       [
         "background: url(javascript:alert(0))",
         "background: url(ja\\56 ascript:alert(0))",
@@ -196,26 +196,53 @@ describe 'Sanitize::CSS' do
 
   describe 'class methods' do
     describe '.properties' do
-      it 'should call #properties' do
-        Sanitize::CSS.stub_instance(:properties, proc {|css| css + 'bar' }) do
-          Sanitize::CSS.properties('foo').must_equal 'foobar'
-        end
+      it 'should sanitize CSS properties with the given config' do
+        css = 'background: #fff; width: expression(alert("hi"));'
+
+        Sanitize::CSS.properties(css).must_equal ' '
+        Sanitize::CSS.properties(css, Sanitize::Config::RELAXED[:css]).must_equal 'background: #fff; '
+        Sanitize::CSS.properties(css, :properties => %w[background color width]).must_equal 'background: #fff; '
      end
    end
 
    describe '.stylesheet' do
-      it 'should call #stylesheet' do
-        Sanitize::CSS.stub_instance(:stylesheet, proc {|css| css + 'bar' }) do
-          Sanitize::CSS.stylesheet('foo').must_equal 'foobar'
-        end
+      it 'should sanitize a CSS stylesheet with the given config' do
+        css = %[
+          /* Yay CSS! */
+          .foo { color: #fff; }
+          #bar { background: url(yay.jpg); }
+
+          @media screen (max-width:480px) {
+            .foo { width: 400px; }
+            #bar:not(.baz) { height: 100px; }
+          }
+        ].strip
+
+        Sanitize::CSS.stylesheet(css).strip.must_equal %[
+          .foo { }
+          #bar { }
+        ].strip
+
+        Sanitize::CSS.stylesheet(css, Sanitize::Config::RELAXED[:css]).must_equal css
+
+        Sanitize::CSS.stylesheet(css, :properties => %w[background color width]).strip.must_equal %[
+          .foo { color: #fff; }
+          #bar { }
+        ].strip
      end
    end
 
    describe '.tree!' do
-      it 'should call #tree!' do
-        Sanitize::CSS.stub_instance(:tree!, proc {|tree| tree + 'bar' }) do
-          Sanitize::CSS.tree!('foo').must_equal 'foobar'
-        end
+      it 'should sanitize a Crass CSS parse tree with the given config' do
+        tree = Crass.parse(String.new("@import url(foo.css);\n") <<
+          ".foo { background: #fff; font: 16pt 'Comic Sans MS'; }\n" <<
+          "#bar { top: 125px; background: green; }")
+
+        Sanitize::CSS.tree!(tree, :properties => %w[background color width]).must_be_same_as tree
+
+        Crass::Parser.stringify(tree).must_equal String.new("\n") <<
+          ".foo { background: #fff; }\n" <<
+          "#bar { background: green; }"
      end
    end
  end
@@ -280,7 +307,7 @@ describe 'Sanitize::CSS' do
   end
 
   describe ":at_rules" do
-    it "should remove blockless at-rules that aren't whitelisted" do
+    it "should remove blockless at-rules that aren't allowlisted" do
       css = %[
         @charset 'utf-8';
         @import url('foo.css');
@@ -292,7 +319,7 @@ describe 'Sanitize::CSS' do
       ].strip
     end
 
-    describe "when blockless at-rules are whitelisted" do
+    describe "when blockless at-rules are allowlisted" do
      before do
        @scss = Sanitize::CSS.new(Sanitize::Config.merge(Sanitize::Config::RELAXED[:css], {
          :at_rules => ['charset', 'import']
@@ -12,11 +12,13 @@ describe 'Transformers' do
         return unless env[:node].element?
 
         env[:config][:foo].must_equal :bar
-        env[:is_whitelisted].must_equal false
+        env[:is_allowlisted].must_equal false
+        env[:is_whitelisted].must_equal env[:is_allowlisted]
         env[:node].must_be_kind_of Nokogiri::XML::Node
         env[:node_name].must_equal 'span'
-        env[:node_whitelist].must_be_kind_of Set
-        env[:node_whitelist].must_be_empty
+        env[:node_allowlist].must_be_kind_of Set
+        env[:node_allowlist].must_be_empty
+        env[:node_whitelist].must_equal env[:node_allowlist]
      }
    )
  end
@@ -43,34 +45,38 @@ describe 'Transformers' do
     nodes.must_equal %w[div span strong b p]
   end
 
-  it 'should whitelist nodes in the node whitelist' do
+  it 'should allowlist nodes in the node allowlist' do
     Sanitize.fragment('<div class="foo">foo</div><span>bar</span>',
       :transformers => [
         proc {|env|
-          {:node_whitelist => [env[:node]]} if env[:node_name] == 'div'
+          {:node_allowlist => [env[:node]]} if env[:node_name] == 'div'
        },
 
        proc {|env|
-          env[:is_whitelisted].must_equal false unless env[:node_name] == 'div'
-          env[:is_whitelisted].must_equal true if env[:node_name] == 'div'
-          env[:node_whitelist].must_include env[:node] if env[:node_name] == 'div'
+          env[:is_allowlisted].must_equal false unless env[:node_name] == 'div'
+          env[:is_allowlisted].must_equal true if env[:node_name] == 'div'
+          env[:node_allowlist].must_include env[:node] if env[:node_name] == 'div'
+          env[:is_whitelisted].must_equal env[:is_allowlisted]
+          env[:node_whitelist].must_equal env[:node_allowlist]
        }
      ]
    ).must_equal '<div class="foo">foo</div>bar'
  end
 
-  it 'should clear the node whitelist after each fragment' do
+  it 'should clear the node allowlist after each fragment' do
    called = false
 
    Sanitize.fragment('<div>foo</div>',
-      :transformers => proc {|env| {:node_whitelist => [env[:node]]}}
+      :transformers => proc {|env| {:node_allowlist => [env[:node]]}}
    )
 
    Sanitize.fragment('<div>foo</div>',
      :transformers => proc {|env|
        called = true
-        env[:is_whitelisted].must_equal false
-        env[:node_whitelist].must_be_empty
+        env[:is_allowlisted].must_equal false
+        env[:is_whitelisted].must_equal env[:is_allowlisted]
+        env[:node_allowlist].must_be_empty
+        env[:node_whitelist].must_equal env[:node_allowlist]
      }
    )
 
@@ -83,10 +89,10 @@ describe 'Transformers' do
       .must_equal(' foo ')
   end
 
-  describe 'Image whitelist transformer' do
+  describe 'Image allowlist transformer' do
     require 'uri'
 
-    image_whitelist_transformer = lambda do |env|
+    image_allowlist_transformer = lambda do |env|
      # Ignore everything except <img> elements.
      return unless env[:node_name] == 'img'
 
@@ -103,7 +109,7 @@ describe 'Transformers' do
 
     before do
       @s = Sanitize.new(Sanitize::Config.merge(Sanitize::Config::RELAXED,
-        :transformers => image_whitelist_transformer))
+        :transformers => image_allowlist_transformer))
     end
 
     it 'should allow images with relative URLs' do
@@ -142,8 +148,8 @@ describe 'Transformers' do
     node = env[:node]
     node_name = env[:node_name]
 
-    # Don't continue if this node is already whitelisted or is not an element.
-    return if env[:is_whitelisted] || !node.element?
+    # Don't continue if this node is already allowlisted or is not an element.
+    return if env[:is_allowlisted] || !node.element?
 
     # Don't continue unless the node is an iframe.
     return unless node_name == 'iframe'
@@ -164,8 +170,8 @@ describe 'Transformers' do
 
     # Now that we're sure that this is a valid YouTube embed and that there are
     # no unwanted elements or attributes hidden inside it, we can tell Sanitize
-    # to whitelist the current node.
-    {:node_whitelist => [node]}
+    # to allowlist the current node.
+    {:node_allowlist => [node]}
   end
 
   it 'should allow HTTP YouTube video embeds' do
metadata CHANGED
@@ -1,14 +1,14 @@
 --- !ruby/object:Gem::Specification
 name: sanitize
 version: !ruby/object:Gem::Version
-  version: 5.0.0
+  version: 5.2.3
 platform: ruby
 authors:
 - Ryan Grove
-autorequire:
+autorequire:
 bindir: bin
 cert_chain: []
-date: 2018-10-15 00:00:00.000000000 Z
+date: 2021-01-11 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: crass
@@ -80,9 +80,9 @@ dependencies:
     - - "~>"
       - !ruby/object:Gem::Version
         version: 12.3.1
-description: Sanitize is a whitelist-based HTML and CSS sanitizer. Given a list of
-  acceptable elements, attributes, and CSS properties, Sanitize will remove all unacceptable
-  HTML and/or CSS from a string.
+description: Sanitize is an allowlist-based HTML and CSS sanitizer. It removes all
+  HTML and/or CSS from a string except the elements, attributes, and properties you
+  choose to allow.
 email: ryan@wonko.com
 executables: []
 extensions: []
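
The rewritten description above captures the allowlist model; a minimal usage sketch of that model (the config values here are illustrative, not taken from this diff):

  require 'sanitize'

  # Only the listed elements, attributes, and URL protocols survive;
  # everything else is stripped from the fragment.
  Sanitize.fragment(
    '<b>bold</b> <a href="http://example.com/" onclick="evil()">link</a>',
    :elements   => ['a', 'b'],
    :attributes => {'a' => ['href']},
    :protocols  => {'a' => {'href' => ['http', 'https']}}
  )
  # => '<b>bold</b> <a href="http://example.com/">link</a>'
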
@@ -116,12 +116,11 @@ files:
 - test/test_sanitize.rb
 - test/test_sanitize_css.rb
 - test/test_transformers.rb
-- test/test_unicode.rb
 homepage: https://github.com/rgrove/sanitize/
 licenses:
 - MIT
 metadata: {}
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -136,9 +135,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: 1.2.0
 requirements: []
-rubyforge_project:
-rubygems_version: 2.7.6
-signing_key:
+rubygems_version: 3.2.3
+signing_key:
 specification_version: 4
-summary: Whitelist-based HTML and CSS sanitizer.
+summary: Allowlist-based HTML and CSS sanitizer.
 test_files: []