sax-machine 1.0.3 → 1.1.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 70bbbc0d3143b8a3470722b16bd6ae18c7f20823
- data.tar.gz: 2175ebaaa36b8d325d5bd2a67296edb8ad6c5b9b
+ metadata.gz: b68b4364c0bbc26a2743e8d9d0e1675db01353f0
+ data.tar.gz: dd0c941cf149d2ffee930fff8ba2a5e6b0303d72
  SHA512:
- metadata.gz: de8580d61bbfc9bba11ea486c2eac3796a74b882c61d9fade5c14073b38f684b80c3d48489e6d9f520e0d77d3c8af34c0538fa5b125de33e2708c6b93a41b8de
- data.tar.gz: 6f642b324f6f39c6662e5112fced199d8bd976d968fd228a530e497a0fd695df5232e25c64a34151a5399b00154de0f97878026c34fe8320f89db8728370d3e2
+ metadata.gz: e4701a517cfda4c199a8b47f0e9f0aad288aab008bacb913d371572a3303f84ecfed4cb474affac4fbbc8fd38377591a7b59ecc2ecb49787c0355271aaa56335
+ data.tar.gz: a62ba38de0e90ac351d057b093dea3648397d995d447215053fb9e9905f7374d5ac19bd2184b8938c377d49cad9d27e6d4ddbe38037d7b3b0c7e9fde36aae8ae
data/.travis.yml CHANGED
@@ -21,3 +21,4 @@ env:
  matrix:
  - HANDLER="nokogiri"
  - HANDLER="ox"
+ - HANDLER="oga"
data/Gemfile CHANGED
@@ -11,4 +11,5 @@ group :development, :test do
  gem 'activerecord', '~> 4.1'
  gem 'nokogiri', '~> 1.6'
  gem 'ox', '>= 2.1.2', platforms: [:mri, :rbx]
+ gem 'oga', '>= 0.2.0'
  end
data/HISTORY.md CHANGED
@@ -1,5 +1,9 @@
  # HEAD

+ # 1.1.0
+
+ * Option to use Oga as a SAX handler
+
  # 1.0.3

  * Remove missed `nokogiri` reference [[#54](https://github.com/pauldix/sax-machine/pull/54)]
data/README.md CHANGED
@@ -10,7 +10,7 @@

  ## Description

- A declarative SAX parsing library backed by Nokogiri or Ox.
+ A declarative SAX parsing library backed by Nokogiri, Ox or Oga.

  ## Installation

@@ -28,7 +28,7 @@ $ bundle

  ## Usage

- SAX Machine can use either `nokogiri` or `ox` as XML SAX handler.
+ SAX Machine can use either `nokogiri`, `ox` or `oga` as XML SAX handler.

  To use **Nokogiri** add this line to your Gemfile:

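(The snippet itself falls outside this hunk; judging from the development Gemfile change above, it is presumably:)

```ruby
gem 'nokogiri', '~> 1.6'
```
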
@@ -42,6 +42,12 @@ To use **Ox** add this line to your Gemfile:
  gem 'ox', '>= 2.1.2'
  ```

+ To use **Oga** add this line to your Gemfile:
+
+ ```ruby
+ gem 'oga', '>= 0.2.0'
+ ```
+
  You can also specify which handler to use manually, like this:

  ```ruby
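# (The body of this snippet is truncated at the hunk boundary; as a
# hedged sketch, using the SAXMachine.handler= setter that appears in
# the lib/sax-machine.rb change below, manual selection looks like:)
SAXMachine.handler = :oga
```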
@@ -152,8 +158,8 @@ end
  ```

  If more than one of these elements exists in the source, the value from the *last one* is used. The order of
- the `element` declarations in the code is unimportant. The order they are encountered while parsing the
- document determines the value assigned to the alias.
+ the `element` declarations in the code is unimportant. The order they are encountered while parsing the
+ document determines the value assigned to the alias.

  If an element is defined in the source but is blank (e.g., `<pubDate></pubDate>`), it is ignored, and non-empty one is picked.

data/lib/sax-machine.rb CHANGED
@@ -14,8 +14,16 @@ module SAXMachine
    end
  end

- begin
-   SAXMachine.handler = :ox
- rescue LoadError
+ # Try handlers
+ [:ox, :oga].each do |handler|
+   begin
+     SAXMachine.handler = handler
+     break
+   rescue LoadError
+   end
+ end
+
+ # Still no handler, use Nokogiri
+ if !SAXMachine.handler
    SAXMachine.handler = :nokogiri
  end
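
With this change the first loadable handler in `[:ox, :oga]` wins, and Nokogiri stays the fallback. A minimal sketch of the resulting behavior (an assumption read off the code above, not separately documented):

```ruby
require 'sax-machine'

# After the gem loads, SAXMachine.handler is :ox if Ox is installed,
# otherwise :oga if Oga is installed, otherwise :nokogiri.
puts SAXMachine.handler
```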
data/lib/sax-machine/handlers/sax_oga_handler.rb ADDED
@@ -0,0 +1,37 @@
+ require 'sax-machine/handlers/sax_abstract_handler'
+ require 'oga'
+
+ module SAXMachine
+   class SAXOgaHandler
+     include SAXAbstractHandler
+
+     def initialize(*args)
+       _initialize(*args)
+     end
+
+     def sax_parse(xml_text)
+       Oga.sax_parse_xml(self, xml_text)
+     end
+
+     def on_element(namespace, name, attrs)
+       _start_element(node_name(namespace, name), attrs.map { |a| [a.name, a.value] })
+     end
+
+     def after_element(namespace, name)
+       _end_element(node_name(namespace, name))
+     end
+
+     def on_error(*args)
+       _error(args.join(" "))
+     end
+
+     alias_method :on_text, :_characters
+     alias_method :on_cdata, :_characters
+
+     private
+
+     def node_name(namespace, name)
+       namespace ? [namespace, name].join(":") : name
+     end
+   end
+ end
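
As a usage sketch (the `Entry` class and its XML are hypothetical; `include SAXMachine`, `element`, and `parse` are the gem's documented DSL), the new handler slots in like the existing ones:

```ruby
require 'sax-machine'

# Hypothetical document class for illustration.
class Entry
  include SAXMachine
  element :title
end

# Select the new handler explicitly, then parse as usual.
SAXMachine.handler = :oga
entry = Entry.parse("<entry><title>Hello</title></entry>")
entry.title # => "Hello"
```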
data/lib/sax-machine/version.rb CHANGED
@@ -1,3 +1,3 @@
  module SAXMachine
-   VERSION = "1.0.3"
+   VERSION = "1.1.0"
  end
data/sax-machine.gemspec CHANGED
@@ -8,7 +8,7 @@ Gem::Specification.new do |s|
  s.authors = ["Paul Dix", "Julien Kirch", "Ezekiel Templin", "Dmitry Krasnoukhov"]
  s.email = %q{paul@pauldix.net}
  s.homepage = %q{http://github.com/pauldix/sax-machine}
- s.summary = %q{Declarative SAX Parsing with Nokogiri or Ox}
+ s.summary = %q{Declarative SAX Parsing with Nokogiri, Ox or Oga}
  s.license = %q{MIT}

  s.files = `git ls-files`.split("\n")
data/spec/fixtures/atom-content.html CHANGED
@@ -12,4 +12,4 @@
  <p>I've also read people posting about this error when using the database session store. I can only assume that it's because they were trying to store either way too much data in their session (too much for a regular text field) or they were storing float values or some other data type that would cause this to pop up. Hopefully this helps.</p></div>
  <div class="feedflare">
  <a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=rWfWO"><img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=rWfWO" border="0"></img></a> <a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=RaCqo"><img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=RaCqo" border="0"></img></a> <a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=1CBLo"><img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=1CBLo" border="0"></img></a>
- </div><img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/383536354" height="1" width="1"/>
+ </div><img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/383536354" height="1" width="1"/>
data/spec/fixtures/atom.xml CHANGED
@@ -1,165 +1,165 @@
- <?xml version="1.0" encoding="UTF-8"?>
- <?xml-stylesheet href="http://feeds.feedburner.com/~d/styles/atom10full.xsl" type="text/xsl" media="screen"?><?xml-stylesheet href="http://feeds.feedburner.com/~d/styles/itemcontent.css" type="text/css" media="screen"?><feed xmlns="http://www.w3.org/2005/Atom" xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:thr="http://purl.org/syndication/thread/1.0" xmlns:feedburner="http://rssnamespace.org/feedburner/ext/1.0">
- <title>Paul Dix Explains Nothing</title>
-
- <link rel="alternate" type="text/html" href="http://www.pauldix.net/" />
- <id>tag:typepad.com,2003:weblog-108605</id>
- <updated>2008-09-04T16:07:19-04:00</updated>
- <subtitle>Entrepreneurship, programming, software development, politics, NYC, and random thoughts.</subtitle>
- <generator uri="http://www.typepad.com/">TypePad</generator>
- <link rel="self" href="http://feeds.feedburner.com/PaulDixExplainsNothing" type="application/atom+xml" /><entry>
- <title>Marshal data too short error with ActiveRecord</title>
- <link rel="alternate" type="text/html" href="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~3/383536354/marshal-data-to.html?param1=1&amp;param2=2" />
- <link rel="replies" type="text/html" href="http://www.pauldix.net/2008/09/marshal-data-to.html" thr:count="2" thr:updated="2008-11-17T14:40:06-05:00" />
- <id>tag:typepad.com,2003:post-55147740</id>
- <published>2008-09-04T16:07:19-04:00</published>
- <updated>2008-11-17T14:40:06-05:00</updated>
- <summary>In my previous post about the speed of serializing data, I concluded that Marshal was the quickest way to get things done. So I set about using Marshal to store some data in an ActiveRecord object. Things worked great at...</summary>
- <author>
- <name>Paul Dix</name>
- </author>
- <category scheme="http://www.sixapart.com/ns/types#category" term="Tahiti" />
-
-
- <content type="html" xml:lang="en-US" xml:base="http://www.pauldix.net/">
- &lt;div xmlns="http://www.w3.org/1999/xhtml"&gt;&lt;p&gt;In my previous &lt;a href="http://www.pauldix.net/2008/08/serializing-dat.html"&gt;post about the speed of serializing data&lt;/a&gt;, I concluded that Marshal was the quickest way to get things done. So I set about using Marshal to store some data in an ActiveRecord object. Things worked great at first, but on some test data I got this error: marshal data too short. Luckily, &lt;a href="http://www.brynary.com/"&gt;Bryan Helmkamp&lt;/a&gt; had helpfully pointed out that there were sometimes problems with storing marshaled data in the database. He said it was best to base64 encode the marshal dump before storing.&lt;/p&gt;
-
- &lt;p&gt;I was curious why it was working on some things and not others. It turns out that some types of data being marshaled were causing the error to pop up. Here's the test data I used in my specs:&lt;/p&gt;
- &lt;pre&gt;{ :foo =&amp;gt; 3, :bar =&amp;gt; 2 } # hash with symbols for keys and integer values&lt;br /&gt;[3, 2.1, 4, 8]&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; # array with integer and float values&lt;/pre&gt;
- &lt;p&gt;Everything worked when I switched the array values to all integers so it seems that floats were causing the problem. However, in the interest of keeping everything working regardless of data types, I base64 encoded before going into the database and decoded on the way out.&lt;/p&gt;
-
- &lt;p&gt;I also ran the benchmarks again to determine what impact this would have on speed. Here are the results for 100 iterations on a 10k element array and a 10k element hash with and without base64 encode/decode:&lt;/p&gt;
- &lt;pre&gt;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; user&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; system&amp;nbsp; &amp;nbsp;&amp;nbsp; total&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; real&lt;br /&gt;array marshal&amp;nbsp; 0.200000&amp;nbsp; &amp;nbsp;0.010000&amp;nbsp; &amp;nbsp;0.210000 (&amp;nbsp; 0.214018) (without Base64)&lt;br /&gt;array marshal&amp;nbsp; 0.220000&amp;nbsp; &amp;nbsp;0.010000&amp;nbsp; &amp;nbsp;0.230000 (&amp;nbsp; 0.250260)&lt;br /&gt;&lt;br /&gt;hash marshal&amp;nbsp; &amp;nbsp;1.830000&amp;nbsp; &amp;nbsp;0.040000&amp;nbsp; &amp;nbsp;1.870000 (&amp;nbsp; 1.892874) (without Base64)&lt;br /&gt;hash marshal&amp;nbsp; &amp;nbsp;2.040000&amp;nbsp; &amp;nbsp;0.100000&amp;nbsp; &amp;nbsp;2.140000 (&amp;nbsp; 2.170405)&lt;/pre&gt;
- &lt;p&gt;As you can see the difference in speed is pretty negligible. I assume that the error has to do with AR cleaning the stuff that gets inserted into the database, but I'm not really sure. In the end it's just easier to use Base64.encode64 when serializing data into a text field in ActiveRecord using Marshal.&lt;/p&gt;
-
- &lt;p&gt;I've also read people posting about this error when using the database session store. I can only assume that it's because they were trying to store either way too much data in their session (too much for a regular text field) or they were storing float values or some other data type that would cause this to pop up. Hopefully this helps.&lt;/p&gt;&lt;/div&gt;
- &lt;div class="feedflare"&gt;
- &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=rWfWO"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=rWfWO" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=RaCqo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=RaCqo" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=1CBLo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=1CBLo" border="0"&gt;&lt;/img&gt;&lt;/a&gt;
- &lt;/div&gt;&lt;img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/383536354" height="1" width="1"/&gt;</content>
-
-
- <feedburner:origLink>http://www.pauldix.net/2008/09/marshal-data-to.html?param1=1&amp;param2=2</feedburner:origLink></entry>
- <entry>
- <title>Serializing data speed comparison: Marshal vs. JSON vs. Eval vs. YAML</title>
- <link rel="alternate" type="text/html" href="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~3/376401099/serializing-dat.html" />
- <link rel="replies" type="text/html" href="http://www.pauldix.net/2008/08/serializing-dat.html" thr:count="5" thr:updated="2008-10-14T01:26:31-04:00" />
- <id>tag:typepad.com,2003:post-54766774</id>
- <published>2008-08-27T14:31:41-04:00</published>
- <updated>2008-10-14T01:26:31-04:00</updated>
- <summary>Last night at the NYC Ruby hackfest, I got into a discussion about serializing data. Brian mentioned the Marshal library to me, which for some reason had completely escaped my attention until last night. He said it was wicked fast...</summary>
- <author>
- <name>Paul Dix</name>
- </author>
- <category scheme="http://www.sixapart.com/ns/types#category" term="Tahiti" />
-
-
- <content type="html" xml:lang="en-US" xml:base="http://www.pauldix.net/">
- &lt;div xmlns="http://www.w3.org/1999/xhtml"&gt;&lt;p&gt;Last night at the &lt;a href="http://nycruby.org"&gt;NYC Ruby hackfest&lt;/a&gt;, I got into a discussion about serializing data. Brian mentioned the Marshal library to me, which for some reason had completely escaped my attention until last night. He said it was wicked fast so we decided to run a quick benchmark comparison.&lt;/p&gt;
- &lt;p&gt;The test data is designed to roughly approximate what my &lt;a href="http://www.pauldix.net/2008/08/storing-many-cl.html"&gt;stored classifier data&lt;/a&gt; will look like. The different methods we decided to benchmark were Marshal, json, eval, and yaml. With each one we took the in-memory object and serialized it and then read it back in. With eval we had to convert the object to ruby code to serialize it then run eval against that. Here are the results for 100 iterations on a 10k element array and a hash with 10k key/value pairs run on my Macbook Pro 2.4 GHz Core 2 Duo:&lt;/p&gt;
- &lt;pre&gt;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; user&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;system&amp;nbsp; &amp;nbsp;&amp;nbsp; total&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; real&lt;br /&gt;array marshal&amp;nbsp; 0.210000&amp;nbsp; &amp;nbsp;0.010000&amp;nbsp; &amp;nbsp;0.220000 (&amp;nbsp; 0.220701)&lt;br /&gt;array json&amp;nbsp; &amp;nbsp;&amp;nbsp; 2.180000&amp;nbsp; &amp;nbsp;0.050000&amp;nbsp; &amp;nbsp;2.230000 (&amp;nbsp; 2.288489)&lt;br /&gt;array eval&amp;nbsp; &amp;nbsp;&amp;nbsp; 2.090000&amp;nbsp; &amp;nbsp;0.060000&amp;nbsp; &amp;nbsp;2.150000 (&amp;nbsp; 2.240443)&lt;br /&gt;array yaml&amp;nbsp; &amp;nbsp; 26.650000&amp;nbsp; &amp;nbsp;0.350000&amp;nbsp; 27.000000 ( 27.810609)&lt;br /&gt;&lt;br /&gt;hash marshal&amp;nbsp; &amp;nbsp;2.000000&amp;nbsp; &amp;nbsp;0.050000&amp;nbsp; &amp;nbsp;2.050000 (&amp;nbsp; 2.114950)&lt;br /&gt;hash json&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;3.700000&amp;nbsp; &amp;nbsp;0.060000&amp;nbsp; &amp;nbsp;3.760000 (&amp;nbsp; 3.881716)&lt;br /&gt;hash eval&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;5.370000&amp;nbsp; &amp;nbsp;0.140000&amp;nbsp; &amp;nbsp;5.510000 (&amp;nbsp; 6.117947)&lt;br /&gt;hash yaml&amp;nbsp; &amp;nbsp;&amp;nbsp; 68.220000&amp;nbsp; &amp;nbsp;0.870000&amp;nbsp; 69.090000 ( 72.370784)&lt;/pre&gt;
- &lt;p&gt;The order in which I tested them is pretty much the order in which they ranked for speed. Marshal was amazingly fast. JSON and eval came out roughly equal on the array with eval trailing quite a bit for the hash. Yaml was just slow as all hell. A note on the json: I used the 1.1.3 library which uses c to parse. I assume it would be quite a bit slower if I used the pure ruby implementation. Here's &lt;a href="http://gist.github.com/7549"&gt;a gist of the benchmark code&lt;/a&gt; if you're curious and want to run it yourself.&lt;/p&gt;
-
-
-
- &lt;p&gt;If you're serializing user data, be super careful about using eval. It's probably best to avoid it completely. Finally, just for fun I took yaml out (it was too slow) and ran the benchmark again with 1k iterations:&lt;/p&gt;
- &lt;pre&gt;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;&amp;nbsp; user&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp;system&amp;nbsp; &amp;nbsp;&amp;nbsp; total&amp;nbsp; &amp;nbsp;&amp;nbsp; &amp;nbsp; real&lt;br /&gt;array marshal&amp;nbsp; 2.080000&amp;nbsp; &amp;nbsp;0.110000&amp;nbsp; &amp;nbsp;2.190000 (&amp;nbsp; 2.242235)&lt;br /&gt;array json&amp;nbsp; &amp;nbsp; 21.860000&amp;nbsp; &amp;nbsp;0.500000&amp;nbsp; 22.360000 ( 23.052403)&lt;br /&gt;array eval&amp;nbsp; &amp;nbsp; 20.730000&amp;nbsp; &amp;nbsp;0.570000&amp;nbsp; 21.300000 ( 21.992454)&lt;br /&gt;&lt;br /&gt;hash marshal&amp;nbsp; 19.510000&amp;nbsp; &amp;nbsp;0.500000&amp;nbsp; 20.010000 ( 20.794111)&lt;br /&gt;hash json&amp;nbsp; &amp;nbsp;&amp;nbsp; 39.770000&amp;nbsp; &amp;nbsp;0.670000&amp;nbsp; 40.440000 ( 41.689297)&lt;br /&gt;hash eval&amp;nbsp; &amp;nbsp;&amp;nbsp; 51.410000&amp;nbsp; &amp;nbsp;1.290000&amp;nbsp; 52.700000 ( 54.155711)&lt;/pre&gt;&lt;/div&gt;
- &lt;div class="feedflare"&gt;
- &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=zombO"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=zombO" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=T3kqo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=T3kqo" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=aI6Oo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=aI6Oo" border="0"&gt;&lt;/img&gt;&lt;/a&gt;
- &lt;/div&gt;&lt;img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/376401099" height="1" width="1"/&gt;</content>
-
-
- <feedburner:origLink>http://www.pauldix.net/2008/08/serializing-dat.html</feedburner:origLink></entry>
- <entry>
- <title>Gotcha with cache_fu and permalinks</title>
- <link rel="alternate" type="text/html" href="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~3/369250462/gotcha-with-cac.html" />
- <link rel="replies" type="text/html" href="http://www.pauldix.net/2008/08/gotcha-with-cac.html" thr:count="2" thr:updated="2008-11-20T13:58:38-05:00" />
- <id>tag:typepad.com,2003:post-54411628</id>
- <published>2008-08-19T14:26:24-04:00</published>
- <updated>2008-11-20T13:58:38-05:00</updated>
- <summary>This is an issue I had recently in a project with cache_fu. Models that I found and cached based on permalinks weren't expiring the cache correctly when getting updated. Here's an example scenario. Say you have a blog with posts....</summary>
- <author>
- <name>Paul Dix</name>
- </author>
- <category scheme="http://www.sixapart.com/ns/types#category" term="Ruby on Rails" />
-
-
- <content type="html" xml:lang="en-US" xml:base="http://www.pauldix.net/">
- &lt;div xmlns="http://www.w3.org/1999/xhtml"&gt;&lt;p&gt;This is an issue I had recently in a project with &lt;a href="http://errtheblog.com/posts/57-kickin-ass-w-cachefu"&gt;cache_fu&lt;/a&gt;. Models that I found and cached based on permalinks weren't expiring the cache correctly when getting updated. Here's an example scenario.&lt;/p&gt;
-
- &lt;p&gt;Say you have a blog with posts. However, instead of using a url like http://paulscoolblog.com/posts/23 you want something that's more search engine friendly and readable for the user. So you use a permalink (maybe using the &lt;a href="http://github.com/github/permalink_fu/tree/master"&gt;permalink_fu plugin&lt;/a&gt;) that's auto-generated based on the title of the post. This post would have a url that looks something like http://paulscoolblog.com/posts/gotcha-with-cache_fu-and-permalinks.&lt;/p&gt;
-
- &lt;p&gt;In your controller's show method you'd probably find the post like this:&lt;/p&gt;
- &lt;pre&gt;@post = Post.find_by_permalink(params[:permalink])&lt;/pre&gt;
- &lt;p&gt;However, you'd want to do the caching thing so you'd actually do this:&lt;/p&gt;
- &lt;pre&gt;@post = Post.cached(:find_by_permalink, :with =&amp;gt; params[:permalink])&lt;/pre&gt;
- &lt;p&gt;The problem that I ran into, which is probably obvious to anyone familiar with cache_fu, was that when updating the post, it wouldn't expire the cache. That part of the post model looks like this:&lt;/p&gt;
- &lt;pre&gt;class Post &amp;lt; ActiveRecord::Base&lt;br /&gt;&amp;nbsp; before_save :expire_cache&lt;br /&gt;&amp;nbsp; ...&lt;br /&gt;end&lt;/pre&gt;
- &lt;p&gt;Do you see it? The issue is that when expire_cache gets called on the object, it expires the key &lt;strong&gt;Post:23&lt;/strong&gt; from the cache (assuming 23 was the id of the post). However, when the post was cached using the cached(:find_by_permalink ...) method, it put the post object into the cache with a key of &lt;strong&gt;Post:find_by_permalink:gotcha-with-cache_fu-and-permalinks&lt;/strong&gt;.&lt;/p&gt;
- &lt;p&gt;Luckily, it's a fairly simple fix. If you have a model that is commonly accessed through permalinks, just write your own cache expiry method that looks for both keys and expires them.&lt;/p&gt;&lt;/div&gt;
- &lt;div class="feedflare"&gt;
- &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=V1ojO"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=V1ojO" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=eu6Zo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=eu6Zo" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=ddUho"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=ddUho" border="0"&gt;&lt;/img&gt;&lt;/a&gt;
- &lt;/div&gt;&lt;img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/369250462" height="1" width="1"/&gt;</content>
-
-
- <feedburner:origLink>http://www.pauldix.net/2008/08/gotcha-with-cac.html</feedburner:origLink></entry>
- <entry>
- <title>Non-greedy mode in regex</title>
- <link rel="alternate" type="text/html" href="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~3/365673983/non-greedy-mode.html" />
- <link rel="replies" type="text/html" href="http://www.pauldix.net/2008/08/non-greedy-mode.html" thr:count="0" />
- <id>tag:typepad.com,2003:post-54227244</id>
- <published>2008-08-15T09:32:11-04:00</published>
- <updated>2008-08-27T09:33:15-04:00</updated>
- <summary>I was writing a regular expression yesterday and this popped up. It's just a quick note about greedy vs. non-greedy mode in regular expression matching. Say I have a regular expression that looks something like this: /(\[.*\])/ In English that...</summary>
- <author>
- <name>Paul Dix</name>
- </author>
- <category scheme="http://www.sixapart.com/ns/types#category" term="Ruby" />
-
-
- <content type="html" xml:lang="en-US" xml:base="http://www.pauldix.net/">&lt;p&gt;I was writing a regular expression yesterday and this popped up. It's just a quick note about greedy vs. non-greedy mode in regular expression matching. Say I have a regular expression that looks something like this:&lt;/p&gt;&#xD;
- &lt;pre&gt;/(\[.*\])/&lt;/pre&gt;&#xD;
- &lt;p&gt;In English that says something roughly like: find an opening bracket [ with 0 or more of any character followed by a closing bracket. The backslashes are to escape the brackets and the parenthesis specify grouping so we can later access that matched text.&lt;/p&gt;&#xD;
- &#xD;
- &lt;p&gt;The greedy mode comes up with the 0 or more characters part of the match (the .* part of the expression). The default mode of greedy means that the parser will gobble up as many characters as it can and match the very last closing bracket. So if you have text like this:&lt;/p&gt;&#xD;
- &#xD;
- &lt;pre&gt;a = [:foo, :bar]&lt;br&gt;b = [:hello, :world]&lt;/pre&gt;&#xD;
- &lt;p&gt;The resulting grouped match would be this:&lt;/p&gt;&#xD;
- &lt;pre&gt;[:foo, :bar]&lt;br&gt;b = [:hello, :world]&lt;/pre&gt;&#xD;
- &lt;p&gt;If you just wanted the [:foo, :bar] part, the solution is to parse in non-greedy mode. This means that it will match on the first closing bracket it sees. The modified regular expression looks like this:&lt;/p&gt;&#xD;
- &lt;pre&gt;/(\[.*?\])/&lt;/pre&gt;&#xD;
- &lt;p&gt;I love the regular expression engine in Ruby. It's one of the best things it ripped off from Perl. The one thing I don't like is the magic global variable that it places matched groups into. You can access that first match through the $1 variable. If you're unfamiliar with regular expressions, a good place to start is the &lt;a href="http://www.amazon.com/Programming-Perl-3rd-Larry-Wall/dp/0596000278/ref=pd_bbs_sr_1?ie=UTF8&amp;amp;s=books&amp;amp;qid=1218806755&amp;amp;sr=8-1"&gt;Camel book&lt;/a&gt;. It's about Perl, but the way they work is very similar. I actually haven't seen good coverage of regexes in a Ruby book.&lt;/p&gt;&lt;div class="feedflare"&gt;
- &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=OkVmO"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=OkVmO" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=iRpWo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=iRpWo" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=pjRCo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=pjRCo" border="0"&gt;&lt;/img&gt;&lt;/a&gt;
- &lt;/div&gt;&lt;img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/365673983" height="1" width="1"/&gt;</content>
-
-
- <feedburner:origLink>http://www.pauldix.net/2008/08/non-greedy-mode.html</feedburner:origLink></entry>
- <entry>
- <title>Storing many classification models</title>
- <link rel="alternate" type="text/html" href="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~3/358530158/storing-many-cl.html" />
- <link rel="replies" type="text/html" href="http://www.pauldix.net/2008/08/storing-many-cl.html" thr:count="3" thr:updated="2008-08-08T11:40:28-04:00" />
- <id>tag:typepad.com,2003:post-53888232</id>
- <published>2008-08-07T12:01:38-04:00</published>
- <updated>2008-08-27T16:58:18-04:00</updated>
- <summary>One of the things I need to do in Filterly is keep many trained classifiers. These are the machine learning models that determine if a blog post is on topic (Filterly separates information by topic). At the very least I...</summary>
- <author>
- <name>Paul Dix</name>
- </author>
- <category scheme="http://www.sixapart.com/ns/types#category" term="Tahiti" />
-
-
- <content type="html" xml:lang="en-US" xml:base="http://www.pauldix.net/">&lt;p&gt;One of the things I need to do in &lt;a href="http://filterly.com/"&gt;Filterly&lt;/a&gt; is keep many trained &lt;a href="http://en.wikipedia.org/wiki/Statistical_classification"&gt;classifiers&lt;/a&gt;. These are the machine learning models that determine if a blog post is on topic (Filterly separates information by topic). At the very least I need one per topic in the system. If I want to do something like &lt;a href="http://en.wikipedia.org/wiki/Boosting"&gt;boosting&lt;/a&gt; then I need even more. The issue I'm wrestling with is how to store this data. I'll outline a specific approach and what the storage needs are.&lt;/p&gt;&#xD;
- &#xD;
- &lt;p&gt;Let's say I go with boosting and 10 &lt;a href="http://en.wikipedia.org/wiki/Perceptron"&gt;perceptrons&lt;/a&gt;. I'll also limit my feature space to the 10,000 most statistically significant features. So the storage for each perceptron is a 10k element array. However, I'll also have to keep another data structure to store what the 10k features are and their position in the array. In code I use a hash for this where the feature name is the key and the value is its position. I just need to store one of these hashes per topic.&lt;/p&gt;&#xD;
- &#xD;
- &lt;p&gt;That's not really a huge amount of data. I'm more concerned about the best way to store it. I don't think this kind of thing maps well to a relational database. I don't need to store the features individually. Generally when I'm running the thing I'll want the whole perceptron and feature set in memory for quick access. For now I'm just using a big text field and serializing each using JSON.&lt;/p&gt;&#xD;
- &#xD;
- &lt;p&gt;I don't really like this approach. The whole serializing into the database seems really inelegant. Combined with the time that it takes to parse these things. Each time I want to see if a new post is on topic I'd need to load up the classifier and parse the 10 10k arrays and the 10k key hash. I could keep each classifier running as a service, but then I've got a pretty heavy process running for each topic.&lt;/p&gt;&#xD;
- &#xD;
- &lt;p&gt;I guess I'll just use the stupid easy solution for the time being and worry about performance later. Anyone have thoughts on the best approach?&lt;/p&gt;&lt;div class="feedflare"&gt;
- &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=DUT8O"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=DUT8O" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=ZGjFo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=ZGjFo" border="0"&gt;&lt;/img&gt;&lt;/a&gt; &lt;a href="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?a=pH3Vo"&gt;&lt;img src="http://feeds.feedburner.com/~f/PaulDixExplainsNothing?i=pH3Vo" border="0"&gt;&lt;/img&gt;&lt;/a&gt;
- &lt;/div&gt;&lt;img src="http://feeds.feedburner.com/~r/PaulDixExplainsNothing/~4/358530158" height="1" width="1"/&gt;</content>
-
-
- <feedburner:origLink>http://www.pauldix.net/2008/08/storing-many-cl.html</feedburner:origLink></entry>
-
- </feed>
data/spec/sax-machine_spec.rb CHANGED
@@ -650,7 +650,7 @@ describe "SAXMachine" do
    end

    it "parses content" do
-     expect(@feed.entries.first.content).to eq(File.read("spec/fixtures/atom-content.html"))
+     expect(@feed.entries.first.content.strip).to eq(File.read("spec/fixtures/atom-content.html").strip)
    end
  end

metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: sax-machine
  version: !ruby/object:Gem::Version
-   version: 1.0.3
+   version: 1.1.0
  platform: ruby
  authors:
  - Paul Dix
@@ -11,7 +11,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2014-11-06 00:00:00.000000000 Z
+ date: 2014-11-20 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: rspec
@@ -49,6 +49,7 @@ files:
  - lib/sax-machine/config/sax_element_value.rb
  - lib/sax-machine/handlers/sax_abstract_handler.rb
  - lib/sax-machine/handlers/sax_nokogiri_handler.rb
+ - lib/sax-machine/handlers/sax_oga_handler.rb
  - lib/sax-machine/handlers/sax_ox_handler.rb
  - lib/sax-machine/sax_config.rb
  - lib/sax-machine/sax_configure.rb
@@ -85,7 +86,7 @@ rubyforge_project:
  rubygems_version: 2.2.2
  signing_key:
  specification_version: 4
- summary: Declarative SAX Parsing with Nokogiri or Ox
+ summary: Declarative SAX Parsing with Nokogiri, Ox or Oga
  test_files:
  - spec/fixtures/atom-content.html
  - spec/fixtures/atom.xml