trawler 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA1:
3
+ metadata.gz: f2215c97872959bff772658f412a924f9d4f6357
4
+ data.tar.gz: 5ed5422791ad8fe45a668984c17ad84dba60d924
5
+ SHA512:
6
+ metadata.gz: 1fe1b9aa090d52b281b38b1e948613eb37192c4872a0de922c8335ca9b6757c53ffa839ad462c083f9f31a6fcbc568412d8f414c57ea47f10854459002e4fdfe
7
+ data.tar.gz: b082e3234696d8bb0cd99346fa71816eb98e4062973e94e4e3cf7c2e6b42154200382507da7ef058348faf8c52a9baea8a0133be67be451766236c3dbe287e93
data/.gitignore ADDED
@@ -0,0 +1,19 @@
1
+ *.gem
2
+ *.rbc
3
+ .bundle
4
+ .config
5
+ .yardoc
6
+ Gemfile.lock
7
+ InstalledFiles
8
+ _yardoc
9
+ coverage
10
+ doc/
11
+ lib/bundler/man
12
+ pkg
13
+ rdoc
14
+ spec/reports
15
+ test/tmp
16
+ test/version_tmp
17
+ tmp
18
+ .ruby-*
19
+ .DS_Store
data/.rspec ADDED
@@ -0,0 +1 @@
1
+ --color
data/Gemfile ADDED
@@ -0,0 +1,4 @@
1
+ source 'https://rubygems.org'
2
+
3
+ # Specify your gem's dependencies in trawler.gemspec
4
+ gemspec
data/LICENSE.txt ADDED
@@ -0,0 +1,22 @@
1
+ Copyright (c) 2013 Jon Wheeler
2
+
3
+ MIT License
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining
6
+ a copy of this software and associated documentation files (the
7
+ "Software"), to deal in the Software without restriction, including
8
+ without limitation the rights to use, copy, modify, merge, publish,
9
+ distribute, sublicense, and/or sell copies of the Software, and to
10
+ permit persons to whom the Software is furnished to do so, subject to
11
+ the following conditions:
12
+
13
+ The above copyright notice and this permission notice shall be
14
+ included in all copies or substantial portions of the Software.
15
+
16
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
19
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
20
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
21
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
22
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,52 @@
1
+ # Trawler
2
+
3
+ Trawl the web for meta tags.
4
+
5
+ ## Installation
6
+
7
+ Add this line to your application's Gemfile:
8
+
9
+ gem 'trawler'
10
+
11
+ And then execute:
12
+
13
+ $ bundle
14
+
15
+ Or install it yourself as:
16
+
17
+ $ gem install trawler
18
+
19
+ ## Usage
20
+
21
+ Trawler is designed to be simple to use. Give the Trawler a URL and some options
22
+ and it will bring back a catch of images, meta descriptions and video URLs in a
23
+ ParsedDocument object.
24
+
25
+ ```ruby
26
+ doc = Trawler.fetch("www.foobar.com")
27
+ doc.url # => "http://www.foobar.com"
28
+ doc.description # => "Descriptive meta tag"
29
+ doc.title # => "Foobar.com The place of Foo"
30
+ doc.images # => [
31
+ # http://www.foobar.com/assets/bar.png,
32
+ # http://www.foobar.com/assets/baz.jpg,
33
+ # http://www.foobar.com/assets/bat.gif
34
+ # ]
35
+ doc.video # => [
36
+ # http://www.vimeo.com/354357349
37
+ # ]
38
+ ```
39
+
40
+ ## Testing
41
+
42
+ ```
43
+ rspec spec/
44
+ ```
45
+
46
+ ## Contributing
47
+
48
+ 1. Fork it
49
+ 2. Create your feature branch (`git checkout -b my-new-feature`)
50
+ 3. Commit your changes (`git commit -am 'Add some feature'`)
51
+ 4. Push to the branch (`git push origin my-new-feature`)
52
+ 5. Create new Pull Request
data/Rakefile ADDED
@@ -0,0 +1 @@
1
+ require "bundler/gem_tasks"
@@ -0,0 +1,22 @@
1
+ module Trawler
2
+ class Document
3
+ def initialize(url, options = {})
4
+ @url = url
5
+ @parser = options.fetch(:parser, Parser)
6
+ @spider = options.fetch(:spider, Spider)
7
+ @image = options.fetch(:image_size, "100")
8
+ end
9
+
10
+ def parse
11
+ ParsedDocument.new(doc.full_url, parsed_data)
12
+ end
13
+
14
+ def parsed_data
15
+ Parser.new(page: doc.page, url: doc.full_url, image_size: @image)
16
+ end
17
+
18
+ def doc
19
+ @page ||= @spider.new(@url).call
20
+ end
21
+ end
22
+ end
@@ -0,0 +1,30 @@
1
module Trawler
  # Read-only wrapper exposing a parsed page's URL and its extracted
  # fields (title, video, images, description).
  class ParsedDocument
    attr_reader :url, :data

    # host - the fully-qualified URL the data was parsed from.
    # data - a parser object responding to #title, #video, #images,
    #        #description and #document.
    def initialize(host, data)
      @url  = host
      @data = data
    end

    # Each extracted field is a straight delegation to the parser.
    %i[title video images description].each do |field|
      define_method(field) { data.public_send(field) }
    end

    # The underlying document object produced by the parser,
    # memoized on first access.
    def raw_data
      @raw_data ||= data.document
    end
  end
end
@@ -0,0 +1,94 @@
1
require "nokogiri"
require "hashr"

module Trawler
  # Extracts titles, descriptions, images and video URLs from raw HTML,
  # preferring meta tags (og:/twitter:) and falling back to page content.
  class Parser
    attr_reader :url
    attr_reader :meta_data

    # options - :page (raw HTML string or IO),
    #           :url (the page's URL),
    #           :image_size (minimum image width as a string, e.g. "100").
    def initialize(options)
      @page           = options[:page]
      @url            = options[:url]
      @min_image_size = options[:image_size]
      @meta_data      = Hashr.new
    end

    # Meta title if present, otherwise the <title> element's text.
    def title
      meta_title.nil? ? html_title : meta_title
    end

    # Meta description if present, otherwise the first long paragraph.
    def description
      meta_description.nil? ? html_description : meta_description
    end

    # Unique, whitespace-stripped image URLs: the meta image first,
    # then candidates scraped from <img> tags.
    def images
      [meta_image, find_images].flatten.compact.map(&:strip).uniq
    end

    # The meta video URL, if any.
    def video
      meta_video
    end

    # Memoized Nokogiri document for the page.
    def document
      @document ||= Nokogiri::HTML(@page)
    end

    private

    def html_title
      document.css("title").inner_text rescue nil
    end

    # Fall back to the first paragraph of at least 150 characters;
    # empty string when the page has none.
    def html_description
      first_long_paragraph = document.search("//p[string-length() >= 150]").first
      first_long_paragraph ? first_long_paragraph.text : ''
    end

    # Collect <img> src values, skipping spinners/icons, images without
    # alt text, and images narrower than the configured minimum.
    def find_images
      images = document.css('img')
      images = images.select { |img| !img[:alt].nil? && !img[:alt].empty? }
      images.reject! { |img| img[:alt] =~ /(loading|spinner)/i }
      images.reject! { |img| img[:class] =~ /(loading|spinner|icon)/i }
      # Fix: compare widths numerically. The original compared the raw
      # attribute strings, so e.g. "99" < "100" was false (lexicographic).
      images.reject! { |img| img[:width] && img[:width].to_i < @min_image_size.to_i }
      images.map { |img| img[:src] }
    end

    def meta_title
      scrape_meta_data
      meta_data.title rescue nil
    end

    def meta_description
      scrape_meta_data
      meta_data.description rescue nil
    end

    def meta_video
      scrape_meta_data
      meta_data.video rescue nil
    end

    def meta_image
      scrape_meta_data
      meta_data.image rescue nil
    end

    # Scrape all <meta> tags into meta_data. Fix: memoized — the original
    # re-walked every meta tag on each accessor call.
    def scrape_meta_data
      @scraped ||= begin
        document.xpath("//meta").each do |element|
          get_meta_data_name_or_property(element)
        end
        true
      end
    end

    # Store a meta tag's content under a normalized symbol key:
    # og:/twitter: prefixes are dropped, remaining colons become
    # underscores (e.g. og:video:url -> :video_url).
    def get_meta_data_name_or_property(element)
      name_or_property = element.attributes["name"] ? "name" : (element.attributes["property"] ? "property" : nil)
      content_or_value = element.attributes["content"] ? "content" : (element.attributes["value"] ? "value" : nil)

      if !name_or_property.nil? && !content_or_value.nil?
        key = element.attributes[name_or_property].value.downcase.gsub(/(og:|twitter:)/, "").gsub(/:/, "_").to_sym
        meta_data[key] = element.attributes[content_or_value].value
      end
    end
  end
end
@@ -0,0 +1,24 @@
1
require "open-uri"

module Trawler
  # Fetches a page over HTTP, defaulting the scheme to http:// when the
  # given URL has none.
  class Spider
    attr_reader :page

    # url - the address to fetch, with or without a scheme.
    def initialize(url)
      @url = url
    end

    # Fetch the page and return self so callers can read #page / #full_url.
    def call
      get_page
      self
    end

    # The URL with an http:// scheme prepended when none was supplied.
    def full_url
      @full_url ||= URI.parse(@url).scheme.nil? ? 'http://' + @url : @url
    end

    # Memoized page fetch.
    # Fix: avoid Kernel#open on an externally supplied string — it can
    # spawn a subprocess when the argument begins with "|". Going through
    # the parsed URI restricts this to open-uri's HTTP handling.
    def get_page
      @page ||= URI.parse(full_url).open
    end
  end
end
@@ -0,0 +1,3 @@
1
module Trawler
  # Gem version, bumped on each release.
  VERSION = "0.0.1"
end
data/lib/trawler.rb ADDED
@@ -0,0 +1,11 @@
1
require "trawler/document"
require "trawler/parsed_document"
require "trawler/spider"
require "trawler/parser"
require "trawler/version"

module Trawler
  class << self
    # Public entry point: build a Document for +url+, fetch and parse it,
    # returning a ParsedDocument.
    #
    # url     - the page to trawl.
    # options - forwarded to Document (:parser, :spider, :image_size).
    def fetch(url, options = {})
      Document.new(url, options).parse
    end
  end
end
@@ -0,0 +1,31 @@
1
require "spec_helper"

describe Trawler::Document do
  let(:document) { Trawler::Document.new("http://www.dogshaming.com") }

  before do
    # Stub the network fetch so specs read a local fixture instead of
    # hitting the web.
    Trawler::Spider.any_instance.stub(:get_page) { fixture("sample_pages/tumblr.html") }
  end

  it "requires a URL" do
    # Fix: pin the expected error class — a bare raise_error matcher
    # passes for ANY exception, masking unrelated failures.
    expect { Trawler::Document.new }.to raise_error(ArgumentError)
  end

  describe "#doc" do
    it "returns the crawled document object" do
      expect(document.doc).to be_a Trawler::Spider
    end
  end

  describe "#parse" do
    it "returns a parsed document" do
      expect(document.parse).to be_a Trawler::ParsedDocument
    end
  end

  describe "#parsed_data" do
    it "returns a parse object" do
      expect(document.parsed_data).to be_a Trawler::Parser
    end
  end
end
@@ -0,0 +1,10 @@
1
+ <html>
2
+ <head>
3
+ <title>Super simple html page</title>
4
+ </head>
5
+ <body>
6
+ <p>
7
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed dapibus velit in lacus mollis vehicula nec a arcu. Vivamus in turpis ultricies, ornare quam vehicula, adipiscing metus. Nam mauris libero, dignissim quis sodales ac, dignissim sed risus. Quisque varius eget eros vel condimentum. Duis cursus pretium est vitae lobortis. Donec tincidunt lorem vel erat feugiat lobortis. Pellentesque scelerisque eu massa a bibendum. Curabitur aliquam vitae eros nec semper. Morbi eu dui dapibus, iaculis ipsum vulputate, mollis ante. Pellentesque quis ipsum dignissim, pharetra tortor non, fringilla diam. Morbi a justo nec neque vulputate tempor eget sed velit. Nunc imperdiet neque mi, ut tincidunt leo convallis id. Nam in gravida metus.
8
+ </p>
9
+ </body>
10
+ </html>