stupid_crawler 0.2.0

checksums.yaml ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: c5211fa749a672a28bd854ccc95ff4b3c12295e6
+   data.tar.gz: 871bc9b924fe8b1e23f2c92151052f8f02d9531b
+ SHA512:
+   metadata.gz: 2f5674db26b7cd930aefac6c0e0ac46145de9c124da717a5758846832772cd215dd57cc07bc4e37e60006188e1d5f743f330fc4fd63e098287ee1e7d6076576c
+   data.tar.gz: dba506bbd88ddfa7b6670f4761eacc95d56ec7c32b19f0ab7c7d1b341c0516e2e26be5630a31cc56a19d0d9ab4fedbcdd5f4d3620ab45a4b13b521eab96cca4c
data/.gitignore ADDED
@@ -0,0 +1,9 @@
+ /.bundle/
+ /.yardoc
+ /Gemfile.lock
+ /_yardoc/
+ /coverage/
+ /doc/
+ /pkg/
+ /spec/reports/
+ /tmp/
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source 'https://rubygems.org'
+
+ # Specify your gem's dependencies in stupid_crawler.gemspec
+ gemspec
data/LICENSE.txt ADDED
@@ -0,0 +1,21 @@
+ The MIT License (MIT)
+
+ Copyright (c) 2016 Jacob Burenstam
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,39 @@
+ # StupidCrawler
+
+ A stupid crawler that looks for URLs on a given site. The result is saved as two CSV files: one with the found URLs and another with the failed URLs.
+
+ ## Installation
+
+ Add this line to your application's Gemfile:
+
+ ```ruby
+ gem 'stupid_crawler'
+ ```
+
+ And then execute:
+
+     $ bundle
+
+ Or install it yourself as:
+
+     $ gem install stupid_crawler
+
+ ## Usage
+
+ ```
+ stupid-crawler --help
+ ```
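+
+ For example, to crawl a site, cap the number of crawled URLs, and write the result CSVs to a custom directory (flags as defined in `exe/stupid-crawler`):
+
+ ```
+ stupid-crawler --site=example.com --max=1000 --output=results/
+ ```
+
+ The crawler can also be driven from Ruby. A minimal sketch using the classes in `lib/stupid_crawler` (the argument values here are illustrative):
+
+ ```ruby
+ require 'stupid_crawler'
+
+ # Crawl at most 1000 URLs, sleeping 0.1s between fetches.
+ result = StupidCrawler::Crawler.new(
+   'http://example.com',
+   max_urls: 1000,
+   sleep_time: 0.1,
+   robots: false,
+   ignore_links: nil
+ ).call
+
+ # result is a hash of the form { found: [...], failed: [...] }
+ StupidCrawler::ResultWriter.call(result, dir_path: 'results')
+ ```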
+
+ ## Development
+
+ After checking out the repo, run `bin/setup` to install dependencies. You can also run `bin/console` for an interactive prompt that will allow you to experiment.
+
+ To install this gem onto your local machine, run `bundle exec rake install`. To release a new version, update the version number in `version.rb`, and then run `bundle exec rake release`, which will create a git tag for the version, push git commits and tags, and push the `.gem` file to [rubygems.org](https://rubygems.org).
+
+ ## Contributing
+
+ Bug reports and pull requests are welcome on GitHub at https://github.com/buren/stupid_crawler.
+
+ ## License
+
+ The gem is available as open source under the terms of the [MIT License](http://opensource.org/licenses/MIT).
data/Rakefile ADDED
@@ -0,0 +1 @@
+ require "bundler/gem_tasks"
data/bin/console ADDED
@@ -0,0 +1,14 @@
+ #!/usr/bin/env ruby
+
+ require 'bundler/setup'
+ require 'stupid_crawler'
+
+ # You can add fixtures and/or initialization code here to make experimenting
+ # with your gem easier. You can also use a different console, if you like.
+
+ # (If you use this, don't forget to add pry to your Gemfile!)
+ # require "pry"
+ # Pry.start
+
+ require 'irb'
+ IRB.start
data/bin/setup ADDED
@@ -0,0 +1,8 @@
+ #!/usr/bin/env bash
+ set -euo pipefail
+ IFS=$'\n\t'
+ set -vx
+
+ bundle install
+
+ # Do any other automated setup that you need to do here
data/exe/stupid-crawler ADDED
@@ -0,0 +1,64 @@
+ #!/usr/bin/env ruby
+ require 'optparse'
+ require 'uri'
+
+ require 'stupid_crawler'
+
+ site = nil
+ max_urls = Float::INFINITY
+ sleep_time = 0.1
+ robots = false
+ ignore_links = nil
+ dir_path = ''
+
+ optparse = OptionParser.new do |parser|
+   parser.on('--site=example.com', String, 'The site to crawl') do |value|
+     site = value
+   end
+
+   parser.on('--max=10000', Integer, 'Max number of URLs to crawl (default: Infinity)') do |value|
+     max_urls = value
+   end
+
+   parser.on('--sleep=0.1', Float, 'Sleep time between URL fetches (default: 0.1)') do |value|
+     sleep_time = value
+   end
+
+   parser.on('--ignore-links=/blog/', String, 'Ignore links matching this pattern (default: none)') do |value|
+     ignore_links = value
+   end
+
+   parser.on('--output=/result/', String, 'Output directory (default: the site host)') do |value|
+     dir_path = value
+   end
+
+   parser.on('--[no-]robots', 'Respect robots.txt (default: false)') do |value|
+     robots = value
+   end
+
+   parser.on('-h', '--help', 'How to use') do
+     puts parser
+     exit
+   end
+ end
+
+ optparse.parse!
+
+ if site.nil? || site.empty?
+   raise OptionParser::MissingArgument, "'--site' can't be blank"
+ end
+
+ unless site.start_with?('http://') || site.start_with?('https://')
+   site = "http://#{site}"
+ end
+
+ result = StupidCrawler::Crawler.new(
+   site,
+   max_urls: max_urls,
+   sleep_time: sleep_time,
+   robots: robots,
+   ignore_links: ignore_links
+ ).call
+
+ # Default the output directory to the site host when --output isn't given.
+ dir_path = URI.parse(site).host if dir_path.empty?
+ StupidCrawler::ResultWriter.call(result, dir_path: dir_path)
data/lib/stupid_crawler/crawler.rb ADDED
@@ -0,0 +1,58 @@
+ require 'uri'
+ require 'set'
+ require 'spidr'
+
+ module StupidCrawler
+   class Crawler
+     NotAbsoluteURI = Class.new(ArgumentError)
+
+     attr_reader :uri, :max_urls, :sleep_time, :robots, :ignore_links
+
+     def initialize(site, max_urls:, sleep_time:, robots:, ignore_links:)
+       @uri = build_uri!(site)
+       @max_urls = max_urls
+       @sleep_time = sleep_time
+       @robots = robots
+       @ignore_links = ignore_links.nil? ? [] : [Regexp.new(ignore_links)]
+     end
+
+     def call
+       crawl
+     end
+
+     private
+
+     def crawl
+       found_urls = Set.new
+       failed_urls = Set.new
+
+       Spidr.site(uri.to_s, ignore_links: ignore_links, robots: robots) do |spider|
+         spider.every_url do |url|
+           puts url
+           found_urls << url
+           sleep sleep_time
+           # Stop once the crawl limit is reached, returning the same hash shape as the normal exit.
+           return { found: found_urls.to_a, failed: failed_urls.to_a } if found_urls.length > max_urls
+         end
+
+         spider.every_failed_url do |url|
+           puts "FAILED: #{url}"
+           failed_urls << url
+         end
+       end
+       {
+         found: found_urls.to_a,
+         failed: failed_urls.to_a
+       }
+     end
+
+     def build_uri!(site)
+       uri = URI.parse(site)
+
+       unless uri.absolute
+         raise(NotAbsoluteURI, 'must be an absolute url with http(s) protocol')
+       end
+
+       uri
+     end
+   end
+ end
data/lib/stupid_crawler/result_writer.rb ADDED
@@ -0,0 +1,19 @@
+ require 'fileutils'
+
+ module StupidCrawler
+   class ResultWriter
+     def self.call(result_hash, dir_path: nil)
+       dir_path = dir_path.to_s
+       dir_path = "#{dir_path}/" if !dir_path.empty? && !dir_path.end_with?('/')
+       FileUtils.mkdir_p(dir_path) unless dir_path.empty?
+
+       timestamp = Time.now.strftime("%Y-%m-%d-%H-%M-%S")
+
+       found = result_hash[:found]
+       fails = result_hash[:failed]
+
+       File.write("#{dir_path}#{timestamp}-found.csv", found.join("\n"))
+       File.write("#{dir_path}#{timestamp}-fails.csv", fails.join("\n"))
+     end
+   end
+ end
data/lib/stupid_crawler/version.rb ADDED
@@ -0,0 +1,3 @@
+ module StupidCrawler
+   VERSION = '0.2.0'
+ end
data/lib/stupid_crawler.rb ADDED
@@ -0,0 +1,6 @@
+ require 'stupid_crawler/version'
+ require 'stupid_crawler/crawler'
+ require 'stupid_crawler/result_writer'
+
+ module StupidCrawler
+ end
data/stupid_crawler.gemspec ADDED
@@ -0,0 +1,26 @@
+ # coding: utf-8
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'stupid_crawler/version'
+
+ Gem::Specification.new do |spec|
+   spec.name          = 'stupid_crawler'
+   spec.version       = StupidCrawler::VERSION
+   spec.authors       = ['Jacob Burenstam']
+   spec.email         = ['burenstam@gmail.com']
+
+   spec.summary       = %q{Stupid crawler that looks for URLs on a given site.}
+   spec.description   = %q{Stupid crawler that looks for URLs on a given site. Result is saved as two CSV files: one with found URLs and another with failed URLs.}
+   spec.homepage      = 'https://github.com/buren/stupid_crawler'
+   spec.license       = 'MIT'
+
+   spec.files         = `git ls-files -z`.split("\x0").reject { |f| f.match(%r{^(test|spec|features)/}) }
+   spec.bindir        = 'exe'
+   spec.executables   = spec.files.grep(%r{^exe/}) { |f| File.basename(f) }
+   spec.require_paths = ['lib']
+
+   spec.add_dependency 'spidr', '~> 0.6'
+
+   spec.add_development_dependency 'bundler', '~> 1.12'
+   spec.add_development_dependency 'rake', '~> 10.0'
+ end
metadata ADDED
@@ -0,0 +1,101 @@
+ --- !ruby/object:Gem::Specification
+ name: stupid_crawler
+ version: !ruby/object:Gem::Version
+   version: 0.2.0
+ platform: ruby
+ authors:
+ - Jacob Burenstam
+ autorequire:
+ bindir: exe
+ cert_chain: []
+ date: 2017-09-10 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: spidr
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.6'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.6'
+ - !ruby/object:Gem::Dependency
+   name: bundler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.12'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '1.12'
+ - !ruby/object:Gem::Dependency
+   name: rake
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '10.0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '10.0'
+ description: 'Stupid crawler that looks for URLs on a given site. Result is saved
+   as two CSV files: one with found URLs and another with failed URLs.'
+ email:
+ - burenstam@gmail.com
+ executables:
+ - stupid-crawler
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - ".gitignore"
+ - Gemfile
+ - LICENSE.txt
+ - README.md
+ - Rakefile
+ - bin/console
+ - bin/setup
+ - exe/stupid-crawler
+ - lib/stupid_crawler.rb
+ - lib/stupid_crawler/crawler.rb
+ - lib/stupid_crawler/result_writer.rb
+ - lib/stupid_crawler/version.rb
+ - stupid_crawler.gemspec
+ homepage: https://github.com/buren/stupid_crawler
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - ">="
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.6.13
+ signing_key:
+ specification_version: 4
+ summary: Stupid crawler that looks for URLs on a given site.
+ test_files: []