scrapix 0.1.3

checksums.yaml.gz ADDED
@@ -0,0 +1,7 @@
+ ---
+ SHA1:
+   metadata.gz: 7229eb9784d2c1e6a49ebc62c56cd2639b8b18aa
+   data.tar.gz: 766a3bda0863e5126ca10188a3d629250097392b
+ SHA512:
+   metadata.gz: 31237fb06e6cb6413305d1fae6f3f593072c40225619e3975f883ce805f89e8f1c2a1eddb2aaae045e37fab0427b1d6c7b07c7fe040c072b2ac5c373387c6a2c
+   data.tar.gz: 57cd2684ac04991a4ff5ae28b5a37c48362d6db4338fec0af575cc2b0288cedeb056c170781d10d17605f119f46024e9aa82388c16ea51672dc4cfdea9b27d0d
data/.gitignore ADDED
@@ -0,0 +1,17 @@
+ *.gem
+ *.rbc
+ .bundle
+ .config
+ .yardoc
+ Gemfile.lock
+ InstalledFiles
+ _yardoc
+ coverage
+ doc/
+ lib/bundler/man
+ pkg
+ rdoc
+ spec/reports
+ test/tmp
+ test/version_tmp
+ tmp
data/.rvmrc ADDED
@@ -0,0 +1,52 @@
+ #!/usr/bin/env bash
+
+ # This is an RVM Project .rvmrc file, used to automatically load the ruby
+ # development environment upon cd'ing into the directory
+
+ # First we specify our desired <ruby>[@<gemset>], the @gemset name is optional,
+ # Only full ruby name is supported here, for short names use:
+ #     echo "rvm use 2.0.0" > .rvmrc
+ environment_id="ruby-2.0.0-p0@scrapix"
+
+ # Uncomment the following lines if you want to verify rvm version per project
+ # rvmrc_rvm_version="1.18.15 (stable)" # 1.10.1 seems a safe start
+ # eval "$(echo ${rvm_version}.${rvmrc_rvm_version} | awk -F. '{print "[[ "$1*65536+$2*256+$3" -ge "$4*65536+$5*256+$6" ]]"}' )" || {
+ #   echo "This .rvmrc file requires at least RVM ${rvmrc_rvm_version}, aborting loading."
+ #   return 1
+ # }
+
+ # First we attempt to load the desired environment directly from the environment
+ # file. This is very fast and efficient compared to running through the entire
+ # CLI and selector. If you want feedback on which environment was used then
+ # insert the word 'use' after --create as this triggers verbose mode.
+ if [[ -d "${rvm_path:-$HOME/.rvm}/environments"
+   && -s "${rvm_path:-$HOME/.rvm}/environments/$environment_id" ]]
+ then
+   \. "${rvm_path:-$HOME/.rvm}/environments/$environment_id"
+   [[ -s "${rvm_path:-$HOME/.rvm}/hooks/after_use" ]] &&
+     \. "${rvm_path:-$HOME/.rvm}/hooks/after_use" || true
+   if [[ $- == *i* ]] # check for interactive shells
+   then echo "Using: $(tput setaf 2)$GEM_HOME$(tput sgr0)" # show the user the ruby and gemset they are using in green
+   else echo "Using: $GEM_HOME" # don't use colors in non-interactive shells
+   fi
+ else
+   # If the environment file has not yet been created, use the RVM CLI to select.
+   rvm --create use "$environment_id" || {
+     echo "Failed to create RVM environment '${environment_id}'."
+     return 1
+   }
+ fi
+
+ # If you use bundler, this might be useful to you:
+ # if [[ -s Gemfile ]] && {
+ #   ! builtin command -v bundle >/dev/null ||
+ #   builtin command -v bundle | GREP_OPTIONS= \grep $rvm_path/bin/bundle >/dev/null
+ # }
+ # then
+ #   printf "%b" "The rubygem 'bundler' is not installed. Installing it now.\n"
+ #   gem install bundler
+ # fi
+ # if [[ -s Gemfile ]] && builtin command -v bundle >/dev/null
+ # then
+ #   bundle install | GREP_OPTIONS= \grep -vE '^Using|Your bundle is complete'
+ # fi
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source 'https://rubygems.org'
+
+ # Specify your gem's dependencies in scrapix.gemspec
+ gemspec
data/LICENSE.txt ADDED
@@ -0,0 +1,22 @@
+ Copyright (c) 2013 Nikhil Gupta
+
+ MIT License
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,94 @@
+ # Scrapix
+
+ A gem that scrapes images from various sources. It provides the results of these
+ searches in a neat way, which you can then use to download the images or simply
+ obtain a list of their URLs.
+
+ You can also use the API to call these scraping methods inside your own applications.
+
+ ## Installation
+
+ Add this line to your application's Gemfile:
+
+     gem 'scrapix'
+
+ And then execute:
+
+     $ bundle
+
+ Or install it yourself as:
+
+     $ gem install scrapix
+
+ ## Usage :: Google Images
+
+ This gem can scrape images from a Google Images search. It uses `Capybara` along with
+ the `Poltergeist` driver (which works on top of `PhantomJS`) for this purpose, so the
+ `phantomjs` binary must be installed and available on your `PATH`.
+
+ To use the Google Images scraper inside your ruby applications, simply do:
+
+     scraper = Scrapix::GoogleImages.new # create the scraper
+
+     scraper.query = "programmer" # find images for keyword: "programmer"
+     scraper.total = 30           # limit the search to 30 images (default: 100)
+     scraper.find                 # return a list of such images
+
+     # search for 'large' images, and turn SafeSearch off
+     scraper.options = { safe: false, size: "large" }
+     scraper.find
+
+     # everything at once:
+     scraper = Scrapix::GoogleImages.new "programmer", safe: false, size: "large"
+     scraper.total = 30 # limit to 30 images (default: 100)
+     scraper.find
+
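+ Each result is a hash with `:url`, `:width`, `:height` and `:reference_url` keys
+ (see `bin/scrapix`). Downloading is therefore a short loop; here is a minimal
+ sketch, assuming the standard `open-uri` library:
+
+     require 'open-uri'
+
+     scraper.find.each do |image|
+       # derive a local filename from the image url, then write the bytes to disk
+       File.open(File.basename(image[:url]), "wb") do |file|
+         file.write open(image[:url]).read
+       end
+     end
+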
+ The `size` option can be supplied in the following ways (see the examples after this list):
+
+ - __icon__, __small__, __medium__, or __large__
+ - __&lt;n&gt;__: searches for images with exact dimensions (width: _&lt;n&gt;_, height: _&lt;n&gt;_)
+ - __&lt;m&gt;x&lt;n&gt;__: searches for images with exact dimensions (width: _&lt;m&gt;_, height: _&lt;n&gt;_)
+ - __&lt;n&gt;mp__: searches for images larger than _&lt;n&gt;_ MP, and intelligently adjusts to
+   the closest available option if _&lt;n&gt;_ is not in the supported list of sizes
+   for this search.
+
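+ For example (a hypothetical session illustrating the size formats above; the
+ `13mp` value snaps to the closest supported size, 12 MP):
+
+     scraper = Scrapix::GoogleImages.new "sunset"
+     scraper.options = { size: "1280x800" } # exactly 1280x800 pixels
+     scraper.find
+     scraper.options = { size: "400" }      # exactly 400x400 pixels
+     scraper.find
+     scraper.options = { size: "13mp" }     # larger than 13 MP, adjusted to 12 MP
+     scraper.find
+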
+ You can also use the scraper on the CLI:
+
+     scrapix google_images "programmer" --no-safe --total=30 --size=large
+
+ ## Usage :: vBulletin Threads
+
+ This gem can scrape vBulletin threads for images. It uses the `Mechanize` gem for this purpose.
+
+ To use the vBulletin thread scraper inside your ruby applications, simply do:
+
+     scraper = Scrapix::VBulletin.new # create the scraper
+
+     # find images for the following thread
+     scraper.url = "http://www.wetacollectors.com/forum/showthread.php?t=40085"
+     scraper.find # return a list of such images
+
+     # start searching from page 2 of this thread until we find 10 images
+     scraper.options = { start: 2, total: 10 }
+     scraper.find
+
+     # everything at once:
+     url = "http://www.wetacollectors.com/forum/showthread.php?t=40085"
+     scraper = Scrapix::VBulletin.new url, start: 2, end: 3, total: 10
+     scraper.find
+
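+ After a run, the scraper also exposes a few attribute readers (defined in
+ `lib/scrapix/vbulletin.rb`); the return values sketched here are illustrative:
+
+     scraper.title     # => title of the thread
+     scraper.max_pages # => number of pages detected in the thread
+     scraper.page_no   # => page number the scraper stopped at
+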
+ You can also use the scraper on the CLI:
+
+     scrapix vbulletin "http://www.wetacollectors.com/forum/showthread.php?t=40085" --total=10 --start=2
+
+ ## Contributing
+
+ 1. Fork it
+ 2. Create your feature branch (`git checkout -b my-new-feature`)
+ 3. Commit your changes (`git commit -am 'Add some feature'`)
+ 4. Push to the branch (`git push origin my-new-feature`)
+ 5. Create a new Pull Request
+
+ ## TODO
+
+ 1. Check if `mechanize` can be used instead of the `capybara + poltergeist` combination for scraping Google Images.
data/Rakefile ADDED
@@ -0,0 +1,2 @@
+ #!/usr/bin/env rake
+ require "bundler/gem_tasks"
data/bin/scrapix ADDED
@@ -0,0 +1,46 @@
+ #!/usr/bin/env ruby
+ # encoding: utf-8
+
+ require 'thor'
+ require 'scrapix'
+
+ module Scrapix
+   class CLI < Thor
+     desc "google_images [KEYWORD]", "scrape images from Google Images"
+     method_option :safe, type: :boolean, default: true, desc: "use safe search?"
+     method_option :size, default: "any", desc: "size of the images to search for"
+     method_option :ref, type: :boolean, default: false, desc: "provide a list of reference urls, instead"
+     method_option :verbose, type: :boolean, default: false, desc: "provide all info", aliases: "-v"
+     method_option :total, default: 100, desc: "number of images to search", aliases: "-n"
+     def google_images(keyword)
+       scraper = Scrapix::GoogleImages.new keyword, options
+       scraper.total = options["total"].to_i
+       images = scraper.find
+       if images.empty?
+         puts "No images were found! :("
+       else
+         # CSV-style output when verbose; otherwise one url per line
+         puts "URL, WIDTH, HEIGHT, REFERENCE_URL" if options["verbose"]
+         images.each do |image|
+           if options["verbose"]
+             puts "#{image[:url]},#{image[:width]},#{image[:height]},#{image[:reference_url]}"
+           else
+             puts options["ref"] ? image[:reference_url] : image[:url]
+           end
+         end
+       end
+     end
+
+     desc "vbulletin [THREAD_URL]", "scrape images from a vBulletin Thread"
+     method_option :total, default: 100000, desc: "number of images to search", aliases: "-n"
+     method_option :start, default: 1, desc: "starting page number"
+     method_option :end, default: 10000, desc: "ending page number"
+     method_option :verbose, type: :boolean, default: false, desc: "be verbose", aliases: "-v"
+     def vbulletin(thread_url)
+       # the scraper prints image urls itself when the "cli" option is set
+       scraper = Scrapix::VBulletin.new thread_url, options.merge({"cli" => true})
+       images = scraper.find
+       puts "No images were found! :(" if images.empty?
+     end
+   end
+ end
+
+ Scrapix::CLI.start
data/lib/scrapix.rb ADDED
@@ -0,0 +1,5 @@
+ require "scrapix/version"
+ require "scrapix/drivers/capybara"
+ require "scrapix/google_images"
+ require "scrapix/vbulletin"
+ require "mechanize"
+
+ # stdlib modules the scrapers use directly (URI, CGI, Digest::MD5)
+ require "uri"
+ require "cgi"
+ require "digest/md5"
data/lib/scrapix/drivers/capybara.rb ADDED
@@ -0,0 +1,20 @@
+ # capybara for scraping
+ require 'capybara'
+ require 'capybara/dsl'
+ require 'capybara/poltergeist'
+
+ Capybara.register_driver :poltergeist_debug do |app|
+   Capybara::Poltergeist::Driver.new app, {
+     timeout: 600,
+     inspector: true,
+     # js_errors: false,
+     phantomjs_options: ["--web-security=no"]
+   }
+ end
+
+ # use the javascript driver
+ Capybara.current_driver = :poltergeist_debug
+
+ Scrapix::UserAgent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) " +
+   "AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"
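+ # NOTE (editorial): Scrapix::UserAgent is not referenced by the other files in this
+ # release; presumably it is meant to be applied per session, e.g. via Poltergeist's
+ # page.driver.headers = { "User-Agent" => Scrapix::UserAgent }.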
data/lib/scrapix/google_images.rb ADDED
@@ -0,0 +1,113 @@
+ module Scrapix
+   # download images from a Google Images search
+   class GoogleImages
+     include Capybara::DSL
+
+     # options can be:
+     #   size: named size, e.g. icon, small, medium, large, 13mp, 1280x800, etc.
+     #   safe: true or false
+     #
+     def initialize(query = nil, options = {})
+       self.options = options
+       self.query = query
+       self.total = 100
+     end
+
+     # each results page carries 20 images, hence the computed start offset
+     def search_url(page_no = 1)
+       "http://google.com/search?tbm=isch&q=#{@query}#{@params}&start=#{(page_no - 1)*20}"
+     end
+
+     def query=(q)
+       @query = URI.escape(q) if q
+     end
+
+     def total=(n)
+       @num = n.to_i
+     end
+
+     def options=(opts)
+       # convert symbolic keys to string keys
+       options = {}
+       opts.each { |k,v| options[k.to_s] = v }
+
+       # merge the options with defaults!
+       @options ||= { "safe" => true, "size" => "any" }
+       @options.merge!(options)
+       sanitize_size
+
+       # parametrize for url purposes
+       @params = create_params
+     end
+
+     # params: page_no => starting page number for google results
+     def find(page_no = 1)
+       images = {}
+       return images unless @query
+
+       while images.count < @num
+         visit search_url(page_no)
+         links = Capybara.page.all("a")
+         links = links.select{|x| x["href"] =~ /^\/imgres/} if links.any?
+         return images unless links.any?
+         page_counter = 0
+         links.each do |link|
+           attribs = CGI.parse(URI.parse(link["href"]).query) rescue nil
+           next if attribs.nil?
+           # deduplicate images by an MD5 digest of their source url
+           hash = Digest::MD5.hexdigest(attribs["imgurl"][0])
+           unless images.has_key?(hash)
+             images[hash] = {
+               width: attribs["w"][0],
+               height: attribs["h"][0],
+               url: attribs["imgurl"][0],
+               reference_url: attribs["imgrefurl"][0]
+             }
+             page_counter += 1
+           end
+         end
+         page_no += 1
+         # stop when a page yields no new images
+         break if page_counter == 0
+       end
+       images.take(@num).map{|x| x[1]}
+     end
+
+     private
+
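+     # Worked example (editorial comment, not in the original source):
+     #   validate_mp_size(13) leaves lower_bound = 12 and upper_bound = 15;
+     #   since 13 - 12 < 15 - 13, it returns 12, the closest supported size.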
+     def validate_mp_size(mp)
+       mp = mp.to_i
+       lower_bound = 0; upper_bound = 9999;
+       valid_mp_sizes = [ 2, 4, 6, 8, 10, 12, 15, 20, 40, 70 ]
+       valid_mp_sizes.each do |s|
+         return s if s == mp
+         lower_bound = s if s < mp
+         upper_bound = s if s > mp && s < upper_bound
+       end
+       mp - lower_bound > upper_bound - mp ? upper_bound : lower_bound
+     end
+
+     # if width or height is specified, use them as 'exact' size
+     # otherwise, use a MP size for finding images larger than that size
+     # otherwise, use a given named size
+     def sanitize_size
+       @options["size"] = case
+         when m = @options["size"].match(/^(\d+)x(\d+)$/)
+           then "isz:ex,iszw:#{m[1]},iszh:#{m[2]}"
+         when m = @options["size"].match(/^(\d+)$/)
+           then "isz:ex,iszw:#{m[1]},iszh:#{m[1]}"
+         when m = @options["size"].match(/^(\d+)mp$/)
+           then "isz:lt,islt:#{validate_mp_size(m[1])}mp"
+         when @options["size"] == "large" then "isz:l"
+         when @options["size"] == "medium" then "isz:m"
+         when @options["size"] == "small" then "isz:s"
+         when @options["size"] == "icon" then "isz:i"
+         else nil
+       end
+     end
+
+     def create_params
+       string = ""
+       string += "&tbs=#{@options["size"]}" if @options["size"]
+       string += "&safe=off" unless @options["safe"]
+       string
+     end
+   end
+ end
data/lib/scrapix/vbulletin.rb ADDED
@@ -0,0 +1,72 @@
+ module Scrapix
+   # download images from a vBulletin thread
+   class VBulletin
+
+     attr_reader :title, :max_pages, :options, :page_no, :images, :url
+
+     def initialize(url = nil, options = {})
+       @images = {}
+       @agent = Mechanize.new
+       @agent.user_agent_alias = 'Mac Safari'
+       self.options = options
+       self.url = url
+     end
+
+     # find images for this thread, starting at the configured page number
+     def find
+       reset; return @images unless @url
+       @page_no = @options["start"]
+       until @images.count > @options["total"] || thread_has_ended?
+         page = @agent.get "#{@url}&page=#{@page_no}"
+         puts "[VERBOSE] Searching: #{@url}&page=#{@page_no}" if @options["verbose"] && options["cli"]
+         sources = page.image_urls.map{|x| x.to_s}
+         sources = filter_images sources # hook for sub-classes
+         @page_no += 1
+         next if sources.empty?
+         sources.each do |source|
+           # deduplicate images by an MD5 digest of their source url
+           hash = Digest::MD5.hexdigest(source)
+           unless @images.has_key?(hash)
+             @images[hash] = {url: source}
+             puts source if options["cli"]
+           end
+         end
+       end
+       @images = @images.map{|x, y| y}
+     end
+
+     def thread_has_ended?
+       @page_no > @options["end"] || @page_no > @max_pages
+     end
+
+     def filter_images(sources)
+       # hook for sub-classes to filter the images
+       return sources
+     end
+
+     def url=(url)
+       @url = url
+       return unless @url
+       page = @agent.get @url
+       @title = page.title.strip
+       puts @title + "\n" + ("=" * @title.length) if self.options["cli"]
+       begin
+         # read the page count from the vBulletin pagination control
+         text = page.search(".pagenav .vbmenu_control").first.inner_text
+         @max_pages = text.match(/Page \d* of (\d*)/)[1].to_i
+       rescue
+         @max_pages = 1
+       end
+     end
+
+     def reset
+       @images = {}
+       @page_no = @options["start"]
+     end
+
+     def options=(options = {})
+       @options = { "start" => 1, "end" => 10000, "total" => 100000, "verbose" => false, "cli" => false }
+       options.each { |k,v| @options[k.to_s] = v }
+       ["start", "end", "total"].each {|k| @options[k] = @options[k].to_i}
+       @options
+     end
+   end
+ end
data/lib/scrapix/version.rb ADDED
@@ -0,0 +1,3 @@
+ module Scrapix
+   VERSION = "0.1.3"
+ end
data/scrapix.gemspec ADDED
@@ -0,0 +1,28 @@
+ # coding: utf-8
+ lib = File.expand_path('../lib', __FILE__)
+ $LOAD_PATH.unshift(lib) unless $LOAD_PATH.include?(lib)
+ require 'scrapix/version'
+
+ Gem::Specification.new do |spec|
+   spec.name          = "scrapix"
+   spec.version       = Scrapix::VERSION
+   spec.authors       = ["Nikhil Gupta"]
+   spec.email         = ["me@nikhgupta.com"]
+   spec.description   = %q{Scrapes images from various sources, e.g. Google Images, vBulletin threads, etc.}
+   spec.summary       = %q{A gem that scrapes images from various sources and provides the results in a neat way, which you can then use to download the images.}
+   spec.homepage      = "http://github.com/nikhgupta/scrapix"
+   spec.license       = "MIT"
+
+   spec.files         = `git ls-files`.split($/)
+   spec.executables   = spec.files.grep(%r{^bin/}) { |f| File.basename(f) }
+   spec.test_files    = spec.files.grep(%r{^(test|spec|features)/})
+   spec.require_paths = ["lib"]
+
+   spec.add_dependency "thor"
+   spec.add_dependency "capybara"
+   spec.add_dependency "mechanize"
+   spec.add_dependency "poltergeist"
+
+   spec.add_development_dependency "rake"
+   spec.add_development_dependency "bundler", "~> 1.3"
+ end
metadata ADDED
@@ -0,0 +1,145 @@
+ --- !ruby/object:Gem::Specification
+ name: scrapix
+ version: !ruby/object:Gem::Version
+   version: 0.1.3
+ platform: ruby
+ authors:
+ - Nikhil Gupta
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2013-05-03 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: thor
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: capybara
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: mechanize
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: poltergeist
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :runtime
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: rake
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - '>='
+       - !ruby/object:Gem::Version
+         version: '0'
+ - !ruby/object:Gem::Dependency
+   name: bundler
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '1.3'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: '1.3'
+ description: Scrapes images from various sources, e.g. Google Images, vBulletin
+   threads, etc.
+ email:
+ - me@nikhgupta.com
+ executables:
+ - scrapix
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - .gitignore
+ - .rvmrc
+ - Gemfile
+ - LICENSE.txt
+ - README.md
+ - Rakefile
+ - bin/scrapix
+ - lib/scrapix.rb
+ - lib/scrapix/drivers/capybara.rb
+ - lib/scrapix/google_images.rb
+ - lib/scrapix/vbulletin.rb
+ - lib/scrapix/version.rb
+ - scrapix.gemspec
+ homepage: http://github.com/nikhgupta/scrapix
+ licenses:
+ - MIT
+ metadata: {}
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   requirements:
+   - - '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 2.0.0
+ signing_key:
+ specification_version: 4
+ summary: A gem that scrapes images from various sources and provides the results
+   in a neat way, which you can then use to download the images.
+ test_files: []