scraperwiki-api 0.0.1

data/.gitignore ADDED
@@ -0,0 +1,6 @@
+ *.gem
+ .bundle
+ .yardoc
+ Gemfile.lock
+ doc/*
+ pkg/*
data/Gemfile ADDED
@@ -0,0 +1,4 @@
+ source "http://rubygems.org"
+
+ # Specify your gem's dependencies in scraperwiki-api.gemspec
+ gemspec
data/LICENSE ADDED
@@ -0,0 +1,20 @@
+ Copyright (c) 2011 Open North Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+ LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.md ADDED
@@ -0,0 +1,37 @@
+ # The ScraperWiki API Ruby Gem
+
+ A Ruby wrapper for the ScraperWiki API.
+
+ ## Installation
+
+     gem install scraperwiki-api
+
+ ## Examples
+
+     >> require 'scraperwiki-api'
+
+     >> api = ScraperWiki::API.new 'my-api-key' # API key is optional
+
+     >> api.datastore_sqlite 'example-scraper', 'SELECT * FROM swdata LIMIT 10'
+     => [{"fieldA"=>"valueA", "fieldB"=>"valueB", "fieldC"=>"valueC"}, ...]
+
+     >> api.scraper_getinfo 'example-scraper'
+     => [{"code"=>"require 'nokogiri'\n...", "datasummary"=>...}]
+
+     >> api.scraper_getruninfo 'example-scraper'
+     => [{"run_ended"=>"1970-01-01T00:00:00", "first_url_scraped"=>...}]
+
+     >> api.scraper_getuserinfo 'johndoe'
+     => [{"username"=>"johndoe", "profilename"=>"John Doe", "coderoles"=>...}]
+
+     >> api.scraper_search searchquery: 'search terms'
+     => [{"description"=>"Scrapes websites for data.", "language"=>"ruby", ...}]
+
+     >> api.scraper_usersearch searchquery: 'search terms'
+     => [{"username"=>"johndoe", "profilename"=>"John Doe", "date_joined"=>...}]
+
+ ## Bugs? Questions?
+
+ This gem's main repository is on GitHub: [http://github.com/opennorth/scraperwiki-api-ruby](http://github.com/opennorth/scraperwiki-api-ruby), where your contributions, forks, bug reports, feature requests, and feedback are greatly welcomed.
+
+ Copyright (c) 2011 Open North Inc., released under the MIT license.
data/Rakefile ADDED
@@ -0,0 +1,16 @@
+ require 'bundler'
+ Bundler::GemHelper.install_tasks
+
+ require 'rspec/core/rake_task'
+ RSpec::Core::RakeTask.new(:spec)
+
+ task :default => :spec
+
+ begin
+   require 'yard'
+   YARD::Rake::YardocTask.new
+ rescue LoadError
+   task :yard do
+     abort 'YARD is not available. In order to run yard, you must: gem install yard'
+   end
+ end
data/USAGE ADDED
@@ -0,0 +1 @@
+ See README.md for full usage details.
data/lib/scraperwiki-api.rb ADDED
@@ -0,0 +1,306 @@
+ require 'httparty'
+ require 'scraperwiki-api/version'
+
+ module ScraperWiki
+   # A Ruby wrapper for the ScraperWiki API.
+   # @see https://scraperwiki.com/docs/api
+   class API
+     include HTTParty
+     base_uri 'api.scraperwiki.com/api/1.0'
+
+     class Error < StandardError; end
+     class ScraperNotFound < Error; end
+
+     # Initializes a ScraperWiki API object.
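+     #
+     # @example Create a client (per the README; the API key is optional):
+     #   api = ScraperWiki::API.new('my-api-key')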
+     def initialize(apikey = nil)
+       @apikey = apikey
+     end
+
+     # Queries and extracts data via a general purpose SQL interface.
+     #
+     # To make an RSS feed you need to use SQL's +AS+ keyword (e.g. "SELECT name
+     # AS description") to make columns called +title+, +link+, +description+,
+     # +guid+ (optional, uses link if not available) and +pubDate+ or +date+.
+     #
+     # +jsondict+ example output:
+     #
+     #   [
+     #     {
+     #       "fieldA": "valueA",
+     #       "fieldB": "valueB",
+     #       "fieldC": "valueC"
+     #     },
+     #     ...
+     #   ]
+     #
+     # +jsonlist+ example output:
+     #
+     #   {
+     #     "keys": ["fieldA", "fieldB", "fieldC"],
+     #     "data": [
+     #       ["valueA", "valueB", "valueC"],
+     #       ...
+     #     ]
+     #   }
+     #
+     # +csv+ example output:
+     #
+     #   fieldA,fieldB,fieldC
+     #   valueA,valueB,valueC
+     #   ...
+     #
+     # @param [String] shortname the scraper's shortname (as it appears in the URL)
+     # @param [String] query a SQL query
+     # @param [Hash] opts optional arguments
+     # @option opts [String] :format one of "jsondict", "jsonlist", "csv",
+     #   "htmltable" or "rss2"
+     # @option opts [String] :attach ";"-delimited list of shortnames of other
+     #   scrapers whose data you need to access
+     # @see https://scraperwiki.com/docs/ruby/ruby_help_documentation/
+     #
+     # @note The query string parameter is +name+, not +shortname+
+     #   {https://scraperwiki.com/docs/api#sqlite as documented}
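+     #
+     # @example Run a query against a scraper's datastore (per the README; "example-scraper" is a placeholder shortname):
+     #   api.datastore_sqlite('example-scraper', 'SELECT * FROM swdata LIMIT 10')
+     # @example Attach other scrapers' datastores (an illustrative sketch; an array is joined with ";" for you):
+     #   api.datastore_sqlite('example-scraper', 'SELECT * FROM swdata', attach: ['other-scraper', 'another-scraper'])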
+     def datastore_sqlite(shortname, query, opts = {})
+       if Array === opts[:attach]
+         opts[:attach] = opts[:attach].join ';'
+       end
+       request_with_apikey '/datastore/sqlite', {name: shortname, query: query}.merge(opts)
+     end
+
+     # Extracts data about a scraper's code, owner, history, etc.
+     #
+     # * The +runid+ is a Unix timestamp with microseconds and a UUID.
+     # * The value of +records+ is the same as that of +total_rows+ under +datasummary+.
+     # * +run_interval+ is the number of seconds between runs.
+     #
+     # Example output:
+     #
+     #   [
+     #     {
+     #       "code": "require 'nokogiri'\n...",
+     #       "datasummary": {
+     #         "tables": {
+     #           "swdata": {
+     #             "keys": [
+     #               "fieldA",
+     #               ...
+     #             ],
+     #             "count": 42,
+     #             "sql": "CREATE TABLE `swdata` (...)"
+     #           },
+     #           "swvariables": {
+     #             "keys": [
+     #               "value_blob",
+     #               "type",
+     #               "name"
+     #             ],
+     #             "count": 2,
+     #             "sql": "CREATE TABLE `swvariables` (`value_blob` blob, `type` text, `name` text)"
+     #           },
+     #           ...
+     #         },
+     #         "total_rows": 44,
+     #         "filesize": 1000000
+     #       },
+     #       "description": "Scrapes websites for data.",
+     #       "language": "ruby",
+     #       "title": "Example scraper",
+     #       "tags": [],
+     #       "short_name": "example-scraper",
+     #       "userroles": {
+     #         "owner": [
+     #           "johndoe"
+     #         ],
+     #         "editor": [
+     #           "janedoe",
+     #           ...
+     #         ]
+     #       },
+     #       "last_run": "1970-01-01T00:00:00",
+     #       "created": "1970-01-01T00:00:00",
+     #       "runevents": [
+     #         {
+     #           "still_running": false,
+     #           "pages_scraped": 5,
+     #           "run_started": "1970-01-01T00:00:00",
+     #           "last_update": "1970-01-01T00:00:00",
+     #           "runid": "1325394000.000000_xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx",
+     #           "records_produced": 42
+     #         },
+     #         ...
+     #       ],
+     #       "records": 44,
+     #       "wiki_type": "scraper",
+     #       "privacy_status": "visible",
+     #       "run_interval": 604800,
+     #       "attachable_here": [],
+     #       "attachables": [],
+     #       "history": [
+     #         ...,
+     #         {
+     #           "date": "1970-01-01T00:00:00",
+     #           "version": 0,
+     #           "user": "johndoe",
+     #           "session": "Thu, 1 Jan 1970 00:00:08 GMT"
+     #         }
+     #       ]
+     #     }
+     #   ]
+     #
+     # @param [String] shortname the scraper's shortname (as it appears in the URL)
+     # @param [Hash] opts optional arguments
+     # @option opts [String] :version version number (-1 for most recent) [default -1]
+     # @option opts [String] :history_start_date history and runevents are
+     #   restricted to this date or after, enter as YYYY-MM-DD
+     # @option opts [String] :quietfields "|"-delimited list of fields to exclude
+     #   from the output. Must be a subset of 'code|runevents|datasummary|userroles|history'
+     #
+     # @note Returns an array although the array seems to always have only one item
+     # @note The +tags+ field seems to always be an empty array
+     # @note The query string parameter is +name+, not +shortname+
+     #   {https://scraperwiki.com/docs/api#getinfo as documented}
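+     #
+     # @example Fetch a scraper's metadata (per the README; "example-scraper" is a placeholder shortname):
+     #   api.scraper_getinfo('example-scraper')
+     # @example Exclude bulky fields (an illustrative sketch; an array is joined with "|" for you):
+     #   api.scraper_getinfo('example-scraper', quietfields: ['code', 'history'])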
+     def scraper_getinfo(shortname, opts = {})
+       if Array === opts[:quietfields]
+         opts[:quietfields] = opts[:quietfields].join '|'
+       end
+       request_with_apikey '/scraper/getinfo', {name: shortname}.merge(opts)
+     end
+
+     # See what the scraper did during each run.
+     #
+     # Example output:
+     #
+     #   [
+     #     {
+     #       "run_ended": "1970-01-01T00:00:00",
+     #       "first_url_scraped": "http://www.iana.org/domains/example/",
+     #       "pages_scraped": 5,
+     #       "run_started": "1970-01-01T00:00:00",
+     #       "runid": "1325394000.000000_xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx",
+     #       "domainsscraped": [
+     #         {
+     #           "domain": "http://example.com",
+     #           "bytes": 1000000,
+     #           "pages": 5
+     #         },
+     #         ...
+     #       ],
+     #       "output": "...",
+     #       "records_produced": 42
+     #     }
+     #   ]
+     #
+     # @param [String] shortname the scraper's shortname (as it appears in the URL)
+     # @param [Hash] opts optional arguments
+     # @option opts [String] :runid a run ID
+     #
+     # @note Returns an array although the array seems to always have only one item
+     # @note The query string parameter is +name+, not +shortname+
+     #   {https://scraperwiki.com/docs/api#getinfo as documented}
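+     #
+     # @example Get run details (per the README; "example-scraper" is a placeholder shortname):
+     #   api.scraper_getruninfo('example-scraper')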
+     def scraper_getruninfo(shortname, opts = {})
+       request_with_apikey '/scraper/getruninfo', {name: shortname}.merge(opts)
+     end
+
+     # Find out information about a user.
+     #
+     # Example output:
+     #
+     #   [
+     #     {
+     #       "username": "johndoe",
+     #       "profilename": "John Doe",
+     #       "coderoles": {
+     #         "owner": [
+     #           "johndoe.emailer",
+     #           "example-scraper",
+     #           ...
+     #         ],
+     #         "email": [
+     #           "johndoe.emailer"
+     #         ],
+     #         "editor": [
+     #           "yet-another-scraper",
+     #           ...
+     #         ]
+     #       },
+     #       "datejoined": "1970-01-01T00:00:00"
+     #     }
+     #   ]
+     #
+     # @param [String] username a username
+     #
+     # @note Returns an array although the array seems to always have only one item
+     # @note The date joined field is +date_joined+ (with underscore) on
+     #   {#scraper_usersearch}
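+     #
+     # @example Look up a user (per the README; "johndoe" is a placeholder username):
+     #   api.scraper_getuserinfo('johndoe')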
+     def scraper_getuserinfo(username)
+       request_with_apikey '/scraper/getuserinfo', username: username
+     end
+
+     # Search the titles and descriptions of all the scrapers.
+     #
+     # Example output:
+     #
+     #   [
+     #     {
+     #       "description": "Scrapes websites for data.",
+     #       "language": "ruby",
+     #       "created": "1970-01-01T00:00:00",
+     #       "title": "Example scraper",
+     #       "short_name": "example-scraper",
+     #       "privacy_status": "public"
+     #     },
+     #     ...
+     #   ]
+     #
+     # @param [Hash] opts optional arguments
+     # @option opts [String] :searchquery search terms
+     # @option opts [Integer] :maxrows number of results to return [default 5]
+     # @option opts [String] :requestinguser the name of the user making the
+     #   search, which changes the order of the matches
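+     #
+     # @example Search scrapers (per the README; the search terms are placeholders):
+     #   api.scraper_search(searchquery: 'search terms')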
+     def scraper_search(opts = {})
+       request_with_apikey '/scraper/search', opts
+     end
+
+     # Search for a user by name.
+     #
+     # Example output:
+     #
+     #   [
+     #     {
+     #       "username": "johndoe",
+     #       "profilename": "John Doe",
+     #       "date_joined": "1970-01-01T00:00:00"
+     #     },
+     #     ...
+     #   ]
+     #
+     # @param [Hash] opts optional arguments
+     # @option opts [String] :searchquery search terms
+     # @option opts [Integer] :maxrows number of results to return [default 5]
+     # @option opts [String] :nolist space-separated list of usernames to exclude
+     #   from the output
+     # @option opts [String] :requestinguser the name of the user making the
+     #   search, which changes the order of the matches
+     #
+     # @note The date joined field is +datejoined+ (without underscore) on
+     #   {#scraper_getuserinfo}
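+     #
+     # @example Search users (per the README; the search terms are placeholders):
+     #   api.scraper_usersearch(searchquery: 'search terms')
+     # @example Exclude usernames (an illustrative sketch; an array is joined with spaces for you):
+     #   api.scraper_usersearch(searchquery: 'search terms', nolist: ['johndoe', 'janedoe'])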
+     def scraper_usersearch(opts = {})
+       if Array === opts[:nolist]
+         opts[:nolist] = opts[:nolist].join ' '
+       end
+       request '/scraper/usersearch', opts
+     end
+
+     private
+
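+     # Adds the API key, if one was given to {#initialize}, to the query
+     # string parameters before delegating to {#request}.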
+     def request_with_apikey(path, opts = {})
+       if @apikey
+         opts[:apikey] = @apikey
+       end
+       request path, opts
+     end
+
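+     # Performs a GET request against the ScraperWiki API and returns
+     # HTTParty's parsed response (a hash or array for JSON responses).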
+     def request(path, opts)
+       self.class.get(path, query: opts).parsed_response
+     end
+   end
+ end
data/lib/scraperwiki-api/version.rb ADDED
@@ -0,0 +1,5 @@
+ module ScraperWiki
+   class API
+     VERSION = "0.0.1"
+   end
+ end
data/scraperwiki-api.gemspec ADDED
@@ -0,0 +1,22 @@
+ # -*- encoding: utf-8 -*-
+ $:.push File.expand_path("../lib", __FILE__)
+ require "scraperwiki-api/version"
+
+ Gem::Specification.new do |s|
+   s.name        = "scraperwiki-api"
+   s.version     = ScraperWiki::API::VERSION
+   s.platform    = Gem::Platform::RUBY
+   s.authors     = ["Open North"]
+   s.email       = ["info@opennorth.ca"]
+   s.homepage    = "http://github.com/opennorth/scraperwiki-api-ruby"
+   s.summary     = %q{The ScraperWiki API Ruby Gem}
+   s.description = %q{A Ruby wrapper for the ScraperWiki API}
+
+   s.files         = `git ls-files`.split("\n")
+   s.test_files    = `git ls-files -- {test,spec,features}/*`.split("\n")
+   s.executables   = `git ls-files -- bin/*`.split("\n").map{ |f| File.basename(f) }
+   s.require_paths = ["lib"]
+
+   s.add_runtime_dependency('httparty', '~> 0.7.8')
+   s.add_development_dependency('rspec', '~> 2.6.0')
+ end
data/spec/scraperwiki-api_spec.rb ADDED
@@ -0,0 +1,138 @@
+ require File.expand_path(File.dirname(__FILE__) + '/spec_helper')
+
+ require 'time'
+
+ class ScraperWiki::API
+   # We don't want to test the ScraperWiki API itself. We just want to check
+   # that the wrapper works.
+   describe ScraperWiki::API do
+     EXAMPLE_SHORTNAME = 'frabcus.emailer'
+     EXAMPLE_USERNAME = 'frabcus'
+     QUIETFIELDS = %w(code runevents datasummary userroles history)
+
+     before :all do
+       @api = ScraperWiki::API.new
+     end
+
+     describe '#datastore_sqlite' do
+       # @todo
+     end
+
+     describe '#scraper_getinfo' do
+       it 'should return a non-empty array containing a single hash' do
+         response = @api.scraper_getinfo EXAMPLE_SHORTNAME
+         response.should be_an(Array)
+         response.size.should == 1
+         response.first.should be_a(Hash)
+       end
+
+       it 'should respect the :version argument' do
+         bare = @api.scraper_getinfo(EXAMPLE_SHORTNAME).first
+         bare.should_not have_key('currcommit')
+         result = @api.scraper_getinfo(EXAMPLE_SHORTNAME, version: 1).first
+         result.should have_key('currcommit')
+         result['code'].should_not == bare['code']
+       end
+
+       it 'should respect the :history_start_date argument' do
+         bare = @api.scraper_getinfo(EXAMPLE_SHORTNAME).first
+         bare['history'].size.should be > 1
+         history_start_date = bare['history'][0]['date'][0..9]
+         result = @api.scraper_getinfo(EXAMPLE_SHORTNAME, history_start_date: history_start_date).first
+         result['history'].size.should == 1
+       end
+
+       it 'should respect the :quietfields argument (as an array)' do
+         result = @api.scraper_getinfo(EXAMPLE_SHORTNAME, quietfields: QUIETFIELDS).first
+         QUIETFIELDS.each do |key|
+           result.should_not have_key(key)
+         end
+       end
+
+       it 'should respect the :quietfields argument (as a string)' do
+         result = @api.scraper_getinfo(EXAMPLE_SHORTNAME, quietfields: QUIETFIELDS.join('|')).first
+         QUIETFIELDS.each do |key|
+           result.should_not have_key(key)
+         end
+       end
+     end
+
+     describe '#scraper_getruninfo' do
+       it 'should return a non-empty array containing a single hash' do
+         response = @api.scraper_getruninfo EXAMPLE_SHORTNAME
+         response.should be_an(Array)
+         response.size.should == 1
+         response.first.should be_a(Hash)
+       end
+
+       it 'should respect the :runid argument' do
+         runevents = @api.scraper_getinfo(EXAMPLE_SHORTNAME).first['runevents']
+         bare = @api.scraper_getruninfo(EXAMPLE_SHORTNAME).first
+         bare['runid'].should == runevents.first['runid']
+         response = @api.scraper_getruninfo(EXAMPLE_SHORTNAME, runid: runevents.last['runid']).first
+         response['runid'].should_not == bare['runid']
+       end
+     end
+
+     describe '#scraper_getuserinfo' do
+       it 'should return a non-empty array containing a single hash' do
+         response = @api.scraper_getuserinfo EXAMPLE_USERNAME
+         response.should be_an(Array)
+         response.size.should == 1
+         response.first.should be_a(Hash)
+       end
+     end
+
+     describe '#scraper_search' do
+       it 'should return a non-empty array of hashes' do
+         response = @api.scraper_search
+         response.should be_an(Array)
+         response.size.should_not be_zero
+         response.first.should be_a(Hash)
+       end
+
+       it 'should respect the :searchquery argument' do
+         @api.scraper_search(searchquery: EXAMPLE_SHORTNAME).find{|result|
+           result['short_name'] == EXAMPLE_SHORTNAME
+         }.should_not be_nil
+       end
+
+       it 'should respect the :maxrows argument' do
+         @api.scraper_search(maxrows: 1).size.should == 1
+       end
+     end
+
+     describe '#scraper_usersearch' do
+       it 'should return a non-empty array of hashes' do
+         response = @api.scraper_usersearch
+         response.should be_an(Array)
+         response.size.should_not be_zero
+         response.first.should be_a(Hash)
+       end
+
+       it 'should respect the :searchquery argument' do
+         @api.scraper_usersearch(searchquery: EXAMPLE_USERNAME).find{|result|
+           result['username'] == EXAMPLE_USERNAME
+         }.should_not be_nil
+       end
+
+       it 'should respect the :maxrows argument' do
+         @api.scraper_usersearch(maxrows: 1).size.should == 1
+       end
+
+       it 'should respect the :nolist argument (as an array)' do
+         usernames = @api.scraper_usersearch.map{|result| result['username']}
+         @api.scraper_usersearch(nolist: usernames).find{|result|
+           usernames.include? result['username']
+         }.should be_nil
+       end
+
+       it 'should respect the :nolist argument (as a string)' do
+         usernames = @api.scraper_usersearch.map{|result| result['username']}
+         @api.scraper_usersearch(nolist: usernames.join(' ')).find{|result|
+           usernames.include? result['username']
+         }.should be_nil
+       end
+     end
+   end
+ end
data/spec/spec.opts ADDED
@@ -0,0 +1,5 @@
+ --colour
+ --format nested
+ --loadby mtime
+ --reverse
+ --backtrace
data/spec/spec_helper.rb ADDED
@@ -0,0 +1,3 @@
+ require 'rubygems'
+ require 'rspec'
+ require File.dirname(__FILE__) + '/../lib/scraperwiki-api'
metadata ADDED
@@ -0,0 +1,82 @@
+ --- !ruby/object:Gem::Specification
+ name: scraperwiki-api
+ version: !ruby/object:Gem::Version
+   version: 0.0.1
+ prerelease:
+ platform: ruby
+ authors:
+ - Open North
+ autorequire:
+ bindir: bin
+ cert_chain: []
+ date: 2012-05-26 00:00:00.000000000 Z
+ dependencies:
+ - !ruby/object:Gem::Dependency
+   name: httparty
+   requirement: &70190599887060 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 0.7.8
+   type: :runtime
+   prerelease: false
+   version_requirements: *70190599887060
+ - !ruby/object:Gem::Dependency
+   name: rspec
+   requirement: &70190599883440 !ruby/object:Gem::Requirement
+     none: false
+     requirements:
+     - - ~>
+       - !ruby/object:Gem::Version
+         version: 2.6.0
+   type: :development
+   prerelease: false
+   version_requirements: *70190599883440
+ description: A Ruby wrapper for the ScraperWiki API
+ email:
+ - info@opennorth.ca
+ executables: []
+ extensions: []
+ extra_rdoc_files: []
+ files:
+ - .gitignore
+ - Gemfile
+ - LICENSE
+ - README.md
+ - Rakefile
+ - USAGE
+ - lib/scraperwiki-api.rb
+ - lib/scraperwiki-api/version.rb
+ - scraperwiki-api.gemspec
+ - spec/scraperwiki-api_spec.rb
+ - spec/spec.opts
+ - spec/spec_helper.rb
+ homepage: http://github.com/opennorth/scraperwiki-api-ruby
+ licenses: []
+ post_install_message:
+ rdoc_options: []
+ require_paths:
+ - lib
+ required_ruby_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ required_rubygems_version: !ruby/object:Gem::Requirement
+   none: false
+   requirements:
+   - - ! '>='
+     - !ruby/object:Gem::Version
+       version: '0'
+ requirements: []
+ rubyforge_project:
+ rubygems_version: 1.8.12
+ signing_key:
+ specification_version: 3
+ summary: The ScraperWiki API Ruby Gem
+ test_files:
+ - spec/scraperwiki-api_spec.rb
+ - spec/spec.opts
+ - spec/spec_helper.rb