clauneck 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (4)
  1. checksums.yaml +7 -0
  2. data/bin/clauneck +5 -0
  3. data/lib/clauneck.rb +385 -0
  4. metadata +187 -0
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: a8882bf97614aaa5e88384c02e0665732c54ca69460e3bd52fbd179dc959c930
4
+ data.tar.gz: cf3cce6159b0add0a20b6cc5fe3a44786f71243d10c1640701a0e0b9b4483c82
5
+ SHA512:
6
+ metadata.gz: 61abc2edfb1eccf699b322c7e863e76806b32f1d996d803c60321393da35e8cd4487335ecc8200fe4e5ab0f7bb19d8063426506d1490c678df15a3fe20f60f78
7
+ data.tar.gz: 6b31c46281b27579c22fc61703e95066d8d7745141b0adc776cd9f57831742997c029f348bc2d25bc46eff7451899c777e51bd08b6a7e2426f25868dbdb66ea1
data/bin/clauneck ADDED
@@ -0,0 +1,5 @@
1
+ #!/usr/bin/env ruby
2
+
3
+ require_relative '../lib/clauneck'
4
+
5
+ Clauneck.run
data/lib/clauneck.rb ADDED
@@ -0,0 +1,385 @@
1
+ require 'faraday'
2
+ require 'json'
3
+ require 'optparse'
4
+ require 'concurrent'
5
+ require 'thread'
6
+ require 'csv'
7
+ require 'zlib'
8
+ require 'stringio'
9
+ require 'brotli'
10
+
11
+ USER_AGENTS = [
12
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36",
13
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36",
14
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
15
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36",
16
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36",
17
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36 OPR/97.0.0.0",
18
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36",
19
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 OPR/98.0.0.0",
20
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36",
21
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Safari/537.36 Edg/113.0.1774.42",
22
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36",
23
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36",
24
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
25
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1 Safari/605.1.15",
26
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Safari/605.1.15",
27
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.1 Safari/605.1.15",
28
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/15.3 Safari/605.1.15",
29
+ "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:109.0) Gecko/20100101 Firefox/114.0",
30
+ ]
31
+
32
+ MAXIMUM_RETRIES = 5
33
+
34
+ REGEXES = [
35
+ ["Email", /\b[A-Za-z0-9._%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,}\b/],
36
+ ["Instagram", /(?:www\.)?(?:instagram)\.com\/([a-zA-Z0-9_\-]+)/],
37
+ ["Facebook", /(?:www\.)?(?:instagram)\.com\/([a-zA-Z0-9_\-]+)/],
38
+ ["Twitter", /(?:www\.)?(?:twitter)\.com\/([a-zA-Z0-9_\-]+)/],
39
+ ["Tiktok", /(?:www\.)?(?:tiktok.com)\.com\/(@[a-zA-Z0-9_\-]+)/],
40
+ ["Youtube", /(?:www\.)?(?:youtube)\.com\/(channel\/[a-zA-Z0-9_\-]+)/],
41
+ ["Github", /(?:www\.)?(?:github)\.com\/([a-zA-Z0-9_\-]+)/],
42
+ ["Medium", /(?:www\.)?(?:medium)\.com\/([a-zA-Z0-9_\-]+)/],
43
+ ]
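+ # Each REGEXES entry pairs a label with the pattern that parse_for_information scans against a fetched page body.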
44
+
45
+ module Clauneck
46
+ class << self
47
+ def run(api_key: nil, proxy: nil, pages: nil, output: nil, google_url: nil, params: {}, urls: nil)
48
+ options = {
49
+ api_key: api_key,
50
+ proxy: proxy,
51
+ pages: pages,
52
+ output: output,
53
+ google_url: google_url,
54
+ params: params,
55
+ urls: urls
56
+ }
57
+
58
+ if options.values.all? { |v| v.nil? || (v.respond_to?(:empty?) && v.empty?) }
59
+ options = parse_options
60
+ else
61
+ options[:proxies] = get_proxies proxy
62
+ end
63
+
64
+ if options[:urls].nil? || options[:urls].empty?
65
+ pages = fetch_pages_via_serpapi(options[:api_key], options[:google_url], options[:params], options[:pages])
66
+ links = parse_pages(pages)
67
+ else
68
+ # Use existing URLs
69
+ links = get_urls(options[:urls])
70
+ end
71
+
72
+ fetch_and_write_information(links, options[:proxies], options[:output])
73
+ end
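+ # Example of calling the gem directly from Ruby (hypothetical values; assumes a valid SerpApi key):
+ #   Clauneck.run(api_key: 'YOUR_SERPAPI_KEY', params: { 'q' => 'site:example.com contact' }, pages: 2, output: 'leads.csv')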
74
+
75
+ private
76
+
77
+ def parse_options
78
+ options = { params: {} }
79
+ remaining = []
80
+
81
+ while arg = ARGV.shift
82
+ if arg == '--help'
83
+ show_help
84
+ exit
85
+ elsif arg.start_with?('--') && !known_option?(arg)
86
+ key, value = arg.split('=')
87
+ key.gsub!(/^-*/, '') # Remove leading dashes
88
+ if value.nil? # In case option passed as "--option value" instead of "--option=value"
89
+ value = ARGV.shift
90
+ end
91
+ options[:params][key] = value
92
+ else
93
+ remaining << arg
94
+ end
95
+ end
96
+
97
+ parser = OptionParser.new do |opts|
98
+ opts.on('--api_key API_KEY') { |v| options[:api_key] = v }
99
+ opts.on('--proxy PROXY') { |v| options[:proxy] = v }
100
+ opts.on('--pages PAGES') { |v| options[:pages] = v.to_i }
101
+ opts.on('--output OUTPUT') { |v| options[:output] = v }
102
+ opts.on('--google_url GOOGLE_URL') { |v| options[:google_url] = v }
103
+ opts.on('--urls URLS') { |v| options[:urls] = v }
104
+ opts.on('--help', 'Prints this help message') do
105
+ show_help(opts)
106
+ exit
107
+ end
108
+ end
109
+ parser.parse!(remaining)
110
+
111
+ options[:pages] ||= 1
112
+ options[:output] ||= 'output.csv'
113
+ options[:proxies] = get_proxies(options[:proxy])
114
+
115
+ if requirements(options)
116
+ options
117
+ else
118
+ puts <<-HELP
119
+ Warning: Use at least one of the required parameters. Use `clauneck --help` to get more information.
120
+ HELP
121
+ exit(1)
122
+ end
123
+ end
124
+
125
+ def known_option?(arg)
126
+ ['--api_key', '--proxy', '--pages', '--output', '--google_url', '--urls'].any? { |opt| arg.start_with?(opt) }
127
+ end
128
+
129
+ def requirements(options)
130
+ cond_1 = options[:api_key] || (options[:params] && options[:params].key?("api-key"))
131
+ cond_2 = options[:urls]
132
+ cond_1 || cond_2
133
+ end
134
+
135
+ def show_help(opts = nil)
136
+ puts opts if opts
137
+ puts <<-HELP
138
+ Usage: clauneck [options]
139
+
140
+ Options:
141
+ --api_key API_KEY Set the SerpApi API key (Required if you don't provide `--urls` option)
142
+ --proxy PROXY Set the proxy file or proxy url (Default: System IP)
143
+ --pages PAGES Set the number of pages to be gathered from Google using SerpApi (Default: 1)
144
+ --output OUTPUT Set the csv output file (Default: output.csv)
145
+ --google_url GOOGLE_URL Set the Google URL that contains the webpages you want to scrape
146
+ --urls URLS Set the URLs you want to scrape information from (Required if you don't provide `--api_key` option)
147
+ --help Prints this help message
148
+ HELP
149
+ exit(1)
150
+ end
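+ # Example CLI invocation (hypothetical query); unrecognized --flags such as --q are forwarded to SerpApi as search params:
+ #   clauneck --api_key YOUR_SERPAPI_KEY --q "site:example.com contact" --pages 2 --output leads.csv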
151
+
152
+ def get_urls(url)
153
+ return [] unless url
154
+ return url if url.is_a?(Array)
155
+ return File.readlines(url).map(&:strip) if url.end_with?('.txt')
156
+
157
+ [url]
158
+ end
159
+
160
+ def get_proxies(proxy)
161
+ return [] unless proxy
162
+ return proxy if proxy.is_a?(Array)
163
+ return File.readlines(proxy).map(&:strip) if proxy.end_with?('.txt')
164
+
165
+ [proxy]
166
+ end
167
+
168
+ def build_url(api_key, google_url, params, page)
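+ # Rewrite a google.com results URL into its serpapi.com equivalent (or fall back to the default endpoint) and append paging, api_key, no_cache and async params.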
169
+ num = (params['num'] || google_url&.scan(/num=(\d+)\&|num=(\d+)$/)&.dig(0, 0) || 100).to_i
170
+ base = google_url ? google_url.gsub('google', 'serpapi') : "https://serpapi.com/search"
171
+ params = params.merge({ start: page * num, num: num, api_key: api_key, no_cache: true, async: true })
172
+ "#{base}?#{URI.encode_www_form(params)}"
173
+ end
174
+
175
+ def fetch_pages_via_serpapi(api_key, google_url, params, pages)
176
+ pages = 1 if pages == nil
177
+ pool = Concurrent::FixedThreadPool.new(10)
178
+ futures = []
179
+
180
+ urls = (0...pages).map { |page| build_url(api_key, google_url, params, page) }
181
+
182
+ urls.each do |url|
183
+ futures << Concurrent::Future.execute(executor: pool) do
184
+ response = Faraday.get(url).body
185
+
186
+ begin
187
+ data = JSON.parse(response)
188
+ rescue JSON::ParserError => e
189
+ puts "Failed to parse JSON response from #{url} with error: #{e.message}"
190
+ next
191
+ end
192
+
193
+ while data["search_metadata"]["status"] == "Processing"
194
+ sleep(0.1)
195
+ endpoint = data["search_metadata"]["json_endpoint"]
196
+ response = Faraday.get(endpoint).body
197
+
198
+ begin
199
+ data = JSON.parse(response)
200
+ rescue JSON::ParserError => e
201
+ puts "Failed to parse JSON response from #{endpoint} with error: #{e.message}"
202
+ next
203
+ end
204
+ end
205
+
206
+ data
207
+ end
208
+ end
209
+
210
+ puts "Using SerpApi to collect webpages..."
211
+ futures.map(&:value)
212
+ end
213
+
214
+ def fetch_and_write_information(links, proxies, output)
215
+ csv_mutex = Mutex.new
216
+ total_links = links.size
217
+ processed_links = Concurrent::AtomicFixnum.new(0)
218
+
219
+ puts "Total links to process: #{total_links}"
220
+
221
+ output = "output.csv" if output == nil
222
+ CSV.open(output, 'w') do |csv|
223
+ links.each_slice(5) do |links_slice|
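+ # Process links in batches of five, one thread per link; writes to the shared CSV are serialized through csv_mutex.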
224
+ link_futures = []
225
+ links_slice.each do |link|
226
+ action = proc {
227
+ page_body = nil
228
+ retry_count = 0
229
+ link = link.sub(/^https:\/\//, 'http://')
230
+ while retry_count < MAXIMUM_RETRIES
231
+ proxies_cycle = proxies.cycle.take(1)
232
+ user_agents_cycle = USER_AGENTS.cycle.take(1)
233
+
234
+ begin
235
+ page_body = fetch_link(link, proxies_cycle, user_agents_cycle)
236
+ rescue => e
237
+ puts "Error: #{e.message}; #{processed_links.value} out of #{total_links}"
238
+ end
239
+
240
+ if page_body
241
+ break
242
+ else
243
+ retry_count += 1
244
+ puts "Retrying #{retry_count} times..."
245
+ end
246
+ end
247
+
248
+ information_arr, type_arr = parse_for_information(page_body)
249
+
250
+ domain = link&.scan(/http:\/\/webcache\.googleusercontent\.com\/search\?q=cache:.*:\/\/([^\/]+)/)&.dig(0,0)
251
+
252
+ csv_mutex.synchronize do
253
+ information_arr.each_with_index do |information, index|
254
+ csv << [domain, information, type_arr[index]]
255
+ csv.flush
256
+ end
257
+ end
258
+
259
+ processed_links.increment
260
+
261
+ if information_arr.all? { |element| element == "null" }
262
+ puts "Couldn't find information on the link: #{processed_links.value} out of #{total_links}"
263
+ elsif information_arr.all? { |element| element == "error" }
264
+ puts "There was an error in fetching the webcache: #{processed_links.value} out of #{total_links}"
265
+ else
266
+ puts "Processed link: #{processed_links.value} out of #{total_links}"
267
+ end
268
+ }
269
+
270
+ link_futures << Thread.new(&action)
271
+ end
272
+ link_futures.each(&:join)
273
+ end
274
+ end
275
+ end
276
+
277
+ def fetch_link(link, proxies_cycle, user_agents_cycle)
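+ # Fetch the page with a randomly picked User-Agent (and proxy, if any); returns the decoded body on HTTP 200, nil otherwise.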
278
+ user_agent = user_agents_cycle.sample
279
+ headers = {
280
+ 'User-Agent' => user_agent,
281
+ 'Accept-Encoding' => 'gzip, deflate, br',
282
+ 'Host' => 'webcache.googleusercontent.com'
283
+ }
284
+
285
+ if proxies_cycle && !proxies_cycle.empty?
286
+ proxy = proxies_cycle.sample
287
+ protocol, username, password, addr, port = parse_proxy(proxy)
288
+ f_proxy = Faraday::ProxyOptions.new(URI("#{protocol}://#{addr}:#{port}"), username, password)
289
+ conn = Faraday.new(url: link, ssl: { verify: false }, proxy: f_proxy) do |faraday|
290
+ faraday.headers = headers
291
+ faraday.options.timeout = 10
292
+ faraday.adapter Faraday.default_adapter
293
+ end
294
+ else
295
+ conn = Faraday.new(url: link, ssl: { verify: false }) do |faraday|
296
+ faraday.headers = headers
297
+ faraday.options.timeout = 5
298
+ faraday.adapter Faraday.default_adapter
299
+ end
300
+ end
301
+
302
+ response = conn.get
303
+ if response.status == 200
304
+ response_body = handle_compressed_response(response)
305
+ return response_body
306
+ end
307
+ end
308
+
309
+ def parse_pages(pages)
310
+ if pages && !pages.empty?
311
+ pages.flat_map do |page|
312
+ if page
313
+ page['organic_results']&.map { |r| r.dig('cached_page_link') }
314
+ else
315
+ puts <<-HELP
316
+ Warning: There's a problem connecting to SerpApi. Make sure you have used the correct API Key.
317
+ HELP
318
+ exit(1)
319
+ end
320
+ end.compact
321
+ else
322
+ puts <<-HELP
323
+ Warning: There's a problem connecting to SerpApi. Make sure you have used the correct API Key.
324
+ HELP
325
+ exit(1)
326
+ end
327
+ end
328
+
329
+ def parse_proxy(proxy)
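+ # Expects strings like 'http://user:pass@1.2.3.4:8080' or 'http://1.2.3.4:8080'; returns [protocol, username, password, addr, port].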
330
+ protocol, userinfo_and_hostinfo = proxy.split('://')
331
+ userinfo, hostinfo = userinfo_and_hostinfo.split('@')
332
+ username, password = userinfo.split(':')
333
+ addr, port = hostinfo&.split(':')
334
+ if addr == nil && port == nil
335
+ addr, port = username, password
336
+ username, password = nil, nil
337
+ end
338
+
339
+ [protocol, username, password, addr, port]
340
+ end
341
+
342
+ def parse_for_information(body)
343
+ information_arr = []
344
+ type_arr = []
345
+
346
+ REGEXES.each_with_index do |regex, index|
347
+ information = begin
348
+ body&.scan(regex[1])&.uniq&.compact&.flatten
349
+ rescue ArgumentError
350
+ nil
351
+ end
352
+ information.reject! {|item| item[/\.png|\.jpg|\.jpeg|\.gif|\.webp/]} if information
353
+ information = "error" if information == nil
354
+ information = "null" if information.empty?
355
+ information_arr << information
356
+ type = []
357
+ if information != "null" && information != "error"
358
+ information.each {|i| type << regex[0]}
359
+ else
360
+ type = regex[0]
361
+ end
362
+
363
+ type_arr << type
364
+ end
365
+
366
+ return information_arr.flatten, type_arr.flatten
367
+ end
368
+
369
+ def handle_compressed_response(response)
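+ # Decode br, gzip or deflate response bodies based on the Content-Encoding header; return the body unchanged otherwise.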
370
+ if response.headers['content-encoding'] == 'br'
371
+ body = Brotli.inflate(response.body)
372
+ elsif response.headers['content-encoding'] == 'gzip'
373
+ gzip_reader = Zlib::GzipReader.new(StringIO.new(response.body))
374
+ body = gzip_reader.read
375
+ gzip_reader.close
376
+ elsif response.headers['content-encoding'] == 'deflate'
377
+ body = Zlib::Inflate.inflate(response.body)
378
+ else
379
+ body = response.body
380
+ end
381
+
382
+ return body
383
+ end
384
+ end
385
+ end
metadata ADDED
@@ -0,0 +1,187 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: clauneck
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.0.1
5
+ platform: ruby
6
+ authors:
7
+ - Emirhan Akdeniz
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2023-07-05 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: faraday
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - "~>"
18
+ - !ruby/object:Gem::Version
19
+ version: '2.7'
20
+ - - ">="
21
+ - !ruby/object:Gem::Version
22
+ version: 2.7.9
23
+ type: :runtime
24
+ prerelease: false
25
+ version_requirements: !ruby/object:Gem::Requirement
26
+ requirements:
27
+ - - "~>"
28
+ - !ruby/object:Gem::Version
29
+ version: '2.7'
30
+ - - ">="
31
+ - !ruby/object:Gem::Version
32
+ version: 2.7.9
33
+ - !ruby/object:Gem::Dependency
34
+ name: json
35
+ requirement: !ruby/object:Gem::Requirement
36
+ requirements:
37
+ - - "~>"
38
+ - !ruby/object:Gem::Version
39
+ version: '2.6'
40
+ - - ">="
41
+ - !ruby/object:Gem::Version
42
+ version: 2.6.3
43
+ type: :runtime
44
+ prerelease: false
45
+ version_requirements: !ruby/object:Gem::Requirement
46
+ requirements:
47
+ - - "~>"
48
+ - !ruby/object:Gem::Version
49
+ version: '2.6'
50
+ - - ">="
51
+ - !ruby/object:Gem::Version
52
+ version: 2.6.3
53
+ - !ruby/object:Gem::Dependency
54
+ name: optparse
55
+ requirement: !ruby/object:Gem::Requirement
56
+ requirements:
57
+ - - "~>"
58
+ - !ruby/object:Gem::Version
59
+ version: 0.3.1
60
+ type: :runtime
61
+ prerelease: false
62
+ version_requirements: !ruby/object:Gem::Requirement
63
+ requirements:
64
+ - - "~>"
65
+ - !ruby/object:Gem::Version
66
+ version: 0.3.1
67
+ - !ruby/object:Gem::Dependency
68
+ name: concurrent-ruby
69
+ requirement: !ruby/object:Gem::Requirement
70
+ requirements:
71
+ - - "~>"
72
+ - !ruby/object:Gem::Version
73
+ version: '1.2'
74
+ - - ">="
75
+ - !ruby/object:Gem::Version
76
+ version: 1.2.2
77
+ type: :runtime
78
+ prerelease: false
79
+ version_requirements: !ruby/object:Gem::Requirement
80
+ requirements:
81
+ - - "~>"
82
+ - !ruby/object:Gem::Version
83
+ version: '1.2'
84
+ - - ">="
85
+ - !ruby/object:Gem::Version
86
+ version: 1.2.2
87
+ - !ruby/object:Gem::Dependency
88
+ name: csv
89
+ requirement: !ruby/object:Gem::Requirement
90
+ requirements:
91
+ - - "~>"
92
+ - !ruby/object:Gem::Version
93
+ version: '3.2'
94
+ - - ">="
95
+ - !ruby/object:Gem::Version
96
+ version: 3.2.7
97
+ type: :runtime
98
+ prerelease: false
99
+ version_requirements: !ruby/object:Gem::Requirement
100
+ requirements:
101
+ - - "~>"
102
+ - !ruby/object:Gem::Version
103
+ version: '3.2'
104
+ - - ">="
105
+ - !ruby/object:Gem::Version
106
+ version: 3.2.7
107
+ - !ruby/object:Gem::Dependency
108
+ name: zlib
109
+ requirement: !ruby/object:Gem::Requirement
110
+ requirements:
111
+ - - "~>"
112
+ - !ruby/object:Gem::Version
113
+ version: '3.0'
114
+ type: :runtime
115
+ prerelease: false
116
+ version_requirements: !ruby/object:Gem::Requirement
117
+ requirements:
118
+ - - "~>"
119
+ - !ruby/object:Gem::Version
120
+ version: '3.0'
121
+ - !ruby/object:Gem::Dependency
122
+ name: stringio
123
+ requirement: !ruby/object:Gem::Requirement
124
+ requirements:
125
+ - - "~>"
126
+ - !ruby/object:Gem::Version
127
+ version: '3.0'
128
+ - - ">="
129
+ - !ruby/object:Gem::Version
130
+ version: 3.0.7
131
+ type: :runtime
132
+ prerelease: false
133
+ version_requirements: !ruby/object:Gem::Requirement
134
+ requirements:
135
+ - - "~>"
136
+ - !ruby/object:Gem::Version
137
+ version: '3.0'
138
+ - - ">="
139
+ - !ruby/object:Gem::Version
140
+ version: 3.0.7
141
+ - !ruby/object:Gem::Dependency
142
+ name: brotli
143
+ requirement: !ruby/object:Gem::Requirement
144
+ requirements:
145
+ - - "~>"
146
+ - !ruby/object:Gem::Version
147
+ version: 0.4.0
148
+ type: :runtime
149
+ prerelease: false
150
+ version_requirements: !ruby/object:Gem::Requirement
151
+ requirements:
152
+ - - "~>"
153
+ - !ruby/object:Gem::Version
154
+ version: 0.4.0
155
+ description: A tool for scraping information from websites using Google Search Results.
156
+ email: kagermanovtalks@gmail.com
157
+ executables:
158
+ - clauneck
159
+ extensions: []
160
+ extra_rdoc_files: []
161
+ files:
162
+ - bin/clauneck
163
+ - lib/clauneck.rb
164
+ homepage: https://github.com/serpapi/clauneck
165
+ licenses:
166
+ - MIT
167
+ metadata: {}
168
+ post_install_message:
169
+ rdoc_options: []
170
+ require_paths:
171
+ - lib
172
+ required_ruby_version: !ruby/object:Gem::Requirement
173
+ requirements:
174
+ - - ">="
175
+ - !ruby/object:Gem::Version
176
+ version: '0'
177
+ required_rubygems_version: !ruby/object:Gem::Requirement
178
+ requirements:
179
+ - - ">="
180
+ - !ruby/object:Gem::Version
181
+ version: '0'
182
+ requirements: []
183
+ rubygems_version: 3.1.4
184
+ signing_key:
185
+ specification_version: 4
186
+ summary: Custom Lead Acquisition Using Next-Generation Extraction and Collection Kit
187
+ test_files: []