youtubescraper 0.0.7
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/CHANGELOG +20 -0
- data/MIT-LICENSE +20 -0
- data/README +23 -0
- data/lib/youtube/browsescraper.rb +260 -0
- data/lib/youtube/searchresultscraper.rb +263 -0
- data/lib/youtube/searchresultscraper.rb~ +263 -0
- data/lib/youtube/video.rb +62 -0
- data/test/html/dataN_noMsgN.htm +387 -0
- data/test/html/dataY_noMsgY.htm +1507 -0
- data/test/html/scraping_error.html +1503 -0
- data/test/youtube_scraper_test.rb +89 -0
- data/test/youtube_scraper_test.rb~ +92 -0
- metadata +69 -0
data/CHANGELOG
ADDED
@@ -0,0 +1,20 @@
0.0.1 2006-11-26
  First public release

0.0.2 2006-12-03
  Add rdoc.
  Add each() method into YouTube::SearchResultScraper

0.0.3 2006-12-22
  Add error check.
  Add attribute for video_count, video_from, video_to

0.0.4 2007-02-01
  Add error check for scraping of pagination.
  Fix scraping rule for html markup change of youtube.

0.0.5 2007-02-16
  Fix scraping rule for video_count, video_from, video_to

0.0.6 2007-02-16
  Fix error handling for video_count, video_from, video_to

data/MIT-LICENSE
ADDED
@@ -0,0 +1,20 @@
Copyright (C) 2006 by in3c.org

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

data/README
ADDED
@@ -0,0 +1,23 @@
Introduction

Youtube::SearchResultScraper scrapes video information from the search result page on www.youtube.com.
You can get the result as an array or as XML.
The XML format is the same as the YouTube Developer API (www.youtube.com/dev_api_ref?m=youtube.videos.list_by_tag).

Example

  require "rubygems"
  require "youtube/searchresultscraper"

  scraper = Youtube::SearchResultScraper.new(keyword, page)
  scraper.open
  scraper.scrape
  puts scraper.get_xml

More Information

http://www.ark-web.jp/sandbox/wiki/184.html (Japanese only)

Author: Yuki SHIDA, shida@in3c.org
Version: 0.0.2
License: MIT license

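The README only shows the XML output path. For completeness, a minimal sketch of the array-style usage, assuming Youtube::Video (data/lib/youtube/video.rb) exposes readers for the fields that SearchResultScraper assigns (title, view_count, url):

  require "rubygems"
  require "youtube/searchresultscraper"

  scraper = Youtube::SearchResultScraper.new("ruby", 1)  # UTF-8 keyword, page 1
  scraper.open                                           # fetch the result page
  videos = scraper.scrape                                # returns an array of Youtube::Video
  scraper.each do |video|                                # each() was added in 0.0.2
    puts "#{video.title} (#{video.view_count} views) #{video.url}"
  end
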
data/lib/youtube/browsescraper.rb
ADDED
@@ -0,0 +1,260 @@
#--
# Copyright (C) 2006 by in3c.org, ARK-Web co., ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#++
# :main:Youtube::BrowseScraper
# :title:Youtube::BrowseScraper RDoc Documentation

require 'open-uri'
require 'cgi'
require 'rubygems'
require 'hpricot'
require 'youtube/video'

module Youtube #:nodoc:
  # = Introduction
  # Youtube::BrowseScraper scrapes video information from the browse result pages
  # on http://www.youtube.com.
  #
  # You can get the result as an array or as XML.
  #
  # The XML format is the same as the YouTube Developer API
  # (http://www.youtube.com/dev_api_ref?m=youtube.videos.list_by_tag).
  #
  # = Example
  #   require "rubygems"
  #   require "youtube/browsescraper"
  #
  #   scraper = Youtube::BrowseScraper.new(browse, time, category, language, page)
  #   scraper.open
  #   data = scraper.scrape
  #   p data
  #
  # = More Information
  #
  #
  # Author:: Syuichi Kohata <sgkohata@gmail.com>
  # Version:: 0.0.1
  # License:: MIT license

  class BrowseScraper
    # constants for browse parameter (default MostRecent)
    MostRecent       = 'mr'
    MostViewed       = 'mp'
    TopRated         = 'tr'
    MostDiscussed    = 'md'
    TopFavorites     = 'mf'
    MostLinked       = 'mrd'
    RecentryFeatured = 'rf'
    MostResponded    = 'ms'
    WatchOnMobile    = 'mv'
    BrowseArray = [MostRecent,
                   MostViewed,
                   TopRated,
                   MostDiscussed,
                   TopFavorites,
                   MostLinked,
                   RecentryFeatured,
                   MostResponded,
                   WatchOnMobile]

    # constants for time parameter (default Today)
    Today     = 't'
    ThisWeek  = 'w'
    ThisMonth = 'm'
    All       = 'a'
    TimeArray = [Today,
                 ThisWeek,
                 ThisMonth,
                 All]

    # constants for category parameter (default 0)
    AllCategory   = 0
    AutosVehicles = 2
    Comedy        = 23
    Entertainment = 24
    FilmAnimation = 1
    GadgetsGames  = 20
    HowtoDIY      = 26
    Music         = 10
    NewsPolitics  = 25
    PeopleBlogs   = 22
    PetsAnimals   = 15
    Sports        = 17
    TravelPlaces  = 19

    # constants for language parameter (default '')
    AllLanguage = ''
    English     = 'EN'
    Spanish     = 'ES'
    Japanese    = 'JP'
    German      = 'DE'
    Chinese     = 'CN'
    French      = 'FR'
    LanguageArray = [AllLanguage,
                     English,
                     Spanish,
                     Japanese,
                     German,
                     Chinese,
                     French]

    attr_accessor :browse
    attr_accessor :time
    attr_accessor :category
    attr_accessor :language
    attr_accessor :page
    attr_reader :video_count
    attr_reader :video_from
    attr_reader :video_to

    @@youtube_search_base_url = 'http://www.youtube.com/browse'

    # Create Youtube::BrowseScraper object
    # (every parameter has a default value).
    #
    # You cannot specify the number of videos per page;
    # it is always 20 videos per page.
    def initialize browse = MostRecent, time = Today, category = AllCategory, language = AllLanguage, page = 1
      @browse   = browse
      @time     = time
      @category = category
      @language = language
      @page     = page

      errors = []
      errors << "browse"   if BrowseArray.index(@browse) == nil
      errors << "time"     if TimeArray.index(@time) == nil
      errors << "language" if LanguageArray.index(@language) == nil
      unless errors.empty? then
        error_msg = "parameter error occurred.\n"
        errors.each do |error|
          error_msg << error + " is invalid.\n"
        end
        raise error_msg
      end
    end

    # Get the browse result page from youtube for the configured parameters.
    def open
      @url = @@youtube_search_base_url
      @url += "?s=#{@browse}"
      @url += "&t=#{@time}"
      @url += "&c=#{@category}"
      @url += "&l=#{@language}"
      @url += "&p=#{@page}"
      @html = Kernel.open(@url).read
      @search_result = Hpricot.parse(@html)
    end

    # Scrape video information from the browse result html.
    def scrape
      @videos = []
      @video_count = 0
      @search_result.search('//div[@class="v120vEntry"]').each do |video_html|
        video = Youtube::Video.new

        video.id             = scrape_id(video_html)
        video.author         = scrape_author(video_html)
        video.title          = scrape_title(video_html)
        video.length_seconds = scrape_length_seconds(video_html)
        video.rating_avg     = scrape_rating_avg(video_html)
        video.view_count     = scrape_view_count(video_html)
        video.thumbnail_url  = scrape_thumbnail_url(video_html)

        check_video video

        @videos << video
        @video_count += 1
      end
      @videos
    end

    # Return videos information as XML Format.
    def get_xml
      # not implemented in this version
    end

    def replace_document_write_javascript
      @html.gsub!(%r{<script language="javascript" type="text/javascript">.*?document.write\('(.*?)'\).*?</script>}m, '\1')
    end

    def scrape_id video_html
      scrape_thumbnail_url(video_html).sub(%r{.*/([^/]+)/[^/]+.jpg}, '\1')
    end

    def scrape_thumbnail_url video_html
      video_html.search("img[@class='vimg120']").to_html.sub(/.*src="(.*?)".*/, '\1')
    end

    def scrape_title video_html
      video_html.search('div[@class="vtitle"]/a').inner_html
    end

    def scrape_length_seconds video_html
      length_seconds = video_html.search("span[@class='runtime']").inner_html
      length_seconds =~ /(\d\d):(\d\d)/
      $1.to_i * 60 + $2.to_i
    end

    def scrape_rating_avg video_html
      video_html.search("img[@src='/img/icn_star_full_11x11.gif']").size +
        video_html.search("img[@src='/img/icn_star_half_11x11.gif']").size * 0.5
    end

    # NOTE: this second definition overrides the 'vimg120' variant above.
    def scrape_thumbnail_url video_html
      video_html.search("img[@class=' vimg ']").to_html.sub(/.*src="(.*?)".*/, '\1')
    end

    def scrape_author video_html
      video_html.search("div[@class='vfacets']").inner_html.sub(/.*From:<\/span> <a.*?>(.*?)<\/a>.*/m, '\1')
    end

    def scrape_view_count video_html
      @num = video_html.search("div[@class='vfacets']").inner_html.sub(/.*Views:<\/span> ([\d,]+).*/m, '\1')
      @num.gsub(/,/, '').to_i
    end

    def check_video video
      errors = []

      errors << "id"             if video.id.empty?
      errors << "author"         if video.author.empty?
      errors << "title"          if video.title.empty?
      errors << "length_seconds" if video.length_seconds.to_s.empty?
      errors << "thumbnail_url"  if video.thumbnail_url.empty?

      unless errors.empty? then
        error_msg = "scraping error occurred.\n"
        errors.each do |error|
          error_msg << error + " is not set.\n"
        end
        raise error_msg
      end
    end

    def each
      @videos.each do |video|
        yield video
      end
    end

  end
end

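The constants above map one-to-one onto the query parameters assembled in open(). A usage sketch, again assuming Youtube::Video provides readers for the fields set in scrape():

  require "rubygems"
  require "youtube/browsescraper"

  # most viewed Music videos of this week, first result page
  scraper = Youtube::BrowseScraper.new(Youtube::BrowseScraper::MostViewed,
                                       Youtube::BrowseScraper::ThisWeek,
                                       Youtube::BrowseScraper::Music,
                                       Youtube::BrowseScraper::AllLanguage,
                                       1)
  scraper.open
  scraper.scrape.each do |video|
    puts "#{video.title}: #{video.length_seconds}s, #{video.view_count} views"
  end
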
data/lib/youtube/searchresultscraper.rb
ADDED
@@ -0,0 +1,263 @@
#--
# Copyright (C) 2006 by in3c.org, ARK-Web co., ltd
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#++
# :main:Youtube::SearchResultScraper
# :title:Youtube::SearchResultScraper RDoc Documentation

require 'open-uri'
require 'cgi'
require 'rubygems'
require 'hpricot'
require 'youtube/video'

module Youtube #:nodoc:

  # = Introduction
  # Youtube::SearchResultScraper scrapes video information from the search result page
  # on http://www.youtube.com.
  #
  # You can get the result as an array or as XML.
  #
  # The XML format is the same as the YouTube Developer API
  # (http://www.youtube.com/dev_api_ref?m=youtube.videos.list_by_tag).
  #
  # = Example
  #   require "rubygems"
  #   require "youtube/searchresultscraper"
  #
  #   scraper = Youtube::SearchResultScraper.new(keyword, page)
  #   scraper.open
  #   scraper.scrape
  #   puts scraper.get_xml
  #
  # = More Information
  # http://www.ark-web.jp/sandbox/wiki/184.html (Japanese only)
  #
  # Author:: Yuki SHIDA <shida@in3c.org>
  # Author:: Konuma Akio <konuma@ark-web.jp>
  # Version:: 0.0.3
  # License:: MIT license

  class SearchResultScraper

    attr_accessor :keyword
    attr_accessor :page
    attr_reader :video_count
    attr_reader :video_from
    attr_reader :video_to

    @@youtube_search_base_url = "http://www.youtube.com/results?search_query="

    # Create Youtube::SearchResultScraper object, specifying the keyword and the page number.
    #
    # You cannot specify the number of videos per page;
    # it is always 20 videos per page.
    #
    # * keyword - the keyword you want to search for on YouTube.
    #             It must be encoded in UTF-8.
    # * page    - the page number of the result set

    def initialize keyword, page=nil
      @keyword = keyword
      @page = page if not page == nil
    end

    # Get search result from youtube by specified keyword.
    def open
      @url = @@youtube_search_base_url + CGI.escape(@keyword)
      @url += "&page=#{@page}" if not @page == nil
      @html = Kernel.open(@url).read
      replace_document_write_javascript
      @search_result = Hpricot.parse(@html)
    end

    # Scrape video information from search result html.
    def scrape
      @videos = []

      @search_result.search("//div[@class='vEntry']").each do |video_html|
        video = Youtube::Video.new
        video.id             = scrape_id(video_html)
        video.author         = scrape_author(video_html)
        video.title          = scrape_title(video_html)
        video.length_seconds = scrape_length_seconds(video_html)
        video.rating_avg     = scrape_rating_avg(video_html)
        video.rating_count   = scrape_rating_count(video_html)
        video.description    = scrape_description(video_html)
        video.view_count     = scrape_view_count(video_html)
        video.thumbnail_url  = scrape_thumbnail_url(video_html)
        video.tags           = scrape_tags(video_html)
        video.url            = scrape_url(video_html)

        check_video video

        @videos << video
      end

      @video_count = scrape_video_count
      @video_from  = scrape_video_from
      @video_to    = scrape_video_to

      raise "scraping error" if (is_no_result != @videos.empty?)

      @videos
    end

    # Iterator for scraped videos.
    def each
      @videos.each do |video|
        yield video
      end
    end

    # Return videos information as XML Format.
    def get_xml
      xml = "<ut_response status=\"ok\">" +
            "<video_count>" + @video_count.to_s + "</video_count>" +
            "<video_list>\n"
      each do |video|
        xml += video.to_xml
      end
      xml += "</video_list></ut_response>"
    end

    private

    def replace_document_write_javascript
      @html.gsub!(%r{<script language="javascript" type="text/javascript">.*?document.write\('(.*?)'\).*?</script>}m, '\1')
    end

    def scrape_id video_html
      scrape_thumbnail_url(video_html).sub(%r{.*/([^/]+)/[^/]+.jpg}, '\1')
    end

    def scrape_author video_html
      video_html.search("div[@class='vfacets']").inner_html.sub(/.*From:<\/span> <a.*?>(.*?)<\/a>.*/m, '\1')
    end

    def scrape_title video_html
      video_html.search("div[@class='vtitle']/a").inner_html
    end

    def scrape_length_seconds video_html
      length_seconds = video_html.search("span[@class='runtime']").inner_html
      length_seconds =~ /(\d\d):(\d\d)/
      $1.to_i * 60 + $2.to_i
    end

    def scrape_rating_avg video_html
      video_html.search("img[@src='/img/star_sm.gif']").size +
        video_html.search("img[@src='/img/star_sm_half.gif']").size * 0.5
    end

    def scrape_rating_count video_html
      video_html.search("div[@class='rating']").inner_html.sub(/(\d+) rating/, '\1').to_i
    end

    def scrape_description video_html
      description = video_html.search("div[@class='vdesc']/span").inner_html.sub(/^\n\t(.*?)\n\t$/m, '\1')
    end

    def scrape_view_count video_html
      video_html.search("div[@class='vfacets']").inner_html.sub(/.*Views:<\/span> (\d+).*/m, '\1').to_i
    end

    def scrape_tags video_html
      tags = []
      video_html.search("div[@class='vtagValue']/a").each do |tag|
        tags << tag.inner_html
      end
      tags.join(" ")
    end

    def scrape_thumbnail_url video_html
      video_html.search("img[@class='vimg120']").to_html.sub(/.*src="(.*?)".*/, '\1')
    end

    def scrape_url video_html
      "http://www.youtube.com" +
        video_html.search("div[@class='vtitle']/a").to_html.sub(/.*href="(.*?)".*/m, '\1')
    end

    def scrape_result_header
      @search_result.search("div[@id='sectionHeader']").inner_html
    end

    def scrape_video_count
      video_count = scrape_result_header
      unless video_count.sub!(/.+Results \d+-\d+ of\s*(|about )([0-9,]+)/m, '\2')
        raise "no video count: " + @url unless is_no_result
      end
      video_count.gsub!(/,/, '')
      video_count.to_i
    end

    def scrape_video_from
      video_from = scrape_result_header
      unless video_from.sub!(/.+Results (\d+)/m, '\1')
        raise "no video from: " + @url unless is_no_result
      end
      video_from.to_i
    end

    def scrape_video_to
      video_to = scrape_result_header
      unless video_to.sub!(/.+Results \d+-(\d+)/m, '\1')
        raise "no video to: " + @url unless is_no_result
      end
      video_to.to_i
    end

    def is_no_result
      if @is_no_result == nil
        @is_no_result = @html.include?('No Videos found')
      end
      @is_no_result
    end

    def check_video video
      errors = []

      errors << "author"         if video.author.empty?
      errors << "id"             if video.id.empty?
      errors << "title"          if video.title.empty?
      errors << "length_seconds" if video.length_seconds.to_s.empty?
      errors << "rating_avg"     if video.rating_avg.to_s.empty?
      errors << "rating_count"   if video.rating_count.to_s.empty?
      errors << "description"    if video.description.empty?
      errors << "view_count"     if video.view_count.to_s.empty?
      errors << "tags"           if video.tags.empty?
      errors << "url"            if video.url.empty?
      errors << "thumbnail_url"  if video.thumbnail_url.empty?

      unless errors.empty? then
        error_msg = "scraping error occurred.\n"
        errors.each do |error|
          error_msg << error + " is not set.\n"
        end
        raise error_msg
      end
    end

  end
end

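Because scrape() raises a plain RuntimeError when the YouTube markup changes (via check_video) and exposes video_count / video_from / video_to for pagination, a caller can walk the result pages defensively. A hedged sketch of that pattern, relying on the 20-videos-per-page behaviour documented above:

  require "rubygems"
  require "youtube/searchresultscraper"

  page = 1
  begin
    loop do
      scraper = Youtube::SearchResultScraper.new("ruby on rails", page)
      scraper.open
      scraper.scrape
      puts "videos #{scraper.video_from}-#{scraper.video_to} of #{scraper.video_count}"
      break if scraper.video_to >= scraper.video_count   # last page reached
      page += 1
    end
  rescue RuntimeError => e
    warn "scraping failed on page #{page}: #{e.message}"
  end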