twitter_friendly 0.2.1 → 0.3.0

@@ -0,0 +1,313 @@
+ module TwitterFriendly
+   module REST
+     module Extension
+       module Clusters
+
+         PROFILE_SPECIAL_WORDS = %w(20↑ 成人済 腐女子)
+         PROFILE_SPECIAL_REGEXP = nil
+         PROFILE_EXCLUDE_WORDS = %w(in at of my to no er by is RT DM the and for you inc Inc com from info next gmail 好き こと 最近 紹介 連載 発売 依頼 情報 さん ちゃん くん 発言 関係 もの 活動 見解 所属 組織 代表 連絡 大好き サイト ブログ つぶやき 株式会社 最新 こちら 届け お仕事 ツイ 返信 プロ 今年 リプ ヘッダー アイコン アカ アカウント ツイート たま ブロック 無言 時間 お願い お願いします お願いいたします イベント フォロー フォロワー フォロバ スタッフ 自動 手動 迷言 名言 非公式 リリース 問い合わせ ツイッター)
+         PROFILE_EXCLUDE_REGEXP = Regexp.union(/\w+@\w+\.(com|co\.jp)/, %r[\d{2,4}(年|/)\d{1,2}(月|/)\d{1,2}日], %r[\d{1,2}/\d{1,2}], /\d{2}th/, URI.regexp)
+
+         def tweet_clusters(tweets, limit: 10, debug: false)
+           return {} if tweets.blank?
+           text = tweets.map(&:text).join(' ')
+
+           if defined?(Rails)
+             exclude_words = JSON.parse(File.read(Rails.configuration.x.constants['cluster_bad_words_path']))
+             special_words = JSON.parse(File.read(Rails.configuration.x.constants['cluster_good_words_path']))
+           else
+             exclude_words = JSON.parse(File.read('./cluster_bad_words.json'))
+             special_words = JSON.parse(File.read('./cluster_good_words.json'))
+           end
+
+           %w(べたら むっちゃ それとも たしかに さそう そんなに ったことある してるの しそうな おやくま ってますか これをやってるよ のせいか 面白い 可愛い).each { |w| exclude_words << w }
+           %w(面白い 可愛い 食べ物 宇多田ヒカル ご飯 面倒 体調悪くなる 空腹 頑張ってない 眼鏡 台風 沖縄 らんま1/2 女の子 怪我 足のむくみ 彼女欲しい 彼氏欲しい 吐き気 注射 海鮮チヂミ 出勤 価格ドットコム 幹事 雑談 パズドラ ビオフェルミン 餃子 お金 まんだらけ 結婚 焼肉 タッチペン).each { |w| special_words << w }
+
+           # Record how many times each special (whitelisted) word appears
+           frequency =
+             special_words.map { |sw| [sw, text.scan(sw)] }
+                          .delete_if { |_, matched| matched.empty? }
+                          .each_with_object(Hash.new(0)) { |(word, matched), memo| memo[word] = matched.size }
+
+           # Find runs of a single character class: kanji, hiragana, katakana, alphanumerics, punctuation
+           text.scan(/[一-龠〆ヵヶ々]+|[ぁ-んー~]+|[ァ-ヴー~]+|[a-zA-ZＡ-Ｚ0-9]+|[、。!！?？]+/).
+
+             # Strip punctuation, 'w' and repeated prolonged-sound marks
+             map { |w| w.remove(/[?!？！。、w]|(ー{2,})/) }.
+
+             # Drop words that are too short or on the exclude list
+             delete_if { |w| w.length <= 2 || exclude_words.include?(w) }.
+
+             # Record occurrence counts
+             each { |w| frequency[w] += 1 }
+
+           # Keep only words that appear more than twice, sorted by frequency
+           frequency.select { |_, v| 2 < v }.sort_by { |k, v| [-v, -k.size] }.take(limit).to_h
+         end
+
+         def count_freq_hashtags(tweets, with_prefix: true, use_regexp: false, debug: false)
+           puts "tweets: #{tweets.size}" if debug
+           return {} if tweets.blank?
+
+           prefix = %w(# ＃)
+           regexp = /[#＃]([A-Za-zＡ-Ｚａ-ｚ_一-鿆0-9０-９ぁ-ヶヲ-゚ー]+)/
+
+           tweets =
+             if use_regexp
+               tweets.select { |t| t.text && prefix.any? { |char| t.text.include?(char) } }
+             else
+               tweets.select { |t| include_hashtags?(t) }
+             end
+           puts "tweets with hashtag: #{tweets.size}" if debug
+
+           hashtags =
+             if use_regexp
+               tweets.map { |t| t.text.scan(regexp).flatten.map(&:strip) }
+             else
+               tweets.map { |t| extract_hashtags(t) }
+             end.flatten
+           hashtags = hashtags.map { |h| "#{prefix[0]}#{h}" } if with_prefix
+
+           hashtags.each_with_object(Hash.new(0)) { |h, memo| memo[h] += 1 }.sort_by { |k, v| [-v, -k.size] }.to_h
+         end
+
+         def hashtag_clusters(hashtags, limit: 10, debug: false)
+           puts "hashtags: #{hashtags.take(10)}" if debug
+
+           hashtag, count = hashtags.take(3).each_with_object(Hash.new(0)) do |tag, memo|
+             tweets = search(tag)
+             puts "tweets #{tag}: #{tweets.size}" if debug
+             memo[tag] = count_freq_hashtags(tweets).reject { |t, c| t == tag }.values.sum
+           end.max_by { |_, c| c }
+
+           hashtags = count_freq_hashtags(search(hashtag)).reject { |t, c| t == hashtag }.keys
+           queries = hashtags.take(3).combination(2).map { |ary| ary.join(' AND ') }
+           puts "selected #{hashtag}: #{queries.inspect}" if debug
+
+           tweets = queries.map { |q| search(q) }.flatten
+           puts "tweets #{queries.inspect}: #{tweets.size}" if debug
+
+           if tweets.empty?
+             tweets = search(hashtag)
+             puts "tweets #{hashtag}: #{tweets.size}" if debug
+           end
+
+           members = tweets.map { |t| t.user }
+           puts "members count: #{members.size}" if debug
+
+           count_freq_words(members.map { |m| m.description }, special_words: PROFILE_SPECIAL_WORDS, exclude_words: PROFILE_EXCLUDE_WORDS, special_regexp: PROFILE_SPECIAL_REGEXP, exclude_regexp: PROFILE_EXCLUDE_REGEXP, debug: debug).take(limit)
+         end
+
+         def fetch_lists(user, debug: false)
+           memberships(user, count: 500, call_limit: 2).sort_by { |li| li.member_count }
+         rescue Twitter::Error::ServiceUnavailable => e
+           puts "#{__method__}: #{e.class} #{e.message} #{user.inspect}" if debug
+           []
+         end
+
+         def list_clusters(lists, shrink: false, shrink_limit: 100, list_member: 300, total_member: 3000, total_list: 50, rate: 0.3, limit: 10, debug: false)
+           lists = lists.sort_by { |li| li.member_count }
+           puts "lists: #{lists.size} (#{lists.map { |li| li.member_count }.join(', ')})" if debug
+           return {} if lists.empty?
+
+           open('lists.txt', 'w') { |f| f.write lists.map(&:full_name).join("\n") } if debug
+
+           list_special_words = %w()
+           list_exclude_regexp = %r(list[0-9]*|people-ive-faved|twizard-magic-list|my-favstar-fm-list|timeline-list|conversationlist|who-i-met)
+           list_exclude_words = %w(it list people who met)
+
+           # Split list names on '-' -> drop one-character words -> sort by descending frequency
+           words = lists.map { |li| li.full_name.split('/')[1] }.
+             select { |n| !n.match(list_exclude_regexp) }.
+             map { |n| n.split('-') }.flatten.
+             delete_if { |w| w.size < 2 || list_exclude_words.include?(w) }.
+             map { |w| SYNONYM_WORDS.has_key?(w) ? SYNONYM_WORDS[w] : w }.
+             each_with_object(Hash.new(0)) { |w, memo| memo[w] += 1 }.
+             sort_by { |k, v| [-v, -k.size] }
+
+           puts "words: #{words.take(10)}" if debug
+           return {} if words.empty?
+
+           # Keep lists whose names contain one of the most frequent words
+           _words = []
+           lists =
+             filter(lists, min: 2) do |li, i|
+               _words = words[0..i].map(&:first)
+               name = li.full_name.split('/')[1]
+               _words.any? { |w| name.include?(w) }
+             end
+           puts "lists include #{_words.inspect}: #{lists.size} (#{lists.map { |li| li.member_count }.join(', ')})" if debug
+           return {} if lists.empty?
+
+           # Keep only the middle 25-75th percentile of lists (by member count)
+           while lists.size > shrink_limit
+             percentile25 = ((lists.length * 0.25).ceil) - 1
+             percentile75 = ((lists.length * 0.75).ceil) - 1
+             lists = lists[percentile25..percentile75]
+             puts "lists sliced by 25-75 percentile: #{lists.size} (#{lists.map { |li| li.member_count }.join(', ')})" if debug
+           end if shrink || lists.size > shrink_limit
+
+           # Keep lists whose member count is below the threshold
+           _list_member = 0
+           _min_list_member = 10 < lists.size ? 10 : 0
+           _lists =
+             filter(lists, min: 2) do |li, i|
+               _list_member = list_member * (1.0 + 0.25 * i)
+               _min_list_member < li.member_count && li.member_count < _list_member
+             end
+           lists = _lists.empty? ? [lists[0]] : _lists
+           puts "lists limited by list member #{_min_list_member}..#{_list_member.round}: #{lists.size} (#{lists.map { |li| li.member_count }.join(', ')})" if debug
+           return {} if lists.empty?
+
+           # Keep lists so that the total member count stays below the threshold
+           _lists = []
+           lists.size.times do |i|
+             _lists = lists[0..(-1 - i)]
+             if _lists.map { |li| li.member_count }.sum < total_member
+               break
+             else
+               _lists = []
+             end
+           end
+           lists = _lists.empty? ? [lists[0]] : _lists
+           puts "lists limited by total members #{total_member}: #{lists.size} (#{lists.map { |li| li.member_count }.join(', ')})" if debug
+           return {} if lists.empty?
+
+           # Keep at most total_list lists
+           if lists.size > total_list
+             lists = lists[0..(total_list - 1)]
+           end
+           puts "lists limited by total lists #{total_list}: #{lists.size} (#{lists.map { |li| li.member_count }.join(', ')})" if debug
+           return {} if lists.empty?
+
+           members = lists.map do |li|
+             begin
+               list_members(li.id)
+             rescue Twitter::Error::NotFound => e
+               puts "#{__method__}: #{e.class} #{e.message} #{li.id} #{li.full_name} #{li.mode}" if debug
+               nil
+             end
+           end.compact.flatten
+           puts "candidate members: #{members.size}" if debug
+           return {} if members.empty?
+
+           open('members.txt', 'w') { |f| f.write members.map { |m| m.description.gsub(/\R/, ' ') }.join("\n") } if debug
+
+           3.times do
+             _members = members.each_with_object(Hash.new(0)) { |member, memo| memo[member] += 1 }.
+               select { |_, v| lists.size * rate < v }.keys
+             if _members.size > 100
+               members = _members
+               break
+             else
+               rate -= 0.05
+             end
+           end
+           puts "members included multi lists #{rate.round(3)}: #{members.size}" if debug
+
+           count_freq_words(members.map { |m| m.description }, special_words: PROFILE_SPECIAL_WORDS, exclude_words: PROFILE_EXCLUDE_WORDS, special_regexp: PROFILE_SPECIAL_REGEXP, exclude_regexp: PROFILE_EXCLUDE_REGEXP, debug: debug).take(limit)
+         end
+
+         private
+
+         def filter(lists, min:)
+           min = [min, lists.size].min
+           _lists = []
+           3.times do |i|
+             _lists = lists.select { |li| yield(li, i) }
+             break if _lists.size >= min
+           end
+           _lists
+         end
+
+         def count_by_word(texts, delim: nil, tagger: nil, min_length: 2, max_length: 5, special_words: [], exclude_words: [], special_regexp: nil, exclude_regexp: nil)
+           texts = texts.dup
+
+           frequency = Hash.new(0)
+           if special_words.any?
+             texts.each do |text|
+               special_words.map { |sw| [sw, text.scan(sw)] }
+                            .delete_if { |_, matched| matched.empty? }
+                            .each_with_object(frequency) { |(word, matched), memo| memo[word] += matched.size }
+
+             end
+           end
+
+           if exclude_regexp
+             texts = texts.map { |t| t.remove(exclude_regexp) }
+           end
+
+           if delim
+             texts = texts.map { |t| t.split(delim) }.flatten.map(&:strip)
+           end
+
+           if tagger
+             texts = texts.map { |t| tagger.parse(t).split("\n") }.flatten.
+               select { |line| line.include?('名詞') }.
+               map { |line| line.split("\t")[0] }
+           end
+
+           texts.delete_if { |w| w.empty? || w.size < min_length || max_length < w.size || exclude_words.include?(w) || w.match(/\d{2}/) }.
+             each_with_object(frequency) { |word, memo| memo[word] += 1 }.
+             sort_by { |k, v| [-v, -k.size] }.to_h
+         end
+
+         def count_freq_words(texts, special_words: [], exclude_words: [], special_regexp: nil, exclude_regexp: nil, debug: false)
+           candidates, remains = texts.partition { |desc| desc.scan('/').size > 2 }
+           slash_freq = count_by_word(candidates, delim: '/', exclude_regexp: exclude_regexp)
+           puts "words split by /: #{slash_freq.take(10)}" if debug
+
+           candidates, remains = remains.partition { |desc| desc.scan('|').size > 2 }
+           pipe_freq = count_by_word(candidates, delim: '|', exclude_regexp: exclude_regexp)
+           puts "words split by |: #{pipe_freq.take(10)}" if debug
+
+           noun_freq = count_by_word(remains, tagger: build_tagger, special_words: special_words, exclude_words: exclude_words, special_regexp: special_regexp, exclude_regexp: exclude_regexp)
+           puts "words tagged as noun: #{noun_freq.take(10)}" if debug
+
+           slash_freq.merge(pipe_freq) { |_, old, neww| old + neww }.
+             merge(noun_freq) { |_, old, neww| old + neww }.sort_by { |k, v| [-v, -k.size] }
+         end
+
+         def build_tagger
+           require 'mecab'
+           MeCab::Tagger.new("-d #{`mecab-config --dicdir`.chomp}/mecab-ipadic-neologd/")
+         rescue => e
+           puts "Add gem 'mecab' to your Gemfile."
+           raise e
+         end
+
+         def include_hashtags?(tweet)
+           tweet.entities&.hashtags&.any?
+         end
+
+         def extract_hashtags(tweet)
+           tweet.entities.hashtags.map { |h| h.text }
+         end
+
+         SYNONYM_WORDS = (
+           %w(cosplay cosplayer cosplayers coser cos こすぷれ コスプレ レイヤ レイヤー コスプレイヤー レイヤーさん).map { |w| [w, 'cosplay'] } +
+           %w(tsukuba tkb).map { |w| [w, 'tsukuba'] } +
+           %w(waseda 早稲田 早稲田大学).map { |w| [w, 'waseda'] } +
+           %w(keio 慶應 慶應義塾).map { |w| [w, 'keio'] } +
+           %w(gakusai gakuensai 学祭 学園祭).map { |w| [w, 'gakusai'] } +
+           %w(kosen kousen).map { |w| [w, 'kosen'] } +
+           %w(anime アニメ).map { |w| [w, 'anime'] } +
+           %w(photo photos).map { |w| [w, 'photo'] } +
+           %w(creator creater クリエイター).map { |w| [w, 'creator'] } +
+           %w(illustrator illustrater 絵師).map { |w| [w, 'illustrator'] } +
+           %w(artist art artists アート 芸術).map { |w| [w, 'artist'] } +
+           %w(design デザイン).map { |w| [w, 'design'] } +
+           %w(kawaii かわいい).map { |w| [w, 'kawaii'] } +
+           %w(idol あいどる アイドル 美人).map { |w| [w, 'idol'] } +
+           %w(music musician musicians dj netlabel label レーベル おんがく 音楽家 音楽).map { |w| [w, 'music'] } +
+           %w(engineer engineers engineering えんじにあ tech 技術 技術系 hacker coder programming programer programmer geek rubyist ruby scala java lisp).map { |w| [w, 'engineer'] } +
+           %w(internet インターネット).map { |w| [w, 'internet'] }
+         ).to_h
+
+         def normalize_synonym(words)
+           words.map { |w| SYNONYM_WORDS.has_key?(w) ? SYNONYM_WORDS[w] : w }
+         end
+       end
+     end
+   end
+ end
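Below is a minimal usage sketch of the new clustering helpers. It assumes the gem's TwitterFriendly::Client exposes this Clusters extension, that valid API credentials are configured, that the mecab gem and the mecab-ipadic-neologd dictionary are installed (count_freq_words builds a MeCab tagger), and that, outside Rails, cluster_bad_words.json and cluster_good_words.json exist in the working directory for tweet_clusters. The screen name is a placeholder.

```ruby
require 'twitter_friendly'

# Credentials and the screen name below are placeholders.
client = TwitterFriendly::Client.new(
  consumer_key:        ENV['TWITTER_CONSUMER_KEY'],
  consumer_secret:     ENV['TWITTER_CONSUMER_SECRET'],
  access_token:        ENV['TWITTER_ACCESS_TOKEN'],
  access_token_secret: ENV['TWITTER_ACCESS_TOKEN_SECRET']
)

tweets = client.user_timeline('some_screen_name')

# Top 10 recurring words in the timeline, as a frequency-sorted hash.
p client.tweet_clusters(tweets, limit: 10)

# Hashtag frequencies, keys prefixed with '#'.
p client.count_freq_hashtags(tweets)

# Profile-based clusters for the lists the user appears on.
p client.list_clusters(client.fetch_lists('some_screen_name'), limit: 10)
```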
@@ -0,0 +1,129 @@
+ module TwitterFriendly
+   module REST
+     module Extension
+       module Timelines
+
+         EVERY_DAY = (0..6)
+         WDAY_COUNT = EVERY_DAY.map { |n| [n, 0] }.to_h
+         WDAY_NIL_COUNT = EVERY_DAY.map { |n| [n, nil] }.to_h
+
+         EVERY_HOUR = (0..23)
+         HOUR_COUNT = EVERY_HOUR.map { |n| [n, 0] }.to_h
+         HOUR_NIL_COUNT = EVERY_HOUR.map { |n| [n, nil] }.to_h
+
+         def count_wday(times)
+           times.each_with_object(WDAY_COUNT.dup) { |time, memo| memo[time.wday] += 1 }
+         end
+
+         def count_hour(times)
+           times.each_with_object(HOUR_COUNT.dup) { |time, memo| memo[time.hour] += 1 }
+         end
+
+         # [
+         #   {:name=>"Sun", :y=>111, :drilldown=>"Sun"},
+         #   {:name=>"Mon", :y=>95, :drilldown=>"Mon"},
+         #   {:name=>"Tue", :y=>72, :drilldown=>"Tue"},
+         #   {:name=>"Wed", :y=>70, :drilldown=>"Wed"},
+         #   {:name=>"Thu", :y=>73, :drilldown=>"Thu"},
+         #   {:name=>"Fri", :y=>81, :drilldown=>"Fri"},
+         #   {:name=>"Sat", :y=>90, :drilldown=>"Sat"}
+         # ]
+         def usage_stats_wday_series_data(times, day_names:)
+           count_wday(times).map do |wday, count|
+             {name: day_names[wday], y: count, drilldown: day_names[wday]}
+           end
+         end
+
+         # [
+         #   {
+         #     :name=>"Sun",
+         #     :id=>"Sun",
+         #     :data=> [ ["0", 7], ["1", 12], ... , ["22", 10], ["23", 12] ]
+         #   },
+         #   ...
+         #   {
+         #     :name=>"Mon",
+         #     :id=>"Mon",
+         #     :data=> [ ["0", 22], ["1", 11], ... , ["22", 9], ["23", 14] ]
+         #   }
+         def usage_stats_wday_drilldown_series(times, day_names:)
+           counts =
+             EVERY_DAY.each_with_object(WDAY_NIL_COUNT.dup) do |wday, memo|
+               memo[wday] = count_hour(times.select { |t| t.wday == wday })
+             end
+
+           counts.map { |wday, hour_count| [day_names[wday], hour_count] }.map do |wday, hour_count|
+             {name: wday, id: wday, data: hour_count.map { |hour, count| [hour.to_s, count] }}
+           end
+         end
+
+         # [
+         #   {:name=>"0", :y=>66, :drilldown=>"0"},
+         #   {:name=>"1", :y=>47, :drilldown=>"1"},
+         #   ...
+         #   {:name=>"22", :y=>73, :drilldown=>"22"},
+         #   {:name=>"23", :y=>87, :drilldown=>"23"}
+         # ]
+         def usage_stats_hour_series_data(times)
+           count_hour(times).map do |hour, count|
+             {name: hour.to_s, y: count, drilldown: hour.to_s}
+           end
+         end
+
+         # [
+         #   {:name=>"0", :id=>"0", :data=>[["Sun", 7], ["Mon", 22], ["Tue", 8], ["Wed", 9], ["Thu", 9], ["Fri", 6], ["Sat", 5]]},
+         #   {:name=>"1", :id=>"1", :data=>[["Sun", 12], ["Mon", 11], ["Tue", 5], ["Wed", 5], ["Thu", 0], ["Fri", 8], ["Sat", 6]]},
+         #   ...
+         # ]
+         def usage_stats_hour_drilldown_series(times, day_names:)
+           counts =
+             EVERY_HOUR.each_with_object(HOUR_NIL_COUNT.dup) do |hour, memo|
+               memo[hour] = count_wday(times.select { |t| t.hour == hour })
+             end
+
+           counts.map do |hour, wday_count|
+             {name: hour.to_s, id: hour.to_s, data: wday_count.map { |wday, count| [day_names[wday], count] }}
+           end
+         end
+
+         # [
+         #   {:name=>"Sun", :y=>14.778310502283107},
+         #   {:name=>"Mon", :y=>12.273439878234399},
+         #   {:name=>"Tue", :y=>10.110578386605784},
+         #   {:name=>"Wed", :y=>9.843683409436835},
+         #   {:name=>"Thu", :y=>10.547945205479452},
+         #   {:name=>"Fri", :y=>10.61773211567732},
+         #   {:name=>"Sat", :y=>12.115753424657534}
+         # ]
+         def twitter_addiction_series(times, day_names:)
+           max_duration = 5.minutes
+           wday_count =
+             EVERY_DAY.each_with_object(WDAY_NIL_COUNT.dup) do |wday, memo|
+               target_times = times.select { |t| t.wday == wday }
+               memo[wday] =
+                 if target_times.empty?
+                   nil
+                 else
+                   target_times.each_cons(2).map { |newer, older| (newer - older) < max_duration ? newer - older : max_duration }.sum
+                 end
+             end
+           days = times.map { |t| t.to_date.to_s(:long) }.uniq.size
+           weeks = [days / 7.0, 1.0].max
+           wday_count.map do |wday, seconds|
+             {name: day_names[wday], y: (seconds.nil? ? nil : seconds / weeks / 60)}
+           end
+         end
+
+         def usage_stats(tweet_times, day_names: %w(Sun Mon Tue Wed Thu Fri Sat))
+           [
+             usage_stats_wday_series_data(tweet_times, day_names: day_names),
+             usage_stats_wday_drilldown_series(tweet_times, day_names: day_names),
+             usage_stats_hour_series_data(tweet_times),
+             usage_stats_hour_drilldown_series(tweet_times, day_names: day_names),
+             twitter_addiction_series(tweet_times, day_names: day_names)
+           ]
+         end
+       end
+     end
+   end
+ end
@@ -4,11 +4,10 @@ module TwitterFriendly
 
     MAX_TWEETS_PER_REQUEST = 100
 
-    %i(favorites).each do |name|
-      define_method(name) do |*args|
-        args << {result_type: :recent}.merge(args.extract_options!)
-        fetch_tweets_with_max_id(name, args, MAX_TWEETS_PER_REQUEST)
-      end
+    def favorites(*args)
+      options = {result_type: :recent}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      fetch_tweets_with_max_id(__method__, MAX_TWEETS_PER_REQUEST, args[0], options)
     end
   end
 end
@@ -7,39 +7,58 @@ module TwitterFriendly
 
     MAX_IDS_PER_REQUEST = 5000
 
-    %i(friend_ids follower_ids).each do |name|
-      define_method(name) do |*args|
-        options = {count: MAX_IDS_PER_REQUEST}.merge(args.extract_options!)
-        if options[:super_operation]
-          options[:super_super_operation] = options[:super_operation]
-          options[:super_operation] = name
-        end
-        args << options
-        fetch_resources_with_cursor(name, args)
-      end
+    # @return [Hash]
+    #
+    # @overload friend_ids(options = {})
+    # @overload friend_ids(user, options = {})
+    #
+    # @param user [Integer, String] A Twitter user ID or screen name.
+    #
+    # @option options [Integer] :count The number of IDs to return per page, up to a maximum of 5000.
+    def friend_ids(*args)
+      options = {count: MAX_IDS_PER_REQUEST}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      fetch_resources_with_cursor(__method__, args[0], options)
+    end
+
+    def follower_ids(*args)
+      options = {count: MAX_IDS_PER_REQUEST}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      fetch_resources_with_cursor(__method__, args[0], options)
     end
 
+    # @return [Hash]
+    #
+    # @overload friends(options = {})
+    # @overload friends(user, options = {})
+    #
+    # @param user [Integer, String] A Twitter user ID or screen name.
+    #
+    # @option options [Bool] :parallel
     def friends(*args)
-      options = args.extract_options!.merge(super_operation: :friends)
-      ids = friend_ids(*args, options)
+      options = {parallel: true}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      ids = friend_ids(*args, options.except(:parallel))
       users(ids, options)
     end
 
     def followers(*args)
-      options = args.extract_options!.merge(super_operation: :followers)
-      ids = follower_ids(*args, options)
+      options = {parallel: true}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      ids = follower_ids(*args, options.except(:parallel))
       users(ids, options)
     end
 
     def friend_ids_and_follower_ids(*args)
-      options = {super_operation: :friend_ids_and_follower_ids, parallel: true}.merge(args.extract_options!)
+      options = {parallel: true}.merge(args.extract_options!)
+      is_parallel = options.delete(:parallel)
 
-      if options[:parallel]
+      if is_parallel
         require 'parallel'
 
         parallel(in_threads: 2) do |batch|
-          batch.friend_ids(*args, options)
-          batch.follower_ids(*args, options)
+          batch.friend_ids(*args, options.merge(super_operation: [__method__]))
+          batch.follower_ids(*args, options.merge(super_operation: [__method__]))
         end
       else
         [friend_ids(*args, options), follower_ids(*args, options)]
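A usage sketch for the reworked friend and follower fetchers, assuming a configured TwitterFriendly::Client as in the clusters sketch above. The :parallel option now defaults to true and is removed from the options before the ID fetch, and the parallel gem is only required when it stays enabled.

```ruby
require 'twitter_friendly'
client = TwitterFriendly::Client.new  # credentials omitted; same setup as the clusters sketch

friend_ids = client.friend_ids('some_screen_name')  # cursored, up to 5000 IDs per request
friends    = client.friends('some_screen_name')     # IDs first, then users(), parallel by default

# Fetch both ID sets; pass parallel: false to avoid the parallel gem and extra threads.
friend_ids, follower_ids = client.friend_ids_and_follower_ids('some_screen_name')
friend_ids, follower_ids = client.friend_ids_and_follower_ids('some_screen_name', parallel: false)
```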
@@ -4,18 +4,34 @@ module TwitterFriendly
 
     MAX_LISTS_PER_REQUEST = 1000
 
-    # Returns the lists the specified user has been added to.
+    # @return [Hash] The lists the specified user has been added to.
+    #
+    # @overload memberships(options = {})
+    # @overload memberships(user, options = {})
+    #
+    # @param user [Integer, String] A Twitter user ID or screen name.
+    #
+    # @option options [Integer] :count The number of lists to return per page, up to a maximum of 1000.
     def memberships(*args)
-      args << {count: MAX_LISTS_PER_REQUEST}.merge(args.extract_options!)
-      fetch_resources_with_cursor(__method__, args)
+      options = {count: MAX_LISTS_PER_REQUEST}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      fetch_resources_with_cursor(__method__, args[0], options)
     end
 
     MAX_MEMBERS_PER_REQUEST = 5000
 
-    # Returns the members of the specified list.
+    # @return [Hash] The members of the specified list.
+    #
+    # @overload list_members(list)
+    # @overload list_members(list, options = {})
+    #
+    # @param list [Integer] A Twitter list ID.
+    #
+    # @option options [Integer] :count The number of members to return per page, up to a maximum of 5000.
     def list_members(*args)
-      args << {count: MAX_MEMBERS_PER_REQUEST, skip_status: 1}.merge(args.extract_options!)
-      fetch_resources_with_cursor(__method__, args)
+      options = {count: MAX_MEMBERS_PER_REQUEST, skip_status: 1}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      fetch_resources_with_cursor(__method__, args[0], options)
     end
   end
 end
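A sketch of the cursored list calls, again assuming a configured client; memberships pages at up to 1000 lists and list_members at up to 5000 members (with skip_status: 1), matching the constants above. The shape of the returned list objects (id, full_name) is assumed from how the Clusters extension uses them.

```ruby
require 'twitter_friendly'
client = TwitterFriendly::Client.new  # credentials omitted

lists = client.memberships('some_screen_name')   # lists the user has been added to
unless lists.empty?
  members = client.list_members(lists.first.id)  # members of one of those lists
  puts "#{lists.first.full_name}: #{members.size} members"
end
```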
@@ -14,14 +14,19 @@ module TwitterFriendly
 
   class Arguments < Array
     %i(
-        users
-        friend_ids
-        follower_ids
-        friends
-        followers
-      ).each do |name|
+      users
+      friend_ids
+      follower_ids
+      friends
+      followers
+      home_timeline
+      user_timeline
+      mentions_timeline
+      search
+      favorites
+    ).each do |name|
       define_method(name) do |*args|
-        send(:<< , [name, *args])
+        send(:<<, [name, *args])
       end
     end
   end
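The Arguments subclass is what the parallel batch yields: each whitelisted method call is recorded as [name, *args] instead of being executed, so the collected operations can later be run in threads. A stand-alone illustration of that recording trick follows; RecordedCalls is a hypothetical stand-in, not the gem's class.

```ruby
# Hypothetical re-creation of the pattern used by Arguments < Array above.
class RecordedCalls < Array
  %i(friend_ids follower_ids search favorites user_timeline).each do |name|
    define_method(name) { |*args| self << [name, *args] }
  end
end

batch = RecordedCalls.new
batch.friend_ids('some_screen_name')
batch.search('#ruby')
p batch  # => [[:friend_ids, "some_screen_name"], [:search, "#ruby"]]
```

This is the same mechanism by which the batch calls to friend_ids and follower_ids inside friend_ids_and_follower_ids are collected before being executed in two threads.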
@@ -4,12 +4,11 @@ module TwitterFriendly
 
     MAX_TWEETS_PER_REQUEST = 100
 
-    %i(search).each do |name|
-      define_method(name) do |query, options = {}|
-        raise ArgumentError.new('You must specify a search query.') unless query.is_a?(String)
-        args = [query, {result_type: :recent}.merge(options)]
-        fetch_tweets_with_max_id(name, args, MAX_TWEETS_PER_REQUEST)
-      end
+    def search(query, options = {})
+      raise ArgumentError.new('You must specify a search query.') unless query.is_a?(String)
+      options = {result_type: :recent}.merge(options)
+      push_operations(options, __method__)
+      fetch_tweets_with_max_id(__method__, MAX_TWEETS_PER_REQUEST, query, options)
     end
   end
 end
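A sketch of the rewritten search method, assuming a configured client: the query must be a String, result_type defaults to :recent, and results are paged internally with max_id at 100 tweets per request.

```ruby
require 'twitter_friendly'
client = TwitterFriendly::Client.new  # credentials omitted

tweets = client.search('#rubykaigi')             # recent tweets by default
tweets = client.search('#rubykaigi', count: 300) # options are forwarded to the max_id pagination

begin
  client.search(12345)
rescue ArgumentError => e
  puts e.message  # => "You must specify a search query."
end
```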
@@ -4,11 +4,22 @@ module TwitterFriendly
 
     MAX_TWEETS_PER_REQUEST = 200
 
-    %i(home_timeline user_timeline mentions_timeline).each do |name|
-      define_method(name) do |*args|
-        args << {include_rts: true}.merge(args.extract_options!)
-        fetch_tweets_with_max_id(name, args, MAX_TWEETS_PER_REQUEST)
-      end
+    def home_timeline(options = {})
+      options = {include_rts: true}.merge(options)
+      push_operations(options, __method__)
+      fetch_tweets_with_max_id(__method__, MAX_TWEETS_PER_REQUEST, nil, options)
+    end
+
+    def user_timeline(*args)
+      options = {include_rts: true}.merge(args.extract_options!)
+      push_operations(options, __method__)
+      fetch_tweets_with_max_id(__method__, MAX_TWEETS_PER_REQUEST, args[0], options)
+    end
+
+    def mentions_timeline(options = {})
+      options = {include_rts: true}.merge(options)
+      push_operations(options, __method__)
+      fetch_tweets_with_max_id(__method__, MAX_TWEETS_PER_REQUEST, nil, options)
     end
   end
 end
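Finally, a sketch of the three timeline methods, now defined explicitly instead of via define_method; each defaults to include_rts: true and pages with max_id at 200 tweets per request. The client is the same assumed TwitterFriendly::Client as above.

```ruby
require 'twitter_friendly'
client = TwitterFriendly::Client.new  # credentials omitted

home     = client.home_timeline                                          # authenticating user's home timeline
mentions = client.mentions_timeline                                      # recent mentions
mine     = client.user_timeline                                          # authenticating user's own tweets
theirs   = client.user_timeline('some_screen_name', include_rts: false)  # another user's tweets, retweets excluded
```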