reddit_bot 1.7.4 → 1.8.0

Files changed (66):
  1. checksums.yaml +4 -4
  2. data/lib/reddit_bot.rb +63 -21
  3. data/reddit_bot.gemspec +9 -13
  4. metadata +12 -74
  5. data/Gemfile +0 -5
  6. data/README.md +0 -101
  7. data/Rakefile +0 -6
  8. data/examples/.bashrc +0 -2
  9. data/examples/.gitignore +0 -2
  10. data/examples/Gemfile.lock +0 -17
  11. data/examples/boilerplate.rb +0 -12
  12. data/examples/councilofricks/Gemfile +0 -4
  13. data/examples/councilofricks/Gemfile.lock +0 -17
  14. data/examples/councilofricks/main.rb +0 -58
  15. data/examples/cptflairbot3/.bashrc +0 -1
  16. data/examples/cptflairbot3/Code.gs +0 -13
  17. data/examples/cptflairbot3/Gemfile +0 -5
  18. data/examples/cptflairbot3/Gemfile.lock +0 -74
  19. data/examples/cptflairbot3/app.js +0 -40
  20. data/examples/cptflairbot3/casual/casualpokemontrades.htm +0 -910
  21. data/examples/cptflairbot3/casual/script.js +0 -55
  22. data/examples/cptflairbot3/casual/style.css +0 -1099
  23. data/examples/cptflairbot3/log.htm +0 -1
  24. data/examples/cptflairbot3/main.rb +0 -62
  25. data/examples/cptflairbot3/package.json +0 -6
  26. data/examples/cptflairbot3/pubsub.rb +0 -30
  27. data/examples/cptflairbot3/update_gas_hook_secret.rb +0 -4
  28. data/examples/devflairbot/Gemfile +0 -6
  29. data/examples/devflairbot/Gemfile.lock +0 -74
  30. data/examples/devflairbot/main.rb +0 -81
  31. data/examples/get_dimensions.rb +0 -212
  32. data/examples/iostroubleshooting/Gemfile +0 -5
  33. data/examples/iostroubleshooting/Gemfile.lock +0 -16
  34. data/examples/iostroubleshooting/main.rb +0 -36
  35. data/examples/johnnymarr/Gemfile +0 -3
  36. data/examples/johnnymarr/Gemfile.lock +0 -17
  37. data/examples/johnnymarr/main.rb +0 -54
  38. data/examples/johnnymarr/twitter.rb +0 -80
  39. data/examples/largeimages/Gemfile +0 -11
  40. data/examples/largeimages/Gemfile.lock +0 -107
  41. data/examples/largeimages/main.rb +0 -167
  42. data/examples/largeimagesreview/Gemfile +0 -4
  43. data/examples/largeimagesreview/Gemfile.lock +0 -15
  44. data/examples/largeimagesreview/main.rb +0 -43
  45. data/examples/mlgtv/Gemfile +0 -4
  46. data/examples/mlgtv/Gemfile.lock +0 -23
  47. data/examples/mlgtv/channels.txt +0 -127
  48. data/examples/mlgtv/main.rb +0 -160
  49. data/examples/net_http_utils.rb +0 -148
  50. data/examples/oneplus/Gemfile +0 -5
  51. data/examples/oneplus/Gemfile.lock +0 -26
  52. data/examples/oneplus/main.rb +0 -43
  53. data/examples/realtimeww2/.bashrc +0 -1
  54. data/examples/realtimeww2/Gemfile +0 -3
  55. data/examples/realtimeww2/Gemfile.lock +0 -17
  56. data/examples/realtimeww2/main.rb +0 -129
  57. data/examples/sexypizza/Gemfile +0 -3
  58. data/examples/sexypizza/Gemfile.lock +0 -15
  59. data/examples/sexypizza/main.rb +0 -33
  60. data/examples/wallpaper/Gemfile +0 -5
  61. data/examples/wallpaper/Gemfile.lock +0 -33
  62. data/examples/wallpaper/main.rb +0 -27
  63. data/examples/yayornay/Gemfile +0 -3
  64. data/examples/yayornay/Gemfile.lock +0 -15
  65. data/examples/yayornay/main.rb +0 -33
  66. data/lib/reddit_bot/version.rb +0 -3
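
This release stops shipping the development files inside the packaged gem: everything under data/examples/, along with the README, Gemfile, and Rakefile listed above, is removed, leaving only the library, gemspec, and metadata. For quick reference, here is a minimal usage sketch distilled from the examples being removed below; the subreddit name, post title/text, and the secrets.yaml contents are placeholders, and the exact options accepted by RedditBot::Bot.new should be checked against the project README on GitHub.

# Minimal sketch based on the removed examples (johnnymarr/main.rb, largeimages/main.rb).
# "yoursubreddit" and the secrets.yaml layout are placeholders, not part of this diff.
require "yaml"
require "reddit_bot"

bot = RedditBot::Bot.new YAML.load_file("secrets.yaml"), subreddit: "yoursubreddit"

# Enumerate recent submissions, as the largeimages example does via new_posts.
bot.new_posts.take(5).each { |post| puts post["title"] }

# Call a raw API endpoint, as the johnnymarr example does for /api/submit.
result = bot.json :post, "/api/submit",
                  {sr: "yoursubreddit", kind: "self", title: "hello", text: "posted via reddit_bot"}
p result["json"]["errors"]
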
data/examples/iostroubleshooting/Gemfile
@@ -1,5 +0,0 @@
- source "https://rubygems.org"
-
- # ruby "2.0.0"
- gem "json"#, "1.7.7"
- gem "reddit_bot", "~>1.1.5"
data/examples/iostroubleshooting/Gemfile.lock
@@ -1,16 +0,0 @@
- GEM
-   remote: https://rubygems.org/
-   specs:
-     json (1.8.3)
-     reddit_bot (1.1.5)
-       json
-
- PLATFORMS
-   ruby
-
- DEPENDENCIES
-   json
-   reddit_bot (~> 1.1.5)
-
- BUNDLED WITH
-    1.11.2
data/examples/iostroubleshooting/main.rb
@@ -1,36 +0,0 @@
- require_relative File.join "..", "boilerplate"
- BOT = RedditBot::Bot.new YAML.load(File.read "secrets.yaml"), ignore_captcha: true
-
- # SUBREDDIT = "test___________"
- SUBREDDIT = "iostroubleshooting"
-
- # require "open-uri"
- require "csv" # /api/flaircsv
-
- loop do
-   AWSStatus::touch
-   catch :loop do
-
-     existing = BOT.json(:get, "/r/#{SUBREDDIT}/api/flairlist")["users"]
-     begin
-       JSON.parse(DownloadWithRetry::download_with_retry("#{File.read "gas.url"}sheet_name=Bot&spreadsheet_id=10UzXUbawBgXLQkxXDMz28Qcx3IQPjwG9nByd_d8y31I", &:read))
-     rescue JSON::ParserError
-       puts "smth wrong with GAS script"
-       throw :loop
-     end.drop(1).reverse.uniq{ |_, user, _, _| user }.map do |row|
-       next unless row.map(&:empty?) == [false, false, false, false]
-       _, user, ios, flair = row
-       next if existing.include?({"flair_css_class"=>flair, "user"=>user, "flair_text"=>ios})
-       [user, ios, flair]
-       # {"iPhone"=>"greenflair", "iPad"=>"blue", "iPod"=>"red"}[device[/iP(od|ad|hone)/]]]
-     end.compact.each_slice(50) do |slice|
-       CSV(load = ""){ |csv| slice.each{ |record| csv << record } }
-       puts load
-       BOT.json(:post, "/r/#{SUBREDDIT}/api/flaircsv", [["flair_csv", load]]).each do |report|
-         pp report unless report.values_at("errors", "ok", "warnings") == [{}, true, {}]
-       end
-     end
-
-   end
-   sleep 60
- end
data/examples/johnnymarr/Gemfile
@@ -1,3 +0,0 @@
- source "https://rubygems.org"
-
- gem "reddit_bot"
data/examples/johnnymarr/Gemfile.lock
@@ -1,17 +0,0 @@
- GEM
-   remote: https://rubygems.org/
-   specs:
-     json (2.1.0)
-     nethttputils (0.2.5.1)
-     reddit_bot (1.6.10)
-       json
-       nethttputils (~> 0.2.5.1)
-
- PLATFORMS
-   ruby
-
- DEPENDENCIES
-   reddit_bot
-
- BUNDLED WITH
-    1.17.1
data/examples/johnnymarr/main.rb
@@ -1,54 +0,0 @@
- require_relative "../boilerplate"
- SUBREDDIT = "JohnnyMarr"
- BOT = RedditBot::Bot.new YAML.load(File.read "secrets.yaml"), subreddit: SUBREDDIT
-
- TWITTER = "Johnny_Marr"
- require_relative "twitter"
-
- loop do
-   id = BOT.new_posts.find do |post|
-     /\(https:\/\/twitter\.com\/#{TWITTER}\/status\/(\d{18,})\)/i =~ post["selftext"] and break $1
-   end.to_i
-   n = if id.zero?
-     fail "no tweets found in subreddit" unless [ "#{SUBREDDIT}_TEST" ].include?(SUBREDDIT) || ENV["START"]
-     10
-   else
-     200
-   end
-
-   fail unless flair = BOT.json(:get, "/r/#{SUBREDDIT}/api/link_flair").find do |flair|
-     flair["text"] == "Twitter"
-   end
-
-   timeout = 0
-   JSON.load( begin
-     NetHTTPUtils.request_data(
-       "https://api.twitter.com/1.1/statuses/user_timeline.json",
-       form: { screen_name: TWITTER, count: n, tweet_mode: "extended" },
-       header: { Authorization: "Bearer #{TWITTER_ACCESS_TOKEN}" }
-     )
-   rescue NetHTTPUtils::Error => e
-     fail unless [500, 503].include? e.code
-     sleep timeout += 1
-     retry
-   end ).sort_by{ |tweet| -tweet["id"] }.take_while do |tweet|
-     tweet["id"] > id && (!File.exist?("id") || tweet["id"] > File.read("id").to_i)
-   end.reverse_each do |tweet|
-     title, text, contains_media = Tweet2titleNtext[tweet]
-     result = BOT.json :post, "/api/submit", {
-       sr: SUBREDDIT,
-       kind: "self",
-       title: title,
-       text: text,
-     }.tap{ |h| h.merge!({ flair_id: flair["id"] }) }
-     unless result["json"]["errors"].empty?
-       fail unless result["json"]["errors"].map(&:first) == ["ALREADY_SUB"]
-       puts "ALREADY_SUB error for #{tweet["id"]}"
-     end
-     File.write "id", tweet["id"]
-     abort if ENV["ONCE"]
-   end
-
-   puts "END LOOP #{Time.now}"
-   sleep 300
- end
data/examples/johnnymarr/twitter.rb
@@ -1,80 +0,0 @@
- require "json"
- require "nethttputils"
-
- TWITTER_ACCESS_TOKEN = JSON.load(
-   NetHTTPUtils.request_data "https://api.twitter.com/oauth2/token", :post,
-     auth: File.read("twitter.token").split,
-     form: {grant_type: :client_credentials}
- )["access_token"]
-
- Tweet2titleNtext = lambda do |tweet|
-   pp tweet if ENV["TEST"]
-   text = ""
-   contains_media = false
-   up = ->s{ s.split.map{ |w| "^#{w}" }.join " " }
-
-   tweet_to_get_media_from = tweet["retweeted_status"] || tweet
-   if tweet_to_get_media_from["extended_entities"] && !tweet_to_get_media_from["extended_entities"]["media"].empty?
-     contains_media = true
-     tweet_to_get_media_from["extended_entities"]["media"].each_with_index do |media, i|
-       text.concat "* [Image #{i + 1}](#{media["media_url_https"]})\n\n"
-     end
-   end
-   if !tweet_to_get_media_from["entities"]["urls"].empty?
-     contains_media = true
-     tweet_to_get_media_from["entities"]["urls"].each_with_index do |url, i|
-       text.concat "* [Link #{i + 1}](#{url["expanded_url"]})\n\n"
-     end
-   end
-
-   require "date"
-   text.concat "^- #{
-     up[tweet["user"]["name"]]
-   } [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [#{
-     up[Date.parse(tweet["created_at"]).strftime "%B %-d, %Y"]
-   }](https://twitter.com/#{TWITTER}/status/#{tweet["id"]})"
-   require "cgi"
-   # [CGI::unescapeHTML(tweet["full_text"]).sub(/( https:\/\/t\.co\/[0-9a-zA-Z]{10})*\z/, ""), text, contains_media]
-   [CGI::unescapeHTML(tweet["retweeted_status"] ? "RT: #{tweet["retweeted_status"]["full_text"]}" : tweet["full_text"]).sub(/(\s+https:\/\/t\.co\/[0-9a-zA-Z]{10})*\z/, ""), text, contains_media]
- end
- [
-   [905764294687633408, true, "The Polish government & military high command is now evacuating Warsaw for Brest, 120 miles east: German armies are too close to the capital", "* [Image 1](https://pbs.twimg.com/media/DJHq71BXYAA6KJ0.jpg)\n\n" "^- ^WW2 ^Tweets ^from ^1940 [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^""September ^7, ^2017](https://twitter.com/#{TWITTER}/status/905764294687633408)"],
-   [915534673471733760, true, "In east Poland (now Soviet Ukraine) industry & farms to be collectivised, political parties banned, aristocrats & capitalists \"re-educated\".", "* [Image 1](https://pbs.twimg.com/media/DLSh2J9W4AACcOG.jpg)\n\n* [Image 2](https://pbs.twimg.com/media/DLSh4sKX0AEBaXq.jpg)\n\n^- ^WW2 ^Tweets ^from ^1940 [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^" "October ^4, ^2017](https://twitter.com/#{TWITTER}/status/915534673471733760)"],
-   [915208866408824832, true, "For 1st time, RAF planes dropping propaganda leaflets on Berlin itself, entitled \"Germans: these are your leaders!\"", "* [Image 1](https://pbs.twimg.com/media/DLN5jJ-XkAEUz9M.jpg)\n\n* [Link 1](https://www.psywar.org/product_1939EH158.php)\n\n" "^- ^WW2 ^Tweets ^from ^1940 [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^" "October ^3, ^2017](https://twitter.com/#{TWITTER}/status/915208866408824832)"],
-   [914577848891006978, true, "\"In Poland, Russia pursued a cold policy of selfinterest. But clearly necessary for Russia… against Nazi menace.\"", "* [Link 1](https://www.youtube.com/watch?v=ygmP5A3n2JA)\n\n" "^- ^WW2 ^Tweets ^from ^1940 [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^" "October ^1, ^2017](https://twitter.com/#{TWITTER}/status/914577848891006978)"],
-   [926581977372942336, false, "Finland rejects Soviet demand to surrender land near Leningrad & give Red Navy base in Hanko; Soviets now claim Finns' manner \"warlike\".", "^- ^WW2 ^Tweets ^from ^1940 [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^" "November ^3, ^2017](https://twitter.com/#{TWITTER}/status/926581977372942336)"],
-   [1007650044441329664, true, "RT: SOLD OUT | Tonight’s @Johnny_Marr signing at Rough Trade East is now completely sold out! Catch you in a bit. ‘Call The Comet’ is out now:", "* [Image 1](https://pbs.twimg.com/media/DfvdN1_WsAE_a3r.jpg)\n\n* [Link 1](https://roughtrade.com/gb/music/johnny-marr-call-the-comet)\n\n^- ^Johnny ^Marr [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^June ^15, ^2018](https://twitter.com/#{TWITTER}/status/1007650044441329664)"],
-   [1007155648612581376, true, "Tomorrow. #CallTheComet", "* [Image 1](https://pbs.twimg.com/ext_tw_video_thumb/1007155601913204736/pu/img/IREVPkgUVHoQHfBB.jpg)\n\n" "^- ^Johnny ^Marr [^\\(@#{TWITTER}\\)](https://twitter.com/#{TWITTER}) ^| [^June ^14, ^2018](https://twitter.com/#{TWITTER}/status/1007155648612581376)"],
- ].each do |id, contains_media_, title_, text_|
-   title, text, contains_media = Tweet2titleNtext[ JSON.load NetHTTPUtils.request_data(
-     "https://api.twitter.com/1.1/statuses/show.json",
-     form: { id: id, tweet_mode: "extended" },
-     header: { Authorization: "Bearer #{TWITTER_ACCESS_TOKEN}" },
-   ) ]
-   unless contains_media_ == contains_media
-     puts "expected: #{contains_media_}"
-     puts "got: #{contains_media}"
-     abort "CONTAINS_MEDIA ERROR"
-   end
-   unless title_ == title
-     puts "expected:\n#{title_.inspect}"
-     puts "got:\n#{title.inspect}"
-     abort "TITLE FORMATTING ERROR"
-   end
-   unless text_ == text
-     puts "expected:\n#{text_.inspect}"
-     puts "got:\n#{text.inspect}"
-     abort "TEXT FORMATTING ERROR"
-   end
-   if ENV["TEST_POST"]
-     pp BOT.json :post, "/api/submit", {
-       sr: "#{SUBREDDIT}_TEST",
-       kind: "self",
-       title: title,
-       text: text,
-     }.tap{ |h| h.merge!({ flair_id: BOT.json(:get, "/r/#{SUBREDDIT}_TEST/api/link_flair").find{ |flair|
-       flair["text"] == "Contains Media"
-     }["id"] }) if contains_media }
-   end
- end
- abort "OK" if ENV["TEST"]
data/examples/largeimages/Gemfile
@@ -1,11 +0,0 @@
- source "https://rubygems.org"
-
- gem "json"
-
- gem "nokogiri", "~>1.10.4"
-
- gem "nethttputils", git: "git@github.com:nakilon/nethttputils.git"
- gem "directlink", "~>0.0.8.5"
-
- gem "gcplogger", git: "git@github.com:nakilon/gcplogger.git", tag: "v0.1.1.0"
- gem "google-cloud-error_reporting"
data/examples/largeimages/Gemfile.lock
@@ -1,107 +0,0 @@
- GIT
-   remote: git@github.com:nakilon/gcplogger.git
-   revision: 7c1451fac49bd0d242c6de43315ae6e9a70d8f7f
-   tag: v0.1.1.0
-   specs:
-     gcplogger (0.1.1.0)
-       google-cloud-logging (~> 1.4.0)
-       public_suffix (~> 2.0)
-
- GIT
-   remote: git@github.com:nakilon/nethttputils.git
-   revision: 1eea987604cf592a62a6732eae864959f7a9fdc4
-   specs:
-     nethttputils (0.3.3.0)
-
- GEM
-   remote: https://rubygems.org/
-   specs:
-     addressable (2.7.0)
-       public_suffix (>= 2.0.2, < 5.0)
-     directlink (0.0.8.5)
-       addressable
-       fastimage (~> 2.1.3)
-       kramdown
-       nethttputils (~> 0.3.3.0)
-       nokogiri
-       reddit_bot (~> 1.7.0)
-     faraday (0.14.0)
-       multipart-post (>= 1.2, < 3)
-     fastimage (2.1.7)
-     google-cloud-core (1.2.0)
-       google-cloud-env (~> 1.0)
-     google-cloud-env (1.0.1)
-       faraday (~> 0.11)
-     google-cloud-error_reporting (0.30.0)
-       google-cloud-core (~> 1.2)
-       google-gax (~> 1.0)
-       stackdriver-core (~> 1.3)
-     google-cloud-logging (1.4.0)
-       google-cloud-core (~> 1.1)
-       google-gax (~> 1.0)
-       stackdriver-core (~> 1.2)
-     google-gax (1.0.1)
-       google-protobuf (~> 3.2)
-       googleapis-common-protos (>= 1.3.5, < 2.0)
-       googleauth (~> 0.6.2)
-       grpc (>= 1.7.2, < 2.0)
-       rly (~> 0.2.3)
-     google-protobuf (3.5.1.2)
-     googleapis-common-protos (1.3.7)
-       google-protobuf (~> 3.0)
-       googleapis-common-protos-types (~> 1.0)
-       grpc (~> 1.0)
-     googleapis-common-protos-types (1.0.1)
-       google-protobuf (~> 3.0)
-     googleauth (0.6.2)
-       faraday (~> 0.12)
-       jwt (>= 1.4, < 3.0)
-       logging (~> 2.0)
-       memoist (~> 0.12)
-       multi_json (~> 1.11)
-       os (~> 0.9)
-       signet (~> 0.7)
-     grpc (1.10.0)
-       google-protobuf (~> 3.1)
-       googleapis-common-protos-types (~> 1.0.0)
-       googleauth (>= 0.5.1, < 0.7)
-     json (2.2.0)
-     jwt (2.1.0)
-     kramdown (2.1.0)
-     little-plugger (1.1.4)
-     logging (2.2.2)
-       little-plugger (~> 1.1)
-       multi_json (~> 1.10)
-     memoist (0.16.0)
-     mini_portile2 (2.4.0)
-     multi_json (1.13.1)
-     multipart-post (2.0.0)
-     nokogiri (1.10.5)
-       mini_portile2 (~> 2.4.0)
-     os (0.9.6)
-     public_suffix (2.0.5)
-     reddit_bot (1.7.3)
-       json
-       nethttputils (~> 0.3.3.0)
-     rly (0.2.3)
-     signet (0.8.1)
-       addressable (~> 2.3)
-       faraday (~> 0.9)
-       jwt (>= 1.5, < 3.0)
-       multi_json (~> 1.10)
-     stackdriver-core (1.3.0)
-       google-cloud-core (~> 1.2)
-
- PLATFORMS
-   ruby
-
- DEPENDENCIES
-   directlink (~> 0.0.8.5)
-   gcplogger!
-   google-cloud-error_reporting
-   json
-   nethttputils!
-   nokogiri (~> 1.10.4)
-
- BUNDLED WITH
-    2.0.2
data/examples/largeimages/main.rb
@@ -1,167 +0,0 @@
- ### THIS WAS MY THE VERY FIRST REDDIT BOT
-
-
- require "gcplogger"
- logger = GCPLogger.logger "largeimagesbot"
-
- fail "no ENV['ERROR_REPORTING_KEYFILE'] specified" unless ENV["ERROR_REPORTING_KEYFILE"]
- require "google/cloud/error_reporting"
- Google::Cloud::ErrorReporting.configure do |config|
-   config.project_id = JSON.load(File.read ENV["ERROR_REPORTING_KEYFILE"])["project_id"]
- end
-
-
- require "directlink"
-
- require "nokogiri"
-
- require "../boilerplate"
- BOT = RedditBot::Bot.new YAML.load_file "secrets.yaml"
-
- INCLUDE = %w{
-   user/kjoneslol/m/sfwpornnetwork
-
-   r/woahdude
-   r/pic
-
-   r/highres
-   r/wallpapers
-   r/wallpaper
-   r/WQHD_Wallpaper
-
-   r/oldmaps
-   r/telephotolandscapes
- }
- EXCLUDE = %w{ foodporn powerwashingporn }
-
- checked = []
-
- search_url = lambda do |url|
-   JSON.load( begin
-     NetHTTPUtils.request_data "https://www.reddit.com/r/largeimages/search.json", form: {q: "url:#{url}", restrict_sr: "on"}, header: ["User-Agent", "ajsdjasdasd"]
-   rescue NetHTTPUtils::Error => e
-     raise unless [503].include? e.code
-     sleep 60
-     retry
-   end )["data"]["children"]
- end
- fail unless 1 == search_url["https://i.imgur.com/9JTxtjW.jpg"].size
-
- loop do
-   begin
-     logger.info "LOOP #{Time.now}"
-   rescue => e
-     puts "oops"
-     Google::Cloud::ErrorReporting.report e
-     sleep 5
-     raise
-   end
-
-   [ [:source_ultireddit, 10000000, ( Nokogiri::XML( begin
-     NetHTTPUtils.request_data ENV["FEEDPCBR_URL"]
-   rescue NetHTTPUtils::Error => e
-     raise unless [502, 504].include? e.code
-     sleep 60
-     retry
-   end ).remove_namespaces!.xpath("feed/entry").map do |entry|
-     [
-       entry.at_xpath("id").text,
-       entry.at_xpath("link[@rel='via']")["href"],
-       entry.at_xpath("title").text,
-       entry.at_xpath("category")["term"],
-       entry.at_xpath("author/name").text,
-       entry.at_xpath("link[@rel='alternate']")["href"],
-     ]
-   end ) ],
-   [:source_reddit, 30000000, ( INCLUDE.flat_map do |sortasub|
-     BOT.new_posts(sortasub).take(100).map do |child|
-       next if child["is_self"]
-       next if EXCLUDE.include? child["subreddit"].downcase
-       child.values_at(
-         *%w{ id url title subreddit author permalink }
-       ).tap{ |_| _.last.prepend "https://www.reddit.com" }
-     end.compact
-   end ) ],
-   ].each do |source, min_resolution, entries|
-     logger.warn "#{source}.size: #{entries.size}"
-     entries.each do |id, url, title, subreddit, author, permalink|
-       next if checked.include? id
-       checked << id
-       # next if Gem::Platform.local.os == "darwin" # prevent concurrent posting
-       logger.debug "image url for #{id}: #{url}"
-       next logger.warn "skipped a post by /u/sjhill" if author == "sjhill" # opt-out
-       next logger.warn "skipped a post by /u/redisforever" if author == "redisforever" # opt-out
-       next logger.warn "skipped a post by /u/bekalaki" if author == "bekalaki" # 9 ways to divide a karmawhore
-
-       t = begin
-         DirectLink url, 60
-       rescue SocketError,
-              Net::OpenTimeout,
-              Errno::ECONNRESET,
-              NetHTTPUtils::Error,
-              FastImage::UnknownImageType,
-              FastImage::ImageFetchFailure,
-              DirectLink::ErrorNotFound,
-              DirectLink::ErrorBadLink => e
-         next logger.error "skipped (#{e}) #{url} from http://redd.it/#{id}"
-       end
-       logger.info "DirectLink: #{t.inspect}"
-       tt = t.is_a?(Array) ? t : [t]
-       next logger.error "probably crosspost of a self post: http://redd.it/#{id}" if tt.empty?
-       unless min_resolution <= tt.first.width * tt.first.height
-         next logger.debug "skipped low resolution #{source}"
-       end
-       # puts "https://www.reddit.com/r/LargeImages/search.json?q=url%3A#{CGI.escape url}&restrict_sr=on"
-       resolution = "[#{tt.first.width}x#{tt.first.height}]"
-       next logger.warn "already submitted #{resolution} #{id}: '#{url}'" unless
-         Gem::Platform.local.os == "darwin" || search_url[url].empty?
-       logger.warn "resolution #{resolution} got from #{id}: #{url}"
-       title = "#{resolution}#{
-         " [#{tt.size} images]" if tt.size > 1
-       } #{
-         title.sub(/\s*\[?#{tt.first.width}\s*[*x×]\s*#{tt.first.height}\]?\s*/i, " ").
-           sub("[OC]", " ").gsub(/\s+/, " ").strip
-       } /r/#{subreddit}".gsub(/\s+\(\s+\)\s+/, " ").sub(/(?<=.{297}).+/, "...")
-       logger.warn "new post #{source}: #{url} #{title.inspect}"
-       unless Gem::Platform.local.os == "darwin"
-         result = BOT.json :post,
-           "/api/submit",
-           {
-             kind: "link",
-             url: url,
-             sr: "LargeImages",
-             title: title,
-           }
-         next unless result["json"]["errors"].empty?
-         logger.info "post url: #{result["json"]["data"]["url"]}"
-       end
-       # {"json"=>
-       # {"errors"=>[],
-       # "data"=>
-       # {"url"=>
-       # "https://www.reddit.com/r/LargeImages/comments/3a9rel/2594x1724_overlooking_wildhorse_lake_from_near/",
-       # "id"=>"3a9rel",
-       # "name"=>"t3_3a9rel"}}}
-       line1 = "[Original thread](#{permalink}) by /u/#{author}"
-       line2 = "Direct link#{" (the largest image)" if tt.size > 1}: #{tt.first.url}"
-       line3 = [
-         "Direct links to all other images in album:",
-         tt.map(&:url) - [tt.first.url]
-       ] if tt.size > 1
-       text = [line1, line2, line3].compact.join(" \n")
-       logger.info "new comment: #{text.inspect}"
-       unless Gem::Platform.local.os == "darwin"
-         result = BOT.leave_a_comment "#{result["json"]["data"]["name"]}", text.sub(/(?<=.{9000}).+/m, "...")
-         unless result["json"]["errors"].empty?
-           logger.error result.inspect
-           fail "failed to leave comment"
-         end
-       end
-
-       abort if Gem::Platform.local.os == "darwin"
-     end
-   end
-
-   logger.info "END LOOP #{Time.now}"
-   sleep 300
- end