awl_tags_twitter 0.0.1 → 0.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 8b58a59a77672a9fbf26fa2a846beacb9d2dd611
- data.tar.gz: 1565fae962838c8d4a0ea4f00345d1da49e48a17
+ metadata.gz: 681d4e15c326c34421fe0e13a2f6b31635085ca1
+ data.tar.gz: ba7dfc6c47c84f0c39a716eb5db78a79b8761497
  SHA512:
- metadata.gz: 1f7bf18d6dd160d79cf4c7bb463fb485deea48c611aa58248c2aa021472e750a111b2b2a8d7cd9f09f89ed1e19b68b5e1f0c0ec422c95dce03557ffa57b4b7f1
- data.tar.gz: 22aba35c2a545919b2f8fc372958076db85e01e27c99bfffb15ca4bdab9e62ae59facbbc63f7395aa13fe8e52e3381b17b30e941a80f2c8a74028b534a4fb81b
+ metadata.gz: 143093d912537e3005e56b9d25de2b2cc64a31166d9594a6ca773e1d822e3b540767bb506f2c880b7b50a52bacbd77a8a15c6a58c15f1a23ecd304f9fc0ca6ab
+ data.tar.gz: b0399953d0af27dae21a2a7beb95ec409bc746b9e7f62cdba518124b18ed6a4d368e94c631888599edf39e9f18a2521d23b0def3154234ef4a5a2556bd5cc2e3
data/README.md CHANGED
@@ -9,11 +9,17 @@ Grab tags from The Awl posts and tweet them out
  ## Usage

  ```
- $ awl_tags_twitter --config /path/to/config/file post
- $ awl_tags_twitter --config /path/to/config/file list
- $ awl_tags_twitter --config /path/to/config/file help
+ $ awl_tags_twitter list [--basic | --complex]
+ $ awl_tags_twitter cache-all [--dry]
+ $ awl_tags_twitter tweet --config /path/to/config/file [--dry]
  ```

+ ### How to retweet a previous article
+
+ 1. Open the tracker file (`~/.awl_articles.json`)
+ 2. Remove the link of the article you would like to retweet
+ 3. Run `awl_tags_twitter tweet` again
+
  ## Contributing

  1. Fork it ( https://github.com/ellisandy/awl_tags_twitter/fork )
@@ -22,24 +28,3 @@ $ awl_tags_twitter --config /path/to/config/file help
  4. Push to the branch (`git push origin my-new-feature`)
  5. Create a new Pull Request

- ## Archecture
- 1. Scraper returns a list of URLs
- * These URLs tie to a specific Article
- * When the article is initialized it should only have the URL
- * You can then call Builder which will poll the webservice and grab all the tags
- 2. Article
- * has a URL
- * has a list of tags
- * utilizes builder to actually poll teh article and return the tags
- 3. Builder
- * accepts a url
- * returns an array of tags
-
- ## Algorythm
- * []
- * Tweet.new(link)
- * Tweet.add tag
- * Tweet.add will check if the tweet will be too long with the new tag included
- * next
- * tweet add or Tweet.new again
-
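The `tweet` command reads the `--config` file with `JSON.parse` and passes the resulting hash straight to `TwitterClient`, so the config is a plain JSON file whose keys mirror the sample hash commented out in `lib/twitter_client.rb`. A minimal sketch of such a file (all values are placeholders, not real credentials):

```json
{
  "consumer_key": "YOUR_CONSUMER_KEY",
  "consumer_secret": "YOUR_CONSUMER_SECRET",
  "access_token": "YOUR_ACCESS_TOKEN",
  "access_token_secret": "YOUR_ACCESS_TOKEN_SECRET"
}
```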
data/awl_tags_twitter.gemspec CHANGED
@@ -22,6 +22,7 @@ Gem::Specification.new do |spec|
  spec.add_runtime_dependency 'commander', '~> 4.3'
  spec.add_runtime_dependency 'contracts', '~> 0.12'
  spec.add_runtime_dependency 'terminal-table', '~> 1.5'
+ spec.add_runtime_dependency 'twitter', '~> 5.16'

  spec.add_development_dependency 'bundler', '~> 1.7'
  spec.add_development_dependency 'rspec', '~> 3.2'
data/bin/awl_tags_twitter CHANGED
@@ -4,25 +4,12 @@ require 'rubygems'
  require 'commander/import'
  require 'scraper'
  require 'terminal-table'
+ require 'twitter_client'
  require 'awl_tags_twitter/version'

  program :version, AwlTagsTwitter::VERSION
  program :description, 'Grab tags from posts and tweet them out'

- command :post do |c|
- c.syntax = 'awl_tags_twitter post [options]'
- c.summary = ''
- c.description = ''
- c.example 'description', 'command example'
- c.option '--some-switch', 'Some switch that does something'
- c.action do # |args, options|
- scrapper = Scraper.new
- scrapper.some = 'hello'
-
- puts scrapper.what
- end
- end
-
  command :list do |c|
  c.syntax = 'awl_tags_twitter list'
  c.summary = 'List current posts and their tags'
@@ -33,32 +20,89 @@ command :list do |c|
  c.option '--basic', 'Display posts with tags'
  c.option '--complex', 'Display posts with tags split between posts'
  c.action do |_args, options|
- # Do something or c.when_called Awstags::Commands::List
+ scraper = Scraper.new
+ scraper.retrieve_posts
  if options.basic
- scraper = Scraper.new
- scraper.retrieve_posts
+ scraper.subtract_cache
  rows = scraper.articles.map { |a| [a.link, a.tags] }
- table = Terminal::Table.new rows: rows
- puts table
  elsif options.complex
- scraper = Scraper.new
- scraper.retrieve_posts.map(&:build_tweets)
+ scraper.subtract_cache
+ scraper.articles.map(&:build_tweets)
  rows = scraper.articles.map { |a| [a.link, a.tags, a.tweets] }
- table = Terminal::Table.new rows: rows
- puts table
  else
  fail 'provide --basic or --complex'
  end
+ tracker = Tracker.new
+ tracker.read_articles
+ tracker.articles << scraper.articles.map(&:link)
+ tracker.articles.flatten!
+ tracker.write_articles
+
+ table = Terminal::Table.new rows: rows
+ puts table
  end
  end

  command :'cache-all' do |c|
  c.syntax = 'awl_tags_twitter cache-all [options]'
- c.summary = ''
- c.description = ''
- c.example 'description', 'command example'
- c.option '--some-switch', 'Some switch that does something'
- c.action do |args, options|
- # Do something or c.when_called Awstags::Commands::Cache-all
+ c.summary = 'Cache all existing articles'
+ c.description = 'This method will call the AWL and save all files to disk. '\
+ 'You can use this if there is an issue while attempting to publish a tweet.'
+ c.example 'default', 'awl_tags_twitter cache-all'
+ c.example 'dry-run', 'awl_tags_twitter cache-all --dry'
+ c.option '--dry', 'Print which articles would get saved, but don\'t save them'
+ c.action do |_args, options|
+ scraper = Scraper.new
+ scraper.retrieve_posts
+ scraper.subtract_cache
+ rows = scraper.articles.map { |a| [a.link, a.tags] }
+ tracker = Tracker.new
+ tracker.read_articles
+ tracker.articles << scraper.articles.map(&:link)
+ tracker.articles.flatten!
+ tracker.write_articles unless options.dry
+
+ table = Terminal::Table.new rows: rows
+ puts table
+ end
+ end
+
+ command :tweet do |c|
+ c.syntax = 'awl_tags_twitter tweet'
+ c.summary = 'Tweet out any untweeted posts.'
+ c.description = 'Grabs a list of all the posts which haven\'t been tweeted, ' \
+ 'builds the tweets, and tweets them for you'
+ c.example 'default', 'awl_tags_twitter tweet'
+ c.example 'dry', 'awl_tags_twitter tweet --dry'
+ c.option '--dry', 'Display what tweets would be sent out'
+ c.option '--config STRING', 'Path to the configuration file'
+ c.action do |_args, options|
+ fail 'please pass a configuration file' unless options.config
+ puts "Loading with config #{options.config}"
+ credentials = JSON.parse(File.read(options.config))
+ twitter_client = TwitterClient.new(credentials)
+ scraper = Scraper.new
+ scraper.retrieve_posts
+ scraper.subtract_cache
+ scraper.articles.map(&:build_tweets)
+ tracker = Tracker.new
+ tracker.read_articles
+ tracker.articles << scraper.articles.map(&:link)
+ tracker.articles.flatten!
+ rows = scraper.articles.map { |a| [a.tweets] }
+
+ unless options.dry
+ scraper.articles.each do |a|
+ a.tweets.each do |t|
+ twitter_client.update(t)
+ end
+ end
+
+ # Record the tweeted articles so they aren't tweeted again
+ tracker.write_articles
+ end
+
+ table = Terminal::Table.new rows: rows
+ puts table
  end
  end
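All three commands in this binary repeat the same scrape, dedup, and track pipeline and differ only in what they do with the surviving articles. A condensed sketch of that shared flow, using only the methods defined in the libraries above (error handling omitted):

```ruby
require 'scraper'
require 'tracker'

scraper = Scraper.new
scraper.retrieve_posts  # pull the RSS feed into Article objects
scraper.subtract_cache  # drop articles already recorded in the tracker file

tracker = Tracker.new
tracker.read_articles      # load previously seen links
tracker.articles << scraper.articles.map(&:link)
tracker.articles.flatten!  # << pushed a nested array, so flatten it
tracker.write_articles     # persist, so the next run skips these links
```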
data/lib/article.rb CHANGED
@@ -46,9 +46,9 @@ class Article

  # Filters finds the link inside the .g-tag-box div, pulls the name, then makes
  # the resulting string uppercase.
- def filter_tags(doc)
+ def filter_tags(document)
  # Filter down and get the tags.
- @tags = doc.css(TAG_CSS).map(&:children).map(&:text)
+ @tags = document.css(TAG_CSS).map(&:children).map(&:text)
  end

  # Opens @link, then parses using Nokogiri
data/lib/awl_tags_twitter/version.rb CHANGED
@@ -1,5 +1,5 @@
  # Global Version for AwlTagsTwitter
  class AwlTagsTwitter
  # Global Version for AwlTagsTwitter
- VERSION = '0.0.1'
+ VERSION = '0.0.3'
  end
data/lib/scraper.rb CHANGED
@@ -1,7 +1,8 @@
- require 'nokogiri'
- require 'rss'
  require 'article'
  require 'contracts'
+ require 'nokogiri'
+ require 'rss'
+ require 'tracker'

  # Class for handling RSS feed to grab posts
  class Scraper
@@ -30,6 +31,15 @@ class Scraper
  @articles << Article.new(link)
  end

+ # TODO: Only grab the tags for articles that haven't already been tweeted
  @articles.map(&:retrieve_tags)
  end
+
+ Contract C::None => C::ArrayOf[Article]
+ # Subtract cached articles from the list of articles
+ def subtract_cache
+ tracker = Tracker.new
+ tracker.read_articles
+ @articles.delete_if { |x| tracker.articles.include?(x.link) }
+ end
  end
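`subtract_cache` mutates `@articles` in place, keeping only articles whose links are missing from the tracker file. A minimal illustration of that `delete_if` pattern (the URLs are hypothetical):

```ruby
require 'article'

seen = ['http://example.com/1']
articles = [Article.new('http://example.com/1'), Article.new('http://example.com/2')]
articles.delete_if { |a| seen.include?(a.link) }
articles.map(&:link)  # => ["http://example.com/2"]
```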
data/lib/tracker.rb ADDED
@@ -0,0 +1,31 @@
+ require 'contracts'
+ require 'json'
+
+ # Track previously tweeted posts in ~/.awl_articles.json
+ class Tracker
+ include Contracts::Core
+
+ attr_accessor :articles
+
+ # Short cut for Contract
+ C = Contracts
+
+ Contract C::None => C::ArrayOf[String]
+ # Reads articles from the ~/.awl_articles.json file
+ def read_articles
+ file = JSON.load(File.read("#{Dir.home}/.awl_articles.json"))
+ @articles = file['articles']
+
+ rescue JSON::ParserError
+ @articles = []
+ rescue Errno::ENOENT
+ @articles = []
+ end
+
+ Contract C::None => C::Num
+ # Writes articles to the ~/.awl_articles.json file
+ def write_articles
+ f = File.new("#{Dir.home}/.awl_articles.json", 'w')
+ f.write "{ \"articles\": #{@articles} }"
+ end
+ end
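`write_articles` builds its JSON by interpolating the Ruby array straight into a string. That works here only because `Array#to_s` on an array of plain strings happens to match JSON array syntax; `JSON.generate` would be the safer choice if links could ever contain quotes. A sketch of the round trip under that assumption:

```ruby
require 'json'

articles = ['https://example.com/123456']
payload = "{ \"articles\": #{articles} }"  # the string Tracker writes
JSON.parse(payload)['articles']            # => ["https://example.com/123456"]

# Equivalent output via the JSON library, safe for any string content:
JSON.generate('articles' => articles)      # => '{"articles":["https://example.com/123456"]}'
```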
data/lib/tweet.rb CHANGED
@@ -14,15 +14,15 @@ class Tweet
  Contract String => String
  def initialize(link)
  @link = link
- @post = @link
+ @post = "| #{@link}"
  end

  Contract String => String
  # Add tag to @post
  def add(tag)
- temp_post = "#{tag} | #{@post}"
+ temp_post = "| #{tag} #{@post}"
  if temp_post.length <= 140
- @post = tag + ' | ' + @post
+ @post = "| #{tag} #{@post}"
  else
  fail Tweet::TagTooLong
  end
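With the new formatting, each tag is prepended as `| tag `, so tags accumulate right to left in front of the link, and `add` raises `Tweet::TagTooLong` once the 140-character limit would be exceeded. A quick worked example (the link is hypothetical):

```ruby
require 'tweet'

tweet = Tweet.new('http://example.com/post')  # @post: "| http://example.com/post"
tweet.add('foo')                              # @post: "| foo | http://example.com/post"
tweet.add('bar')                              # @post: "| bar | foo | http://example.com/post"
# An add that would push @post past 140 characters raises Tweet::TagTooLong.
```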
data/lib/twitter_client.rb ADDED
@@ -0,0 +1,33 @@
+ require 'contracts'
+ require 'twitter'
+
+ # Establishes the Twitter client
+ class TwitterClient
+ include Contracts::Core
+
+ # Shortcut for contracts
+ C = Contracts
+
+ Contract Hash => nil
+ def initialize(credentials)
+ # @config = {
+ # consumer_key: 'YOUR_CONSUMER_KEY',
+ # consumer_secret: 'YOUR_CONSUMER_SECRET',
+ # access_token: 'YOUR_ACCESS_TOKEN',
+ # access_token_secret: 'YOUR_ACCESS_TOKEN_SECRET'
+ # }
+ @client = Twitter::REST::Client.new(credentials)
+ fail 'Unable to load your credentials' unless @client.credentials?
+ end
+
+ Contract String => C::Any
+ # Wrapper for Twitter::REST::Client#update that retries when rate limited
+ def update(post)
+ @client.update(post)
+ rescue Twitter::Error::TooManyRequests => error
+ # NOTE: Your process could go to sleep for up to 15 minutes, but if you
+ # retry any sooner, it will almost certainly fail with the same exception.
+ sleep error.rate_limit.reset_in + 1
+ retry
+ end
+ end
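Putting the pieces together: the client is built from the parsed config file, and `update` sleeps out the rate-limit window before retrying. A usage sketch (the config path is hypothetical):

```ruby
require 'json'
require 'twitter_client'

credentials = JSON.parse(File.read("#{Dir.home}/.twitter.json"))
client = TwitterClient.new(credentials)  # fails fast if credentials are incomplete
client.update('| foo | http://example.com/post')
```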
data/spec/article_spec.rb CHANGED
@@ -65,10 +65,10 @@ RSpec.describe Article do
  end

  context 'when there are only a few tags' do
- let(:tags) { ['foo'] }
+ let(:tags) { %w(foo bar) }

  it 'returns an array of tweets' do
- expect(article.build_tweets.first.post).to eq("#{tags.first} | #{link}")
+ expect(article.build_tweets.first.post).to eq("| #{tags.last} | #{tags.first} | #{link}")
  end
  it 'returns only one tweet' do
  expect(article.build_tweets.count).to eq(1)
@@ -98,7 +98,7 @@ RSpec.describe Article do
  it 'returns the array of tweets' do
  article.build_tweets

- expect(article.tweets).to eq(["#{tags.first} | #{link}"])
+ expect(article.tweets).to eq(["| #{tags.first} | #{link}"])
  end
  end
  end
data/spec/scraper_spec.rb CHANGED
@@ -1,13 +1,12 @@
  require 'scraper'

  RSpec.describe Scraper do
- context '#retrieve_posts' do
- let(:scraper) { Scraper.new }
+ let(:scraper) { Scraper.new }

+ context '#retrieve_posts' do
  it 'responds', :vcr do
  expect(scraper).to respond_to(:retrieve_posts)
  end
-
  it 'returns an array', :vcr do
  expect(scraper.retrieve_posts).to be_kind_of(Array)
  end
@@ -21,7 +20,39 @@ RSpec.describe Scraper do
  end
  end

- context '#posts' do
+ context '#articles' do
  it { should respond_to(:articles) }
  end
+
+ context '#subtract_cache' do
+ let(:article_1) { Article.new('http://domain.com/1') }
+ let(:article_2) { Article.new('http://domain.com/2') }
+ let(:double) { ['foo'] }
+
+ context 'when there are previous articles' do
+ before do
+ allow_any_instance_of(Tracker).to receive(:articles).and_return(['http://domain.com/1'])
+ allow_any_instance_of(Tracker).to receive(:read_articles).and_return(['http://domain.com/1'])
+ end
+
+ it 'does not return the previous articles', :vcr do
+ scraper.instance_variable_set(:@articles, [article_1, article_2])
+ scraper.subtract_cache
+ expect(scraper.articles.map(&:link)).to eq(['http://domain.com/2'])
+ end
+ end
+
+ context 'when there are no previous posts' do
+ before do
+ allow_any_instance_of(Tracker).to receive(:articles).and_return([])
+ allow_any_instance_of(Tracker).to receive(:read_articles).and_return([])
+ end
+
+ it 'returns all posts' do
+ scraper.instance_variable_set(:@articles, [article_1, article_2])
+ scraper.subtract_cache
+ expect(scraper.articles.map(&:link)).to eq(['http://domain.com/1', 'http://domain.com/2'])
+ end
+ end
+ end
  end
data/spec/tracker_spec.rb ADDED
@@ -0,0 +1,62 @@
+ require 'tracker'
+
+ RSpec.describe Tracker do
+ let(:tracker) { Tracker.new }
+ let(:data) { '{ "articles" : [ "https://domain.com/123456" ] }' }
+
+ before do
+ allow(File).to receive(:read).and_return(data)
+ end
+
+ context '#articles' do
+ context 'when the file is populated' do
+ it 'returns an array with urls' do
+ expect(tracker.read_articles).to eq(['https://domain.com/123456'])
+ end
+ end
+
+ context 'when the file does not contain JSON' do
+ let(:data) { ' { "articles": "blah" : "foo" } ' }
+
+ it 'returns an empty array' do
+ expect(tracker.read_articles).to eq([])
+ end
+ end
+
+ context 'when the file does not exist' do
+ let(:data) { '{ "articles": [] }' }
+
+ before do
+ expect(File).to receive(:read).once.and_raise(Errno::ENOENT)
+ end
+
+ it 'returns an empty array' do
+ expect(tracker.read_articles).to eq([])
+ end
+ end
+ end
+
+ context '#read_articles' do
+ it 'returns an array of articles' do
+ expect(tracker.read_articles).to eq(['https://domain.com/123456'])
+ end
+ end
+
+ context '#write_articles' do
+ it { is_expected.to respond_to(:write_articles) }
+
+ context 'when the JSON is valid' do
+ let(:articles) { ['https://domain.com/123456'] }
+
+ before do
+ expect_any_instance_of(File).to receive(:write).and_return(47)
+ end
+
+ it 'writes the JSON to file' do
+ tracker.instance_variable_set(:@articles, articles)
+ tracker.write_articles
+ expect(tracker.read_articles).to eq(articles)
+ end
+ end
+ end
+ end
data/spec/tweet_spec.rb CHANGED
@@ -14,7 +14,7 @@ RSpec.describe Tweet do
  context 'when the tag + @post is < 140' do
  let(:tag) { 'foo' }
  it 'is added' do
- expect(tweet.add(tag)).to eq("#{tag} | #{link}")
+ expect(tweet.add(tag)).to eq("| #{tag} | #{link}")
  end
  end
  context 'when the tag + @post is > 140' do
data/spec/twitter_client.rb ADDED
@@ -0,0 +1,4 @@
+ require 'twitter_client'
+
+ RSpec.describe TwitterClient do
+ end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: awl_tags_twitter
  version: !ruby/object:Gem::Version
- version: 0.0.1
+ version: 0.0.3
  platform: ruby
  authors:
  - Jack Ellis
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2016-10-30 00:00:00.000000000 Z
+ date: 2016-11-05 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: nokogiri
@@ -66,6 +66,20 @@ dependencies:
  - - "~>"
  - !ruby/object:Gem::Version
  version: '1.5'
+ - !ruby/object:Gem::Dependency
+ name: twitter
+ requirement: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: '5.16'
+ type: :runtime
+ prerelease: false
+ version_requirements: !ruby/object:Gem::Requirement
+ requirements:
+ - - "~>"
+ - !ruby/object:Gem::Version
+ version: '5.16'
  - !ruby/object:Gem::Dependency
  name: bundler
  requirement: !ruby/object:Gem::Requirement
@@ -256,14 +270,17 @@ files:
  - lib/article.rb
  - lib/awl_tags_twitter/version.rb
  - lib/scraper.rb
+ - lib/tracker.rb
  - lib/tweet.rb
  - lib/tweet/tag_too_long.rb
- - lib/tweet_builder.rb
+ - lib/twitter_client.rb
  - spec/.txt
  - spec/article_spec.rb
  - spec/scraper_spec.rb
  - spec/spec_helper.rb
+ - spec/tracker_spec.rb
  - spec/tweet_spec.rb
+ - spec/twitter_client.rb
  - spec/version_spec.rb
  homepage: https://github.com/ellisandy
  licenses:
@@ -294,6 +311,8 @@ test_files:
  - spec/article_spec.rb
  - spec/scraper_spec.rb
  - spec/spec_helper.rb
+ - spec/tracker_spec.rb
  - spec/tweet_spec.rb
+ - spec/twitter_client.rb
  - spec/version_spec.rb
  has_rdoc:
data/lib/tweet_builder.rb DELETED
@@ -1,5 +0,0 @@
- require 'contracts'
-
- # Recursively build tweets and returns a list of Tweets
- class TweetBuilder
- end