reddit_post_to_markdown 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml ADDED
@@ -0,0 +1,7 @@
1
+ ---
2
+ SHA256:
3
+ metadata.gz: fbcefef7bcf13e1fc6d417be5e2a5adf397f69931fbacae27855c0fc0d34e66e
4
+ data.tar.gz: 4cecd3cb2af8878a4766a12b7b33fc6eb7a47bcb957681f4ce7f83248c76a384
5
+ SHA512:
6
+ metadata.gz: 3e5e54ea2bf6f039f889ee2d19020b7e9d7621f993722b0d1850969d48f67adcfe3ae1bcd26c15548e0512a6d40a3c56a342b9e206e368327925e9c60380ee61
7
+ data.tar.gz: fbfa20da94b37b834cfbcf88b6ae4f625aba540a0abe9ec0738e2fde074f0d9230545c9889ab93ddd7e555de2ac88457e6689f0c0f12189ab369b07353b69220
@@ -0,0 +1,13 @@
1
module RedditPostToMarkdown
  # Raised when the given URL does not match a Reddit post URL pattern.
  # This includes subreddit listings, user profiles, search results, and
  # any URL that is not a direct link to a single post.
  class NotAPostError < StandardError; end

  # Raised when the HTTP request to Reddit fails with a non-2xx status code.
  class FetchError < StandardError; end

  # Raised when Reddit returns a response that does not have the expected
  # two-element JSON array structure of a post listing (post data followed
  # by the top-level comment listing).
  class InvalidResponseError < StandardError; end
end
@@ -0,0 +1,334 @@
1
+ require "time"
2
+
3
module RedditPostToMarkdown
  # Converts Reddit post data and its comments into a Markdown string.
  #
  # The output format matches the {https://github.com/chauduyphanvu/reddit-markdown
  # reddit-markdown} tool: post header, title, selftext, reply count, and a
  # depth-indented comment tree.
  class PostRenderer
    # Replacement text used when a comment matches a filter and no custom
    # +:message+ is provided in the filters hash.
    DEFAULT_FILTERED_MESSAGE = "REMOVED DUE TO CUSTOM FILTER(S)".freeze

    # Renders a Reddit post and its comments as a Markdown string.
    #
    # This is the primary entry point for the class. It instantiates a renderer
    # and calls {#render}.
    #
    # @param post_data [Hash] the +data+ object from Reddit's post listing JSON,
    #   containing keys such as +"title"+, +"author"+, +"selftext"+, +"ups"+,
    #   +"locked"+, +"created_utc"+, and +"subreddit_name_prefixed"+
    # @param replies_data [Array<Hash>] the +children+ array from Reddit's
    #   comment listing JSON; each element represents a top-level comment
    # @param filters [Hash] optional comment filters (see {RedditPostToMarkdown.convert}
    #   for full key documentation)
    # @return [String] the fully rendered Markdown
    def self.render(post_data, replies_data, filters: {})
      new(post_data, replies_data, filters).render
    end

    # @param post_data [Hash] Reddit post data hash (see {.render})
    # @param replies_data [Array<Hash>] top-level comment objects (see {.render})
    # @param filters [Hash, nil] optional comment filters; +nil+ is treated
    #   the same as an empty hash
    def initialize(post_data, replies_data, filters = {})
      @post_data = post_data
      @replies_data = replies_data
      @filters = filters || {}
    end

    # Renders the post and all its comments as a single Markdown string.
    #
    # Sections in order:
    # 1. Post header (subreddit, author, upvotes, timestamp)
    # 2. Post title as an H2
    # 3. Link back to the original post
    # 4. Lock notice (if the thread is locked)
    # 5. Post body / selftext as a block-quote (if present)
    # 6. Inline/gallery images (if present)
    # 7. Total reply count
    # 8. Horizontal rule
    # 9. Comment tree, depth-indented with tab characters
    #
    # @return [String]
    def render
      lines = []

      # Post header. (Fixed: was `lines << "#{header_line}"` — needless
      # string interpolation around a value that is already a String.)
      lines << header_line
      lines << "## #{post_title}"
      lines << "Original post: [#{post_url}](#{post_url})"
      lines << lock_message if post_locked?

      # Selftext rendered as a Markdown block-quote; every embedded newline
      # must be re-prefixed with "> " to keep the quote contiguous.
      if post_selftext && !post_selftext.strip.empty?
        decoded = decode_selftext(post_selftext)
        lines << "> #{decoded.gsub("\n", "\n> ")}"
      end

      image_urls = post_image_urls
      unless image_urls.empty?
        lines << "### Images"
        image_urls.each do |url|
          lines << "![no alt text](#{url})"
        end
        lines << ""
      end

      # Reply count + separator
      lines << "💬 ~ #{count_all_replies} replies"
      lines << "---\n"

      # Top-level comments
      @replies_data.each do |reply_obj|
        render_top_level_reply(reply_obj, lines)
      end

      lines.join("\n")
    end

    private

    def post_title
      @post_data.fetch("title", "Untitled")
    end

    def post_author
      @post_data.fetch("author", "[unknown]")
    end

    def post_subreddit
      @post_data.fetch("subreddit_name_prefixed", "")
    end

    def post_ups
      @post_data.fetch("ups", 0)
    end

    def post_locked?
      @post_data.fetch("locked", false)
    end

    def post_selftext
      @post_data.fetch("selftext", "")
    end

    def post_url
      @post_data.fetch("url", "")
    end

    def post_created_utc
      @post_data["created_utc"]
    end

    # Extracts direct image URLs from the post's +media_metadata+, a hash of
    # hashes keyed by opaque media IDs we don't care about.
    #
    # @return [Array<String>] zero or more direct image URLs
    def post_image_urls
      image_urls = []
      media_metadata = @post_data["media_metadata"]
      return image_urls unless media_metadata

      media_metadata.each do |_hashed_key, metadata_hash|
        next unless metadata_hash["e"] == "Image" || metadata_hash["e"] == "AnimatedImage"

        src = metadata_hash["s"]
        next unless src

        url = src["u"] || src["gif"] || src["mp4"]
        # Guard against empty strings too ("" is truthy in Ruby; the old
        # `next unless url` let it through and "".split("?")[0] is nil,
        # which then crashed on nil.sub).
        next if url.nil? || url.empty?

        # Reddit JSON HTML-encodes query strings; strip the signed params and
        # rewrite preview.redd.it → i.redd.it so the URL serves the image
        # directly instead of redirecting to an HTML wrapper page.
        image_urls << url.gsub("&amp;", "&").split("?").first.sub("/preview.", "/i.")
      end
      image_urls
    end

    def header_line
      upvotes = format_upvotes(post_ups)
      ts = format_timestamp(post_created_utc)
      ts_str = ts ? "_( #{ts} )_" : ""
      "**#{post_subreddit}** | Posted by u/#{post_author} #{upvotes} #{ts_str}"
    end

    def lock_message
      "---\n\n>🔒 **This thread has been locked by the moderators of #{post_subreddit}**.\n New comments cannot be posted\n\n"
    end

    # Formats an upvote count, abbreviating thousands. Integer division is
    # deliberate (1999 → "1k"), presumably matching the upstream
    # reddit-markdown tool's output — TODO confirm against that tool.
    def format_upvotes(ups)
      return "" if ups.nil?

      ups >= 1000 ? "⬆️ #{ups / 1000}k" : "⬆️ #{ups}"
    end

    # @return [String, nil] a UTC timestamp string, or +nil+ when the epoch
    #   value is missing, zero, or not coercible to an Integer
    def format_timestamp(utc)
      return nil unless utc && utc != 0

      Time.at(utc.to_i).utc.strftime("%Y-%m-%d %H:%M:%S")
    rescue StandardError
      nil
    end

    def decode_selftext(text)
      text
        .gsub("&amp;", "&")
        .gsub("&lt;", "<")
        .gsub("&gt;", ">")
        .gsub("&quot;", '"')
    end

    # Applies the configured filters to one comment. Filters are evaluated
    # in order: keywords, authors, minimum upvotes, regexes; the first match
    # replaces the comment body with the configured message.
    #
    # @return [String] the original +body+, or the replacement message
    def apply_filter(author, body, ups)
      return body if @filters.nil? || @filters.empty?

      message = @filters[:message] || DEFAULT_FILTERED_MESSAGE
      keywords = Array(@filters[:keywords])
      authors = Array(@filters[:authors])
      min_ups = @filters[:min_upvotes] || 0
      regexes = Array(@filters[:regexes])

      keywords.each do |kw|
        return message if body.downcase.include?(kw.to_s.downcase)
      end

      return message if authors.include?(author)
      return message if ups < min_ups

      regexes.each do |regex|
        return message if regex.match?(body)
      end

      body
    end

    def decode_body(text)
      text
        .gsub("&gt;", ">")
        .gsub("\r", "")
    end

    def decode_child_body(text)
      text
        .gsub("&gt;", ">")
        .gsub("&amp;#32;", " ")
        .gsub("^^[", "[")
        .gsub("^^(", "(")
    end

    # Turns bare "u/name" mentions into Markdown profile links.
    def linkify_mentions(text)
      text.gsub(%r{u/(\w+)}) { "[u/#{$1}](https://www.reddit.com/user/#{$1})" }
    end

    def author_link(author)
      return author if author.nil? || author == "[deleted]" || author.empty?

      "[#{author}](https://www.reddit.com/user/#{author})"
    end

    def author_field(author)
      field = author_link(author)
      field += " (OP)" if author == post_author && author != "[deleted]" && !author.empty?
      field
    end

    # Total comment count: top-level replies plus every collected descendant.
    def count_all_replies
      total = @replies_data.length
      @replies_data.each do |reply_obj|
        total += get_replies(reply_obj).length
      end
      total
    end

    # Recursively collects all child replies into a flat ordered hash.
    #
    # Traverses the Reddit comment tree depth-first and returns every
    # descendant comment keyed by its Reddit comment ID. Comments with empty
    # or whitespace-only bodies are skipped. Comments deeper than +max_depth+
    # are skipped unless +max_depth+ is +-1+ (unlimited).
    #
    # @param reply_data [Hash] a Reddit comment object containing a nested
    #   +"replies"+ structure
    # @param max_depth [Integer] maximum comment depth to collect;
    #   +-1+ means no limit
    # @param collected [Hash] accumulator used during recursion; callers
    #   should omit this argument
    # @return [Hash{String => Hash}] a hash of
    #   +id => { depth: Integer, child_reply: Hash }+ in depth-first order
    def get_replies(reply_data, max_depth: -1, collected: {})
      replies_obj = reply_data.dig("data", "replies")
      # Leaf comments carry "" (an empty string) instead of a hash here.
      return collected unless replies_obj.is_a?(Hash)

      children = replies_obj.dig("data", "children") || []
      children.each do |child|
        child_data = child.fetch("data", {})
        child_id = child_data["id"]
        child_depth = child_data.fetch("depth", 0)
        child_body = child_data.fetch("body", "")

        next if max_depth != -1 && child_depth > max_depth
        next if child_body.strip.empty?

        collected[child_id] = { depth: child_depth, child_reply: child }
        get_replies(child, max_depth: max_depth, collected: collected)
      end

      collected
    end

    # Renders one top-level comment (and, via {#get_replies}, its whole
    # subtree) into +lines+. AutoModerator and authorless entries (e.g. the
    # "more" stubs Reddit appends) are skipped entirely.
    def render_top_level_reply(reply_obj, lines)
      data = reply_obj.fetch("data", {})
      author = data.fetch("author", "")

      return if author.empty?
      return if author == "AutoModerator"

      ups = data.fetch("ups", 0)
      upvotes = format_upvotes(ups)
      ts = format_timestamp(data["created_utc"])
      ts_str = ts ? "_( #{ts} )_" : ""
      af = author_field(author)

      lines << "* **#{af}** #{upvotes} #{ts_str}\n\n"

      body = data.fetch("body", "")
      return if body.strip.empty?

      if body == "[deleted]"
        lines << "\tComment deleted by user\n\n"
      else
        filtered = apply_filter(author, body, ups)
        formatted = decode_body(filtered)
        formatted = linkify_mentions(formatted)
        # Re-indent wrapped lines so multi-line bodies stay under the bullet.
        formatted = formatted.gsub("\n", "\n\t")
        lines << "\t#{formatted}\n\n"
      end

      # Nested replies, flattened depth-first; each carries its own depth.
      child_map = get_replies(reply_obj)
      child_map.each_value do |info|
        render_child_reply(info, lines)
      end
    end

    # Renders one nested reply, indenting by one tab per depth level.
    def render_child_reply(info, lines)
      cdepth = info[:depth]
      child_data = info[:child_reply].fetch("data", {})
      author = child_data.fetch("author", "")
      ups = child_data.fetch("ups", 0)
      body = child_data.fetch("body", "")

      upvotes = format_upvotes(ups)
      ts = format_timestamp(child_data["created_utc"])
      ts_str = ts ? "_( #{ts} )_" : ""
      af = author_field(author)
      indent = "\t" * cdepth

      lines << "#{indent}* **#{af}** #{upvotes} #{ts_str}\n\n"

      return if body.strip.empty?

      if body == "[deleted]"
        lines << "#{indent}\tComment deleted by user\n\n"
      else
        filtered = apply_filter(author, body, ups)
        formatted = decode_child_body(filtered)
        formatted = linkify_mentions(formatted)
        formatted = formatted.gsub("\n", "\n#{indent}\t")
        lines << "#{indent}\t#{formatted}\n\n"
      end
    end
  end
end
@@ -0,0 +1,42 @@
1
+ require "httparty"
2
+ require_relative "version"
3
+
4
module RedditPostToMarkdown
  # Fetches Reddit post JSON via the public Reddit API.
  #
  # Requests are made without authentication using Reddit's +.json+ endpoint,
  # which is available for any public post.
  class RedditClient
    include HTTParty

    # Identifies this gem to Reddit; polite clients always send a UA string.
    USER_AGENT = "RedditPostToMarkdown/#{VERSION} (Safe Download Bot)"

    # Downloads the JSON data for a Reddit post URL.
    #
    # Appends +.json+ to +url+ (unless already present) and issues a GET
    # request. Reddit returns a two-element array: post data first, then the
    # top-level comment listing.
    #
    # @param url [String] a cleaned Reddit post URL (no trailing slash,
    #   no query parameters)
    # @return [Array] the parsed two-element JSON response from Reddit
    # @raise [FetchError] if the server returns a non-2xx HTTP status
    # @raise [InvalidResponseError] if the parsed response is not a two-element
    #   Array
    def fetch_post(url)
      endpoint = url.end_with?(".json") ? url : "#{url}.json"
      response = self.class.get(endpoint, headers: { "User-Agent" => USER_AGENT })

      raise FetchError, "HTTP #{response.code} fetching #{url}" unless response.success?

      parsed = response.parsed_response
      well_formed = parsed.is_a?(Array) && parsed.length >= 2
      raise InvalidResponseError, "Expected a 2-element JSON array from #{url}" unless well_formed

      parsed
    end
  end
end
@@ -0,0 +1,47 @@
1
module RedditPostToMarkdown
  # Validates and normalises Reddit post URLs.
  class UrlValidator
    # Accepted post-URL shapes. Reddit post IDs are base-36, so +[a-z0-9]+
    # covers them. Each reddit.com pattern ends in +(?:/|\z)+ rather than a
    # bare +/+ because {.clean_url} strips the trailing slash — a slash-only
    # terminator would reject the very URLs this library produces when the
    # post URL ends at the ID (no title segment).
    PATTERNS = [
      %r{\Ahttps://(?:www\.)?reddit\.com/r/[^/]+/comments/[a-z0-9]+(?:/|\z)},
      %r{\Ahttps://(?:www\.)?reddit\.com/[^/]+/comments/[a-z0-9]+(?:/|\z)},
      %r{\Ahttps://(?:old\.)?reddit\.com/r/[^/]+/comments/[a-z0-9]+(?:/|\z)},
      %r{\Ahttps://redd\.it/[a-z0-9]+}
    ].freeze

    # Returns +true+ if +url+ looks like a direct Reddit post URL.
    #
    # A valid post URL must use HTTPS and match one of the following forms:
    # - +https://www.reddit.com/r/<sub>/comments/<id>/+
    # - +https://reddit.com/r/<sub>/comments/<id>/+
    # - +https://old.reddit.com/r/<sub>/comments/<id>/+
    # - +https://redd.it/<id>+
    #
    # Subreddit listings, user profiles, search pages, and similar URLs return
    # +false+.
    #
    # @param url [String, nil] the URL to check
    # @return [Boolean]
    def self.valid_post_url?(url)
      return false if url.nil? || url.empty?
      return false unless url.start_with?("https://")

      PATTERNS.any? { |pattern| url.match?(pattern) }
    end

    # Strips the query string and the trailing slash from a Reddit URL.
    #
    # Previously only +?utm_source+, +?ref=+, and +?context=+ queries were
    # removed, so other tracking params (e.g. +?share_id=+ from share links)
    # leaked into the request — contradicting {RedditClient#fetch_post}'s
    # documented "no query parameters" contract. Any query string is now
    # removed. Leading and trailing whitespace is also stripped.
    #
    # @param url [String] the URL to clean
    # @return [String] the cleaned URL
    def self.clean_url(url)
      url = url.to_s.strip
      # String#split returns [] for an empty/query-only string; fall back to "".
      url = url.split("?").first || ""
      url.chomp("/")
    end
  end
end
@@ -0,0 +1,3 @@
1
module RedditPostToMarkdown
  # The gem's semantic version string; also embedded in the HTTP User-Agent
  # header sent to Reddit.
  VERSION = "0.1.0"
end
@@ -0,0 +1,74 @@
1
+ require_relative "reddit_post_to_markdown/version"
2
+ require_relative "reddit_post_to_markdown/errors"
3
+ require_relative "reddit_post_to_markdown/url_validator"
4
+ require_relative "reddit_post_to_markdown/reddit_client"
5
+ require_relative "reddit_post_to_markdown/post_renderer"
6
+
7
# Top-level namespace for the reddit_post_to_markdown gem.
module RedditPostToMarkdown
  # Downloads a public Reddit post and returns it as a Markdown string.
  #
  # The URL must point directly to a single post. Subreddit listings, user
  # profiles, search pages, and similar URLs will raise {NotAPostError}.
  # Posts that require authentication (private subreddits, age-gated content)
  # are not accessible.
  #
  # @example Basic usage
  #   markdown = RedditPostToMarkdown.convert(
  #     "https://www.reddit.com/r/ruby/comments/abc123/some_title/"
  #   )
  #
  # @example Without comments
  #   markdown = RedditPostToMarkdown.convert(url, include_comments: false)
  #
  # @example With comment filters
  #   markdown = RedditPostToMarkdown.convert(
  #     url,
  #     filters: {
  #       keywords: ["spam"],
  #       authors: ["AutoModerator"],
  #       min_upvotes: 5,
  #       regexes: [/buy now/i],
  #       message: "[ removed ]"
  #     }
  #   )
  #
  # @param url [String] the URL of a public Reddit post
  # @param include_comments [Boolean] when +false+, omits all comments and
  #   renders only the post header, title, body, and a reply count of 0.
  #   Defaults to +true+.
  # @param filters [Hash] optional hash to suppress comments matching any
  #   criterion. Filters are evaluated in the order listed below; the first
  #   match replaces the comment body with +:message+. All keys are optional.
  # @option filters [Array<String>] :keywords case-insensitive substrings;
  #   any comment whose body contains one of these strings is replaced
  # @option filters [Array<String>] :authors usernames (exact, case-sensitive
  #   match) whose comments are replaced regardless of content
  # @option filters [Integer] :min_upvotes comments with fewer upvotes than
  #   this value are replaced
  # @option filters [Array<Regexp>] :regexes patterns matched against the
  #   comment body; a match causes the comment to be replaced
  # @option filters [String] :message the replacement text used when any
  #   filter matches (default: +"REMOVED DUE TO CUSTOM FILTER(S)"+)
  # @return [String] the post and its comments rendered as Markdown
  # @raise [NotAPostError] if +url+ does not point to a Reddit post
  # @raise [FetchError] if the HTTP request to Reddit fails
  # @raise [InvalidResponseError] if Reddit returns an unexpected JSON structure
  def self.convert(url, filters: {}, include_comments: true)
    normalized = UrlValidator.clean_url(url)
    raise NotAPostError, "Not a Reddit post URL: #{url}" unless UrlValidator.valid_post_url?(normalized)

    # Reddit's post listing is a 2-element array: [post listing, comment listing].
    listing = RedditClient.new.fetch_post(normalized)

    children = listing.dig(0, "data", "children")
    raise InvalidResponseError, "No post data found in response" if children.nil? || children.empty?

    post_data = children.first.fetch("data", {})

    replies = []
    replies = listing.dig(1, "data", "children") || [] if include_comments

    PostRenderer.render(post_data, replies, filters: filters)
  end
end
@@ -0,0 +1,19 @@
1
+ require_relative "lib/reddit_post_to_markdown/version"
2
+
3
# Gem packaging definition for reddit_post_to_markdown.
Gem::Specification.new do |spec|
  spec.name = "reddit_post_to_markdown"
  spec.version = RedditPostToMarkdown::VERSION
  spec.authors = ["masukomi"]
  spec.summary = "Download a public Reddit post and convert it to Markdown"
  spec.description = "Takes the URL of a public Reddit post, downloads the post and its comments via the Reddit JSON API, and returns the content as a Markdown string."
  spec.license = "MIT"

  spec.required_ruby_version = ">= 2.7"

  # Only library code and this spec ship in the packaged gem.
  spec.files = Dir["lib/**/*.rb", "reddit_post_to_markdown.gemspec"]

  # Runtime dependency: HTTP client used by RedditClient.
  spec.add_dependency "httparty", "~> 0.22"

  # Test-only dependencies.
  spec.add_development_dependency "rspec", "~> 3.13"
  spec.add_development_dependency "webmock", "~> 3.26"
end
metadata ADDED
@@ -0,0 +1,87 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: reddit_post_to_markdown
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.0
5
+ platform: ruby
6
+ authors:
7
+ - masukomi
8
+ bindir: bin
9
+ cert_chain: []
10
+ date: 1980-01-02 00:00:00.000000000 Z
11
+ dependencies:
12
+ - !ruby/object:Gem::Dependency
13
+ name: httparty
14
+ requirement: !ruby/object:Gem::Requirement
15
+ requirements:
16
+ - - "~>"
17
+ - !ruby/object:Gem::Version
18
+ version: '0.22'
19
+ type: :runtime
20
+ prerelease: false
21
+ version_requirements: !ruby/object:Gem::Requirement
22
+ requirements:
23
+ - - "~>"
24
+ - !ruby/object:Gem::Version
25
+ version: '0.22'
26
+ - !ruby/object:Gem::Dependency
27
+ name: rspec
28
+ requirement: !ruby/object:Gem::Requirement
29
+ requirements:
30
+ - - "~>"
31
+ - !ruby/object:Gem::Version
32
+ version: '3.13'
33
+ type: :development
34
+ prerelease: false
35
+ version_requirements: !ruby/object:Gem::Requirement
36
+ requirements:
37
+ - - "~>"
38
+ - !ruby/object:Gem::Version
39
+ version: '3.13'
40
+ - !ruby/object:Gem::Dependency
41
+ name: webmock
42
+ requirement: !ruby/object:Gem::Requirement
43
+ requirements:
44
+ - - "~>"
45
+ - !ruby/object:Gem::Version
46
+ version: '3.26'
47
+ type: :development
48
+ prerelease: false
49
+ version_requirements: !ruby/object:Gem::Requirement
50
+ requirements:
51
+ - - "~>"
52
+ - !ruby/object:Gem::Version
53
+ version: '3.26'
54
+ description: Takes the URL of a public Reddit post, downloads the post and its comments
55
+ via the Reddit JSON API, and returns the content as a Markdown string.
56
+ executables: []
57
+ extensions: []
58
+ extra_rdoc_files: []
59
+ files:
60
+ - lib/reddit_post_to_markdown.rb
61
+ - lib/reddit_post_to_markdown/errors.rb
62
+ - lib/reddit_post_to_markdown/post_renderer.rb
63
+ - lib/reddit_post_to_markdown/reddit_client.rb
64
+ - lib/reddit_post_to_markdown/url_validator.rb
65
+ - lib/reddit_post_to_markdown/version.rb
66
+ - reddit_post_to_markdown.gemspec
67
+ licenses:
68
+ - MIT
69
+ metadata: {}
70
+ rdoc_options: []
71
+ require_paths:
72
+ - lib
73
+ required_ruby_version: !ruby/object:Gem::Requirement
74
+ requirements:
75
+ - - ">="
76
+ - !ruby/object:Gem::Version
77
+ version: '2.7'
78
+ required_rubygems_version: !ruby/object:Gem::Requirement
79
+ requirements:
80
+ - - ">="
81
+ - !ruby/object:Gem::Version
82
+ version: '0'
83
+ requirements: []
84
+ rubygems_version: 4.0.7
85
+ specification_version: 4
86
+ summary: Download a public Reddit post and convert it to Markdown
87
+ test_files: []