instapaper-scraper 1.1.0rc1__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
instapaper_scraper/api.py CHANGED
@@ -1,10 +1,11 @@
 import os
 import logging
 import time
-from typing import List, Dict, Tuple, Optional
+from typing import List, Dict, Tuple, Optional, Any
 
 import requests
 from bs4 import BeautifulSoup
+from bs4.element import Tag
 
 from .exceptions import ScraperStructureChanged
 from .constants import INSTAPAPER_BASE_URL, KEY_ID, KEY_TITLE, KEY_URL
@@ -123,14 +124,28 @@ class InstapaperClient:
         soup = BeautifulSoup(response.text, self.HTML_PARSER)
 
         article_list = soup.find(id=self.ARTICLE_LIST_ID)
-        if not article_list:
+        if not isinstance(article_list, Tag):
             raise ScraperStructureChanged(self.MSG_ARTICLE_LIST_NOT_FOUND)
 
         articles = article_list.find_all(self.ARTICLE_TAG)
-        article_ids = [
-            article[KEY_ID].replace(self.ARTICLE_ID_PREFIX, "")
-            for article in articles
-        ]
+        article_ids = []
+        for article in articles:
+            if not isinstance(article, Tag):
+                continue
+            article_id_val = article.get(KEY_ID)
+
+            # Ensure article_id_val is a string before calling replace
+            # If it's a list, take the first element. This is a pragmatic
+            # approach since 'id' attributes should ideally be unique strings.
+            if isinstance(article_id_val, list):
+                article_id_val = article_id_val[0] if article_id_val else None
+
+            if isinstance(article_id_val, str) and article_id_val.startswith(
+                self.ARTICLE_ID_PREFIX
+            ):
+                article_ids.append(
+                    article_id_val.replace(self.ARTICLE_ID_PREFIX, "")
+                )
 
         data = self._parse_article_data(soup, article_ids, page)
         has_more = soup.find(class_=self.PAGINATE_OLDER_CLASS) is not None
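Editor's note: bs4's `find()` is annotated as returning `Tag | NavigableString | None`, so a plain truthiness check is not enough to convince mypy that attribute access is safe; `isinstance(..., Tag)` both guards and narrows the type. Likewise `Tag.get()` may return a string or a list of strings, which is why the loop above normalizes the value before calling `replace()`. A self-contained sketch of the same pattern (the HTML and names are illustrative, not Instapaper's actual markup):

```python
from bs4 import BeautifulSoup
from bs4.element import Tag

html = '<div id="article_list"><article id="article_123"></article></div>'
soup = BeautifulSoup(html, "html.parser")

node = soup.find(id="article_list")
# find() returns Tag | NavigableString | None; only Tag has find_all(),
# so isinstance() both guards against None and narrows the type for mypy.
if isinstance(node, Tag):
    for article in node.find_all("article"):
        # Tag.get() can return str, a list of str, or None depending on
        # the attribute, hence the extra isinstance check.
        article_id = article.get("id")
        if isinstance(article_id, str) and article_id.startswith("article_"):
            print(article_id.removeprefix("article_"))  # prints: 123
```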
@@ -203,14 +218,14 @@ class InstapaperClient:
 
     def _parse_article_data(
         self, soup: BeautifulSoup, article_ids: List[str], page: int
-    ) -> List[Dict[str, str]]:
+    ) -> List[Dict[str, Any]]:
         """Parses the raw HTML to extract structured data for each article."""
         data = []
         for article_id in article_ids:
             article_id_full = f"{self.ARTICLE_ID_PREFIX}{article_id}"
             article_element = soup.find(id=article_id_full)
             try:
-                if not article_element:
+                if not isinstance(article_element, Tag):
                     raise AttributeError(
                         self.MSG_ARTICLE_ELEMENT_NOT_FOUND.format(
                             article_id_full=article_id_full
@@ -218,14 +233,19 @@ class InstapaperClient:
                     )
 
                 title_element = article_element.find(class_=self.ARTICLE_TITLE_CLASS)
-                if not title_element:
+                if not isinstance(title_element, Tag):
                     raise AttributeError(self.MSG_TITLE_ELEMENT_NOT_FOUND)
                 title = title_element.get_text().strip()
 
-                link_element = article_element.find(class_=self.TITLE_META_CLASS).find(
-                    "a"
-                )
-                if not link_element or "href" not in link_element.attrs:
+                meta_element = article_element.find(class_=self.TITLE_META_CLASS)
+                if not isinstance(meta_element, Tag):
+                    raise AttributeError(self.MSG_LINK_ELEMENT_NOT_FOUND)
+
+                link_element = meta_element.find("a")
+                if (
+                    not isinstance(link_element, Tag)
+                    or "href" not in link_element.attrs
+                ):
                     raise AttributeError(self.MSG_LINK_ELEMENT_NOT_FOUND)
                 link = link_element["href"]
 
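Editor's note: splitting the old chained `.find(...).find("a")` into two guarded steps matters because the first `find()` can return `None` (or a `NavigableString`), so the chained call could die with an unhelpful `AttributeError` raised from deep inside the expression; the rewrite fails with the scraper's own message instead. A small sketch of the failure mode and the guarded version (the HTML and error messages here are illustrative):

```python
from bs4 import BeautifulSoup
from bs4.element import Tag

soup = BeautifulSoup('<div class="title_meta"><a href="/x">x</a></div>', "html.parser")

# Chained form: if find() returns None, the second .find() raises
#   AttributeError: 'NoneType' object has no attribute 'find'
# with no hint of which lookup actually failed.
# link = soup.find(class_="title_meta").find("a")

# Guarded form: each step is narrowed and fails with a specific message.
meta = soup.find(class_="title_meta")
if not isinstance(meta, Tag):
    raise AttributeError("meta element not found")
link = meta.find("a")
if not isinstance(link, Tag) or "href" not in link.attrs:
    raise AttributeError("link element not found")
print(link["href"])  # prints: /x
```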
@@ -281,7 +301,7 @@ class InstapaperClient:
             )
             return False
 
-    def _wait_for_retry(self, attempt: int, reason: str):
+    def _wait_for_retry(self, attempt: int, reason: str) -> None:
         """Calculates and waits for an exponential backoff period."""
         sleep_time = self.backoff_factor * (2**attempt)
         logging.warning(
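Editor's note: with `sleep_time = backoff_factor * (2**attempt)` the wait doubles on each retry; a factor of 1.5 yields 1.5 s, 3 s, 6 s, 12 s for attempts 0 through 3. A standalone sketch of the same policy (the 1.5 factor is an illustrative default, not necessarily the package's configured value):

```python
import logging
import time


def wait_for_retry(attempt: int, reason: str, backoff_factor: float = 1.5) -> None:
    """Sleep for an exponentially growing interval before the next retry."""
    sleep_time = backoff_factor * (2**attempt)  # 1.5, 3.0, 6.0, 12.0, ...
    logging.warning("%s; retrying in %.1fs (attempt %d)", reason, sleep_time, attempt)
    time.sleep(sleep_time)
```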
instapaper_scraper/auth.py CHANGED
@@ -3,7 +3,7 @@ import getpass
 import logging
 import stat
 from pathlib import Path
-from typing import Union
+from typing import Union, Optional
 
 from cryptography.fernet import Fernet
 import requests
@@ -67,11 +67,12 @@ class InstapaperAuthenticator:
         session: requests.Session,
         session_file: Union[str, Path],
         key_file: Union[str, Path],
-        username: str = None,
-        password: str = None,
+        username: Optional[str] = None,
+        password: Optional[str] = None,
     ):
         self.session = session
         self.session_file = Path(session_file)
+        self.key_file = Path(key_file)
         self.key = get_encryption_key(key_file)
         self.fernet = Fernet(self.key)
         self.username = username
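Editor's note: `username: str = None` relied on "implicit Optional", which PEP 484 disallows and mypy rejects by default (`no_implicit_optional` became the default in mypy 0.990); spelling it `Optional[str] = None` makes the annotation honest. A minimal illustration:

```python
from typing import Optional


def connect(username: Optional[str] = None) -> None:
    # With `username: str = None`, mypy (default settings) reports roughly:
    #   error: Incompatible default for argument "username"
    #          (default has type "None", argument has type "str")
    ...
```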
@@ -175,7 +176,7 @@ class InstapaperAuthenticator:
             logging.error(self.LOG_LOGIN_FAILED)
             return False
 
-    def _save_session(self):
+    def _save_session(self) -> None:
         """Saves the current session cookies to an encrypted file."""
         required_cookies = self.REQUIRED_COOKIES
         cookies_to_save = [
instapaper_scraper/cli.py CHANGED
@@ -3,7 +3,7 @@ import logging
 import argparse
 import requests
 from pathlib import Path
-from typing import Union
+from typing import Union, List, Dict, Any, Optional, cast
 
 if sys.version_info >= (3, 11):
     import tomllib
@@ -39,7 +39,7 @@ def _resolve_path(
     return user_dir_filename
 
 
-def load_config(config_path_str: Union[str, None] = None) -> Union[dict, None]:
+def load_config(config_path_str: Union[str, None] = None) -> Optional[Dict[str, Any]]:
     """
     Loads configuration from a TOML file.
     It checks the provided path, then config.toml in the project root,
@@ -50,7 +50,7 @@ def load_config(config_path_str: Union[str, None] = None) -> Union[dict, None]:
         CONFIG_DIR / CONFIG_FILENAME,
     ]
 
-    paths_to_check = []
+    paths_to_check: List[Path] = []
     if config_path_str:
         paths_to_check.insert(0, Path(config_path_str).expanduser())
     paths_to_check.extend(default_paths)
@@ -60,7 +60,7 @@ def load_config(config_path_str: Union[str, None] = None) -> Union[dict, None]:
         try:
             with open(path, "rb") as f:
                 logging.info(f"Loading configuration from {path}")
-                return tomllib.load(f)
+                return cast(Dict[str, Any], tomllib.load(f))
         except tomllib.TOMLDecodeError as e:
             logging.error(f"Error decoding TOML file at {path}: {e}")
             return None
@@ -68,7 +68,7 @@ def load_config(config_path_str: Union[str, None] = None) -> Union[dict, None]:
     return None
 
 
-def main():
+def main() -> None:
    """
    Main entry point for the Instapaper scraper CLI.
    """
@@ -144,7 +144,7 @@ def main():
         print(" 0: none (non-folder mode)")
         for i, folder in enumerate(folders):
             display_name = folder.get("key") or folder.get("slug") or folder.get("id")
-            print(f" {i+1}: {display_name}")
+            print(f" {i + 1}: {display_name}")
 
         try:
             choice = int(input("Select a folder (enter a number): "))
instapaper_scraper/output.py CHANGED
@@ -1,9 +1,6 @@
 import os
-import json
-import sqlite3
 import logging
-import csv
-from typing import List, Dict, Any
+from typing import List, Dict, Any, TYPE_CHECKING
 
 from .constants import INSTAPAPER_READ_URL, KEY_ID, KEY_TITLE, KEY_URL
 
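Editor's note: this hunk pulls `json`, `sqlite3`, and `csv` out of module scope; the hunk below re-introduces `sqlite3` under `typing.TYPE_CHECKING` so annotations can still name `sqlite3` types without paying the import at runtime, and the redundant-looking `import sqlite3 as sqlite3` alias plus `__all__` is the explicit re-export idiom that keeps ruff's F401 unused-import rule quiet. A minimal sketch of the pattern (`row_count` and the `articles` table are illustrative, not part of this package):

```python
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # TYPE_CHECKING is False at runtime, so this import is only ever
    # executed by static analyzers; it adds nothing to startup time.
    import sqlite3


def row_count(conn: "sqlite3.Connection") -> int:
    # The quoted annotation is resolved lazily by type checkers,
    # so the real sqlite3 module is never needed in this scope.
    return conn.execute("SELECT COUNT(*) FROM articles").fetchone()[0]
```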
@@ -19,6 +16,13 @@ LOG_NO_ARTICLES = "No articles found to save."
 LOG_SAVED_ARTICLES = "Saved {count} articles to {filename}"
 LOG_UNKNOWN_FORMAT = "Unknown output format: {format}"
 
+if TYPE_CHECKING:
+    # Import for type-checking purposes, and use an alias
+    # to signal to linters like ruff that it is being used.
+    import sqlite3 as sqlite3
+
+    __all__ = ["sqlite3"]
+
 
 def get_sqlite_create_table_sql(add_instapaper_url: bool = False) -> str:
     """Returns the SQL statement to create the articles table."""
@@ -28,6 +32,8 @@ def get_sqlite_create_table_sql(add_instapaper_url: bool = False) -> str:
         f"{KEY_URL} TEXT NOT NULL",
     ]
     if add_instapaper_url:
+        import sqlite3
+
         # The GENERATED ALWAYS AS syntax was added in SQLite 3.31.0
         if sqlite3.sqlite_version_info >= (3, 31, 0):
             columns.append(
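Editor's note: a generated column (SQLite 3.31.0+) lets the database derive `instapaper_url` from `id` in the schema itself, so the insert path never has to compute it; older SQLite gets a plain column that the INSERT must fill manually. A hedged sketch of both schema variants (column names follow the README's output description; the exact SQL this function emits may differ):

```python
import sqlite3

if sqlite3.sqlite_version_info >= (3, 31, 0):
    # Generated column: computed from id by the database, nothing extra to insert.
    instapaper_url_column = (
        "instapaper_url TEXT GENERATED ALWAYS AS "
        "('https://www.instapaper.com/read/' || id) VIRTUAL"
    )
else:
    # Fallback: ordinary column that the insert statement populates itself.
    instapaper_url_column = "instapaper_url TEXT"

create_sql = (
    "CREATE TABLE IF NOT EXISTS articles ("
    "id TEXT PRIMARY KEY, title TEXT NOT NULL, url TEXT NOT NULL, "
    f"{instapaper_url_column})"
)
```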
@@ -53,8 +59,10 @@ def get_sqlite_insert_sql(add_instapaper_url_manually: bool = False) -> str:
 
 def save_to_csv(
     data: List[Dict[str, Any]], filename: str, add_instapaper_url: bool = False
-):
+) -> None:
     """Saves a list of articles to a CSV file."""
+    import csv
+
     os.makedirs(os.path.dirname(filename), exist_ok=True)
     with open(filename, "w", newline="", encoding="utf-8") as f:
         fieldnames = [KEY_ID, KEY_TITLE, KEY_URL]
@@ -69,8 +77,10 @@ def save_to_csv(
     logging.info(LOG_SAVED_ARTICLES.format(count=len(data), filename=filename))
 
 
-def save_to_json(data: List[Dict[str, Any]], filename: str):
+def save_to_json(data: List[Dict[str, Any]], filename: str) -> None:
     """Saves a list of articles to a JSON file."""
+    import json
+
     os.makedirs(os.path.dirname(filename), exist_ok=True)
     with open(filename, "w", encoding="utf-8") as f:
         json.dump(data, f, indent=JSON_INDENT, ensure_ascii=False)
@@ -79,8 +89,10 @@ def save_to_json(data: List[Dict[str, Any]], filename: str):
 
 def save_to_sqlite(
     data: List[Dict[str, Any]], db_name: str, add_instapaper_url: bool = False
-):
+) -> None:
     """Saves a list of articles to a SQLite database."""
+    import sqlite3
+
     os.makedirs(os.path.dirname(db_name), exist_ok=True)
     conn = sqlite3.connect(db_name)
     cursor = conn.cursor()
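Editor's note: each writer now imports its own serializer inside the function, so `sqlite3` is loaded only when the sqlite format is actually selected (and likewise `csv` and `json`); Python caches modules in `sys.modules`, so only the first call pays the import cost. A minimal sketch of the deferred-import pattern:

```python
import sys


def save_stub(db_name: str) -> None:
    # Deferred import: executed on the first call only; afterwards
    # "sqlite3" sits in sys.modules and re-importing is a dict lookup.
    import sqlite3

    conn = sqlite3.connect(db_name)
    conn.close()


print("sqlite3" in sys.modules)  # typically False before any writer runs
save_stub(":memory:")
print("sqlite3" in sys.modules)  # True afterwards
```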
@@ -131,7 +143,7 @@ def save_articles(
     format: str,
     filename: str,
     add_instapaper_url: bool = False,
-):
+) -> None:
     """
     Dispatches to the correct save function based on the format.
     """
instapaper_scraper-1.1.0rc1.dist-info/METADATA → instapaper_scraper-1.1.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: instapaper-scraper
-Version: 1.1.0rc1
+Version: 1.1.1
 Summary: A tool to scrape articles from Instapaper.
 Project-URL: Homepage, https://github.com/chriskyfung/InstapaperScraper
 Project-URL: Source, https://github.com/chriskyfung/InstapaperScraper
@@ -35,30 +35,54 @@ Requires-Dist: tomli~=2.0.1; python_version < "3.11"
 Provides-Extra: dev
 Requires-Dist: pytest; extra == "dev"
 Requires-Dist: pytest-cov; extra == "dev"
-Requires-Dist: black; extra == "dev"
 Requires-Dist: ruff; extra == "dev"
 Requires-Dist: types-requests; extra == "dev"
 Requires-Dist: types-beautifulsoup4; extra == "dev"
 Requires-Dist: requests-mock; extra == "dev"
 Requires-Dist: build; extra == "dev"
 Requires-Dist: twine; extra == "dev"
+Requires-Dist: mypy; extra == "dev"
+Requires-Dist: pre-commit; extra == "dev"
+Requires-Dist: licensecheck; extra == "dev"
 Dynamic: license-file
 
 # Instapaper Scraper
 
-![Python Version from PEP 621 TOML](https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2Fchriskyfung%2FInstapaperScraper%2Frefs%2Fheads%2Fmaster%2Fpyproject.toml)
-[![CI](https://github.com/chriskyfung/InstapaperScraper/actions/workflows/ci.yml/badge.svg)](https://github.com/chriskyfung/InstapaperScraper/actions/workflows/ci.yml)
-[![PyPI version](https://img.shields.io/pypi/v/instapaper-scraper.svg)](https://pypi.org/project/instapaper-scraper/)
-[![PyPI Downloads](https://static.pepy.tech/personalized-badge/instapaper-scraper?period=total&left_text=downloads)](https://pepy.tech/projects/instapaper-scraper)
-[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
-[![Ruff](https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/astral-sh/ruff/main/assets/badge/v2.json)](https://github.com/astral-sh/ruff)
-[![GitHub License](https://img.shields.io/github/license/chriskyfung/InstapaperScraper)](https://www.gnu.org/licenses/gpl-3.0.en.html)
-[![codecov](https://codecov.io/gh/chriskyfung/InstapaperScraper/graph/badge.svg)](https://codecov.io/gh/chriskyfung/InstapaperScraper)
-
-A Python tool to scrape all your saved Instapaper bookmarks and export them to various formats.
-
-## Features
+<!-- Badges -->
+<p align="center">
+  <a href="https://pypi.org/project/instapaper-scraper/">
+    <img src="https://img.shields.io/pypi/v/instapaper-scraper.svg" alt="PyPI version">
+  </a>
+  <a href="https://pepy.tech/projects/instapaper-scraper">
+    <img src="https://static.pepy.tech/personalized-badge/instapaper-scraper?period=total&left_text=downloads" alt="PyPI Downloads">
+  </a>
+  <a href="https://github.com/chriskyfung/InstapaperScraper">
+    <img src="https://img.shields.io/python/required-version-toml?tomlFilePath=https%3A%2F%2Fraw.githubusercontent.com%2Fchriskyfung%2FInstapaperScraper%2Frefs%2Fheads%2Fmaster%2Fpyproject.toml" alt="Python Version from PEP 621 TOML">
+  </a>
+  <a href="https://github.com/astral-sh/ruff">
+    <img src="https://img.shields.io/endpoint?url=https%3A%2F%2Fraw.githubusercontent.com%2Fastral-sh%2Fruff%2Fmain%2Fassets%2Fbadge%2Fv2.json" alt="Ruff">
+  </a>
+  <a href="https://codecov.io/gh/chriskyfung/InstapaperScraper">
+    <img src="https://codecov.io/gh/chriskyfung/InstapaperScraper/graph/badge.svg" alt="Code Coverage">
+  </a>
+  <wbr />
+  <a href="https://github.com/chriskyfung/InstapaperScraper/actions/workflows/ci.yml">
+    <img src="https://github.com/chriskyfung/InstapaperScraper/actions/workflows/ci.yml/badge.svg" alt="CI Status">
+  </a>
+  <a href="https://www.gnu.org/licenses/gpl-3.0.en.html">
+    <img src="https://img.shields.io/github/license/chriskyfung/InstapaperScraper" alt="GitHub License">
+  </a>
+  <a href="https://github.com/sponsors/chriskyfung" title="Sponsor on GitHub">
+    <img src="https://img.shields.io/badge/Sponsor-GitHub-blue?logo=github-sponsors&colorA=263238&colorB=EC407A" alt="GitHub Sponsors Default">
+  </a>
+  <a href="https://www.buymeacoffee.com/chriskyfung" title="Support Coffee">
+    <img src="https://img.shields.io/badge/Support-Coffee-ffdd00?logo=buy-me-a-coffee&logoColor=ffdd00&colorA=263238" alt="Buy Me A Coffee">
+  </a>
+</p>
+
+A powerful and reliable Python tool to automate the export of all your saved Instapaper bookmarks into various formats, giving you full ownership of your data.
+
+## ✨ Features
 
 - Scrapes all bookmarks from your Instapaper account.
 - Supports scraping from specific folders.
@@ -66,13 +90,13 @@ A Python tool to scrape all your saved Instapaper bookmarks and export them to v
 - Securely stores your session for future runs.
 - Modern, modular, and tested architecture.
 
-## Getting Started
+## 🚀 Getting Started
 
-### 1. Requirements
+### 📋 1. Requirements
 
 - Python 3.9+
 
-### 2. Installation
+### 📦 2. Installation
 
 This package is available on PyPI and can be installed with pip:
 
@@ -80,7 +104,7 @@ This package is available on PyPI and can be installed with pip:
 pip install instapaper-scraper
 ```
 
-### 3. Usage
+### 💻 3. Usage
 
 Run the tool from the command line, specifying your desired output format:
 
@@ -95,35 +119,35 @@ instapaper-scraper --format json
 instapaper-scraper --format sqlite --output my_articles.db
 ```
 
-## Configuration
+## ⚙️ Configuration
 
-### Authentication
+### 🔐 Authentication
 
 The script authenticates using one of the following methods, in order of priority:
 
-1. **Command-line Arguments**: Provide your username and password directly when running the script:
+1. **Command-line Arguments**: Provide your username and password directly when running the script:
 
    ```sh
   instapaper-scraper --username your_username --password your_password
   ```
 
-2. **Session Files (`.session_key`, `.instapaper_session`)**: The script attempts to load these files in the following order:
+2. **Session Files (`.session_key`, `.instapaper_session`)**: The script attempts to load these files in the following order:
   a. Path specified by `--session-file` or `--key-file` arguments.
   b. Files in the current working directory (e.g., `./.session_key`).
   c. Files in the user's configuration directory (`~/.config/instapaper-scraper/`).
   After the first successful login, the script creates an encrypted `.instapaper_session` file and a `.session_key` file to reuse your session securely.
 
-3. **Interactive Prompt**: If no other method is available, the script will prompt you for your username and password.
+3. **Interactive Prompt**: If no other method is available, the script will prompt you for your username and password.
 
 > **Note on Security:** Your session file (`.instapaper_session`) and the encryption key (`.session_key`) are stored with secure permissions (read/write for the owner only) to protect your credentials.
 
-### Folder Configuration
+### 📁 Folder Configuration
 
 You can define and quickly access your Instapaper folders using a `config.toml` file. The scraper will look for this file in the following locations (in order of precedence):
 
-1. The path specified by the `--config-path` argument.
-2. `config.toml` in the current working directory.
-3. `~/.config/instapaper-scraper/config.toml`
+1. The path specified by the `--config-path` argument.
+2. `config.toml` in the current working directory.
+3. `~/.config/instapaper-scraper/config.toml`
 
 Here is an example of `config.toml`:
 
@@ -152,7 +176,7 @@ output_filename = "python-articles.db"
 
 When a `config.toml` file is present and no `--folder` argument is provided, the scraper will prompt you to select a folder. You can also specify a folder directly using the `--folder` argument with its key, ID, or slug. Use `--folder=none` to explicitly disable folder mode and scrape all articles.
 
-### Command-line Arguments
+### 💻 Command-line Arguments
 
 | Argument | Description |
 | --- | --- |
@@ -164,7 +188,7 @@ When a `config.toml` file is present and no `--folder` argument is provided, the
 | `--password <pass>` | Your Instapaper account password. |
 | `--add-instapaper-url` | Adds a `instapaper_url` column to the output, containing a full, clickable URL for each article. |
 
-### Output Formats
+### 📄 Output Formats
 
 You can control the output format using the `--format` argument. The supported formats are:
 
@@ -176,7 +200,7 @@ If the `--format` flag is omitted, the script will default to `csv`.
 
 When using `--output <filename>`, the file extension is automatically corrected to match the chosen format. For example, `instapaper-scraper --format json --output my_articles.txt` will create `my_articles.json`.
 
-#### Opening Articles in Instapaper
+#### 📖 Opening Articles in Instapaper
 
 The output data includes a unique `id` for each article. You can use this ID to construct a URL to the article's reader view: `https://www.instapaper.com/read/<article_id>`.
 
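Editor's note: turning an exported `id` into its reader-view link is a single f-string; a sketch using an `id` value taken from the sample CSV output further down:

```python
article_id = "999001234"  # from the example CSV output below
read_url = f"https://www.instapaper.com/read/{article_id}"
print(read_url)  # https://www.instapaper.com/read/999001234
```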
@@ -188,7 +212,7 @@ instapaper-scraper --add-instapaper-url
 ```
 
 This adds a `instapaper_url` field to each article in the JSON output and a `instapaper_url` column in the CSV and SQLite outputs. The original `id` field is preserved.
 
-## How It Works
+## 🛠️ How It Works
 
 The tool is designed with a modular architecture for reliability and maintainability.
 
@@ -197,9 +221,9 @@ The tool is designed with a modular architecture for reliability and maintainabi
 3. **Data Collection**: All fetched articles are aggregated into a single list.
 4. **Export**: Finally, the collected data is written to a file in your chosen format (`.csv`, `.json`, or `.db`).
 
-## Example Output
+## 📊 Example Output
 
-### CSV (`output/bookmarks.csv`) (with --add-instapaper-url)
+### 📄 CSV (`output/bookmarks.csv`) (with --add-instapaper-url)
 
 ```csv
 "id","instapaper_url","title","url"
@@ -207,7 +231,7 @@ The tool is designed with a modular architecture for reliability and maintainabi
 "999002345","https://www.instapaper.com/read/999002345","Article 2","https://www.example.com/page-2/"
 ```
 
-### JSON (`output/bookmarks.json`) (with --add-instapaper-url)
+### 📄 JSON (`output/bookmarks.json`) (with --add-instapaper-url)
 
 ```json
 [
@@ -226,15 +250,33 @@ The tool is designed with a modular architecture for reliability and maintainabi
 ]
 ```
 
-### SQLite (`output/bookmarks.db`)
+### 🗄️ SQLite (`output/bookmarks.db`)
 
 A SQLite database file is created with an `articles` table. The table includes `id`, `title`, and `url` columns. If the `--add-instapaper-url` flag is used, a `instapaper_url` column is also included. This feature is fully backward-compatible and will automatically adapt to the user's installed SQLite version, using an efficient generated column on modern versions (3.31.0+) and a fallback for older versions.
 
-## Development & Testing
+## 🤗 Support and Community
+
+- **🐛 Bug Reports:** For any bugs or unexpected behavior, please [open an issue on GitHub](https://github.com/chriskyfung/InstapaperScraper/issues).
+- **💬 Questions & General Discussion:** For questions, feature requests, or general discussion, please use our [GitHub Discussions](https://github.com/chriskyfung/InstapaperScraper/discussions).
+
+## 🙏 Support the Project
+
+`Instapaper Scraper` is a free and open-source project that requires significant time and effort to maintain and improve. If you find this tool useful, please consider supporting its development. Your contribution helps ensure the project stays healthy, active, and continuously updated.
+
+- **[Sponsor on GitHub](https://github.com/sponsors/chriskyfung):** The best way to support the project with recurring monthly donations. Tiers with special rewards like priority support are available!
+- **[Buy Me a Coffee](https://www.buymeacoffee.com/chriskyfung):** Perfect for a one-time thank you.
+
+## 🤝 Contributing
 
-This project uses `pytest` for testing, `black` for code formatting, and `ruff` for linting.
+Contributions are welcome! Whether it's a bug fix, a new feature, or documentation improvements, please feel free to open a pull request.
 
-### Setup
+Please read the **[Contribution Guidelines](CONTRIBUTING.md)** before you start.
+
+## 🧑‍💻 Development & Testing
+
+This project uses `pytest` for testing, `ruff` for code formatting and linting, and `mypy` for static type checking.
+
+### 🔧 Setup
 
 To install the development dependencies:
 
@@ -242,7 +284,13 @@ To install the development dependencies:
 pip install -e .[dev]
 ```
 
-### Running the Scraper
+To set up the pre-commit hooks:
+
+```sh
+pre-commit install
+```
+
+### ▶️ Running the Scraper
 
 To run the scraper directly without installing the package:
 
@@ -250,7 +298,7 @@ To run the scraper directly without installing the package:
 python -m src.instapaper_scraper.cli
 ```
 
-### Testing
+### Testing
 
 To run the tests, execute the following command from the project root:
 
@@ -264,12 +312,12 @@ To check test coverage:
 pytest --cov=src/instapaper_scraper --cov-report=term-missing
 ```
 
-### Code Quality
+### Code Quality
 
-To format the code with `black`:
+To format the code with `ruff`:
 
 ```sh
-black .
+ruff format .
 ```
 
 To check for linting errors with `ruff`:
@@ -284,10 +332,29 @@ To automatically fix linting errors:
 ruff check . --fix
 ```
 
-## Disclaimer
+To run static type checking with `mypy`:
+
+```sh
+mypy src
+```
+
+To run license checks:
+
+```sh
+licensecheck --show-only-failing
+```
+
+
+## 📜 Disclaimer
 
 This script requires valid Instapaper credentials. Use it responsibly and in accordance with Instapaper’s Terms of Service.
 
-## License
+## 📄 License
+
+This project is licensed under the terms of the **GNU General Public License v3.0**. See the [LICENSE](LICENSE) file for the full license text.
+
+## Contributors
+
+[![Contributors](https://contrib.rocks/image?repo=chriskyfung/InstapaperScraper)](https://github.com/chriskyfung/InstapaperScraper/graphs/contributors)
 
-This project is licensed under the terms of the GNU General Public License v3.0. See the [LICENSE](LICENSE) file for the full license text.
+Made with [contrib.rocks](https://contrib.rocks).
instapaper_scraper-1.1.1.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+instapaper_scraper/__init__.py,sha256=qdcT3tp4KLufWH1u6tOuPVUQaXwakQD0gdjkwY4ljfg,206
+instapaper_scraper/api.py,sha256=67ZeiVjsZpGspB8S3ni8FS6LBAOHXBc_oz3vEDWDNms,12672
+instapaper_scraper/auth.py,sha256=OpgjbdI697FitumiyznWjey5-R2ZuxAEATaMz9NNnTc,7092
+instapaper_scraper/cli.py,sha256=YL9c7kksmj5iGKRvVqG0KO4rBbhTg5c9Lgvsf_brRPA,7579
+instapaper_scraper/constants.py,sha256=ubFWa47985lIz58qokMC0xQzTmCB6NOa17KFgWLn65E,403
+instapaper_scraper/exceptions.py,sha256=CptHoZe4NOhdjOoyXkZEMFgQC6oKtzjRljywwDEtsTg,134
+instapaper_scraper/output.py,sha256=cadyUOaGQ5Ct5iLiEkHDvN2cqYc1WmJTvAa7OxFjg0w,5618
+instapaper_scraper-1.1.1.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
+instapaper_scraper-1.1.1.dist-info/METADATA,sha256=CDiUTjY5eu1OTlFhhBNA1irP6gTNTLw6Ra-RIbkJeKY,14320
+instapaper_scraper-1.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+instapaper_scraper-1.1.1.dist-info/entry_points.txt,sha256=7AvRgN5fvtas_Duxdz-JPbDN6A1Lq2GaTfTSv54afxA,67
+instapaper_scraper-1.1.1.dist-info/top_level.txt,sha256=kiU9nLkqPOVPLsP4QMHuBFjAmoIKfftYmGV05daLrcc,19
+instapaper_scraper-1.1.1.dist-info/RECORD,,
instapaper_scraper-1.1.0rc1.dist-info/RECORD DELETED
@@ -1,13 +0,0 @@
-instapaper_scraper/__init__.py,sha256=qdcT3tp4KLufWH1u6tOuPVUQaXwakQD0gdjkwY4ljfg,206
-instapaper_scraper/api.py,sha256=-Dq5fOAGSGopb-qonIbETd9ZlxWdULKRgl1DCOuVemY,11618
-instapaper_scraper/auth.py,sha256=VTBE9KhGGJm0KbMT5DCTMCbh-N3HiJuJ9wMDb8CyZT4,7015
-instapaper_scraper/cli.py,sha256=wsQxTVFIyJq3EQiAtz7dCjg1vI2_Y9quZv4ifuEPDU8,7495
-instapaper_scraper/constants.py,sha256=ubFWa47985lIz58qokMC0xQzTmCB6NOa17KFgWLn65E,403
-instapaper_scraper/exceptions.py,sha256=CptHoZe4NOhdjOoyXkZEMFgQC6oKtzjRljywwDEtsTg,134
-instapaper_scraper/output.py,sha256=lxJgW71-m1YuMYJHeK6nu479pk_3bQGc0axzNCvxtZQ,5338
-instapaper_scraper-1.1.0rc1.dist-info/licenses/LICENSE,sha256=IwGE9guuL-ryRPEKi6wFPI_zOhg7zDZbTYuHbSt_SAk,35823
-instapaper_scraper-1.1.0rc1.dist-info/METADATA,sha256=O-VJZg1yN3cuPRfBCevmD9_IrOR07NGpzrgZXI2-6hk,11637
-instapaper_scraper-1.1.0rc1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
-instapaper_scraper-1.1.0rc1.dist-info/entry_points.txt,sha256=7AvRgN5fvtas_Duxdz-JPbDN6A1Lq2GaTfTSv54afxA,67
-instapaper_scraper-1.1.0rc1.dist-info/top_level.txt,sha256=kiU9nLkqPOVPLsP4QMHuBFjAmoIKfftYmGV05daLrcc,19
-instapaper_scraper-1.1.0rc1.dist-info/RECORD,,