creddit 0.0.1__py3-none-any.whl → 0.0.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
creddit/__init__.py CHANGED
@@ -1,5 +1,5 @@
1
- """
2
- creddit - A CLI client for Reddit
3
- """
4
-
5
- from .terminal import run as run
1
+ """
2
+ creddit - A CLI client for Reddit
3
+ """
4
+
5
+ from .terminal import run as run
creddit/__main__.py CHANGED
@@ -1,4 +1,4 @@
1
- from creddit.terminal import run
2
-
3
- if __name__ == "__main__":
4
- run()
1
+ from creddit.terminal import run
2
+
3
+ if __name__ == "__main__":
4
+ run()
creddit/config.py CHANGED
@@ -1,113 +1,113 @@
1
- """
2
- A file that stores all the variables that must be read from the env file. Helps me keep track of them at one place, instead of reading and using them where they are needed. This application relies on env for config.
3
-
4
- Config is stored in a JSON file. TOML, YAML, and ini were options but
5
-
6
- TOML - built in library can't write. Don't want to package another module.
7
- YAML - Built in and ugly
8
- .ini - Ugly
9
-
10
- """
11
-
12
- from json import JSONDecodeError
13
- from json import dump as json_dump
14
- from json import load as json_load
15
- from os import getenv
16
- from pathlib import Path
17
- from sys import platform
18
- from typing import Literal, NotRequired, TypedDict
19
-
20
-
21
- class Config(TypedDict):
22
- ignored_users: list[str]
23
- no_of_posts_to_print: int
24
- ignore_all_mod_posts: bool
25
- default_subreddit: NotRequired[str]
26
-
27
-
28
- default_config: Config = {
29
- "ignored_users": ["2soccer2bot", "AutoModerator"],
30
- "no_of_posts_to_print": 10,
31
- "ignore_all_mod_posts": True,
32
- }
33
-
34
- config_folder: Path
35
-
36
- if platform == "win32":
37
- config_folder = Path(getenv("LOCALAPPDATA", "./config/"))
38
- elif platform == "darwin":
39
- config_folder = Path("~/Library/Application Support/")
40
- elif platform == "linux":
41
- config_folder = Path("~/.config/")
42
- else:
43
- config_folder = Path("./config/")
44
-
45
- config_folder = config_folder / "creddit"
46
- config_folder.mkdir(parents=True, exist_ok=True)
47
-
48
- # It's actually a cursed thing that you can divide paths.
49
- config_path: Path = config_folder / "config.json"
50
- """The path of the config"""
51
-
52
-
53
- def check_config_existence() -> bool:
54
- """Checks if a config file exists, and that it valid
55
-
56
- Returns:
57
- bool: True if file exists and contains all required keys
58
- """
59
- if not Path(config_path).exists():
60
- return False
61
-
62
- with open(config_path, "r") as f:
63
- try:
64
- config = json_load(f)
65
- except JSONDecodeError:
66
- return False
67
- if not (config or (default_config.keys() < config.keys())):
68
- return False
69
-
70
- return True
71
-
72
-
73
- def read_config(config_path: Path = config_path) -> Config:
74
- """Read the config file"""
75
-
76
- with open(config_path, "r") as f:
77
- config: Config = json_load(f)
78
-
79
- required_keys = default_config.keys()
80
-
81
- if required_keys <= config.keys():
82
- # Looks like all keys are present
83
- return config
84
-
85
- raise RuntimeError("Looks like some values in the config file are missing")
86
-
87
-
88
- def create_config(c: dict | Config = default_config) -> bool:
89
- required_keys = default_config.keys()
90
-
91
- if required_keys <= c.keys():
92
- # All the must have keys are present
93
- with open(config_path, "w+") as f:
94
- json_dump(c, f, indent=4, sort_keys=True)
95
- return True
96
-
97
- return False
98
-
99
-
100
- def edit_config(new_config: dict) -> bool:
101
- c = read_config()
102
- key: Literal[
103
- "ignored_users",
104
- "no_of_posts_to_print",
105
- "ignore_all_mod_posts",
106
- "default_subreddit",
107
- ]
108
- for key in new_config:
109
- c[key] = new_config[key]
110
- with open(config_path, "w") as f:
111
- json_dump(c, f, indent=4, sort_keys=True)
112
- return True
113
- return False
1
+ """
2
+ A file that stores all the variables that must be read from the env file. Helps me keep track of them at one place, instead of reading and using them where they are needed. This application relies on env for config.
3
+
4
+ Config is stored in a JSON file. TOML, YAML, and ini were options but
5
+
6
+ TOML - built in library can't write. Don't want to package another module.
7
+ YAML - Built in and ugly
8
+ .ini - Ugly
9
+
10
+ """
11
+
12
+ from json import JSONDecodeError
13
+ from json import dump as json_dump
14
+ from json import load as json_load
15
+ from os import getenv
16
+ from pathlib import Path
17
+ from sys import platform
18
+ from typing import Literal, NotRequired, TypedDict
19
+
20
+
21
+ class Config(TypedDict):
22
+ ignored_users: list[str]
23
+ no_of_posts_to_print: int
24
+ ignore_all_mod_posts: bool
25
+ default_subreddit: NotRequired[str]
26
+
27
+
28
+ default_config: Config = {
29
+ "ignored_users": ["2soccer2bot", "AutoModerator"],
30
+ "no_of_posts_to_print": 10,
31
+ "ignore_all_mod_posts": True,
32
+ }
33
+
34
+ config_folder: Path
35
+
36
+ if platform == "win32":
37
+ config_folder = Path(getenv("LOCALAPPDATA", "./config/"))
38
+ elif platform == "darwin":
39
+ config_folder = Path("~/Library/Application Support/")
40
+ elif platform == "linux":
41
+ config_folder = Path("~/.config/")
42
+ else:
43
+ config_folder = Path("./config/")
44
+
45
+ config_folder = config_folder / "creddit"
46
+ config_folder.mkdir(parents=True, exist_ok=True)
47
+
48
+ # It's actually a cursed thing that you can divide paths.
49
+ config_path: Path = config_folder / "config.json"
50
+ """The path of the config"""
51
+
52
+
53
+ def check_config_existence() -> bool:
54
+ """Checks if a config file exists, and that it valid
55
+
56
+ Returns:
57
+ bool: True if file exists and contains all required keys
58
+ """
59
+ if not Path(config_path).exists():
60
+ return False
61
+
62
+ with open(config_path, "r") as f:
63
+ try:
64
+ config = json_load(f)
65
+ except JSONDecodeError:
66
+ return False
67
+ if not (config or (default_config.keys() < config.keys())):
68
+ return False
69
+
70
+ return True
71
+
72
+
73
+ def read_config(config_path: Path = config_path) -> Config:
74
+ """Read the config file"""
75
+
76
+ with open(config_path, "r") as f:
77
+ config: Config = json_load(f)
78
+
79
+ required_keys = default_config.keys()
80
+
81
+ if required_keys <= config.keys():
82
+ # Looks like all keys are present
83
+ return config
84
+
85
+ raise RuntimeError("Looks like some values in the config file are missing")
86
+
87
+
88
+ def create_config(c: dict | Config = default_config) -> bool:
89
+ required_keys = default_config.keys()
90
+
91
+ if required_keys <= c.keys():
92
+ # All the must have keys are present
93
+ with open(config_path, "w+") as f:
94
+ json_dump(c, f, indent=4, sort_keys=True)
95
+ return True
96
+
97
+ return False
98
+
99
+
100
+ def edit_config(new_config: dict) -> bool:
101
+ c = read_config()
102
+ key: Literal[
103
+ "ignored_users",
104
+ "no_of_posts_to_print",
105
+ "ignore_all_mod_posts",
106
+ "default_subreddit",
107
+ ]
108
+ for key in new_config:
109
+ c[key] = new_config[key]
110
+ with open(config_path, "w") as f:
111
+ json_dump(c, f, indent=4, sort_keys=True)
112
+ return True
113
+ return False
creddit/reddit.py CHANGED
@@ -1,123 +1,123 @@
1
- """
2
- Manage API connections with and responses from Reddit
3
- """
4
-
5
- from functools import lru_cache
6
-
7
- from requests import get as r_get
8
- from requests.exceptions import ConnectionError
9
-
10
-
11
- @lru_cache(maxsize=None)
12
- def get_api_response(url: str) -> dict:
13
- """
14
- Returns the json-encoded content of the api response, by requesting the given URL using the built-in requests library.
15
- Parameters:
16
- url (str):URL to load using requests library
17
-
18
- Returns:
19
- requests.Response
20
- """
21
-
22
- # Caching the responses for every URL, since I might be calling it again and again for posts/comments, etc, and the response doesn't really change in a few minutes. When the application is run next (after a few hours, days, weeks), it would be running as a fresh instance and the response would be fetched again.
23
-
24
- headers = {
25
- "User-Agent": "cli:reddit-cli:v1.0.0 (by /u/vishalnandagopal)"
26
- # "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/130.0"
27
- }
28
- try:
29
- api_response = r_get(url=url, headers=headers)
30
- if api_response.status_code != 200:
31
- raise RuntimeError(f"Error with API - \n{api_response}")
32
- return api_response.json()
33
- except ConnectionError:
34
- raise ConnectionError("You seem to be offline. Try connecting to a network.")
35
-
36
-
37
- def get_posts_in_a_subreddit(
38
- subreddit: str, limit: int = 10, after_post: str | None = None
39
- ) -> list[dict]:
40
- """
41
- Constructs the api_url to load the subreddit, and loads the api_response and returns it as a dict.
42
-
43
- Parameters:
44
- limit (int):number of posts that should be present in the json response.
45
-
46
- after_post (int):The posts that are after this id in the feed are loaded.
47
-
48
- For example if you give the ID of the 9th post (eg: yjbttz) and limit=10, the posts 10,11,12 and so on till 19 are loaded.
49
-
50
- Returns
51
- Dict: Subreddit response as key value pair of index and post_details.
52
- """
53
-
54
- subreddit_url = f"https://api.reddit.com/r/{subreddit}?limit={limit}"
55
-
56
- if after_post:
57
- subreddit_url += f"&after=t3_{after_post}"
58
-
59
- subreddit_response = get_api_response(subreddit_url)
60
- return subreddit_response["data"]["children"]
61
-
62
-
63
- def get_post_dict(post_id) -> dict:
64
- """
65
- Constructs the api_url to load the comments of a post, and loads the api_response and returns it as a tuple, with the first element being the post text, and the second element being a dict of the comments.
66
-
67
- Parameters:
68
- post_id (str):The ID of the post to load comments for. Eg: yjbttz.
69
-
70
- Returns:
71
- dict: The JSON response of the API, parsed as a dict
72
- """
73
-
74
- # LRU caching this function doesn't really help, since it only calls the API and returns it. The API function is already cached using the same lru_cache decorator.
75
- post_url = f"https://api.reddit.com/{post_id}"
76
- return get_api_response(post_url)
77
-
78
-
79
- def get_post_text(post_id: str) -> str:
80
- """
81
- Fetches the text of the post, if any
82
-
83
- Parameters:
84
- post_id (str): The post ID
85
-
86
- Returns:
87
- str: The text of the post
88
- """
89
- try:
90
- return get_post_dict(post_id)[0]["data"]["children"][0]["data"]["selftext"]
91
- except KeyError:
92
- return ""
93
-
94
-
95
- def get_comments_dict(post_id: str) -> dict[int, dict]:
96
- """
97
- Fetches the dict for the post and returns only a tuple of the text in the post (if any), and a dict of the comments. the api_url to load the comments of a post, and loads the api_response and returns it as a tuple, with the first element being the post text, and the second element being a dict of the comments.
98
-
99
- Parameters:
100
- post_id (str):The ID of the post to load comments for. Eg: yjbttz.
101
-
102
- Returns:
103
- dict: The comments dict. Key value pair of int and each comment's dict
104
- """
105
- post_comments_response = get_post_dict(post_id)
106
- return post_comments_response[1]["data"]["children"]
107
-
108
-
109
- def get_link_in_post(post_id: str) -> str:
110
- """
111
- Fetches the link in the post. If it is a reddit video, it sends the fallback URL so that it links directly to the video. Reddit redirects video links to the post page, which is unnecessary
112
-
113
- Parameters:
114
- post_id (str): The ID of the post to fetch the posted link
115
-
116
- Returns:
117
- str: The link
118
- """
119
- _ = get_post_dict(post_id)[0]["data"]["children"][0]["data"]
120
- if _["is_video"]:
121
- return _["media"]["reddit_video"]["fallback_url"]
122
- else:
123
- return _["url"]
1
+ """
2
+ Manage API connections with and responses from Reddit
3
+ """
4
+
5
+ from functools import lru_cache
6
+
7
+ from requests import get as r_get
8
+ from requests.exceptions import ConnectionError
9
+
10
+
11
+ @lru_cache(maxsize=None)
12
+ def get_api_response(url: str) -> dict:
13
+ """
14
+ Returns the json-encoded content of the api response, by requesting the given URL using the built-in requests library.
15
+ Parameters:
16
+ url (str):URL to load using requests library
17
+
18
+ Returns:
19
+ requests.Response
20
+ """
21
+
22
+ # Caching the responses for every URL, since I might be calling it again and again for posts/comments, etc, and the response doesn't really change in a few minutes. When the application is run next (after a few hours, days, weeks), it would be running as a fresh instance and the response would be fetched again.
23
+
24
+ headers = {
25
+ "User-Agent": "cli:reddit-cli:v1.0.0 (by /u/vishalnandagopal)"
26
+ # "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:104.0) Gecko/20100101 Firefox/130.0"
27
+ }
28
+ try:
29
+ api_response = r_get(url=url, headers=headers)
30
+ if api_response.status_code != 200:
31
+ raise RuntimeError(f"Error with API - \n{api_response}")
32
+ return api_response.json()
33
+ except ConnectionError:
34
+ raise ConnectionError("You seem to be offline. Try connecting to a network.")
35
+
36
+
37
+ def get_posts_in_a_subreddit(
38
+ subreddit: str, limit: int = 10, after_post: str | None = None
39
+ ) -> list[dict]:
40
+ """
41
+ Constructs the api_url to load the subreddit, and loads the api_response and returns it as a dict.
42
+
43
+ Parameters:
44
+ limit (int):number of posts that should be present in the json response.
45
+
46
+ after_post (int):The posts that are after this id in the feed are loaded.
47
+
48
+ For example if you give the ID of the 9th post (eg: yjbttz) and limit=10, the posts 10,11,12 and so on till 19 are loaded.
49
+
50
+ Returns
51
+ Dict: Subreddit response as key value pair of index and post_details.
52
+ """
53
+
54
+ subreddit_url = f"https://api.reddit.com/r/{subreddit}?limit={limit}"
55
+
56
+ if after_post:
57
+ subreddit_url += f"&after=t3_{after_post}"
58
+
59
+ subreddit_response = get_api_response(subreddit_url)
60
+ return subreddit_response["data"]["children"]
61
+
62
+
63
+ def get_post_dict(post_id) -> dict:
64
+ """
65
+ Constructs the api_url to load the comments of a post, and loads the api_response and returns it as a tuple, with the first element being the post text, and the second element being a dict of the comments.
66
+
67
+ Parameters:
68
+ post_id (str):The ID of the post to load comments for. Eg: yjbttz.
69
+
70
+ Returns:
71
+ dict: The JSON response of the API, parsed as a dict
72
+ """
73
+
74
+ # LRU caching this function doesn't really help, since it only calls the API and returns it. The API function is already cached using the same lru_cache decorator.
75
+ post_url = f"https://api.reddit.com/{post_id}"
76
+ return get_api_response(post_url)
77
+
78
+
79
+ def get_post_text(post_id: str) -> str:
80
+ """
81
+ Fetches the text of the post, if any
82
+
83
+ Parameters:
84
+ post_id (str): The post ID
85
+
86
+ Returns:
87
+ str: The text of the post
88
+ """
89
+ try:
90
+ return get_post_dict(post_id)[0]["data"]["children"][0]["data"]["selftext"]
91
+ except KeyError:
92
+ return ""
93
+
94
+
95
+ def get_comments_dict(post_id: str) -> dict[int, dict]:
96
+ """
97
+ Fetches the dict for the post and returns only a tuple of the text in the post (if any), and a dict of the comments. the api_url to load the comments of a post, and loads the api_response and returns it as a tuple, with the first element being the post text, and the second element being a dict of the comments.
98
+
99
+ Parameters:
100
+ post_id (str):The ID of the post to load comments for. Eg: yjbttz.
101
+
102
+ Returns:
103
+ dict: The comments dict. Key value pair of int and each comment's dict
104
+ """
105
+ post_comments_response = get_post_dict(post_id)
106
+ return post_comments_response[1]["data"]["children"]
107
+
108
+
109
+ def get_link_in_post(post_id: str) -> str:
110
+ """
111
+ Fetches the link in the post. If it is a reddit video, it sends the fallback URL so that it links directly to the video. Reddit redirects video links to the post page, which is unnecessary
112
+
113
+ Parameters:
114
+ post_id (str): The ID of the post to fetch the posted link
115
+
116
+ Returns:
117
+ str: The link
118
+ """
119
+ _ = get_post_dict(post_id)[0]["data"]["children"][0]["data"]
120
+ if _["is_video"]:
121
+ return _["media"]["reddit_video"]["fallback_url"]
122
+ else:
123
+ return _["url"]