tumblrbot 1.1.5__tar.gz → 1.2.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/.gitignore +6 -6
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/PKG-INFO +3 -2
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/README.md +1 -1
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/pyproject.toml +2 -1
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/examples.py +32 -7
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/generate.py +1 -1
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/models.py +1 -1
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/settings.py +80 -50
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/tumblr.py +2 -26
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/.github/dependabot.yml +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/UNLICENSE +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/__init__.py +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/__main__.py +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/__init__.py +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/download.py +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/fine_tune.py +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/__init__.py +0 -0
- {tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/common.py +0 -0
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/.gitignore
@@ -1,3 +1,8 @@
+# Custom
+data
+*.toml
+*.json*
+
 # Byte-compiled / optimized / DLL files
 __pycache__/
 *.py[codz]
@@ -207,9 +212,4 @@ marimo/_lsp/
 __marimo__/

 # Streamlit
-.streamlit/secrets.toml
-
-# Custom
-data
-*.toml
-*.jsonl
+.streamlit/secrets.toml
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/PKG-INFO
@@ -1,9 +1,10 @@
 Metadata-Version: 2.4
 Name: tumblrbot
-Version: 1.1.5
+Version: 1.2.1
 Summary: An updated bot that posts to Tumblr, based on your very own blog!
 Requires-Python: >= 3.13
 Description-Content-Type: text/markdown
+Requires-Dist: keyring
 Requires-Dist: more-itertools
 Requires-Dist: openai
 Requires-Dist: pydantic
@@ -61,6 +62,7 @@ This fork is largely a rewrite of the source code with similarities in its struc
 - Added the option to [Download] the latest posts from the [specified blogs][Settings].
 - Added the option to remove posts flagged by the [Moderation API].
 - Added the option to automatically [Fine-Tune] the examples on the [specified base model][Settings].
+- Added the ability to add custom prompts and responses to the example data.
 - Changed to now escape examples automatically.
 - Set encoding for reading post data to `UTF-8` to fix decoding errors.
 - Added newlines between paragraphs.
@@ -83,7 +85,6 @@ To-Do:
 - Add documentation.
 - Finish updating [README.md].
 - Change the differences list to instead just be a list of features.
-- Allow adding arbitrary data to examples.


 **Please submit an issue or contact us for features you want to added/reimplemented.**
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/README.md
@@ -44,6 +44,7 @@ This fork is largely a rewrite of the source code with similarities in its struc
 - Added the option to [Download] the latest posts from the [specified blogs][Settings].
 - Added the option to remove posts flagged by the [Moderation API].
 - Added the option to automatically [Fine-Tune] the examples on the [specified base model][Settings].
+- Added the ability to add custom prompts and responses to the example data.
 - Changed to now escape examples automatically.
 - Set encoding for reading post data to `UTF-8` to fix decoding errors.
 - Added newlines between paragraphs.
@@ -66,7 +67,6 @@ To-Do:
 - Add documentation.
 - Finish updating [README.md].
 - Change the differences list to instead just be a list of features.
-- Allow adding arbitrary data to examples.


 **Please submit an issue or contact us for features you want to added/reimplemented.**
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/pyproject.toml
@@ -1,10 +1,11 @@
 [project]
 name = "tumblrbot"
-version = "1.1.5"
+version = "1.2.1"
 description = "An updated bot that posts to Tumblr, based on your very own blog!"
 readme = "README.md"
 requires-python = ">= 3.13"
 dependencies = [
+    "keyring",
     "more-itertools",
     "openai",
     "pydantic",
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/examples.py
@@ -1,8 +1,10 @@
 from collections.abc import Generator
 from dataclasses import dataclass
+from json import loads
 from math import ceil
 from pathlib import Path
 from re import search
+from typing import IO

 import rich
 from more_itertools import chunked
@@ -76,17 +78,40 @@ class ExamplesWriter(UtilClass):
         else:
             yield from posts

+    def get_custom_prompts(self) -> Generator[tuple[str, str]]:
+        if self.config.custom_prompts_file.exists():
+            text = self.config.custom_prompts_file.read_text(encoding="utf_8")
+            yield from loads(text).items()
+
     def write_examples(self) -> None:
         self.config.examples_file.parent.mkdir(parents=True, exist_ok=True)
+
         with self.config.examples_file.open("w", encoding="utf_8") as fp:
             for post in self.get_filtered_posts():
-
-
-
-
-
-
+                self.write_example(
+                    None,
+                    post.get_text_content(),
+                    fp,
+                )
+
+            for user_message, assistant_response in self.get_custom_prompts():
+                self.write_example(
+                    user_message,
+                    assistant_response,
+                    fp,
                 )
-                fp.write(f"{example.model_dump_json()}\n")

         rich.print(f"[bold]The examples file can be found at: '{self.config.examples_file}'\n")
+
+    def write_example(self, user_input: str | None, assistant_message: str, fp: IO[str]) -> None:
+        example = Example(
+            messages=[
+                Example.Message(role="developer", content=self.config.developer_message),
+                Example.Message(role="assistant", content=assistant_message),
+            ],
+        )
+
+        if user_input:
+            example.messages.insert(1, Example.Message(role="user", content=user_input))
+
+        fp.write(f"{example.model_dump_json()}\n")
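The new `get_custom_prompts`/`write_example` pair implies a simple on-disk contract: `custom_prompts.json` holds a single JSON object whose keys are user prompts and whose values are the desired assistant responses, and each pair becomes one chat-format line in `examples.jsonl`. Below is a minimal standalone sketch of that contract, not the package's own code: plain dicts stand in for the `Example` model, and the prompt/response text is invented.

```python
import json
from pathlib import Path

# Hypothetical contents for the file named by the new custom_prompts_file setting.
custom_prompts = {
    "Please write a post about cats.": "cats are just small landlords who accept rent in treats",
    "Please write a post about Mondays.": "monday is a rumor started by calendars",
}
Path("custom_prompts.json").write_text(json.dumps(custom_prompts, indent=2), encoding="utf_8")

developer_message = "You are a Tumblr post bot. Please generate a Tumblr post in accordance with the user's request."

# Each (user prompt, assistant response) pair becomes one JSONL training example,
# mirroring the message order write_example() produces: developer, user, assistant.
with Path("examples.jsonl").open("a", encoding="utf_8") as fp:
    loaded = json.loads(Path("custom_prompts.json").read_text(encoding="utf_8"))
    for user_message, assistant_response in loaded.items():
        example = {
            "messages": [
                {"role": "developer", "content": developer_message},
                {"role": "user", "content": user_message},
                {"role": "assistant", "content": assistant_response},
            ],
        }
        fp.write(f"{json.dumps(example)}\n")
```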
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/flow/generate.py
@@ -21,7 +21,7 @@ class DraftGenerator(UtilClass):

     def generate_content(self) -> Post.Block:
         content = self.openai.responses.create(
-            input=self.config.
+            input=self.config.user_message,
             instructions=self.config.developer_message,
             model=self.config.fine_tuned_model,
         ).output_text
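For orientation, a hedged standalone sketch of the same Responses API call: the new `user_message` setting is passed as `input` and `developer_message` as `instructions`. The model name below is the settings' base-model default used as a placeholder; in the package the call uses the fine-tuned model ID instead.

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

draft = client.responses.create(
    input="Please write a comical Tumblr post.",
    instructions="You are a Tumblr post bot. Please generate a Tumblr post in accordance with the user's request.",
    model="gpt-4.1-nano-2025-04-14",  # placeholder; normally the fine-tuned model ID
).output_text
print(draft)
```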
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/models.py
@@ -23,7 +23,7 @@ class Post(FullyValidatedModel):
     blocks: list[int] = []  # noqa: RUF012

     timestamp: SkipJsonSchema[int] = 0
-    tags: Annotated[
+    tags: Annotated[list[str], PlainSerializer(",".join)] = []  # noqa: RUF012
     state: SkipJsonSchema[Literal["published", "queued", "draft", "private", "unapproved"]] = "published"

     content: SkipJsonSchema[list[Block]] = []  # noqa: RUF012
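The `tags` change makes the list serialize as a single comma-separated string whenever the model is dumped, presumably to match the comma-separated tags string the Tumblr post endpoint accepts. A standalone illustration of the `PlainSerializer(",".join)` behaviour (the model name here is invented):

```python
from typing import Annotated

from pydantic import BaseModel, PlainSerializer


class PostSketch(BaseModel):
    # The serializer joins the tag list into one string on dump.
    tags: Annotated[list[str], PlainSerializer(",".join)] = []


print(PostSketch(tags=["bots", "tumblr"]).model_dump())       # {'tags': 'bots,tumblr'}
print(PostSketch(tags=["bots", "tumblr"]).model_dump_json())  # {"tags":"bots,tumblr"}
```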
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/settings.py
@@ -1,63 +1,28 @@
+import json
 from collections.abc import Generator, Sequence
 from pathlib import Path
-from typing import TYPE_CHECKING, Any, Self, override
+from typing import TYPE_CHECKING, Any, ClassVar, Self, override

 import rich
+import tomlkit
+from keyring import get_password, set_password
 from openai.types import ChatModel
 from pydantic import Field, PositiveFloat, PositiveInt, Secret, model_validator
 from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict, TomlConfigSettingsSource
-from
-from
+from requests_oauthlib import OAuth2Session
+from rich.prompt import Confirm, Prompt
+from tomlkit import comment, document

 if TYPE_CHECKING:
     from _typeshed import StrPath


-class TOMLSettings(BaseSettings):
+class Config(BaseSettings):
     model_config = SettingsConfigDict(
         extra="ignore",
         validate_assignment=True,
         validate_return=True,
         validate_by_name=True,
-    )
-
-    @override
-    @classmethod
-    def settings_customise_sources(cls, settings_cls: type[BaseSettings], *args: PydanticBaseSettingsSource, **kwargs: PydanticBaseSettingsSource) -> tuple[PydanticBaseSettingsSource, ...]:
-        return (TomlConfigSettingsSource(settings_cls),)
-
-    @model_validator(mode="after")
-    def write_to_file(self) -> Self:
-        # Make sure to call this if updating values in nested models.
-        toml_files = self.model_config.get("toml_file")
-        if isinstance(toml_files, (Path, str)):
-            self.dump_toml(toml_files)
-        elif isinstance(toml_files, Sequence):
-            for toml_file in toml_files:
-                self.dump_toml(toml_file)
-
-        return self
-
-    def dump_toml(self, toml_file: "StrPath") -> None:
-        toml_table = document()
-
-        dumped_model = self.model_dump(mode="json")
-        for name, field in self.__class__.model_fields.items():
-            if field.description:
-                for line in field.description.split(". "):
-                    toml_table.add(comment(f"{line.removesuffix('.')}."))
-
-            value = getattr(self, name)
-            toml_table[name] = value.get_secret_value() if isinstance(value, Secret) else dumped_model[name]
-
-        Path(toml_file).write_text(
-            dumps(toml_table),
-            encoding="utf_8",
-        )
-
-
-class Config(TOMLSettings):
-    model_config = SettingsConfigDict(
         cli_parse_args=True,
         cli_avoid_json=True,
         cli_kebab_case=True,
@@ -77,6 +42,7 @@ class Config(TOMLSettings):
         description="The identifiers of the blogs which post data will be downloaded from. These must be blogs associated with the same account as the configured Tumblr secret tokens.",
     )
     data_directory: Path = Field(Path("data"), description="Where to store downloaded post data.")
+    custom_prompts_file: Path = Field(Path("custom_prompts.json"), description="Where to read in custom prompts from.")
     examples_file: Path = Field(Path("examples.jsonl"), description="Where to output the examples that will be used to fine-tune the model.")
     job_id: str = Field("", description="The fine-tuning job ID that will be polled on next run.")
     expected_epochs: PositiveInt = Field(3, description="The expected number of epochs fine-tuning will be run for. This will be updated during fine-tuning.")
@@ -84,12 +50,15 @@

     base_model: ChatModel = Field("gpt-4.1-nano-2025-04-14", description="The name of the model that will be fine-tuned by the generated training data.")
     developer_message: str = Field("You are a Tumblr post bot. Please generate a Tumblr post in accordance with the user's request.", description="The developer message used by the OpenAI API to generate drafts.")
-
+    user_message: str = Field("Please write a comical Tumblr post.", description="The user input used by the OpenAI API to generate drafts.")

     @override
-
-
+    @classmethod
+    def settings_customise_sources(cls, settings_cls: type[BaseSettings], *args: PydanticBaseSettingsSource, **kwargs: PydanticBaseSettingsSource) -> tuple[PydanticBaseSettingsSource, ...]:
+        return (TomlConfigSettingsSource(settings_cls),)

+    @model_validator(mode="after")
+    def write_to_file(self) -> Self:
         if not self.download_blog_identifiers:
             rich.print("Enter the [cyan]identifiers of your blogs[/] that data should be [bold purple]downloaded[/] from, separated by commas.")
             self.download_blog_identifiers = list(map(str.strip, Prompt.ask("[bold]Example: staff.tumblr.com,changes").split(",")))
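`Config` now subclasses `BaseSettings` directly and keeps the TOML-only source override, so its values come from the file named in `model_config` rather than environment variables or init arguments. A self-contained sketch of that pydantic-settings pattern follows; the file name and field are invented, only the structure mirrors the diff.

```python
from pathlib import Path
from typing import override

from pydantic_settings import (
    BaseSettings,
    PydanticBaseSettingsSource,
    SettingsConfigDict,
    TomlConfigSettingsSource,
)

# A throwaway TOML file standing in for config.toml / env.toml.
Path("demo.toml").write_text('data_directory = "data"\n', encoding="utf_8")


class DemoSettings(BaseSettings):
    model_config = SettingsConfigDict(toml_file="demo.toml")

    data_directory: Path = Path("data")

    @override
    @classmethod
    def settings_customise_sources(cls, settings_cls: type[BaseSettings], *args: PydanticBaseSettingsSource, **kwargs: PydanticBaseSettingsSource) -> tuple[PydanticBaseSettingsSource, ...]:
        # Only the TOML file is consulted as a settings source.
        return (TomlConfigSettingsSource(settings_cls),)


print(DemoSettings().data_directory)  # data
```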
@@ -98,12 +67,38 @@
             rich.print("Enter the [cyan]identifier of your blog[/] that drafts should be [bold purple]uploaded[/] to.")
             self.upload_blog_identifier = Prompt.ask("[bold]Examples: staff.tumblr.com or changes").strip()

+        toml_files = self.model_config.get("toml_file")
+        if isinstance(toml_files, (Path, str)):
+            self.dump_toml(toml_files)
+        elif isinstance(toml_files, Sequence):
+            for toml_file in toml_files:
+                self.dump_toml(toml_file)

-
+        return self
+
+    def dump_toml(self, toml_file: "StrPath") -> None:
+        toml_table = document()
+
+        dumped_model = self.model_dump(mode="json")
+        for name, field in self.__class__.model_fields.items():
+            if field.description:
+                for line in field.description.split(". "):
+                    toml_table.add(comment(f"{line.removesuffix('.')}."))
+
+            value = getattr(self, name)
+            toml_table[name] = value.get_secret_value() if isinstance(value, Secret) else dumped_model[name]
+
+        Path(toml_file).write_text(
+            tomlkit.dumps(toml_table),  # pyright: ignore[reportUnknownMemberType]
+            encoding="utf_8",
+        )
+
+
+class Tokens(BaseSettings):
+    service_name: ClassVar = "tumblrbot"
     model_config = SettingsConfigDict(toml_file="env.toml")

     openai_api_key: Secret[str] = Secret("")
-
     tumblr_client_id: Secret[str] = Secret("")
     tumblr_client_secret: Secret[str] = Secret("")
     tumblr_token: Secret[Any] = Secret({})
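`dump_toml` now lives on `Config` itself and uses `tomlkit` to write each field back out with its `Field` description rendered as `#` comments above the key. A tiny standalone illustration of that tomlkit pattern, reusing two of the descriptions from the settings above:

```python
import tomlkit
from tomlkit import comment, document

doc = document()
doc.add(comment("Where to store downloaded post data."))
doc["data_directory"] = "data"
doc.add(comment("Where to read in custom prompts from."))
doc["custom_prompts_file"] = "custom_prompts.json"

print(tomlkit.dumps(doc))
# # Where to store downloaded post data.
# data_directory = "data"
# # Where to read in custom prompts from.
# custom_prompts_file = "custom_prompts.json"
```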
@@ -124,8 +119,43 @@ class Tokens(TOMLSettings):
     def model_post_init(self, context: object) -> None:
         super().model_post_init(context)

-
+        for name, _ in self:
+            if value := get_password(self.service_name, name):
+                setattr(self, name, Secret(json.loads(value)))
+
+    @model_validator(mode="after")
+    def write_to_keyring(self) -> Self:
+        if not self.openai_api_key.get_secret_value() or Confirm.ask("Reset OpenAI API key?", default=False):
             (self.openai_api_key,) = self.online_token_prompt("https://platform.openai.com/api-keys", "API key")

-        if not (
+        if not all(
+            map(
+                Secret[Any].get_secret_value,
+                [
+                    self.tumblr_client_id,
+                    self.tumblr_client_secret,
+                    self.tumblr_token,
+                ],
+            ),
+        ) or Confirm.ask("Reset Tumblr API tokens?", default=False):
             self.tumblr_client_id, self.tumblr_client_secret = self.online_token_prompt("https://tumblr.com/oauth/apps", "consumer key", "consumer secret")
+
+            oauth = OAuth2Session(
+                self.tumblr_client_id.get_secret_value(),
+                scope=["basic", "write", "offline_access"],
+            )
+            authorization_url, _ = oauth.authorization_url("https://tumblr.com/oauth2/authorize")  # pyright: ignore[reportUnknownMemberType]
+            rich.print(f"Please go to {authorization_url} and authorize access.")
+            self.tumblr_token = Secret(
+                oauth.fetch_token(  # pyright: ignore[reportUnknownMemberType]
+                    "https://api.tumblr.com/v2/oauth2/token",
+                    authorization_response=Prompt.ask("Enter the full callback URL"),
+                    client_secret=self.tumblr_client_secret.get_secret_value(),
+                ),
+            )
+
+        for name, value in self:
+            if isinstance(value, Secret):
+                set_password(self.service_name, name, json.dumps(value.get_secret_value()))
+
+        return self
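`Tokens` now persists its secrets in the system keyring rather than only in the TOML file: `model_post_init` pulls each field back with `get_password`, and `write_to_keyring` stores every `Secret` as a JSON string under the `tumblrbot` service name. A hedged sketch of that round-trip with placeholder values:

```python
import json

from keyring import get_password, set_password

SERVICE_NAME = "tumblrbot"

# Writing (what write_to_keyring does for each Secret field): the secret value
# is JSON-encoded so non-string values such as the OAuth token dict survive.
set_password(SERVICE_NAME, "openai_api_key", json.dumps("sk-placeholder"))
set_password(SERVICE_NAME, "tumblr_token", json.dumps({"access_token": "placeholder"}))

# Reading back (what model_post_init does on the next run):
if (stored := get_password(SERVICE_NAME, "tumblr_token")) is not None:
    token = json.loads(stored)
    print(token["access_token"])
```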
{tumblrbot-1.1.5 → tumblrbot-1.2.1}/src/tumblrbot/utils/tumblr.py
@@ -1,11 +1,8 @@
 from dataclasses import dataclass
-from typing import Self

-import rich
 from pydantic import Secret
 from requests import HTTPError, Response
 from requests_oauthlib import OAuth2Session
-from rich.prompt import Prompt

 from tumblrbot.utils.models import Post
 from tumblrbot.utils.settings import Tokens
@@ -22,35 +19,14 @@ class TumblrClient(OAuth2Session):
             auto_refresh_kwargs={
                 "client_id": self.tokens.tumblr_client_id.get_secret_value(),
                 "client_secret": self.tokens.tumblr_client_secret.get_secret_value(),
-                "token": self.tokens.tumblr_token.get_secret_value(),
             },
-            scope=["basic", "write", "offline_access"],
             token=self.tokens.tumblr_token.get_secret_value(),
-            token_updater=self.token_saver,
+            token_updater=self.token_updater,
         )

         self.hooks["response"].append(self.response_hook)

-    def __enter__(self) -> Self:
-        super().__enter__()
-
-        if not self.tokens.tumblr_token.get_secret_value():
-            authorization_url, _ = self.authorization_url("https://tumblr.com/oauth2/authorize")  # pyright: ignore[reportUnknownMemberType]
-
-            rich.print(f"Please go to {authorization_url} and authorize access.")
-            authorization_response = Prompt.ask("Enter the full callback URL")
-
-            self.token_saver(
-                self.fetch_token(  # pyright: ignore[reportUnknownMemberType]
-                    "https://api.tumblr.com/v2/oauth2/token",
-                    authorization_response=authorization_response,
-                    client_secret=self.tokens.tumblr_client_secret.get_secret_value(),
-                ),
-            )
-
-        return self
-
-    def token_saver(self, token: object) -> None:
+    def token_updater(self, token: object) -> None:
         self.tokens.tumblr_token = Secret(token)

     def response_hook(self, response: Response, **_: object) -> None:
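With the interactive authorization moved into `Tokens.write_to_keyring`, `TumblrClient` keeps only the refresh wiring: requests-oauthlib calls `token_updater` with the refreshed token, and the callback stores it back on `Tokens`, which the keyring validator then persists. A minimal sketch of that wiring with placeholder credentials; constructing the session makes no network calls.

```python
from requests_oauthlib import OAuth2Session


def token_updater(token: object) -> None:
    # In the package this assigns Secret(token) back onto Tokens.tumblr_token,
    # which write_to_keyring then saves to the system keyring.
    print("refreshed token received:", token)


session = OAuth2Session(
    "client-id-placeholder",
    token={"access_token": "a", "refresh_token": "r", "token_type": "Bearer", "expires_in": -1},
    auto_refresh_url="https://api.tumblr.com/v2/oauth2/token",
    auto_refresh_kwargs={
        "client_id": "client-id-placeholder",
        "client_secret": "client-secret-placeholder",
    },
    token_updater=token_updater,
)
```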