tumblrbot 1.0.0__py3-none-any.whl → 1.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
tumblrbot/__main__.py CHANGED
@@ -1,49 +1,19 @@
1
- from collections.abc import Generator
2
-
3
- import rich
4
1
  from openai import OpenAI
5
- from pydantic import Secret
6
- from rich.prompt import Confirm, Prompt
2
+ from rich.prompt import Confirm
7
3
  from rich.traceback import install
8
4
 
9
5
  from tumblrbot.flow.download import PostDownloader
10
6
  from tumblrbot.flow.examples import ExamplesWriter
11
7
  from tumblrbot.flow.fine_tune import FineTuner
12
8
  from tumblrbot.flow.generate import DraftGenerator
9
+ from tumblrbot.utils.common import TumblrClient
13
10
  from tumblrbot.utils.settings import Tokens
14
- from tumblrbot.utils.tumblr import TumblrClient
15
-
16
-
17
- def online_token_prompt(url: str, *tokens: str) -> Generator[Secret[str]]:
18
- formatted_tokens = [f"[cyan]{token}[/]" for token in tokens]
19
- formatted_token_string = " and ".join(formatted_tokens)
20
-
21
- rich.print(f"Retrieve your {formatted_token_string} from: {url}")
22
- for token in formatted_tokens:
23
- prompt = f"Enter your {token} [yellow](hidden)"
24
- yield Secret(Prompt.ask(prompt, password=True).strip())
25
-
26
- rich.print()
27
-
28
-
29
- def verify_tokens() -> Tokens:
30
- tokens = Tokens()
31
-
32
- if not tokens.openai.api_key.get_secret_value():
33
- (tokens.openai.api_key,) = online_token_prompt("https://platform.openai.com/api-keys", "API key")
34
- tokens.model_post_init()
35
-
36
- if not (tokens.tumblr.client_id.get_secret_value() and tokens.tumblr.client_secret.get_secret_value()):
37
- tokens.tumblr.client_id, tokens.tumblr.client_secret = online_token_prompt("https://tumblr.com/oauth/apps", "consumer key", "consumer secret")
38
- tokens.model_post_init()
39
-
40
- return tokens
41
11
 
42
12
 
43
13
  def main() -> None:
44
14
  install()
45
- tokens = verify_tokens()
46
- with OpenAI(api_key=tokens.openai.api_key.get_secret_value()) as openai, TumblrClient(tokens) as tumblr:
15
+ tokens = Tokens()
16
+ with OpenAI(api_key=tokens.openai_api_key.get_secret_value()) as openai, TumblrClient(tokens) as tumblr:
47
17
  post_downloader = PostDownloader(openai, tumblr)
48
18
  if Confirm.ask("Download latest posts?", default=False):
49
19
  post_downloader.download()
@@ -56,7 +26,7 @@ def main() -> None:
56
26
 
57
27
  fine_tuner = FineTuner(openai, tumblr, estimated_tokens)
58
28
  fine_tuner.print_estimates()
59
- if Confirm.ask("Upload data to OpenAI for fine-tuning?", default=False):
29
+ if Confirm.ask("Upload data to OpenAI for fine-tuning? [bold]You must do this to set the model to generate drafts from. Alternatively, manually enter a model into the config.", default=False):
60
30
  fine_tuner.fine_tune()
61
31
 
62
32
  if Confirm.ask("Generate drafts?", default=False):
@@ -2,19 +2,17 @@ from io import TextIOBase
2
2
  from json import dump
3
3
  from pathlib import Path
4
4
 
5
- from more_itertools import last
6
-
7
5
  from tumblrbot.utils.common import PreviewLive, UtilClass
8
6
  from tumblrbot.utils.models import Post
9
7
 
10
8
 
11
9
  class PostDownloader(UtilClass):
12
- def paginate_posts(self, blog_name: str, before: int, completed: int, fp: TextIOBase, live: PreviewLive) -> None:
13
- task_id = live.progress.add_task(f"Downloading posts from '{blog_name}'...", total=None, completed=completed)
10
+ def paginate_posts(self, blog_identifier: str, offset: int, fp: TextIOBase, live: PreviewLive) -> None:
11
+ task_id = live.progress.add_task(f"Downloading posts from '{blog_identifier}'...", total=None, completed=offset)
14
12
 
15
13
  while True:
16
- response = self.tumblr.retrieve_published_posts(blog_name, before).json()["response"]
17
- live.progress.update(task_id, total=response["blog"]["posts"])
14
+ response = self.tumblr.retrieve_published_posts(blog_identifier, offset).json()["response"]
15
+ live.progress.update(task_id, total=response["blog"]["posts"], completed=offset)
18
16
 
19
17
  if posts := response["posts"]:
20
18
  for post in posts:
@@ -22,32 +20,29 @@ class PostDownloader(UtilClass):
22
20
  fp.write("\n")
23
21
 
24
22
  model = Post.model_validate(post)
25
- before = model.timestamp
26
-
27
- live.progress.update(task_id, advance=1)
28
23
  live.custom_update(model)
24
+
25
+ offset += len(posts)
29
26
  else:
30
27
  break
31
28
 
32
- def get_data_path(self, blog_name: str) -> Path:
33
- return (self.config.training.data_directory / blog_name).with_suffix(".jsonl")
29
+ def get_data_path(self, blog_identifier: str) -> Path:
30
+ return (self.config.data_directory / blog_identifier).with_suffix(".jsonl")
34
31
 
35
32
  def get_data_paths(self) -> list[Path]:
36
- return list(map(self.get_data_path, self.config.training.blog_names))
33
+ return list(map(self.get_data_path, self.config.download_blog_identifiers))
37
34
 
38
35
  def download(self) -> None:
39
- self.config.training.data_directory.mkdir(parents=True, exist_ok=True)
36
+ self.config.data_directory.mkdir(parents=True, exist_ok=True)
40
37
 
41
38
  with PreviewLive() as live:
42
- for blog_name in self.config.training.blog_names:
43
- data_path = self.get_data_path(blog_name)
44
- lines = data_path.read_text("utf_8").splitlines() if data_path.exists() else []
39
+ for blog_identifier in self.config.download_blog_identifiers:
40
+ data_path = self.get_data_path(blog_identifier)
45
41
 
46
42
  with data_path.open("a", encoding="utf_8") as fp:
47
43
  self.paginate_posts(
48
- blog_name,
49
- Post.model_validate_json(last(lines, "{}")).timestamp,
50
- len(lines),
44
+ blog_identifier,
45
+ len(data_path.read_text("utf_8").splitlines()) if data_path.exists() else 0,
51
46
  fp,
52
47
  live,
53
48
  )
@@ -28,7 +28,7 @@ class ExamplesWriter(UtilClass):
28
28
  encoding = get_encoding("o200k_base")
29
29
  Console(stderr=True, style="logging.level.warning").print(f"[Warning] Using encoding '{encoding.name}': {''.join(error.args)}\n")
30
30
 
31
- with self.config.training.output_file.open(encoding="utf_8") as fp:
31
+ with self.config.examples_file.open(encoding="utf_8") as fp:
32
32
  for line in fp:
33
33
  example = Example.model_validate_json(line)
34
34
  yield len(encoding.encode("assistant")) # every reply is primed with <|start|>assistant<|message|>
@@ -77,8 +77,8 @@ class ExamplesWriter(UtilClass):
77
77
  yield from posts
78
78
 
79
79
  def write_examples(self) -> None:
80
- self.config.training.output_file.parent.mkdir(parents=True, exist_ok=True)
81
- with self.config.training.output_file.open("w", encoding="utf_8") as fp:
80
+ self.config.examples_file.parent.mkdir(parents=True, exist_ok=True)
81
+ with self.config.examples_file.open("w", encoding="utf_8") as fp:
82
82
  for post in self.get_filtered_posts():
83
83
  example = Example(
84
84
  messages=[
@@ -89,4 +89,4 @@ class ExamplesWriter(UtilClass):
89
89
  )
90
90
  fp.write(f"{example.model_dump_json()}\n")
91
91
 
92
- rich.print(f"[bold]The examples file can be found at: '{self.config.training.output_file}'\n")
92
+ rich.print(f"[bold]The examples file can be found at: '{self.config.examples_file}'\n")
@@ -24,22 +24,19 @@ class FineTuner(UtilClass):
24
24
  Cost: {self.get_cost_string(job.trained_tokens)}
25
25
  """)
26
26
 
27
- self.config.training.job_id = ""
28
- self.config.model_post_init()
27
+ self.config.job_id = ""
29
28
 
30
29
  if job.status == "failed" and job.error is not None:
31
30
  raise RuntimeError(job.error.message)
32
31
 
33
32
  if job.fine_tuned_model:
34
- self.config.generation.fine_tuned_model = job.fine_tuned_model or ""
35
- self.config.model_post_init()
33
+ self.config.fine_tuned_model = job.fine_tuned_model or ""
36
34
 
37
35
  def poll_job_status(self) -> FineTuningJob:
38
- job = self.openai.fine_tuning.jobs.retrieve(self.config.training.job_id)
36
+ job = self.openai.fine_tuning.jobs.retrieve(self.config.job_id)
39
37
 
40
- if self.config.training.expected_epochs != job.hyperparameters.n_epochs and isinstance(job.hyperparameters.n_epochs, int):
41
- self.config.training.expected_epochs = job.hyperparameters.n_epochs
42
- self.config.model_post_init()
38
+ if self.config.expected_epochs != job.hyperparameters.n_epochs and isinstance(job.hyperparameters.n_epochs, int):
39
+ self.config.expected_epochs = job.hyperparameters.n_epochs
43
40
 
44
41
  self.dedent_print(f"""
45
42
  The number of epochs has been updated to {job.hyperparameters.n_epochs}!
@@ -50,11 +47,11 @@ class FineTuner(UtilClass):
50
47
  return job
51
48
 
52
49
  def create_job(self) -> FineTuningJob:
53
- if self.config.training.job_id:
50
+ if self.config.job_id:
54
51
  return self.poll_job_status()
55
52
 
56
53
  file = self.openai.files.create(
57
- file=self.config.training.output_file,
54
+ file=self.config.examples_file,
58
55
  purpose="fine-tune",
59
56
  )
60
57
  job = self.openai.fine_tuning.jobs.create(
@@ -62,8 +59,7 @@ class FineTuner(UtilClass):
62
59
  training_file=file.id,
63
60
  )
64
61
 
65
- self.config.training.job_id = job.id
66
- self.config.model_post_init()
62
+ self.config.job_id = job.id
67
63
  return job
68
64
 
69
65
  def fine_tune(self) -> None:
@@ -86,7 +82,7 @@ class FineTuner(UtilClass):
86
82
 
87
83
  live.progress.update(
88
84
  task_id,
89
- description=f"Fine-tuning: {job.status}...",
85
+ description=f"Fine-tuning: [italic]{job.status.replace('_', ' ').title()}[/]...",
90
86
  )
91
87
 
92
88
  sleep(1)
@@ -94,16 +90,16 @@ class FineTuner(UtilClass):
94
90
  self.process_completed_job(job)
95
91
 
96
92
  def get_cost_string(self, total_tokens: int) -> str:
97
- return f"${self.config.training.token_price / 1000000 * total_tokens:.2f}"
93
+ return f"${self.config.token_price / 1000000 * total_tokens:.2f}"
98
94
 
99
95
  def print_estimates(self) -> None:
100
- total_tokens = self.config.training.expected_epochs * self.estimated_tokens
96
+ total_tokens = self.config.expected_epochs * self.estimated_tokens
101
97
  cost_string = self.get_cost_string(total_tokens)
102
98
 
103
99
  self.dedent_print(f"""
104
100
  Tokens {self.estimated_tokens:,}:
105
- Total tokens for [bold orange1]{self.config.training.expected_epochs}[/] epoch(s): {total_tokens:,}
101
+ Total tokens for [bold orange1]{self.config.expected_epochs}[/] epoch(s): {total_tokens:,}
106
102
  Expected cost when trained with [bold purple]{self.config.base_model}[/]: {cost_string}
107
103
  NOTE: Token values are approximate and may not be 100% accurate, please be aware of this when using the data.
108
- [italic red]Neither Amelia nor Mutsumi are responsible for any inaccuracies in the token count or estimated price.[/]
104
+ [italic red]Amelia, Mutsumi, and Marin are not responsible for any inaccuracies in the token count or estimated price.[/]
109
105
  """)
@@ -8,7 +8,7 @@ from tumblrbot.utils.models import Post
8
8
 
9
9
  class DraftGenerator(UtilClass):
10
10
  def generate_tags(self, content: Post.Block) -> Post | None:
11
- if random() < self.config.generation.tags_chance: # noqa: S311
11
+ if random() < self.config.tags_chance: # noqa: S311
12
12
  return self.openai.responses.parse(
13
13
  input=content.text,
14
14
  model=self.config.base_model,
@@ -23,29 +23,32 @@ class DraftGenerator(UtilClass):
23
23
  content = self.openai.responses.create(
24
24
  input=self.config.user_input,
25
25
  instructions=self.config.developer_message,
26
- model=self.config.generation.fine_tuned_model,
26
+ model=self.config.fine_tuned_model,
27
27
  ).output_text
28
28
 
29
29
  return Post.Block(type="text", text=content)
30
30
 
31
31
  def generate_post(self) -> Post:
32
32
  content = self.generate_content()
33
- post = Post(content=[content])
33
+ post = Post(
34
+ content=[content],
35
+ state="draft",
36
+ )
34
37
  if tags := self.generate_tags(content):
35
38
  post.tags = tags.tags
36
39
  return post
37
40
 
38
41
  def create_drafts(self) -> None:
39
- message = f"View drafts here: https://tumblr.com/blog/{self.config.generation.blog_name}/drafts"
42
+ message = f"View drafts here: https://tumblr.com/blog/{self.config.upload_blog_identifier}/drafts"
40
43
 
41
44
  with PreviewLive() as live:
42
- for i in live.progress.track(range(self.config.generation.draft_count), description="Generating drafts..."):
45
+ for i in live.progress.track(range(self.config.draft_count), description="Generating drafts..."):
43
46
  try:
44
47
  post = self.generate_post()
45
- self.tumblr.create_draft_post(self.config.generation.blog_name, post)
48
+ self.tumblr.create_post(self.config.upload_blog_identifier, post)
46
49
  live.custom_update(post)
47
50
  except BaseException as exc:
48
51
  exc.add_note(f"📉 An error occurred! Generated {i} draft(s) before failing. {message}")
49
52
  raise
50
53
 
51
- rich.print(f":chart_increasing: [bold green]Generated {self.config.generation.draft_count} draft(s).[/] {message}")
54
+ rich.print(f":chart_increasing: [bold green]Generated {self.config.draft_count} draft(s).[/] {message}")
tumblrbot/utils/models.py CHANGED
@@ -18,16 +18,15 @@ class FullyValidatedModel(BaseModel):
18
18
 
19
19
  class Post(FullyValidatedModel):
20
20
  class Block(FullyValidatedModel):
21
- type: str = "text"
21
+ type: str = ""
22
22
  text: str = ""
23
- blocks: set[int] = set() # noqa: RUF012
23
+ blocks: list[int] = [] # noqa: RUF012
24
24
 
25
- tags: Annotated[set[str], PlainSerializer(",".join)] = set() # noqa: RUF012
25
+ tags: Annotated[list[str], PlainSerializer(",".join)] = [] # noqa: RUF012
26
26
  content: SkipJsonSchema[list[Block]] = [] # noqa: RUF012
27
27
  layout: SkipJsonSchema[list[Block]] = [] # noqa: RUF012
28
28
  trail: SkipJsonSchema[list[Any]] = [] # noqa: RUF012
29
- state: SkipJsonSchema[Literal["published", "queued", "draft", "private", "unapproved"]] = "draft"
30
- timestamp: SkipJsonSchema[int] = 0
29
+ state: SkipJsonSchema[Literal["published", "queued", "draft", "private", "unapproved"]] = "published"
31
30
  is_submission: SkipJsonSchema[bool] = False
32
31
 
33
32
  def __rich__(self) -> Panel:
@@ -45,7 +44,7 @@ class Post(FullyValidatedModel):
45
44
  indices: set[int] = set()
46
45
  for block in self.layout:
47
46
  if block.type == "ask":
48
- indices |= block.blocks
47
+ indices.update(block.blocks)
49
48
 
50
49
  self.content = [block for i, block in enumerate(self.content) if i not in indices and block.type == "text"]
51
50
 
@@ -55,7 +54,7 @@ class Post(FullyValidatedModel):
55
54
 
56
55
  class Example(FullyValidatedModel):
57
56
  class Message(FullyValidatedModel):
58
- role: str
57
+ role: Literal["developer", "user", "assistant"]
59
58
  content: str
60
59
 
61
60
  messages: list[Message]
@@ -1,18 +1,19 @@
1
- from collections.abc import Sequence
1
+ from collections.abc import Generator, Sequence
2
2
  from pathlib import Path
3
- from typing import TYPE_CHECKING, Any, override
3
+ from typing import TYPE_CHECKING, Any, Self, override
4
4
 
5
+ import rich
5
6
  from openai.types import ChatModel
6
- from pydantic import Field, PositiveFloat, PositiveInt, Secret, field_serializer
7
+ from pydantic import Field, PositiveFloat, PositiveInt, Secret, model_validator
7
8
  from pydantic_settings import BaseSettings, PydanticBaseSettingsSource, SettingsConfigDict, TomlConfigSettingsSource
8
- from tomlkit import comment, dumps, table
9
- from tomlkit.items import Table
9
+ from rich.prompt import Prompt
10
+ from tomlkit import comment, document, dumps # pyright: ignore[reportUnknownVariableType]
10
11
 
11
12
  if TYPE_CHECKING:
12
13
  from _typeshed import StrPath
13
14
 
14
15
 
15
- class TomlSettings(BaseSettings):
16
+ class TOMLSettings(BaseSettings):
16
17
  model_config = SettingsConfigDict(
17
18
  extra="ignore",
18
19
  validate_assignment=True,
@@ -20,38 +21,13 @@ class TomlSettings(BaseSettings):
20
21
  validate_by_name=True,
21
22
  )
22
23
 
23
- @field_serializer("*", when_used="json-unless-none")
24
- @staticmethod
25
- def serialize_secret(value: object) -> object:
26
- if isinstance(value, Secret):
27
- return value.get_secret_value()
28
- return value
29
-
30
- def get_toml_table(self) -> Table:
31
- toml_table = table()
32
-
33
- dumped_model = self.model_dump(mode="json")
34
- for name, field in self.__class__.model_fields.items():
35
- if field.description:
36
- for line in field.description.split(". "):
37
- toml_table.add(comment(f"{line.removesuffix('.')}."))
38
-
39
- value = getattr(self, name)
40
- toml_table[name] = value.get_toml_table() if isinstance(value, TomlSettings) else dumped_model[name]
41
-
42
- return toml_table
43
-
44
-
45
- class AutoGenerateTomlSettings(TomlSettings):
46
24
  @override
47
25
  @classmethod
48
26
  def settings_customise_sources(cls, settings_cls: type[BaseSettings], *args: PydanticBaseSettingsSource, **kwargs: PydanticBaseSettingsSource) -> tuple[PydanticBaseSettingsSource, ...]:
49
27
  return (TomlConfigSettingsSource(settings_cls),)
50
28
 
51
- @override
52
- def model_post_init(self, context: object = None) -> None:
53
- super().model_post_init(context)
54
-
29
+ @model_validator(mode="after")
30
+ def write_to_file(self) -> Self:
55
31
  # Make sure to call this if updating values in nested models.
56
32
  toml_files = self.model_config.get("toml_file")
57
33
  if isinstance(toml_files, (Path, str)):
@@ -60,14 +36,27 @@ class AutoGenerateTomlSettings(TomlSettings):
60
36
  for toml_file in toml_files:
61
37
  self.dump_toml(toml_file)
62
38
 
39
+ return self
40
+
63
41
  def dump_toml(self, toml_file: "StrPath") -> None:
42
+ toml_table = document()
43
+
44
+ dumped_model = self.model_dump(mode="json")
45
+ for name, field in self.__class__.model_fields.items():
46
+ if field.description:
47
+ for line in field.description.split(". "):
48
+ toml_table.add(comment(f"{line.removesuffix('.')}."))
49
+
50
+ value = getattr(self, name)
51
+ toml_table[name] = value.get_secret_value() if isinstance(value, Secret) else dumped_model[name]
52
+
64
53
  Path(toml_file).write_text(
65
- dumps(self.get_toml_table()),
54
+ dumps(toml_table),
66
55
  encoding="utf_8",
67
56
  )
68
57
 
69
58
 
70
- class Config(AutoGenerateTomlSettings):
59
+ class Config(TOMLSettings):
71
60
  model_config = SettingsConfigDict(
72
61
  cli_parse_args=True,
73
62
  cli_avoid_json=True,
@@ -75,44 +64,68 @@ class Config(AutoGenerateTomlSettings):
75
64
  toml_file="config.toml",
76
65
  )
77
66
 
78
- class Generation(TomlSettings):
79
- fine_tuned_model: str = Field("", description="The name of the OpenAI model that was fine-tuned with your posts.")
80
- blog_name: str = Field(
81
- "",
82
- description='The name of the blog which generated drafts will be uploaded to that appears in the URL. This must be a blog associated with the same account as the configured Tumblr secret values. Examples: "staff" for https://staff.tumblr.com and "changes" for https://tumblr.com/changes or https://tumblr.com/@changes',
83
- )
84
- draft_count: PositiveInt = Field(150, description="The number of drafts to process. This will affect the number of tokens used with OpenAI")
85
- tags_chance: float = Field(0.1, description="The chance to generate tags for any given post. This will incur extra calls to OpenAI.")
67
+ fine_tuned_model: str = Field("", description="The name of the OpenAI model that was fine-tuned with your posts.")
68
+ upload_blog_identifier: str = Field(
69
+ "",
70
+ description="The identifier of the blog which generated drafts will be uploaded to. This must be a blog associated with the same account as the configured Tumblr secret tokens.",
71
+ )
72
+ draft_count: PositiveInt = Field(150, description="The number of drafts to process. This will affect the number of tokens used with OpenAI")
73
+ tags_chance: float = Field(0.1, description="The chance to generate tags for any given post. This will incur extra calls to OpenAI.")
86
74
 
87
- class Training(TomlSettings):
88
- blog_names: list[str] = Field(
89
- [],
90
- description='The names of the blogs which post data will be downloaded from that appears in the URL. This must be a blog associated with the same account as the configured Tumblr secret values. Examples: ["staff", "changes"] for https://staff.tumblr.com and https://www.tumblr.com/changes or https://www.tumblr.com/@changes',
91
- )
92
- data_directory: Path = Field(Path("data"), description="Where to store downloaded post data.")
93
- output_file: Path = Field(Path("training.jsonl"), description="Where to output the training data that will be used to fine-tune the model.")
94
- job_id: str = Field("", description="The fine-tuning job ID that will be polled on next run.")
95
- expected_epochs: PositiveInt = Field(3, description="The expected number of epochs fine-tuning will be run for. This will be updated during fine-tuning.")
96
- token_price: PositiveFloat = Field(1.50, description="The expected price in USD per million tokens during fine-tuning for the current model.")
75
+ download_blog_identifiers: list[str] = Field(
76
+ [],
77
+ description="The identifiers of the blogs which post data will be downloaded from. These must be blogs associated with the same account as the configured Tumblr secret tokens.",
78
+ )
79
+ data_directory: Path = Field(Path("data"), description="Where to store downloaded post data.")
80
+ examples_file: Path = Field(Path("examples.jsonl"), description="Where to output the examples that will be used to fine-tune the model.")
81
+ job_id: str = Field("", description="The fine-tuning job ID that will be polled on next run.")
82
+ expected_epochs: PositiveInt = Field(3, description="The expected number of epochs fine-tuning will be run for. This will be updated during fine-tuning.")
83
+ token_price: PositiveFloat = Field(1.50, description="The expected price in USD per million tokens during fine-tuning for the current model.")
97
84
 
98
85
  base_model: ChatModel = Field("gpt-4.1-nano-2025-04-14", description="The name of the model that will be fine-tuned by the generated training data.")
99
86
  developer_message: str = Field("You are a Tumblr post bot. Please generate a Tumblr post in accordance with the user's request.", description="The developer message used by the OpenAI API to generate drafts.")
100
87
  user_input: str = Field("Please write a comical Tumblr post.", description="The user input used by the OpenAI API to generate drafts.")
101
88
 
102
- generation: Generation = Generation() # pyright: ignore[reportCallIssue]
103
- training: Training = Training() # pyright: ignore[reportCallIssue]
89
+ @override
90
+ def model_post_init(self, context: object) -> None:
91
+ super().model_post_init(context)
104
92
 
93
+ if not self.download_blog_identifiers:
94
+ rich.print("Enter the [cyan]identifiers of your blogs[/] that data should be [bold purple]downloaded[/] from, separated by commas.")
95
+ self.download_blog_identifiers = list(map(str.strip, Prompt.ask("[bold]Example: staff.tumblr.com,changes").split(",")))
105
96
 
106
- class Tokens(AutoGenerateTomlSettings):
97
+ if not self.upload_blog_identifier:
98
+ rich.print("Enter the [cyan]identifier of your blog[/] that drafts should be [bold purple]uploaded[/] to.")
99
+ self.upload_blog_identifier = Prompt.ask("[bold]Examples: staff.tumblr.com or changes").strip()
100
+
101
+
102
+ class Tokens(TOMLSettings):
107
103
  model_config = SettingsConfigDict(toml_file="env.toml")
108
104
 
109
- class OpenAI(TomlSettings):
110
- api_key: Secret[str] = Secret("")
105
+ openai_api_key: Secret[str] = Secret("")
106
+
107
+ tumblr_client_id: Secret[str] = Secret("")
108
+ tumblr_client_secret: Secret[str] = Secret("")
109
+ tumblr_token: Secret[Any] = Secret({})
110
+
111
+ @staticmethod
112
+ def online_token_prompt(url: str, *tokens: str) -> Generator[Secret[str]]:
113
+ formatted_tokens = [f"[cyan]{token}[/]" for token in tokens]
114
+ formatted_token_string = " and ".join(formatted_tokens)
115
+
116
+ rich.print(f"Retrieve your {formatted_token_string} from: {url}")
117
+ for token in formatted_tokens:
118
+ prompt = f"Enter your {token} [yellow](hidden)"
119
+ yield Secret(Prompt.ask(prompt, password=True).strip())
120
+
121
+ rich.print()
122
+
123
+ @override
124
+ def model_post_init(self, context: object) -> None:
125
+ super().model_post_init(context)
111
126
 
112
- class Tumblr(TomlSettings):
113
- client_id: Secret[str] = Secret("")
114
- client_secret: Secret[str] = Secret("")
115
- token: Secret[Any] = Secret({})
127
+ if not self.openai_api_key.get_secret_value():
128
+ (self.openai_api_key,) = self.online_token_prompt("https://platform.openai.com/api-keys", "API key")
116
129
 
117
- openai: OpenAI = OpenAI()
118
- tumblr: Tumblr = Tumblr()
130
+ if not (self.tumblr_client_id.get_secret_value() and self.tumblr_client_secret.get_secret_value()):
131
+ self.tumblr_client_id, self.tumblr_client_secret = self.online_token_prompt("https://tumblr.com/oauth/apps", "consumer key", "consumer secret")
tumblrbot/utils/tumblr.py CHANGED
@@ -16,12 +16,16 @@ class TumblrClient(OAuth2Session):
16
16
  tokens: Tokens
17
17
 
18
18
  def __post_init__(self) -> None:
19
- super().__init__(
20
- self.tokens.tumblr.client_id.get_secret_value(),
19
+ super().__init__( # pyright: ignore[reportUnknownMemberType]
20
+ self.tokens.tumblr_client_id.get_secret_value(),
21
21
  auto_refresh_url="https://api.tumblr.com/v2/oauth2/token",
22
- auto_refresh_kwargs=self.tokens.tumblr.model_dump(mode="json"),
22
+ auto_refresh_kwargs={
23
+ "client_id": self.tokens.tumblr_client_id.get_secret_value(),
24
+ "client_secret": self.tokens.tumblr_client_secret.get_secret_value(),
25
+ "token": self.tokens.tumblr_token.get_secret_value(),
26
+ },
23
27
  scope=["basic", "write", "offline_access"],
24
- token=self.tokens.tumblr.token.get_secret_value(),
28
+ token=self.tokens.tumblr_token.get_secret_value(),
25
29
  token_updater=self.token_saver,
26
30
  )
27
31
 
@@ -30,26 +34,25 @@ class TumblrClient(OAuth2Session):
30
34
  def __enter__(self) -> Self:
31
35
  super().__enter__()
32
36
 
33
- if not self.tokens.tumblr.token.get_secret_value():
34
- authorization_url, _ = self.authorization_url("https://tumblr.com/oauth2/authorize")
37
+ if not self.tokens.tumblr_token.get_secret_value():
38
+ authorization_url, _ = self.authorization_url("https://tumblr.com/oauth2/authorize") # pyright: ignore[reportUnknownMemberType]
35
39
 
36
40
  rich.print(f"Please go to {authorization_url} and authorize access.")
37
41
  authorization_response = Prompt.ask("Enter the full callback URL")
38
42
  rich.print("\n")
39
43
 
40
44
  self.token_saver(
41
- self.fetch_token(
45
+ self.fetch_token( # pyright: ignore[reportUnknownMemberType]
42
46
  "https://api.tumblr.com/v2/oauth2/token",
43
47
  authorization_response=authorization_response,
44
- client_secret=self.tokens.tumblr.client_secret.get_secret_value(),
48
+ client_secret=self.tokens.tumblr_client_secret.get_secret_value(),
45
49
  ),
46
50
  )
47
51
 
48
52
  return self
49
53
 
50
54
  def token_saver(self, token: object) -> None:
51
- self.tokens.tumblr.token = Secret(token)
52
- self.tokens.model_post_init()
55
+ self.tokens.tumblr_token = Secret(token)
53
56
 
54
57
  def response_hook(self, response: Response, **_: object) -> None:
55
58
  try:
@@ -65,17 +68,18 @@ class TumblrClient(OAuth2Session):
65
68
  error.add_note(str(json))
66
69
  raise
67
70
 
68
- def create_draft_post(self, blog_name: str, post: Post) -> Response:
69
- return self.post(
70
- f"https://api.tumblr.com/v2/blog/{blog_name}/posts",
71
- json=post.model_dump(mode="json"),
72
- )
73
-
74
- def retrieve_published_posts(self, blog_name: str, before: int) -> Response:
71
+ def retrieve_published_posts(self, blog_identifier: str, offset: int) -> Response:
75
72
  return self.get(
76
- f"https://api.tumblr.com/v2/blog/{blog_name}/posts",
73
+ f"https://api.tumblr.com/v2/blog/{blog_identifier}/posts",
77
74
  params={
78
- "before": before,
75
+ "offset": offset,
76
+ "sort": "asc",
79
77
  "npf": True,
80
78
  },
81
79
  )
80
+
81
+ def create_post(self, blog_identifier: str, post: Post) -> Response:
82
+ return self.post(
83
+ f"https://api.tumblr.com/v2/blog/{blog_identifier}/posts",
84
+ json=post.model_dump(mode="json"),
85
+ )
@@ -0,0 +1,15 @@
1
+ Metadata-Version: 2.4
2
+ Name: tumblrbot
3
+ Version: 1.1.1
4
+ Summary: An updated bot that posts to Tumblr, based on your very own blog!
5
+ Requires-Python: >= 3.13
6
+ Requires-Dist: more-itertools
7
+ Requires-Dist: openai
8
+ Requires-Dist: pydantic
9
+ Requires-Dist: pydantic-settings
10
+ Requires-Dist: requests
11
+ Requires-Dist: requests-oauthlib
12
+ Requires-Dist: rich
13
+ Requires-Dist: tiktoken
14
+ Requires-Dist: tomlkit
15
+ Project-URL: Source, https://github.com/MaidThatPrograms/tumblrbot
@@ -0,0 +1,16 @@
1
+ tumblrbot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
+ tumblrbot/__main__.py,sha256=RSvzROxs8hi_0sOyKsnZtV2-S3T-lPeJuDwULGN1-2U,1509
3
+ tumblrbot/flow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
+ tumblrbot/flow/download.py,sha256=FsiYJSyinFZi5wHvRL6WQCURmTDzrJ4XgThJkaRFPxw,1911
5
+ tumblrbot/flow/examples.py,sha256=vCJ6KH-kqlmi7zW-jk6fQqSGDkYnXHplTmTmIUC-Xj0,4168
6
+ tumblrbot/flow/fine_tune.py,sha256=AWBxlHfQDDIGku1oZaaMNQtNw3yOaScq6QMgI8sJhd0,3836
7
+ tumblrbot/flow/generate.py,sha256=6b6-Hzqek0AO6i7ceX-mapJfOKLYx6FOqEvHItjg5kU,2088
8
+ tumblrbot/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
+ tumblrbot/utils/common.py,sha256=3PPyutv7yyjvij5anyKQRDRn7lrr8Eu05m8XVhh74Hc,1342
10
+ tumblrbot/utils/models.py,sha256=to4k0b1O0bYNXe_4zx5Vupxtb-739U3nMSGgJLJ3gto,1975
11
+ tumblrbot/utils/settings.py,sha256=yC2srfEw8Kl0z-jDOkc9qWfRkutTzZFfg3uRzfjSGAQ,6507
12
+ tumblrbot/utils/tumblr.py,sha256=tFrGwHY3FsX2kWNiLteettXpnWgqNpzPCQImtEpND2M,3283
13
+ tumblrbot-1.1.1.dist-info/entry_points.txt,sha256=lTiN7PxAbyGY1fpCWApEw6NUIUgobfcOKhvn6cu3IQA,53
14
+ tumblrbot-1.1.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
15
+ tumblrbot-1.1.1.dist-info/METADATA,sha256=CitrYF_L0yiPI_q4tmUPq_ascm2tDG1Uc_YXT8RnC3Q,453
16
+ tumblrbot-1.1.1.dist-info/RECORD,,
@@ -1,369 +0,0 @@
1
- Metadata-Version: 2.4
2
- Name: tumblrbot
3
- Version: 1.0.0
4
- Summary: An updated bot that posts to Tumblr, based on your very own blog!
5
- Requires-Python: >= 3.13
6
- Description-Content-Type: text/markdown
7
- License-Expression: Unlicense
8
- License-File: UNLICENSE
9
- Requires-Dist: more-itertools
10
- Requires-Dist: openai
11
- Requires-Dist: pydantic
12
- Requires-Dist: pydantic-settings
13
- Requires-Dist: requests
14
- Requires-Dist: requests-oauthlib
15
- Requires-Dist: rich
16
- Requires-Dist: tiktoken
17
- Requires-Dist: tomlkit
18
- Project-URL: Issues, https://github.com/MaidThatPrograms/tumblrbot/issues
19
- Project-URL: Repository, https://github.com/MaidThatPrograms/tumblrbot
20
-
21
- [OpenAI]: https://pypi.org/project/openai
22
- [Rich]: https://pypi.org/project/rich
23
-
24
- [gpt-4.1-nano-2025-04-14]: https://platform.openai.com/docs/models/gpt-4.1-nano
25
- [Moderation API]: https://platform.openai.com/docs/api-reference/moderations
26
- [New Post Format]: https://tumblr.com/docs/npf
27
- [OAuth 2.0]: https://www.tumblr.com/docs/en/api/v2#oauth2-authorization
28
- [pip]: https://pypi.org
29
-
30
- [Download]: tumblrbot/flow/download.py
31
- [Examples]: tumblrbot/flow/examples.py
32
- [Fine-Tune]: tumblrbot/flow/fine_tune.py
33
- [Generate]: tumblrbot/flow/generate.py
34
- [Utils]: tumblrbot/utils/common.py
35
- [Models]: tumblrbot/utils/models.py
36
- [Settings]: tumblrbot/utils/settings.py
37
- [Tumblr]: tumblrbot/utils/tumblr.py
38
- [Main]: __main__.py
39
- [README.md]: README.md
40
-
41
- # tumblrbot
42
- Description of original project:
43
- > 4tv-tumblrbot was a collaborative project I embarked on with my close friend Dima, who goes by @smoqueen on Tumblr. The aim of this endeavor was straightforward yet silly: to develop a Tumblr bot powered by a machine-learning model. This bot would be specifically trained on the content from a particular Tumblr blog or a selected set of blogs, allowing it to mimic the style, tone, and thematic essence of the original posts.
44
-
45
- This fork is largely a rewrite of the source code with similarities in its structure and process:
46
- - Updates:
47
- - Updated to [OAuth 2.0].
48
- - Updated to the [New Post Format].
49
- - Updated to the latest version of [OpenAI].
50
- - Updated the [base model version][Settings] to [gpt-4.1-nano-2025-04-14].
51
- - Removed features:
52
- - [Generation][Generate]:
53
- - Removed clearing drafts behavior.
54
- - [Training][Examples]:
55
- - Removed exports that had HTML or reblogs.
56
- - Removed special word-replacement behavior.
57
- - Removed filtering by year.
58
- - Removed setup and related files.
59
- - Changed/Added features:
60
- - [Generation][Generate]:
61
- - Added a link to the blog's draft page.
62
- - Added error checking for uploading drafts.
63
- - [Training][Examples]:
64
- - Added the option to [Download] the latest posts from the [specified blogs][Settings].
65
- - Added the option to remove posts flagged by the [Moderation API].
66
- - Added the option to automatically [Fine-Tune] the examples on the [specified base model][Settings].
67
- - Changed to now escape examples automatically.
68
- - Set encoding for reading post data to `UTF-8` to fix decoding errors.
69
- - Added newlines between paragraphs.
70
- - Removed "ALT", submission, ask, and poll text from posts.
71
- - Improved the estimated token counts and costs.
72
- - Changed to [Rich] for output.
73
- - Added progress bars.
74
- - Added post previews.
75
- - Added color, formatting, and more information to output.
76
- - Created a [guided utility][Main] for every step of building your bot blog.
77
- - Maid scripts wait for user input before the console closes.
78
- Added command-line options to override [Settings] options.
79
- - Added behavior to regenerate the default [config.toml][Settings] and [env.toml][Settings] if missing.
80
- - Renamed several files.
81
- - Renamed several [Settings] options.
82
- - Changed the value of several [Settings] options.
83
- - Added full type-checking coverage (fully importable from third-party scripts).
84
-
85
- To-Do:
86
- - Add documentation.
87
- - Finish updating [README.md].
88
- - Look into places more-itertools can help.
89
- - Make this an installable [pip] package.
90
- - Add in-program configuration for non-defaulted [Settings].
91
-
92
-
93
- **Please submit an issue or contact us for features you want added/reimplemented.**
94
-
95
- ## Preparation with TumblThree
96
-
97
- **Download and Install TumblThree:**
98
-
99
- - Visit the official [TumblThree GitHub page](https://github.com/TumblThreeApp/TumblThree) and download the latest version of the application.
100
- - Extract the downloaded ZIP file and run the `TumblThree.exe` file to launch the application.
101
-
102
- **Add Tumblr Blogs:**
103
-
104
- - Copy the URLs of the Tumblr blogs you want to download.
105
- - In TumblThree, enter the blogs into the field marked **Enter URL**, then hit enter.
106
- - The blogs will be added to the list of blogs in the main interface.
107
-
108
- **Configure Download Settings:**
109
-
110
- - Click on a blog in the list to view its settings on the right panel.
111
- - Choose which content types you want to download, including text posts, answers, and more.
112
-
113
- **Start Downloading:**
114
-
115
- - Click the **Download** button (represented by a download icon) to begin downloading the selected blogs.
116
- - The application will download all available posts from the blogs based on your configuration.
117
-
118
-
119
-
120
- ## Setup
121
-
122
- #### 1. Move Text Post Files to the Data Folder
123
-
124
- - Organize your text post files by moving them to the `./data` directory within the project. Ensure that all your post files are stored here for easy access by the software.
125
-
126
- #### 2. Rename Text Post Files
127
-
128
- - Rename each text post file following the format:
129
-
130
- ```
131
- blogname_typeofpost.txt
132
- ```
133
-
134
- For example:
135
-
136
- - If the blog name is "myblog" and the post is a "texts", rename it to `myblog_texts.txt`.
137
-
138
- #### 3. Install Python 3.11
139
-
140
- - **For Windows:**
141
-
142
- 1. Download Python 3.11 from the [official Python website](https://www.python.org/downloads/windows/).
143
- 2. Run the installer and select the checkbox to "Add Python to PATH" during installation.
144
- 3. Complete the installation process by following the prompts.
145
-
146
- - **For Linux:**
147
-
148
- 1. Open the terminal.
149
-
150
- 2. Run the following commands:
151
-
152
- ```bash
153
- sudo apt update
154
- sudo apt install python3.11
155
- ```
156
-
157
- #### 4. Install Pip
158
-
159
- - **For Windows:**
160
-
161
- 1. Open Command Prompt.
162
-
163
- 2. Run the following command to install pip:
164
-
165
- ```bash
166
- python -m ensurepip --upgrade
167
- ```
168
-
169
- - **For Linux:**
170
-
171
- 1. Open the terminal.
172
-
173
- 2. Run the following command:
174
-
175
- ```bash
176
- sudo apt install python3-pip
177
- ```
178
-
179
- #### 5. Install Virtualenv
180
-
181
- - **For Windows:**
182
-
183
- 1. Open Command Prompt.
184
-
185
- 2. Run the following command to install `virtualenv`:
186
-
187
- ```bash
188
- pip install virtualenv
189
- ```
190
-
191
- - **For Linux:**
192
-
193
- 1. Open the terminal.
194
-
195
- 2. Run the following command:
196
-
197
- ```bash
198
- sudo pip install virtualenv
199
- ```
200
-
201
- #### 6. Run the Setup Script
202
-
203
- - **For Windows:**
204
-
205
- 1. Navigate to the project directory in Command Prompt.
206
-
207
- 2. Run the following command:
208
-
209
- ```bash
210
- setup.bat
211
- ```
212
-
213
- - **For Linux:**
214
-
215
- 1. Open the terminal and navigate to the project directory.
216
-
217
- 2. Run the following command:
218
-
219
- ```bash
220
- ./setup.sh
221
- ```
222
-
223
- #### 7. Run Python Programs within the Virtual Environment
224
-
225
- - **For Windows:**
226
-
227
- 1. Activate the virtual environment by running:
228
-
229
- ```bash
230
- .\venv\Scripts\activate
231
- ```
232
-
233
- 2. Once activated, run any of the Python programs with:
234
-
235
- ```bash
236
- python {program}.py
237
- ```
238
-
239
- - **For Linux:**
240
-
241
- 1. Activate the virtual environment by running:
242
-
243
- ```bash
244
- source venv/bin/activate
245
- ```
246
-
247
- 2. Once activated, run any of the Python programs with:
248
-
249
- ```bash
250
- python3 {program}.py
251
- ```
252
-
253
-
254
-
255
- ## Testing
256
-
257
- 1. **Configure the Settings**:
258
- - Open the config file and fill in the required options.
259
- - Leave the `model` field blank for now, as it will be completed later after model training is complete.
260
-
261
- 2. **Prepare Training Data**:
262
- - Place your correctly formatted post files into the `./data/` directory.
263
- - Run the script `create_training_data.py` to generate the necessary training data.
264
-
265
- 3. **Upload Training Data**:
266
- - Once the training files are generated, locate them in the `./output/` folder.
267
- - Upload these files to OpenAI's fine-tuning web portal.
268
- - **Ensure you select the most recent version of the `gpt-4o-mini` model** for the fine-tuning process.
269
-
270
- 4. **Update the Config File**:
271
- - After the fine-tuning process is complete, copy the model identifier from OpenAI’s web portal.
272
- - Paste the model identifier into the `model` field of your config file.
273
-
274
- 5. **Test the Model**:
275
- - Run the script `4tv_tumblrbot.py` to generate posts. These posts will be saved in your Tumblr drafts.
276
- - Review the output in your drafts. If needed, repeat the process with different blogs or make adjustments to the training data to improve the model.
277
-
278
-
279
-
280
- ## Deployment
281
-
282
- ### Linux Instructions
283
-
284
- 1. **Place Your Python Script and Virtual Environment**
285
-
286
- - Ensure `4tv_tumblrbot.py` and the virtual environment (`.venv/`) are located in `/home/user/4tv-tumblrbot-1.0/`.
287
-
288
- 2. **Create a Shell Script to Run the Python Script**
289
-
290
- - Create a shell script named `run_4tv_tumblrbot.sh` in the same directory:
291
-
292
- ```bash
293
- #!/bin/bash
294
- cd /home/user/4tv-tumblrbot-1.0/
295
- source .venv/bin/activate
296
- python 4tv_tumblrbot.py
297
- deactivate
298
- ```
299
-
300
- 3. **Make the Shell Script Executable**
301
-
302
- - Run the following command to make the shell script executable:
303
-
304
- ```bash
305
- chmod +x /home/user/4tv-tumblrbot-1.0/run_4tv_tumblrbot.sh
306
- ```
307
-
308
- 4. **Schedule the Script Using Cron**
309
-
310
- - Open the cron table:
311
-
312
- ```bash
313
- crontab -e
314
- ```
315
-
316
- - Add the following line to schedule the script to run daily at 2 AM:
317
-
318
- ```bash
319
- 0 2 * * * /home/user/4tv-tumblrbot-1.0/run_4tv_tumblrbot.sh >> /home/user/4tv-tumblrbot-1.0/output.log 2>&1
320
- ```
321
-
322
- - This will log the output to `output.log` in the script's directory.
323
-
324
- 5. **Verify the Cron Job**
325
-
326
- - List your cron jobs to ensure it was added correctly:
327
-
328
- ```bash
329
- crontab -l
330
- ```
331
-
332
- ### Windows Instructions
333
-
334
- 1. **Place Your Python Script and Virtual Environment**
335
-
336
- - Ensure `4tv_tumblrbot.py` and the virtual environment (`.venv/`) are located in `C:\Users\user\4tv-tumblrbot-1.0\`.
337
-
338
- 2. **Create a Batch File to Run the Python Script**
339
-
340
- - Create a batch file named `run_4tv_tumblrbot.bat` in the same directory:
341
-
342
- ```batch
343
- @echo off
344
- cd C:\Users\user\4tv-tumblrbot-1.0\
345
- call .venv\Scripts\activate
346
- python 4tv_tumblrbot.py
347
- call .venv\Scripts\deactivate
348
- ```
349
-
350
- 3. **Schedule the Script Using Task Scheduler**
351
-
352
- - Open **Task Scheduler** by searching for it in the Start Menu.
353
- - Click **Create Basic Task** and name your task (e.g., “Run 4tv Tumblrbot Daily”).
354
- - Set the trigger to "Daily" and select the time (e.g., 2:00 AM).
355
- - Choose "Start a program" as the action.
356
- - In the **Program/script** field, enter the path to the batch file (`C:\Users\user\4tv-tumblrbot-1.0\run_4tv_tumblrbot.bat`).
357
- - Click **Finish** to create the task.
358
-
359
- 4. **Verify the Task**
360
-
361
- - Check the **Task Scheduler Library** to confirm your task is listed.
362
- - Optionally, run the task manually to ensure it works by right-clicking the task and selecting **Run**.
363
-
364
- ### Additional Notes:
365
-
366
- - Make sure Python and the virtual environment are properly set up on both systems.
367
- - Adjust paths and timings according to your preferences and system configurations.
368
- - Ensure the script and virtual environment have appropriate permissions and configurations to run correctly.
369
-
@@ -1,17 +0,0 @@
1
- tumblrbot/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
2
- tumblrbot/__main__.py,sha256=elqi-tHjPr6B5u1PB1n1OqsbZQr_oKwF1D9XVi7XxTs,2519
3
- tumblrbot/flow/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
4
- tumblrbot/flow/download.py,sha256=L16D2Jk9Yvm5Xc_AMMSscvZCUJdYQHdcr1w-bRz1cQA,2087
5
- tumblrbot/flow/examples.py,sha256=LxDyByKWJOczjDBmk6lj5-0sCwqGZrd3lPHthMZQgGQ,4196
6
- tumblrbot/flow/fine_tune.py,sha256=vk2sbqpp-F_j2757R6jnQSUIk5iKAO6FACMY2D8tCy8,4058
7
- tumblrbot/flow/generate.py,sha256=2CXUJN9_fdfAhdq1ckZDrAJLb13XFI-MgL0YtD8xlvk,2081
8
- tumblrbot/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
9
- tumblrbot/utils/common.py,sha256=3PPyutv7yyjvij5anyKQRDRn7lrr8Eu05m8XVhh74Hc,1342
10
- tumblrbot/utils/models.py,sha256=rUi6bCjow0lhUr73vG5gMimz_hNOTrpvvbXjebFIHzg,1976
11
- tumblrbot/utils/settings.py,sha256=oTTR72WW73pZ2OEJl8JyRlKXtV4VKxVevcMeLx6YHJU,5734
12
- tumblrbot/utils/tumblr.py,sha256=AbRzQrOV2owahIWn2ekP2E0xjZpl2tFhI_dfP8n5L24,2929
13
- tumblrbot-1.0.0.dist-info/entry_points.txt,sha256=lTiN7PxAbyGY1fpCWApEw6NUIUgobfcOKhvn6cu3IQA,53
14
- tumblrbot-1.0.0.dist-info/licenses/UNLICENSE,sha256=8Bl77UGlO95Tuu1FjTzqAPr-UU_A11XBQdPct1-E3qE,1236
15
- tumblrbot-1.0.0.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82
16
- tumblrbot-1.0.0.dist-info/METADATA,sha256=H0wykWYLA-P2Fv374I8LRISxRppgqdWpUojHOiweKWA,11413
17
- tumblrbot-1.0.0.dist-info/RECORD,,
@@ -1,24 +0,0 @@
1
- This is free and unencumbered software released into the public domain.
2
-
3
- Anyone is free to copy, modify, publish, use, compile, sell, or
4
- distribute this software, either in source code form or as a compiled
5
- binary, for any purpose, commercial or non-commercial, and by any
6
- means.
7
-
8
- In jurisdictions that recognize copyright laws, the author or authors
9
- of this software dedicate any and all copyright interest in the
10
- software to the public domain. We make this dedication for the benefit
11
- of the public at large and to the detriment of our heirs and
12
- successors. We intend this dedication to be an overt act of
13
- relinquishment in perpetuity of all present and future rights to this
14
- software under copyright law.
15
-
16
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
17
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19
- IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20
- OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21
- ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22
- OTHER DEALINGS IN THE SOFTWARE.
23
-
24
- For more information, please refer to <https://unlicense.org/>