tumblrbot 1.4.1.tar.gz → 1.4.2.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: tumblrbot
- Version: 1.4.1
+ Version: 1.4.2
  Summary: An updated bot that posts to Tumblr, based on your very own blog!
  Requires-Python: >= 3.13
  Description-Content-Type: text/markdown
@@ -11,6 +11,7 @@ Requires-Dist: niquests[speedups, http3]
  Requires-Dist: openai
  Requires-Dist: pydantic
  Requires-Dist: pydantic-settings
+ Requires-Dist: requests
  Requires-Dist: requests-cache
  Requires-Dist: requests-oauthlib
  Requires-Dist: rich
@@ -86,7 +87,8 @@ Features:
  - Linux (pacman): `pacman install python-pip`
  1. Install the [pip] package: `pip install tumblrbot`
  - Alternatively, you can install from this repository: `pip install git+https://github.com/MaidThatPrograms/tumblrbot.git`
- - On Linux, you will have to make a virtual environment.
+ - On Linux, you will have to make a virtual environment or use the flag to install packages system-wide.
+ - See [keyring] for additional requirements if you are not on Windows.

  ## Usage
  Run `tumblrbot` from anywhere. Run `tumblrbot --help` for command-line options. Every command-line option corresponds to a value from the [config](#configuration).
@@ -66,7 +66,8 @@ Features:
  - Linux (pacman): `pacman install python-pip`
  1. Install the [pip] package: `pip install tumblrbot`
  - Alternatively, you can install from this repository: `pip install git+https://github.com/MaidThatPrograms/tumblrbot.git`
- - On Linux, you will have to make a virtual environment.
+ - On Linux, you will have to make a virtual environment or use the flag to install packages system-wide.
+ - See [keyring] for additional requirements if you are not on Windows.

  ## Usage
  Run `tumblrbot` from anywhere. Run `tumblrbot --help` for command-line options. Every command-line option corresponds to a value from the [config](#configuration).
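The keyring note added in both hunks above ties into `Tokens.read_from_keyring()` in the application code further down. A minimal sketch of the underlying pattern with the keyring package; the service and key names here are illustrative, not the ones tumblrbot actually uses:

```python
import keyring

# Illustrative service/key names; tumblrbot's real identifiers may differ.
SERVICE = "tumblrbot-example"

# Store a secret in the platform keyring (Credential Manager on Windows,
# Keychain on macOS, a Secret Service provider such as GNOME Keyring on Linux).
keyring.set_password(SERVICE, "openai_api_key", "sk-example")

# Read it back later, for example at startup.
print(keyring.get_password(SERVICE, "openai_api_key"))
```

On Linux, keyring generally needs a Secret Service backend (GNOME Keyring, KWallet, or similar) to be available, which is likely the extra requirement the new README line refers to.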
@@ -1,6 +1,6 @@
  [project]
  name = "tumblrbot"
- version = "1.4.1"
+ version = "1.4.2"
  description = "An updated bot that posts to Tumblr, based on your very own blog!"
  readme = "README.md"
  requires-python = ">= 3.13"
@@ -12,6 +12,7 @@ dependencies = [
  "openai",
  "pydantic",
  "pydantic-settings",
+ "requests",
  "requests-cache",
  "requests-oauthlib",
  "rich",
@@ -6,8 +6,9 @@ from tumblrbot.flow.download import PostDownloader
  from tumblrbot.flow.examples import ExamplesWriter
  from tumblrbot.flow.fine_tune import FineTuner
  from tumblrbot.flow.generate import DraftGenerator
+ from tumblrbot.utils.common import FlowClass
  from tumblrbot.utils.models import Tokens
- from tumblrbot.utils.tumblr import TumblrClient
+ from tumblrbot.utils.tumblr import TumblrSession


  def main() -> None:
@@ -16,7 +17,7 @@ def main() -> None:
  tokens = Tokens.read_from_keyring()
  with (
  OpenAI(api_key=tokens.openai_api_key.get_secret_value(), http_client=DefaultHttpxClient(http2=True)) as openai,
- TumblrClient(tokens=tokens) as tumblr,
+ TumblrSession(tokens=tokens) as tumblr,
  ):
  post_downloader = PostDownloader(openai, tumblr)
  if Confirm.ask("Download latest posts?", default=False):
@@ -30,7 +31,9 @@ def main() -> None:

  fine_tuner = FineTuner(openai, tumblr, estimated_tokens)
  fine_tuner.print_estimates()
- if Confirm.ask("Upload data to OpenAI for fine-tuning? [bold]You must do this to set the model to generate drafts from. Alternatively, manually enter a model into the config.", default=False):
+
+ message = "Resume monitoring the previous fine-tuning process?" if FlowClass.config.job_id else "Upload data to OpenAI for fine-tuning?"
+ if Confirm.ask(f"{message} [bold]You must do this to set the model to generate drafts from. Alternatively, manually enter a model into the config", default=False):
  fine_tuner.fine_tune()

  if Confirm.ask("Generate drafts?", default=False):
@@ -1,10 +1,12 @@
  from dataclasses import dataclass
  from datetime import datetime
  from textwrap import dedent
- from time import sleep
+ from time import sleep, time

  import rich
  from openai.types.fine_tuning import FineTuningJob
+ from rich import progress
+ from rich.prompt import Confirm

  from tumblrbot.utils.common import FlowClass, PreviewLive

@@ -18,26 +20,27 @@ class FineTuner(FlowClass):
  rich.print(dedent(text).lstrip())

  def fine_tune(self) -> None:
- with PreviewLive() as live:
- job = self.create_job(live)
+ job = self.create_job()

- self.dedent_print(f"""
- [bold]Fine-tuning is starting...[/]
- View it online at: https://platform.openai.com/finetune/{job.id}
- Created at: {datetime.fromtimestamp(job.created_at)}
- Base Model: {job.model}
+ self.dedent_print(f"""
+ [bold]Fine-tuning is starting...[/]
+ View it online at: https://platform.openai.com/finetune/{job.id}
+ Created at: {datetime.fromtimestamp(job.created_at)}
+ Base Model: {job.model}

- [italic dim]Closing this terminal will not stop the fine-tuning. This will take a while...
- """) # noqa: DTZ006
+ [italic dim]Closing this terminal will not stop the fine-tuning. This will take a while...\
+ """) # noqa: DTZ006

+ with PreviewLive() as live:
  task_id = live.progress.add_task("", total=None)

- while job.status not in {"succeeded", "failed", "cancelled"}:
+ while job.status in {"validating_files", "queued", "running"}:
  job = self.poll_job_status()

  live.progress.update(
  task_id,
- total=job.estimated_finish,
+ total=job.estimated_finish - job.created_at if job.estimated_finish else None,
+ completed=time() - job.created_at,
  description=f"Fine-tuning: [italic]{job.status.replace('_', ' ').title()}[/]...",
  )

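The reworked polling loop above sizes its progress bar from the job's own timestamps: the total is the estimated duration (`estimated_finish - created_at`, or indeterminate when no estimate is available yet) and the completed amount is the wall-clock time elapsed since the job was created. A self-contained sketch of that arithmetic using toy timestamps and a plain rich progress bar, with no OpenAI calls:

```python
from time import sleep, time

from rich.progress import Progress

# Toy epoch timestamps standing in for the fine-tuning job's fields.
created_at = time() - 30             # the job was created 30 seconds ago
estimated_finish = created_at + 120  # the API estimates a two-minute run

with Progress() as progress:
    # Total is the estimated duration; None would render an indeterminate bar.
    task_id = progress.add_task("Fine-tuning", total=estimated_finish - created_at)
    for _ in range(5):
        # Completed is simply the time elapsed since the job was created.
        progress.update(task_id, completed=time() - created_at)
        sleep(1)
```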
@@ -45,15 +48,16 @@ class FineTuner(FlowClass):

  self.process_completed_job(job)

- def create_job(self, live: PreviewLive) -> FineTuningJob:
+ def create_job(self) -> FineTuningJob:
  if self.config.job_id:
  return self.poll_job_status()

- with live.progress.open(self.config.examples_file, "rb", description=f"Uploading {self.config.examples_file}...") as fp:
+ with progress.open(self.config.examples_file, "rb", description=f"Uploading [purple]{self.config.examples_file}[/]...") as fp:
  file = self.openai.files.create(
  file=fp,
  purpose="fine-tune",
  )
+ rich.print()

  job = self.openai.fine_tuning.jobs.create(
  model=self.config.base_model,
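`rich.progress.open`, which the upload step above now uses, wraps a file in a temporary progress bar that advances as the file is read. A small sketch with a throwaway file; the path and description are illustrative:

```python
from pathlib import Path

from rich import progress

# Throwaway file standing in for the configured examples file.
path = Path("examples.jsonl")
path.write_text('{"example": 1}\n' * 1000)

# Reading through the returned handle drives the progress bar.
with progress.open(path, "rb", description=f"Uploading [purple]{path}[/]...") as fp:
    data = fp.read()

print(f"read {len(data)} bytes")
```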
@@ -86,8 +90,13 @@ class FineTuner(FlowClass):

  self.config.job_id = ""

- if job.status == "failed" and job.error is not None:
- raise RuntimeError(job.error.message)
+ if job.status != "succeeded":
+ if Confirm.ask("[gray62]Delete uploaded examples file?", default=False):
+ self.openai.files.delete(job.training_file)
+ rich.print()
+
+ if job.status == "failed" and job.error is not None:
+ raise RuntimeError(job.error.message)

  if job.fine_tuned_model:
  self.config.fine_tuned_model = job.fine_tuned_model or ""
@@ -10,7 +10,7 @@ from rich.progress import MofNCompleteColumn, Progress, SpinnerColumn, TimeElaps
  from rich.table import Table

  from tumblrbot.utils.config import Config
- from tumblrbot.utils.tumblr import TumblrClient
+ from tumblrbot.utils.tumblr import TumblrSession


  @dataclass
@@ -18,7 +18,7 @@ class FlowClass:
  config: ClassVar = Config() # pyright: ignore[reportCallIssue]

  openai: OpenAI
- tumblr: TumblrClient
+ tumblr: TumblrSession


  class PreviewLive(Live):
@@ -1,7 +1,8 @@
  from dataclasses import dataclass
  from typing import Self

- from niquests import HTTPError, PreparedRequest, Response, Session
+ from niquests import HTTPError, Session
+ from requests import Response
  from requests_cache import CacheMixin
  from requests_oauthlib import OAuth1

@@ -9,12 +10,12 @@ from tumblrbot.utils.models import Post, Tokens


  @dataclass
- class TumblrClient(Session, CacheMixin): # pyright: ignore[reportIncompatibleMethodOverride, reportIncompatibleVariableOverride]
+ class TumblrSession(CacheMixin, Session): # pyright: ignore[reportIncompatibleMethodOverride, reportIncompatibleVariableOverride]
  tokens: Tokens

  def __post_init__(self) -> None:
- super().__init__(happy_eyeballs=True)
  CacheMixin.__init__(self, use_cache_dir=True)
+ Session.__init__(self, happy_eyeballs=True)

  self.auth = OAuth1(**self.tokens.tumblr.model_dump(mode="json"))
  self.hooks["response"].append(self.response_hook)
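The class above now lists `CacheMixin` before `Session` and initializes each base explicitly. The order matters because Python's method resolution order only lets a mixin intercept methods such as `request()` when it comes before the concrete session class. A self-contained toy sketch with stand-in classes (not the real requests_cache or niquests types):

```python
class Session:
    def request(self, url: str) -> str:
        return f"fetched {url}"


class CacheMixin:
    def __init__(self) -> None:
        self.cache: dict[str, str] = {}

    def request(self, url: str) -> str:
        # Serve from the cache when possible; otherwise defer to the next
        # class in the MRO (the session) and remember its answer.
        if url not in self.cache:
            self.cache[url] = super().request(url)
        return self.cache[url]


class CachedSession(CacheMixin, Session):
    """Mixin first: CacheMixin.request wraps Session.request."""


class PassthroughSession(Session, CacheMixin):
    """Session first: Session.request wins and the cache is never consulted."""


session = CachedSession()
print(session.request("https://api.tumblr.com/v2"))  # calls Session.request once
print(session.request("https://api.tumblr.com/v2"))  # now answered from the cache
print([cls.__name__ for cls in PassthroughSession.__mro__])
```

With the old base order, a mixin like this is bypassed for any method the session also defines, which is presumably what the reordering addresses.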
@@ -23,22 +24,21 @@ class TumblrClient(Session, CacheMixin): # pyright: ignore[reportIncompatibleMe
  super().__enter__()
  return self

- def response_hook(self, response: PreparedRequest | Response) -> None:
- if isinstance(response, Response):
- try:
- response.raise_for_status()
- except HTTPError as error:
- if response.text:
- error.add_note(response.text)
- raise
+ def response_hook(self, response: Response, **_: object) -> None:
+ try:
+ response.raise_for_status()
+ except HTTPError as error:
+ if response.text:
+ error.add_note(response.text)
+ raise

  def retrieve_published_posts(self, blog_identifier: str, after: int) -> Response:
  return self.get(
  f"https://api.tumblr.com/v2/blog/{blog_identifier}/posts",
  params={
- "after": str(after),
+ "after": after,
  "sort": "asc",
- "npf": str(True),
+ "npf": True,
  },
  )

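The rewritten hook above uses the plain requests-style callback signature `(response, **kwargs)` and attaches the response body to the raised `HTTPError` via `add_note()` (available since Python 3.11). A minimal sketch of the same pattern against the requests library on its own, rather than the niquests plus requests-cache stack the package combines; the URL is just a public test endpoint:

```python
from requests import HTTPError, Response, Session


def response_hook(response: Response, **_: object) -> None:
    # Raise for 4xx/5xx responses, attaching the body to the error as a note.
    try:
        response.raise_for_status()
    except HTTPError as error:
        if response.text:
            error.add_note(response.text)
        raise


session = Session()
# requests invokes every registered response hook as hook(response, **kwargs).
session.hooks["response"].append(response_hook)

try:
    session.get("https://httpbin.org/status/418")
except HTTPError as error:
    print(error)
```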