qbitrr2-5.5.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- qBitrr/__init__.py +14 -0
- qBitrr/arss.py +7100 -0
- qBitrr/auto_update.py +382 -0
- qBitrr/bundled_data.py +7 -0
- qBitrr/config.py +192 -0
- qBitrr/config_version.py +144 -0
- qBitrr/db_lock.py +400 -0
- qBitrr/db_recovery.py +202 -0
- qBitrr/env_config.py +73 -0
- qBitrr/errors.py +41 -0
- qBitrr/ffprobe.py +105 -0
- qBitrr/gen_config.py +1331 -0
- qBitrr/home_path.py +23 -0
- qBitrr/logger.py +235 -0
- qBitrr/main.py +790 -0
- qBitrr/search_activity_store.py +92 -0
- qBitrr/static/assets/ArrView.js +2 -0
- qBitrr/static/assets/ArrView.js.map +1 -0
- qBitrr/static/assets/ConfigView.js +4 -0
- qBitrr/static/assets/ConfigView.js.map +1 -0
- qBitrr/static/assets/LogsView.js +2 -0
- qBitrr/static/assets/LogsView.js.map +1 -0
- qBitrr/static/assets/ProcessesView.js +2 -0
- qBitrr/static/assets/ProcessesView.js.map +1 -0
- qBitrr/static/assets/app.css +1 -0
- qBitrr/static/assets/app.js +11 -0
- qBitrr/static/assets/app.js.map +1 -0
- qBitrr/static/assets/build.svg +3 -0
- qBitrr/static/assets/check-mark.svg +5 -0
- qBitrr/static/assets/close.svg +4 -0
- qBitrr/static/assets/download.svg +5 -0
- qBitrr/static/assets/gear.svg +5 -0
- qBitrr/static/assets/live-streaming.svg +8 -0
- qBitrr/static/assets/log.svg +3 -0
- qBitrr/static/assets/logo.svg +48 -0
- qBitrr/static/assets/plus.svg +4 -0
- qBitrr/static/assets/process.svg +15 -0
- qBitrr/static/assets/react-select.esm.js +7 -0
- qBitrr/static/assets/react-select.esm.js.map +1 -0
- qBitrr/static/assets/refresh-arrow.svg +3 -0
- qBitrr/static/assets/table.js +5 -0
- qBitrr/static/assets/table.js.map +1 -0
- qBitrr/static/assets/trash.svg +8 -0
- qBitrr/static/assets/up-arrow.svg +3 -0
- qBitrr/static/assets/useInterval.js +2 -0
- qBitrr/static/assets/useInterval.js.map +1 -0
- qBitrr/static/assets/vendor.js +2 -0
- qBitrr/static/assets/vendor.js.map +1 -0
- qBitrr/static/assets/visibility.svg +9 -0
- qBitrr/static/index.html +33 -0
- qBitrr/static/logov2-clean.svg +48 -0
- qBitrr/static/manifest.json +23 -0
- qBitrr/static/sw.js +87 -0
- qBitrr/static/vite.svg +1 -0
- qBitrr/tables.py +143 -0
- qBitrr/utils.py +274 -0
- qBitrr/versioning.py +136 -0
- qBitrr/webui.py +3114 -0
- qbitrr2-5.5.5.dist-info/METADATA +1191 -0
- qbitrr2-5.5.5.dist-info/RECORD +64 -0
- qbitrr2-5.5.5.dist-info/WHEEL +5 -0
- qbitrr2-5.5.5.dist-info/entry_points.txt +2 -0
- qbitrr2-5.5.5.dist-info/licenses/LICENSE +21 -0
- qbitrr2-5.5.5.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,23 @@
|
|
|
1
|
+
{
|
|
2
|
+
"background_color": "#101114",
|
|
3
|
+
"categories": [
|
|
4
|
+
"utilities",
|
|
5
|
+
"productivity"
|
|
6
|
+
],
|
|
7
|
+
"description": "A Radarr and Sonarr companion app for managing torrents",
|
|
8
|
+
"display": "standalone",
|
|
9
|
+
"icons": [
|
|
10
|
+
{
|
|
11
|
+
"purpose": "any maskable",
|
|
12
|
+
"sizes": "any",
|
|
13
|
+
"src": "/logov2-clean.svg",
|
|
14
|
+
"type": "image/svg+xml"
|
|
15
|
+
}
|
|
16
|
+
],
|
|
17
|
+
"name": "qBitrr",
|
|
18
|
+
"orientation": "any",
|
|
19
|
+
"screenshots": [],
|
|
20
|
+
"short_name": "qBitrr",
|
|
21
|
+
"start_url": "/",
|
|
22
|
+
"theme_color": "#7aa2f7"
|
|
23
|
+
}
|
qBitrr/static/sw.js
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
1
|
+
// qBitrr Service Worker
|
|
2
|
+
// Update cache version on every deployment to force refresh
|
|
3
|
+
const CACHE_VERSION = Date.now();
|
|
4
|
+
const CACHE_NAME = `qbitrr-v${CACHE_VERSION}`;
|
|
5
|
+
const RUNTIME_CACHE = `qbitrr-runtime-v${CACHE_VERSION}`;
|
|
6
|
+
|
|
7
|
+
// Assets to cache on install
|
|
8
|
+
const PRECACHE_URLS = [
|
|
9
|
+
'/',
|
|
10
|
+
'/index.html',
|
|
11
|
+
];
|
|
12
|
+
|
|
13
|
+
// Install: precache the app shell, then activate this worker immediately.
self.addEventListener('install', (event) => {
  const precache = async () => {
    const cache = await caches.open(CACHE_NAME);
    await cache.addAll(PRECACHE_URLS);
    await self.skipWaiting();
  };
  event.waitUntil(precache());
});
|
|
21
|
+
|
|
22
|
+
// Activate: delete caches left over from previous versions, then take
// control of all open clients without waiting for a reload.
self.addEventListener('activate', (event) => {
  const cleanup = async () => {
    const names = await caches.keys();
    const stale = names.filter((name) => name !== CACHE_NAME && name !== RUNTIME_CACHE);
    await Promise.all(stale.map((name) => caches.delete(name)));
    await self.clients.claim();
  };
  event.waitUntil(cleanup());
});
|
|
34
|
+
|
|
35
|
+
// Fetch event - network-first strategy for API calls, cache-first for assets
// NOTE(review): RUNTIME_CACHE has no eviction/size limit, so cached API and
// asset responses accumulate until the cache name rotates — confirm this is
// acceptable for the expected traffic.
self.addEventListener('fetch', (event) => {
  const { request } = event;
  const url = new URL(request.url);

  // Skip non-GET requests
  if (request.method !== 'GET') {
    return;
  }

  // Network-first for API calls
  if (url.pathname.startsWith('/api/')) {
    event.respondWith(
      fetch(request)
        .then((response) => {
          // Clone the response before caching
          // (a Response body can only be consumed once).
          const responseToCache = response.clone();
          caches.open(RUNTIME_CACHE).then((cache) => {
            cache.put(request, responseToCache);
          });
          return response;
        })
        .catch(() => {
          // Return cached response if network fails
          // (undefined if nothing was ever cached — browser reports a fetch error).
          return caches.match(request);
        })
    );
    return;
  }

  // Cache-first for static assets
  event.respondWith(
    caches.match(request).then((cachedResponse) => {
      if (cachedResponse) {
        return cachedResponse;
      }

      return fetch(request).then((response) => {
        // Don't cache non-successful responses
        // (opaque cross-origin responses report status 0, so the status
        // check filters those out as well).
        if (!response || response.status !== 200 || response.type === 'error') {
          return response;
        }

        const responseToCache = response.clone();
        caches.open(RUNTIME_CACHE).then((cache) => {
          cache.put(request, responseToCache);
        });

        return response;
      });
    })
  );
});
|
qBitrr/static/vite.svg
ADDED
|
@@ -0,0 +1 @@
|
|
|
1
|
+
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" role="img" class="iconify iconify--logos" width="31.88" height="32" preserveAspectRatio="xMidYMid meet" viewBox="0 0 256 257"><defs><linearGradient id="IconifyId1813088fe1fbc01fb466" x1="-.828%" x2="57.636%" y1="7.652%" y2="78.411%"><stop offset="0%" stop-color="#41D1FF"></stop><stop offset="100%" stop-color="#BD34FE"></stop></linearGradient><linearGradient id="IconifyId1813088fe1fbc01fb467" x1="43.376%" x2="50.316%" y1="2.242%" y2="89.03%"><stop offset="0%" stop-color="#FFEA83"></stop><stop offset="8.333%" stop-color="#FFDD35"></stop><stop offset="100%" stop-color="#FFA800"></stop></linearGradient></defs><path fill="url(#IconifyId1813088fe1fbc01fb466)" d="M255.153 37.938L134.897 252.976c-2.483 4.44-8.862 4.466-11.382.048L.875 37.958c-2.746-4.814 1.371-10.646 6.827-9.67l120.385 21.517a6.537 6.537 0 0 0 2.322-.004l117.867-21.483c5.438-.991 9.574 4.796 6.877 9.62Z"></path><path fill="url(#IconifyId1813088fe1fbc01fb467)" d="M185.432.063L96.44 17.501a3.268 3.268 0 0 0-2.634 3.014l-5.474 92.456a3.268 3.268 0 0 0 3.997 3.378l24.777-5.718c2.318-.535 4.413 1.507 3.936 3.838l-7.361 36.047c-.495 2.426 1.782 4.5 4.151 3.78l15.304-4.649c2.372-.72 4.652 1.36 4.15 3.788l-11.698 56.621c-.732 3.542 3.979 5.473 5.943 2.437l1.313-2.028l72.516-144.72c1.215-2.423-.88-5.186-3.54-4.672l-25.505 4.922c-2.396.462-4.435-1.77-3.759-4.114l16.646-57.705c.677-2.35-1.37-4.583-3.769-4.113Z"></path></svg>
|
qBitrr/tables.py
ADDED
|
@@ -0,0 +1,143 @@
|
|
|
1
|
+
from peewee import BooleanField, CharField, DateTimeField, IntegerField, Model, TextField
|
|
2
|
+
|
|
3
|
+
|
|
4
|
+
class FilesQueued(Model):
    """Peewee model tracking queued items by their Arr entry id.

    NOTE(review): no inner ``Meta``/database is declared on these models;
    presumably the database is bound at runtime elsewhere — confirm.
    """

    # Arr entry id; also the primary key.
    EntryId = IntegerField(primary_key=True, null=False, unique=True)
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class MoviesFilesModel(Model):
    """Peewee model caching Radarr movie state used by the search loops."""

    Title = CharField()
    Monitored = BooleanField()
    TmdbId = IntegerField()
    Year = IntegerField()
    # Radarr's internal movie id (unique, but not the primary key here).
    EntryId = IntegerField(unique=True)
    Searched = BooleanField(default=False)
    MovieFileId = IntegerField()
    IsRequest = BooleanField(default=False)
    QualityMet = BooleanField(default=False)
    Upgrade = BooleanField(default=False)
    CustomFormatScore = IntegerField(null=True)
    MinCustomFormatScore = IntegerField(null=True)
    CustomFormatMet = BooleanField(default=False)
    # Free-form explanation of why the entry is in its current state.
    Reason = TextField(null=True)
    # Quality profile from Arr API
    QualityProfileId = IntegerField(null=True)
    QualityProfileName = TextField(null=True)
    # Profile switching state tracking
    LastProfileSwitchTime = DateTimeField(formats=["%Y-%m-%d %H:%M:%S.%f"], null=True)
    CurrentProfileId = IntegerField(null=True)
    OriginalProfileId = IntegerField(null=True)
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class EpisodeFilesModel(Model):
    """Peewee model caching Sonarr episode state used by the search loops."""

    # Sonarr's episode id; primary key.
    EntryId = IntegerField(primary_key=True)
    SeriesTitle = TextField(null=True)
    Title = TextField(null=True)
    SeriesId = IntegerField(null=False)
    EpisodeFileId = IntegerField(null=True)
    EpisodeNumber = IntegerField(null=False)
    SeasonNumber = IntegerField(null=False)
    AbsoluteEpisodeNumber = IntegerField(null=True)
    SceneAbsoluteEpisodeNumber = IntegerField(null=True)
    AirDateUtc = DateTimeField(formats=["%Y-%m-%d %H:%M:%S.%f"], null=True)
    Monitored = BooleanField(null=True)
    Searched = BooleanField(default=False)
    IsRequest = BooleanField(default=False)
    QualityMet = BooleanField(default=False)
    Upgrade = BooleanField(default=False)
    CustomFormatScore = IntegerField(null=True)
    MinCustomFormatScore = IntegerField(null=True)
    CustomFormatMet = BooleanField(default=False)
    # Free-form explanation of why the entry is in its current state.
    Reason = TextField(null=True)
    # Quality profile from Arr API (inherited from series)
    QualityProfileId = IntegerField(null=True)
    QualityProfileName = TextField(null=True)
    # Profile switching state tracking
    LastProfileSwitchTime = DateTimeField(formats=["%Y-%m-%d %H:%M:%S.%f"], null=True)
    CurrentProfileId = IntegerField(null=True)
    OriginalProfileId = IntegerField(null=True)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class SeriesFilesModel(Model):
    """Peewee model caching per-series Sonarr state."""

    # Sonarr's series id; primary key.
    EntryId = IntegerField(primary_key=True)
    Title = TextField(null=True)
    Monitored = BooleanField(null=True)
    Searched = BooleanField(default=False)
    Upgrade = BooleanField(default=False)
    MinCustomFormatScore = IntegerField(null=True)
    # Quality profile from Arr API
    QualityProfileId = IntegerField(null=True)
    QualityProfileName = TextField(null=True)
|
|
71
|
+
|
|
72
|
+
|
|
73
|
+
class MovieQueueModel(Model):
    """Tracks which movie entries have been processed from the queue."""

    EntryId = IntegerField(unique=True)
    Completed = BooleanField(default=False)
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class EpisodeQueueModel(Model):
    """Tracks which episode entries have been processed from the queue."""

    EntryId = IntegerField(unique=True)
    Completed = BooleanField(default=False)
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
class AlbumFilesModel(Model):
    """Peewee model caching Lidarr album state used by the search loops."""

    Title = CharField()
    Monitored = BooleanField()
    # MusicBrainz-style external album id (string, not numeric).
    ForeignAlbumId = CharField()
    ReleaseDate = DateTimeField(formats=["%Y-%m-%d %H:%M:%S.%f"], null=True)
    EntryId = IntegerField(unique=True)
    Searched = BooleanField(default=False)
    AlbumFileId = IntegerField()
    IsRequest = BooleanField(default=False)
    QualityMet = BooleanField(default=False)
    Upgrade = BooleanField(default=False)
    CustomFormatScore = IntegerField(null=True)
    MinCustomFormatScore = IntegerField(null=True)
    CustomFormatMet = BooleanField(default=False)
    # Free-form explanation of why the entry is in its current state.
    Reason = TextField(null=True)
    ArtistId = IntegerField(null=False)
    ArtistTitle = TextField(null=True)
    # Quality profile from Arr API
    QualityProfileId = IntegerField(null=True)
    QualityProfileName = TextField(null=True)
    # Profile switching state tracking
    LastProfileSwitchTime = DateTimeField(formats=["%Y-%m-%d %H:%M:%S.%f"], null=True)
    CurrentProfileId = IntegerField(null=True)
    OriginalProfileId = IntegerField(null=True)
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
class TrackFilesModel(Model):
    """Peewee model caching individual Lidarr track state."""

    # Lidarr's track id; primary key.
    EntryId = IntegerField(primary_key=True)
    AlbumId = IntegerField(null=False)
    TrackNumber = IntegerField(null=True)
    Title = TextField(null=True)
    Duration = IntegerField(null=True)  # Duration in seconds
    HasFile = BooleanField(default=False)
    TrackFileId = IntegerField(null=True)
    Monitored = BooleanField(default=False)
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
class ArtistFilesModel(Model):
    """Peewee model caching per-artist Lidarr state."""

    # Lidarr's artist id; primary key.
    EntryId = IntegerField(primary_key=True)
    Title = TextField(null=True)
    Monitored = BooleanField(null=True)
    Searched = BooleanField(default=False)
    Upgrade = BooleanField(default=False)
    MinCustomFormatScore = IntegerField(null=True)
    # Quality profile from Arr API
    QualityProfileId = IntegerField(null=True)
    QualityProfileName = TextField(null=True)
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
class AlbumQueueModel(Model):
    """Tracks which album entries have been processed from the queue."""

    EntryId = IntegerField(unique=True)
    Completed = BooleanField(default=False)
|
|
135
|
+
|
|
136
|
+
|
|
137
|
+
class TorrentLibrary(Model):
    """Peewee model recording per-torrent bookkeeping flags.

    NOTE(review): ``Hash`` carries no unique constraint here; presumably
    uniqueness is enforced by the caller or by (Hash, Category) usage —
    confirm.
    """

    # qBittorrent torrent hash.
    Hash = TextField(null=False)
    # qBittorrent category the torrent belongs to.
    Category = TextField(null=False)
    AllowedSeeding = BooleanField(default=False)
    Imported = BooleanField(default=False)
    AllowedStalled = BooleanField(default=False)
    FreeSpacePaused = BooleanField(default=False)
|
qBitrr/utils.py
ADDED
|
@@ -0,0 +1,274 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
import pathlib
|
|
5
|
+
import random
|
|
6
|
+
import re
|
|
7
|
+
import socket
|
|
8
|
+
import time
|
|
9
|
+
from typing import Iterator
|
|
10
|
+
|
|
11
|
+
import ping3
|
|
12
|
+
import qbittorrentapi
|
|
13
|
+
from cachetools import TTLCache
|
|
14
|
+
|
|
15
|
+
# Make ping3 raise typed exceptions instead of returning False/None on failure.
ping3.EXCEPTIONS = True

logger = logging.getLogger("qBitrr.Utils")

# Short-lived cache of successful connectivity probes (hostname -> True),
# limiting repeat pings and the likelihood of rate limits.
CACHE = TTLCache(maxsize=50, ttl=60)

# Binary size-unit multipliers used by parse_size (k=KiB, m=MiB, g=GiB, t=TiB).
UNITS = {"k": 1024, "m": 1048576, "g": 1073741824, "t": 1099511627776}
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def with_retry(
    func, *, retries=3, backoff=0.5, max_backoff=5.0, jitter=0.25, exceptions=(Exception,)
):
    """Invoke ``func()``, retrying transient failures with exponential backoff.

    Args:
        func: Zero-argument callable to execute.
        retries: Total attempts including the first; ``1`` disables retrying.
        backoff: Initial delay in seconds; doubled after each failure and
            capped at ``max_backoff``.
        max_backoff: Upper bound (seconds) for any single delay.
        jitter: Maximum random jitter (seconds) added to each delay.
        exceptions: Exception types that trigger a retry; anything else
            propagates immediately.

    Returns:
        Whatever ``func()`` returns on the first successful attempt.

    Raises:
        The last caught exception once all attempts are exhausted.
    """
    failures = 0
    while True:
        try:
            return func()
        except exceptions as exc:
            failures += 1
            if failures >= retries:
                raise
            # Exponential backoff capped at max_backoff, plus random jitter
            # to avoid thundering-herd retries.
            wait = min(max_backoff, backoff * (2 ** (failures - 1)))
            wait += random.random() * jitter
            logger.debug(
                "Retryable error: %s. Retrying in %.2fs (attempt %s/%s)",
                exc,
                wait,
                failures + 1,
                retries,
            )
            time.sleep(wait)
|
|
51
|
+
|
|
52
|
+
|
|
53
|
+
def absolute_file_paths(directory: pathlib.Path | str) -> Iterator[pathlib.Path]:
|
|
54
|
+
file_counter = 0
|
|
55
|
+
error = True
|
|
56
|
+
while error:
|
|
57
|
+
try:
|
|
58
|
+
if file_counter == 50:
|
|
59
|
+
error = False
|
|
60
|
+
yield from pathlib.Path(directory).glob("**/*")
|
|
61
|
+
error = False
|
|
62
|
+
file_counter = 0
|
|
63
|
+
except FileNotFoundError as e:
|
|
64
|
+
file_counter += 1
|
|
65
|
+
if file_counter == 1:
|
|
66
|
+
logger.warning("%s - %s", e.strerror, e.filename)
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
def validate_and_return_torrent_file(file: str) -> pathlib.Path:
    """Resolve *file* to the directory that contains it, waiting briefly if
    it does not exist yet (e.g. while qBittorrent is still moving it).

    If *file* is an existing file, its absolute parent directory is returned;
    otherwise the path itself. Each wait loop polls up to 10 times with a
    0.1s sleep, so the worst case blocks for roughly a second per loop.
    May return a path that still does not exist if all attempts time out.
    """
    path = pathlib.Path(file)
    if path.is_file():
        # Callers want the containing folder, not the file itself.
        path = path.parent.absolute()
    count = 9
    while not path.exists():
        logger.debug(
            "Attempt %s/10: File does not yet exist! (Possibly being moved?) | "
            "%s | Sleeping for 0.1s",
            10 - count,
            path,
        )
        time.sleep(0.1)
        if count == 0:
            break
        count -= 1
    else:
        # Loop ended because the path exists; flag success for the check below.
        count = 0
    # str(path) == "." means we resolved to a bare relative cwd reference;
    # retry the whole resolution until we get a concrete path or run out of
    # attempts (count carries over from the loop above).
    while str(path) == ".":
        path = pathlib.Path(file)
        if path.is_file():
            path = path.parent.absolute()
        while not path.exists():
            logger.debug(
                "Attempt %s/10: File does not yet exist! (Possibly being moved?) | "
                "%s | Sleeping for 0.1s",
                10 - count,
                path,
            )
            time.sleep(0.1)
            if count == 0:
                break
            count -= 1
        else:
            count = 0
        if count == 0:
            break
        count -= 1
    return path
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
def has_internet(client: qbittorrentapi.Client):
    """Best-effort check that the host has internet connectivity.

    Asks qBittorrent for its own connection status first (cheap, avoids
    pinging); only falls back to a single ping of a random configured URL
    when that is unavailable or reports "disconnected". Returns a bool.
    """
    from qBitrr.config import PING_URLS

    # Prefer qBit's connection status to avoid frequent pings
    try:
        connection_state = client.transfer_info().get("connection_status")
    except Exception as exc:
        logger.debug("transfer_info unavailable: %s", exc)
    else:
        if connection_state and connection_state != "disconnected":
            return True
    # Fallback to a single ping
    target = random.choice(PING_URLS)
    try:
        reachable = is_connected(target)
    except Exception as exc:
        logger.debug("Ping to %s failed: %s", target, exc)
        return False
    if reachable:
        logger.debug("Successfully connected to %s", target)
        return True
    return False
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
def _basic_ping(hostname):
    """Fallback reachability probe that needs no elevated privileges.

    Resolves *hostname* via DNS and then opens (and immediately closes) a
    TCP connection to port 80. Successful results are cached in the
    module-level TTL ``CACHE`` to limit repeat probes. Returns a bool.
    """
    resolved = "N/A"
    try:
        # A recent successful probe short-circuits the whole check,
        # reducing calls and the likelihood of rate limits.
        if hostname in CACHE:
            return CACHE[hostname]
        # A DNS answer tells us a resolver is listening...
        resolved = socket.gethostbyname(hostname)
        # ...and a TCP connect (5s timeout) tells us the host is reachable.
        sock = socket.create_connection((resolved, 80), 5)
        sock.close()
        CACHE[hostname] = True
        return True
    except Exception as exc:
        logger.debug("Error when connecting to host: %s %s %s", hostname, resolved, exc)
        return False
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def is_connected(hostname):
    """Return True if *hostname* answers an ICMP ping (or, failing that,
    a basic TCP probe via :func:`_basic_ping`).

    NOTE(review): on a ``PingError`` this falls off the end and implicitly
    returns ``None`` (falsy) rather than ``False`` — confirm callers only
    rely on truthiness.
    """
    try:
        # If this hostname was probed successfully within the cache TTL,
        # skip the ping entirely — reduces calls and the likelihood of
        # rate limits.
        if hostname in CACHE:
            return CACHE[hostname]
        ping3.ping(hostname, timeout=5)
        CACHE[hostname] = True
        return True
    except ping3.errors.PingError as e:  # All ping3 errors are subclasses of `PingError`.
        logger.debug("Error when connecting to host: %s %s", hostname, e)
    except (
        Exception
    ):  # ping3 is more robust but may require root access; without it, fall back to the basic TCP probe.
        return _basic_ping(hostname)
|
|
169
|
+
|
|
170
|
+
|
|
171
|
+
def parse_size(size):
    """Convert a size string such as ``"512"``, ``"1.5g"`` or ``"100M"``
    to a number of bytes (float), using binary multipliers from ``UNITS``.

    Raises:
        ValueError: If *size* is not a number with an optional k/m/g/t
            suffix (case-insensitive).
    """
    match = re.match(r"^([0-9]+(?:\.[0-9]+)?)([kmgt]?)$", size, re.IGNORECASE)
    if match is None:
        raise ValueError("Unsupported value for leave_free_space")
    magnitude, suffix = match.groups()
    result = float(magnitude)
    return result * UNITS[suffix.lower()] if suffix else result
|
|
180
|
+
|
|
181
|
+
|
|
182
|
+
def format_bytes(bytes_value: int | float) -> str:
|
|
183
|
+
"""Format bytes into human-readable format (e.g., '1.5 GB', '256 MB').
|
|
184
|
+
|
|
185
|
+
Args:
|
|
186
|
+
bytes_value: Number of bytes to format
|
|
187
|
+
|
|
188
|
+
Returns:
|
|
189
|
+
Human-readable string representation of the byte value
|
|
190
|
+
"""
|
|
191
|
+
if bytes_value < 0:
|
|
192
|
+
return f"-{format_bytes(-bytes_value)}"
|
|
193
|
+
|
|
194
|
+
if bytes_value == 0:
|
|
195
|
+
return "0 B"
|
|
196
|
+
|
|
197
|
+
units = [("B", 1), ("KB", 1024), ("MB", 1048576), ("GB", 1073741824), ("TB", 1099511627776)]
|
|
198
|
+
|
|
199
|
+
for unit_name, unit_value in reversed(units):
|
|
200
|
+
if bytes_value >= unit_value:
|
|
201
|
+
value = bytes_value / unit_value
|
|
202
|
+
# Show 2 decimal places for values < 10, 1 decimal place for values >= 10
|
|
203
|
+
if value < 10:
|
|
204
|
+
return f"{value:.2f} {unit_name}"
|
|
205
|
+
else:
|
|
206
|
+
return f"{value:.1f} {unit_name}"
|
|
207
|
+
|
|
208
|
+
return f"{bytes_value} B"
|
|
209
|
+
|
|
210
|
+
|
|
211
|
+
class ExpiringSet:
    """Set-like container whose members silently expire after a fixed age.

    Each value is stored with its insertion timestamp; read operations
    (``in``, ``len``, iteration, indexing, ``repr``, equality) purge
    entries older than ``max_age_seconds`` before answering.

    Not thread-safe; callers must synchronize externally if shared.
    """

    def __init__(self, *args: list, **kwargs):
        """Create the set.

        Args:
            *args: Initial members to add.
            **kwargs: Must include ``max_age_seconds`` (positive number).

        Raises:
            ValueError: If ``max_age_seconds`` is missing or not positive.
        """
        max_age_seconds = kwargs.get("max_age_seconds", 0)
        # Explicit check rather than `assert`: asserts are stripped under -O.
        if max_age_seconds <= 0:
            raise ValueError("max_age_seconds must be a positive number")
        self.age = max_age_seconds
        # Maps value -> insertion time (seconds since the epoch).
        self.container = {}
        for arg in args:
            self.add(arg)

    def __repr__(self):
        self.__update__()
        return f"{self.__class__.__name__}({', '.join(map(str, self.container.keys()))})"

    def extend(self, args):
        """Add several items at once."""
        for arg in args:
            self.add(arg)

    def add(self, value):
        """Add *value*, resetting its expiry clock if already present."""
        self.container[value] = time.time()

    def remove(self, item):
        """Remove *item*; raises ``KeyError`` if absent."""
        del self.container[item]

    def contains(self, value):
        """Return True if *value* is present and not expired."""
        if value not in self.container:
            return False
        if time.time() - self.container[value] > self.age:
            # Lazily evict the stale entry on lookup.
            del self.container[value]
            return False
        return True

    __contains__ = contains

    def __getitem__(self, index):
        self.__update__()
        return list(self.container.keys())[index]

    def __iter__(self):
        self.__update__()
        return iter(self.container.copy())

    def __len__(self):
        self.__update__()
        return len(self.container)

    def __copy__(self):
        self.__update__()
        temp = ExpiringSet(max_age_seconds=self.age)
        temp.container = self.container.copy()
        return temp

    def __update__(self):
        """Purge every entry older than ``self.age``."""
        # Single clock read: one consistent cutoff for the whole purge.
        now = time.time()
        for key, stamp in self.container.copy().items():
            if now - stamp > self.age:
                del self.container[key]

    def __eq__(self, other):
        # Only other ExpiringSets compare equal; ages are not compared,
        # just the surviving keys.
        if not isinstance(other, ExpiringSet):
            return False
        self.__update__()
        other.__update__()
        return set(self.container.keys()) == set(other.container.keys())
|
qBitrr/versioning.py
ADDED
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Any
|
|
4
|
+
|
|
5
|
+
import requests
|
|
6
|
+
from packaging import version as version_parser
|
|
7
|
+
|
|
8
|
+
from qBitrr.bundled_data import patched_version
|
|
9
|
+
|
|
10
|
+
# GitHub "owner/repo" slug queried when callers don't specify a repository.
DEFAULT_REPOSITORY = "Feramance/qBitrr"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def normalize_version(raw: str | None) -> str | None:
|
|
14
|
+
if not raw:
|
|
15
|
+
return None
|
|
16
|
+
cleaned = raw.strip()
|
|
17
|
+
if cleaned.startswith(("v", "V")):
|
|
18
|
+
cleaned = cleaned[1:]
|
|
19
|
+
if "-" in cleaned:
|
|
20
|
+
cleaned = cleaned.split("-", 1)[0]
|
|
21
|
+
return cleaned or None
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def is_newer_version(candidate: str | None, current: str | None = None) -> bool:
    """Return True when *candidate* denotes a strictly newer version than
    *current* (defaulting to the bundled ``patched_version``).

    Falls back to a plain inequality check when either side cannot be
    parsed as a version.
    """
    if not candidate:
        return False
    new = normalize_version(candidate)
    old = normalize_version(current or patched_version)
    if not old:
        # No usable current version: treat any candidate as newer.
        return True
    if not new:
        return False
    try:
        return version_parser.parse(new) > version_parser.parse(old)
    except Exception:
        # Unparseable tag(s): any difference counts as "newer".
        return new != old
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def fetch_latest_release(repo: str = DEFAULT_REPOSITORY, *, timeout: int = 10) -> dict[str, Any]:
    """Fetch latest non-draft, non-prerelease from GitHub.

    Note: The /releases/latest endpoint excludes drafts by default, but we
    explicitly check to be defensive and provide clear error messages.

    Returns a dict with ``raw_tag``, ``normalized``, ``changelog``,
    ``changelog_url``, ``update_available`` and ``error`` keys; never raises.
    """
    releases_url = f"https://github.com/{repo}/releases"

    def _failure(error: str) -> dict[str, Any]:
        # Uniform payload for every error path.
        return {
            "raw_tag": None,
            "normalized": None,
            "changelog": "",
            "changelog_url": releases_url,
            "update_available": False,
            "error": error,
        }

    api_url = f"https://api.github.com/repos/{repo}/releases/latest"
    try:
        response = requests.get(
            api_url, headers={"Accept": "application/vnd.github+json"}, timeout=timeout
        )
        response.raise_for_status()
        payload = response.json()
    except Exception as exc:
        # Keep error strings short enough for UI display.
        message = str(exc)
        if len(message) > 200:
            message = f"{message[:197]}..."
        return _failure(message)

    # Validate release is not draft/prerelease
    if payload.get("draft", False):
        return _failure("Latest release is a draft (not yet published)")
    # Could make this configurable via settings in the future
    if payload.get("prerelease", False):
        return _failure("Latest release is a prerelease (beta/rc)")

    raw_tag = (payload.get("tag_name") or payload.get("name") or "").strip()
    normalized = normalize_version(raw_tag)
    return {
        "raw_tag": raw_tag or None,
        "normalized": normalized,
        "changelog": payload.get("body") or "",
        "changelog_url": payload.get("html_url") or releases_url,
        "update_available": is_newer_version(normalized),
        "error": None,
    }
}
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
def fetch_release_by_tag(
    tag: str, repo: str = DEFAULT_REPOSITORY, *, timeout: int = 10
) -> dict[str, Any]:
    """Fetch a specific release by tag name.

    Returns a dict with ``changelog``, ``changelog_url`` and ``error``
    keys; never raises.
    """
    # Ensure tag starts with 'v'
    if not tag.startswith(("v", "V")):
        tag = f"v{tag}"

    fallback_url = f"https://github.com/{repo}/releases/tag/{tag}"
    try:
        response = requests.get(
            f"https://api.github.com/repos/{repo}/releases/tags/{tag}",
            headers={"Accept": "application/vnd.github+json"},
            timeout=timeout,
        )
        response.raise_for_status()
        payload = response.json()
    except Exception as exc:
        # Keep error strings short enough for UI display.
        message = str(exc)
        if len(message) > 200:
            message = f"{message[:197]}..."
        return {
            "changelog": "",
            "changelog_url": fallback_url,
            "error": message,
        }

    return {
        "changelog": payload.get("body") or "",
        "changelog_url": payload.get("html_url") or fallback_url,
        "error": None,
    }
|