lfss 0.11.1__tar.gz → 0.11.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lfss-0.11.1 → lfss-0.11.2}/PKG-INFO +2 -1
- {lfss-0.11.1 → lfss-0.11.2}/docs/changelog.md +27 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/login.js +0 -1
- {lfss-0.11.1 → lfss-0.11.2}/frontend/popup.js +18 -3
- {lfss-0.11.1 → lfss-0.11.2}/frontend/scripts.js +8 -5
- {lfss-0.11.1 → lfss-0.11.2}/frontend/utils.js +4 -5
- lfss-0.11.2/lfss/cli/log.py +77 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/vacuum.py +10 -3
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/config.py +3 -2
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/database.py +33 -34
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/log.py +73 -4
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/utils.py +1 -2
- {lfss-0.11.1 → lfss-0.11.2}/lfss/svc/app_dav.py +7 -7
- {lfss-0.11.1 → lfss-0.11.2}/lfss/svc/app_native.py +7 -7
- {lfss-0.11.1 → lfss-0.11.2}/lfss/svc/common_impl.py +4 -4
- {lfss-0.11.1 → lfss-0.11.2}/pyproject.toml +3 -1
- {lfss-0.11.1 → lfss-0.11.2}/Readme.md +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/docs/Enviroment_variables.md +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/docs/Known_issues.md +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/docs/Permission.md +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/docs/Webdav.md +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/api.js +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/index.html +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/info.css +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/info.js +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/login.css +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/popup.css +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/state.js +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/styles.css +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/thumb.css +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/frontend/thumb.js +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/api/__init__.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/api/connector.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/__init__.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/balance.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/cli.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/panel.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/serve.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/cli/user.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/__init__.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/bounded_pool.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/connection_pool.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/datatype.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/error.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/eng/thumb.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/sql/init.sql +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/sql/pragma.sql +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/svc/app.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/svc/app_base.py +0 -0
- {lfss-0.11.1 → lfss-0.11.2}/lfss/svc/request_log.py +0 -0
{lfss-0.11.1 → lfss-0.11.2}/PKG-INFO

```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lfss
-Version: 0.11.1
+Version: 0.11.2
 Summary: Lightweight file storage service
 Home-page: https://github.com/MenxLi/lfss
 Author: Li, Mengxun
@@ -17,6 +17,7 @@ Requires-Dist: mimesniff (==1.*)
 Requires-Dist: pillow
 Requires-Dist: python-multipart
 Requires-Dist: requests (==2.*)
+Requires-Dist: rich
 Requires-Dist: stream-zip (==0.*)
 Requires-Dist: uvicorn (==0.*)
 Project-URL: Repository, https://github.com/MenxLi/lfss
```
{lfss-0.11.1 → lfss-0.11.2}/docs/changelog.md

```diff
@@ -1,3 +1,30 @@
+## 0.11
+
+### 0.11.2
+- Improve frontend directory upload feedback.
+- Set default large file threashold to 1M.
+- Increase default concurrent threads.
+- Use sqlite for logging.
+- Add vacuum logs.
+- Refactor: use dir for directory path.
+
+### 0.11.1
+- Rename api `get_meta` function.
+- Frontend support upload directory.
+- Fix admin put to non-exists user path.
+
+### 0.11.0
+- Copy file as hard link.
+- Add vacuum thumb and all.
+- Thumb database use file_id as index.
+- improve username and url check with regular expression.
+
+## 0.10
+
+### 0.10.0
+- Inherit permission from path owner for `unset` permission files.
+- Add timeout and verify options for client api.
+- Bundle small files in memory.
 
 ## 0.9
 
```
{lfss-0.11.1 → lfss-0.11.2}/frontend/popup.js

```diff
@@ -109,7 +109,14 @@ export function showPopup(content = '', {
 } = {}){
     const popup = document.createElement("div");
     popup.classList.add("popup-window");
-
+    /**
+     * @param {string} c
+     * @returns {void}
+     */
+    function setPopupContent(c){
+        popup.innerHTML = showTime? `<span>[${new Date().toLocaleTimeString()}]</span> ${c}` : c;
+    }
+    setPopupContent(content);
     popup.style.width = width;
     const popupHeight = '1rem';
     popup.style.height = popupHeight;
@@ -132,11 +139,19 @@ export function showPopup(content = '', {
     if (level === "success") popup.style.backgroundColor = "darkgreen";
     document.body.appendChild(popup);
     shownPopups.push(popup);
-
+
+    function closePopup(){
         if (popup.parentNode) document.body.removeChild(popup);
         shownPopups.splice(shownPopups.indexOf(popup), 1);
         for (let i = 0; i < shownPopups.length; i++) {
             shownPopups[i].style.top = `${i * (parseInt(popupHeight) + 2*parseInt(paddingHeight))*1.2 + 0.5}rem`;
         }
-    }
+    }
+
+    window.setTimeout(closePopup, timeout);
+    return {
+        elem: popup,
+        setContent: setPopupContent,
+        close: closePopup
+    }
 }
```
{lfss-0.11.1 → lfss-0.11.2}/frontend/scripts.js

```diff
@@ -183,26 +183,29 @@ Are you sure you want to proceed?\
 `)){ return; }
 
     let counter = 0;
+    let totalCount = 0;
+    const uploadPopup = showPopup('Uploading multiple files...', {level: 'info', timeout: 999999});
     async function uploadFileFn(path, file){
-        const this_count = counter;
         try{
             await uploadFile(conn, path, file, {conflict: 'overwrite'});
         }
         catch (err){
             showPopup('Failed to upload file [' + file.name + ']: ' + err, {level: 'error', timeout: 5000});
         }
-        console.log(`[${
+        console.log(`[${counter}/${totalCount}] Uploaded file: ${path}`);
+        uploadPopup.setContent(`Uploading multiple files... [${counter}/${totalCount}]`);
     }
 
-    const promises = await forEachFile(e, async (relPath,
+    const promises = await forEachFile(e, async (relPath, filePromiseFn) => {
         counter += 1;
-        const file = await
+        const file = await filePromiseFn();
         await uploadFileFn(dstPath + relPath, file);
     });
+    totalCount = promises.length;
 
-    showPopup('Uploading multiple files...', {level: 'info', timeout: 3000});
     Promise.all(promises).then(
         () => {
+            window.setTimeout(uploadPopup.close, 3000);
            showPopup('Upload success.', {level: 'success', timeout: 3000});
            refreshFileList();
        },
```
{lfss-0.11.1 → lfss-0.11.2}/frontend/utils.js

```diff
@@ -101,7 +101,7 @@ export function asHtmlText(text){
  * using the provided callback with a concurrency limit.
  *
  * @param {Event} e The drop event.
- * @param {(relPath: string, file: Promise<File>) => Promise<void>} callback A function
+ * @param {(relPath: string, file: () => Promise<File>) => Promise<void>} callback A function
  * that receives the relative path and a promise for the File.
  * @param {number} [maxConcurrent=5] Maximum number of concurrent callback executions.
  * @returns {Promise<Promise<void>[]>} A promise resolving to an array of callback promises.
@@ -146,11 +146,10 @@ export async function forEachFile(e, callback, maxConcurrent = 16) {
     async function traverse(entry, path) {
         if (entry.isFile) {
             // Wrap file retrieval in a promise.
-            const
-            entry.file(resolve, reject);
-            });
+            const filePromiseFn = () =>
+                new Promise((resolve, reject) => entry.file(resolve, reject));
             // Use the concurrency barrier for the callback invocation.
-            results.push(runWithLimit(() => callback(path + entry.name,
+            results.push(runWithLimit(() => callback(path + entry.name, filePromiseFn)));
         } else if (entry.isDirectory) {
             const reader = entry.createReader();
 
```
lfss-0.11.2/lfss/cli/log.py (new file)

```diff
@@ -0,0 +1,77 @@
+from typing import Optional
+import argparse
+import rich.console
+import logging
+import sqlite3
+from lfss.eng.log import eval_logline
+
+console = rich.console.Console()
+def levelstr2int(levelstr: str) -> int:
+    import sys
+    if sys.version_info < (3, 11):
+        return logging.getLevelName(levelstr.upper())
+    else:
+        return logging.getLevelNamesMapping()[levelstr.upper()]
+
+def view(
+    db_file: str,
+    level: Optional[str] = None,
+    offset: int = 0,
+    limit: int = 1000
+    ):
+    conn = sqlite3.connect(db_file)
+    cursor = conn.cursor()
+    if level is None:
+        cursor.execute("SELECT * FROM log ORDER BY created DESC LIMIT ? OFFSET ?", (limit, offset))
+    else:
+        level_int = levelstr2int(level)
+        cursor.execute("SELECT * FROM log WHERE level >= ? ORDER BY created DESC LIMIT ? OFFSET ?", (level_int, limit, offset))
+    levelname_color = {
+        'DEBUG': 'blue',
+        'INFO': 'green',
+        'WARNING': 'yellow',
+        'ERROR': 'red',
+        'CRITICAL': 'bold red',
+        'FATAL': 'bold red'
+    }
+    for row in cursor.fetchall():
+        log = eval_logline(row)
+        console.print(f"{log.created} [{levelname_color[log.levelname]}][{log.levelname}] [default]{log.message}")
+    conn.close()
+
+def trim(db_file: str, keep: int = 1000, level: Optional[str] = None):
+    conn = sqlite3.connect(db_file)
+    cursor = conn.cursor()
+    if level is None:
+        cursor.execute("DELETE FROM log WHERE id NOT IN (SELECT id FROM log ORDER BY created DESC LIMIT ?)", (keep,))
+    else:
+        cursor.execute("DELETE FROM log WHERE levelname = ? and id NOT IN (SELECT id FROM log WHERE levelname = ? ORDER BY created DESC LIMIT ?)", (level.upper(), level.upper(), keep))
+    conn.commit()
+    conn.execute("VACUUM")
+    conn.close()
+
+def main():
+    parser = argparse.ArgumentParser(description="Log operations utility")
+    subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help')
+
+    parser_show = subparsers.add_parser('view', help='Show logs')
+    parser_show.add_argument('db_file', type=str, help='Database file path')
+    parser_show.add_argument('-l', '--level', type=str, required=False, help='Log level')
+    parser_show.add_argument('--offset', type=int, default=0, help='Starting offset')
+    parser_show.add_argument('--limit', type=int, default=1000, help='Maximum number of entries to display')
+    parser_show.set_defaults(func=view)
+
+    parser_trim = subparsers.add_parser('trim', help='Trim logs')
+    parser_trim.add_argument('db_file', type=str, help='Database file path')
+    parser_trim.add_argument('-l', '--level', type=str, required=False, help='Log level')
+    parser_trim.add_argument('--keep', type=int, default=1000, help='Number of entries to keep')
+    parser_trim.set_defaults(func=trim)
+
+    args = parser.parse_args()
+    if hasattr(args, 'func'):
+        kwargs = vars(args)
+        func = kwargs.pop('func')
+        func(**kwargs)
+
+if __name__ == '__main__':
+    main()
```
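The new module is also wired up as a `lfss-log` console script in pyproject.toml (see below). A minimal sketch of calling the same `view`/`trim` helpers from Python; the database path is hypothetical and depends on where `LOG_DIR` resolves on a given install:

```python
# Sketch only: the path below is an assumed location; view()/trim() signatures are from the diff above.
from lfss.cli.log import view, trim

db_file = "/var/lfss-data/logs/default.log.db"   # hypothetical LOG_DIR entry
view(db_file, level="warning", limit=50)         # print the 50 most recent WARNING-and-above rows
trim(db_file, keep=10_000)                       # keep the newest 10k rows, then VACUUM the db
```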
{lfss-0.11.1 → lfss-0.11.2}/lfss/cli/vacuum.py

```diff
@@ -2,7 +2,7 @@
 Vacuum the database and external storage to ensure that the storage is consistent and minimal.
 """
 
-from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB
+from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB, LOG_DIR
 import argparse, time, itertools
 from functools import wraps
 from asyncio import Semaphore
@@ -14,6 +14,7 @@ from lfss.eng.database import transaction, unique_cursor
 from lfss.svc.request_log import RequestDB
 from lfss.eng.utils import now_stamp
 from lfss.eng.connection_pool import global_entrance
+from lfss.cli.log import trim
 
 sem: Semaphore
 
@@ -33,7 +34,7 @@ def barriered(func):
     return wrapper
 
 @global_entrance()
-async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, vacuum_all: bool = False):
+async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, logs: bool = False, vacuum_all: bool = False):
 
     # check if any file in the Large Blob directory is not in the database
     # the reverse operation is not necessary, because by design, the database should be the source of truth...
@@ -73,6 +74,11 @@ async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = F
         async with unique_cursor(is_write=True) as c:
             await c.execute("VACUUM blobs")
 
+    if logs or vacuum_all:
+        with indicator("VACUUM-logs"):
+            for log_file in LOG_DIR.glob("*.log.db"):
+                trim(str(log_file), keep=10_000)
+
     if thumbs or vacuum_all:
         try:
             async with transaction() as c:
@@ -123,9 +129,10 @@ def main():
     parser.add_argument("-d", "--data", action="store_true", help="Vacuum blobs")
     parser.add_argument("-t", "--thumb", action="store_true", help="Vacuum thumbnails")
     parser.add_argument("-r", "--requests", action="store_true", help="Vacuum request logs to only keep at most recent 1M rows in 7 days")
+    parser.add_argument("-l", "--logs", action="store_true", help="Trim log to keep at most recent 10k rows for each category")
     args = parser.parse_args()
     sem = Semaphore(args.jobs)
-    asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, vacuum_all=args.all))
+    asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, logs = args.logs, vacuum_all=args.all))
 
     if args.requests or args.all:
         asyncio.run(vacuum_requests())
```
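The new `-l`/`--logs` flag feeds `vacuum_main(logs=...)`, whose log branch simply trims every per-logger SQLite database under `LOG_DIR`. The same operation as a standalone sketch (assumed to be run while the server is not actively writing logs):

```python
# Equivalent of what `lfss-vacuum --logs` does for the log databases (sketch).
from lfss.eng.config import LOG_DIR
from lfss.cli.log import trim

for log_file in LOG_DIR.glob("*.log.db"):
    trim(str(log_file), keep=10_000)   # keep the 10k most recent rows per log database
```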
{lfss-0.11.1 → lfss-0.11.2}/lfss/eng/config.py

```diff
@@ -11,14 +11,15 @@ if not DATA_HOME.exists():
 DATA_HOME = DATA_HOME.resolve().absolute()
 LARGE_BLOB_DIR = DATA_HOME / 'large_blobs'
 LARGE_BLOB_DIR.mkdir(exist_ok=True)
+LOG_DIR = DATA_HOME / 'logs'
 
 # https://sqlite.org/fasterthanfs.html
 __env_large_file = os.environ.get('LFSS_LARGE_FILE', None)
 if __env_large_file is not None:
     LARGE_FILE_BYTES = parse_storage_size(__env_large_file)
 else:
-    LARGE_FILE_BYTES =
-MAX_MEM_FILE_BYTES = 128 * 1024 * 1024
+    LARGE_FILE_BYTES = 1 * 1024 * 1024 # 1MB
+MAX_MEM_FILE_BYTES = 128 * 1024 * 1024 # 128MB
 CHUNK_SIZE = 1024 * 1024 # 1MB chunks for streaming (on large files)
 DEBUG_MODE = os.environ.get('LFSS_DEBUG', '0') == '1'
 
```
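Per the changelog, the default large-file threshold drops to 1 MB; `LFSS_LARGE_FILE` still overrides it via `parse_storage_size`. A small sketch of the override; the `"8m"` size string is only an assumed example of what the parser accepts:

```python
# Sketch: override the large-file threshold before lfss.eng.config is first imported.
import os
os.environ["LFSS_LARGE_FILE"] = "8m"      # assumed size string; parsed by parse_storage_size()

from lfss.eng import config
print(config.LARGE_FILE_BYTES)            # expected 8 * 1024 * 1024 if "8m" means 8 MiB
print(config.LOG_DIR)                     # new in 0.11.2: <DATA_HOME>/logs
```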
{lfss-0.11.1 → lfss-0.11.2}/lfss/eng/database.py

```diff
@@ -225,12 +225,12 @@ class FileConn(DBObjectBase):
             await self.cur.execute("SELECT username FROM user")
             res = await self.cur.fetchall()
             dirnames = [u[0] + '/' for u in res]
-            dirs = [await self.
+            dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
             return dirs
         else:
             # list specific users
             dirnames = [uname + '/' for uname in usernames]
-            dirs = [await self.
+            dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
             return dirs
 
     async def count_path_dirs(self, url: str):
@@ -278,11 +278,11 @@ class FileConn(DBObjectBase):
             if skim:
                 return DirectoryRecord(dir_url)
             else:
-                return await self.
+                return await self.get_dir_record(dir_url)
         dirs = [await get_dir(url + d) for d in dirs_str]
         return dirs
 
-    async def
+    async def count_dir_files(self, url: str, flat: bool = False):
         if not url.endswith('/'): url += '/'
         if url == '/': url = ''
         if flat:
@@ -293,7 +293,7 @@ class FileConn(DBObjectBase):
         assert res is not None, "Error: count_path_files"
         return res[0]
 
-    async def
+    async def list_dir_files(
         self, url: str,
         offset: int = 0, limit: int = 10_000,
         order_by: FileSortKey = '', order_desc: bool = False,
@@ -328,15 +328,15 @@ class FileConn(DBObjectBase):
         """
         MAX_ITEMS = 10_000
         dir_count = await self.count_path_dirs(url)
-        file_count = await self.
+        file_count = await self.count_dir_files(url, flat=False)
         if dir_count + file_count > MAX_ITEMS:
             raise TooManyItemsError("Too many items, please paginate")
         return PathContents(
             dirs = await self.list_path_dirs(url, skim=True, limit=MAX_ITEMS),
-            files = await self.
+            files = await self.list_dir_files(url, flat=False, limit=MAX_ITEMS)
         )
 
-    async def
+    async def get_dir_record(self, url: str) -> DirectoryRecord:
         """
         Get the full record of a directory, including size, create_time, update_time, access_time etc.
         """
@@ -411,7 +411,6 @@ class FileConn(DBObjectBase):
             await self._user_size_inc(owner_id, file_size)
         self.logger.info(f"File {url} created")
 
-    # not tested
     async def copy_file(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         old = await self.get_file_record(old_url)
         if old is None:
@@ -428,7 +427,7 @@ class FileConn(DBObjectBase):
             await self._user_size_inc(user_id, old.file_size)
         self.logger.info(f"Copied file {old_url} to {new_url}")
 
-    async def
+    async def copy_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         assert old_url.endswith('/'), "Old path must end with /"
         assert new_url.endswith('/'), "New path must end with /"
         if user_id is None:
@@ -461,7 +460,7 @@ class FileConn(DBObjectBase):
         await self.cur.execute("UPDATE fmeta SET url = ?, create_time = CURRENT_TIMESTAMP WHERE url = ?", (new_url, old_url))
         self.logger.info(f"Moved file {old_url} to {new_url}")
 
-    async def
+    async def move_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         assert old_url.endswith('/'), "Old path must end with /"
         assert new_url.endswith('/'), "New path must end with /"
         if user_id is None:
@@ -500,7 +499,7 @@ class FileConn(DBObjectBase):
         self.logger.info(f"Deleted {len(ret)} file records for user {owner_id}") # type: ignore
         return ret
 
-    async def
+    async def delete_records_by_prefix(self, path: str, under_owner_id: Optional[int] = None) -> list[FileRecord]:
         """Delete all records with url starting with path"""
         # update user size
         cursor = await self.cur.execute("SELECT DISTINCT owner_id FROM fmeta WHERE url LIKE ?", (path + '%', ))
@@ -689,7 +688,7 @@ async def delayed_log_access(url: str):
         ])
     ),
 )
-def validate_url(url: str,
+def validate_url(url: str, utype: Literal['file', 'dir'] = 'file'):
     """ Check if a path is valid. The input path is considered url safe """
     if len(url) > 1024:
         raise InvalidPathError(f"URL too long: {url}")
@@ -703,7 +702,7 @@ def validate_url(url: str, is_file = True):
             is_valid = False
             break
 
-    if
+    if utype == 'file': is_valid = is_valid and not url.endswith('/')
     else: is_valid = is_valid and url.endswith('/')
 
     if not is_valid:
@@ -885,9 +884,9 @@ class Database:
                 raise PermissionDeniedError(f"Permission denied: {op_user.username} cannot copy file to {new_url}")
             await fconn.copy_file(old_url, new_url, user_id=op_user.id if op_user is not None else None)
 
-    async def
-        validate_url(old_url,
-        validate_url(new_url,
+    async def move_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+        validate_url(old_url, 'dir')
+        validate_url(new_url, 'dir')
 
         if new_url.startswith('/'):
             new_url = new_url[1:]
@@ -906,12 +905,11 @@ class Database:
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            await fconn.
+            await fconn.move_dir(old_url, new_url, op_user.id)
 
-
-
-        validate_url(
-        validate_url(new_url, is_file=False)
+    async def copy_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+        validate_url(old_url, 'dir')
+        validate_url(new_url, 'dir')
 
         if new_url.startswith('/'):
             new_url = new_url[1:]
@@ -930,7 +928,7 @@ class Database:
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            await fconn.
+            await fconn.copy_dir(old_url, new_url, op_user.id)
 
     async def __batch_delete_file_blobs(self, fconn: FileConn, file_records: list[FileRecord], batch_size: int = 512):
         # https://github.com/langchain-ai/langchain/issues/10321
@@ -951,13 +949,13 @@ class Database:
             await del_internal()
             await del_external()
 
-    async def
-        validate_url(url,
+    async def delete_dir(self, url: str, op_user: Optional[UserRecord] = None) -> Optional[list[FileRecord]]:
+        validate_url(url, 'dir')
         from_owner_id = op_user.id if op_user is not None and not (op_user.is_admin or await check_path_permission(url, op_user) >= AccessLevel.WRITE) else None
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            records = await fconn.
+            records = await fconn.delete_records_by_prefix(url, from_owner_id)
             if not records:
                 return None
             await self.__batch_delete_file_blobs(fconn, records)
@@ -981,14 +979,15 @@ class Database:
 
             # make sure the user's directory is deleted,
             # may contain admin's files, but delete them all
-            await fconn.
+            await fconn.delete_records_by_prefix(user.username + '/')
 
-    async def
+    async def iter_dir(self, top_url: str, urls: Optional[list[str]]) -> AsyncIterable[tuple[FileRecord, bytes | AsyncIterable[bytes]]]:
+        validate_url(top_url, 'dir')
         async with unique_cursor() as cur:
            fconn = FileConn(cur)
            if urls is None:
-                fcount = await fconn.
-                urls = [r.url for r in (await fconn.
+                fcount = await fconn.count_dir_files(top_url, flat=True)
+                urls = [r.url for r in (await fconn.list_dir_files(top_url, flat=True, limit=fcount))]
 
         for url in urls:
             if not url.startswith(top_url):
@@ -1003,7 +1002,7 @@ class Database:
                 blob = await fconn.get_file_blob(f_id)
                 yield r, blob
 
-    async def
+    async def zip_dir_stream(self, top_url: str, op_user: Optional[UserRecord] = None) -> AsyncIterable[bytes]:
         from stat import S_IFREG
         from stream_zip import async_stream_zip, ZIP_64
         if top_url.startswith('/'):
@@ -1015,7 +1014,7 @@ class Database:
 
         # https://stream-zip.docs.trade.gov.uk/async-interface/
         async def data_iter():
-            async for (r, blob) in self.
+            async for (r, blob) in self.iter_dir(top_url, None):
                 rel_path = r.url[len(top_url):]
                 rel_path = decode_uri_compnents(rel_path)
                 b_iter: AsyncIterable[bytes]
@@ -1035,7 +1034,7 @@ class Database:
         return async_stream_zip(data_iter())
 
     @concurrent_wrap()
-    async def
+    async def zip_dir(self, top_url: str, op_user: Optional[UserRecord]) -> io.BytesIO:
         if top_url.startswith('/'):
             top_url = top_url[1:]
 
@@ -1045,7 +1044,7 @@ class Database:
 
         buffer = io.BytesIO()
         with zipfile.ZipFile(buffer, 'w') as zf:
-            async for (r, blob) in self.
+            async for (r, blob) in self.iter_dir(top_url, None):
                 rel_path = r.url[len(top_url):]
                 rel_path = decode_uri_compnents(rel_path)
                 if r.external:
```
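Most of this diff is the changelog's "use dir for directory path" rename: the directory-level helpers are now `count_dir_files`, `list_dir_files`, `get_dir_record`, `copy_dir`, `move_dir`, `delete_dir`, `iter_dir`, `zip_dir`/`zip_dir_stream`, and `validate_url` takes `utype='file' | 'dir'` instead of the old `is_file` flag. A minimal read-only sketch of the renamed FileConn calls, following the cursor pattern visible in the hunks (the wrapper function itself is illustrative, not part of lfss):

```python
# Sketch: query a directory with the 0.11.2 method names (read-only calls only).
from lfss.eng.database import FileConn, unique_cursor

async def describe_dir(url: str):
    async with unique_cursor() as cur:
        fconn = FileConn(cur)
        total = await fconn.count_dir_files(url, flat=True)           # recursive file count
        page = await fconn.list_dir_files(url, offset=0, limit=100)   # first page of entries
        record = await fconn.get_dir_record(url)                      # size / create / update / access times
        return total, page, record
```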
{lfss-0.11.1 → lfss-0.11.2}/lfss/eng/log.py

```diff
@@ -1,4 +1,5 @@
-from .config import
+from .config import LOG_DIR
+import time, sqlite3, dataclasses
 from typing import TypeVar, Callable, Literal, Optional
 from concurrent.futures import ThreadPoolExecutor
 from functools import wraps
@@ -57,15 +58,81 @@ class BaseLogger(logging.Logger):
     @thread_wrap
     def error(self, *args, **kwargs): super().error(*args, **kwargs)
 
-
+class SQLiteFileHandler(logging.FileHandler):
+    def __init__(self, filename, *args, **kwargs):
+        super().__init__(filename, *args, **kwargs)
+        self._db_file = filename
+        self._buffer: list[logging.LogRecord] = []
+        self._buffer_size = 100
+        self._flush_interval = 10
+        self._last_flush = time.time()
+        conn = sqlite3.connect(self._db_file, check_same_thread=False)
+        conn.execute('PRAGMA journal_mode=WAL')
+        conn.execute('''
+            CREATE TABLE IF NOT EXISTS log (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                created TIMESTAMP,
+                created_epoch FLOAT,
+                name TEXT,
+                levelname VARCHAR(16),
+                level INTEGER,
+                message TEXT
+            )
+        ''')
+        conn.commit()
+        conn.close()
+
+    def flush(self):
+        def format_time(self, record: logging.LogRecord):
+            """ Create a time stamp """
+            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
+        self.acquire()
+        try:
+            conn = sqlite3.connect(self._db_file, check_same_thread=False)
+            conn.executemany('''
+                INSERT INTO log (created, created_epoch, name, levelname, level, message)
+                VALUES (?, ?, ?, ?, ?, ?)
+            ''', [
+                (format_time(self, record), record.created, record.name, record.levelname, record.levelno, record.getMessage())
+                for record in self._buffer
+            ])
+            conn.commit()
+            conn.close()
+            self._buffer.clear()
+            self._last_flush = time.time()
+        finally:
+            self.release()
+
+    def emit(self, record: logging.LogRecord):
+        self._buffer.append(record)
+        if len(self._buffer) > self._buffer_size or time.time() - self._last_flush > self._flush_interval:
+            self.flush()
+
+    def close(self):
+        self.flush()
+        return super().close()
+
+def eval_logline(row: sqlite3.Row):
+    @dataclasses.dataclass
+    class DBLogRecord:
+        id: int
+        created: str
+        created_epoch: float
+        name: str
+        levelname: str
+        level: int
+        message: str
+    return DBLogRecord(*row)
+
+_fh_T = Literal['rotate', 'simple', 'daily', 'sqlite']
 
 __g_logger_dict: dict[str, BaseLogger] = {}
 def get_logger(
     name = 'default',
-    log_home =
+    log_home = LOG_DIR,
     level = 'DEBUG',
     term_level = 'INFO',
-    file_handler_type: _fh_T = '
+    file_handler_type: _fh_T = 'sqlite',
     global_instance = True
     )->BaseLogger:
     if global_instance and name in __g_logger_dict:
@@ -100,6 +167,8 @@ def get_logger(
         file_handler = handlers.RotatingFileHandler(
             log_file, maxBytes=1024*1024, backupCount=5
         )
+    elif file_handler_type == 'sqlite':
+        file_handler = SQLiteFileHandler(log_file if log_file.suffix == '.db' else log_file.with_suffix('.log.db'))
 
     file_handler.setFormatter(formatter_plain)
     logger.addHandler(file_handler)
```
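With `file_handler_type='sqlite'` as the new default, each logger writes buffered rows into a `*.log.db` file under `LOG_DIR`, and `eval_logline` maps a raw row back to a typed record. A sketch of writing and reading back; the exact filename is an assumption based on the `.with_suffix('.log.db')` logic above, and emitted records are buffered (flushed after 100 records or 10 seconds), so a fresh row may not be visible immediately:

```python
# Sketch: emit through the SQLite handler, then read the rows back.
import sqlite3
from lfss.eng.log import get_logger, eval_logline
from lfss.eng.config import LOG_DIR

logger = get_logger("demo", file_handler_type="sqlite")   # 'sqlite' is the default in 0.11.2
logger.info("hello from the sqlite handler")              # buffered; flushed by size/interval/close

conn = sqlite3.connect(LOG_DIR / "demo.log.db")           # assumed filename for logger "demo"
for row in conn.execute("SELECT * FROM log ORDER BY created_epoch DESC LIMIT 5"):
    rec = eval_logline(row)
    print(rec.created, rec.levelname, rec.message)
conn.close()
```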
{lfss-0.11.1 → lfss-0.11.2}/lfss/eng/utils.py

```diff
@@ -11,7 +11,6 @@ from concurrent.futures import ThreadPoolExecutor
 from typing import TypeVar, Callable, Awaitable
 from functools import wraps, partial
 from uuid import uuid4
-import os
 
 async def copy_file(source: str|pathlib.Path, destination: str|pathlib.Path):
     async with aiofiles.open(source, mode='rb') as src:
@@ -160,7 +159,7 @@ _g_executor = None
 def get_global_executor():
     global _g_executor
     if _g_executor is None:
-        _g_executor = ThreadPoolExecutor(
+        _g_executor = ThreadPoolExecutor()
     return _g_executor
 def async_wrap(executor=None):
     if executor is None:
```
{lfss-0.11.1 → lfss-0.11.2}/lfss/svc/app_dav.py

```diff
@@ -57,9 +57,9 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
         if len(dir_path_sp) > 2:
             async with unique_cursor() as c:
                 fconn = FileConn(c)
-                if await fconn.
+                if await fconn.count_dir_files(path, flat=True) == 0:
                     return None, lfss_path, None
-                return "dir", lfss_path, await fconn.
+                return "dir", lfss_path, await fconn.get_dir_record(path)
         else:
             # test if its a user's root directory
             assert len(dir_path_sp) == 2
@@ -85,8 +85,8 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
     async with unique_cursor() as c:
         lfss_path = path + "/"
         fconn = FileConn(c)
-        if await fconn.
-            return "dir", lfss_path, await fconn.
+        if await fconn.count_dir_files(lfss_path) > 0:
+            return "dir", lfss_path, await fconn.get_dir_record(lfss_path)
 
     return None, path, None
 
@@ -235,7 +235,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
         # query root directory content
         async def user_path_record(user_name: str, cur) -> DirectoryRecord:
             try:
-                return await FileConn(cur).
+                return await FileConn(cur).get_dir_record(user_name + "/")
             except PathNotFoundError:
                 return DirectoryRecord(user_name + "/", size=0, n_files=0, create_time="1970-01-01 00:00:00", update_time="1970-01-01 00:00:00", access_time="1970-01-01 00:00:00")
 
@@ -253,7 +253,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
     elif path_type == "dir":
         # query directory content
         async with unique_cursor() as c:
-            flist = await FileConn(c).
+            flist = await FileConn(c).list_dir_files(lfss_path, flat = True if depth == "infinity" else False)
             for frecord in flist:
                 if frecord.url.endswith(f"/{MKDIR_PLACEHOLDER}"): continue
                 file_el = await create_file_xml_element(frecord)
@@ -315,7 +315,7 @@ async def dav_move(request: Request, path: str, user: UserRecord = Depends(regis
         assert ptype == "dir", "Directory path should end with /"
         assert lfss_path.endswith("/"), "Directory path should end with /"
         if not dlfss_path.endswith("/"): dlfss_path += "/" # the header destination may not end with /
-        await db.
+        await db.move_dir(lfss_path, dlfss_path, user)
         return Response(status_code=201)
 
 @router_dav.api_route("/{path:path}", methods=["COPY"])
```
{lfss-0.11.1 → lfss-0.11.2}/lfss/svc/app_native.py

```diff
@@ -90,13 +90,13 @@ async def bundle_files(path: str, user: UserRecord = Depends(registered_user)):
         raise HTTPException(status_code=400, detail="Cannot bundle root")
 
     async with unique_cursor() as cur:
-        dir_record = await FileConn(cur).
+        dir_record = await FileConn(cur).get_dir_record(path)
 
     pathname = f"{path.split('/')[-2]}"
 
     if dir_record.size < MAX_MEM_FILE_BYTES:
         logger.debug(f"Bundle {path} in memory")
-        dir_bytes = (await db.
+        dir_bytes = (await db.zip_dir(path, op_user=user)).getvalue()
         return Response(
             content = dir_bytes,
             media_type = "application/zip",
@@ -109,7 +109,7 @@ async def bundle_files(path: str, user: UserRecord = Depends(registered_user)):
     else:
         logger.debug(f"Bundle {path} in stream")
         return StreamingResponse(
-            content = await db.
+            content = await db.zip_dir_stream(path, op_user=user),
             media_type = "application/zip",
             headers = {
                 f"Content-Disposition": f"attachment; filename=bundle-{pathname}.zip",
@@ -134,7 +134,7 @@ async def get_file_meta(path: str, user: UserRecord = Depends(registered_user)):
         else:
             if await check_path_permission(path, user, cursor=cur) < AccessLevel.READ:
                 raise HTTPException(status_code=403, detail="Permission denied")
-            record = await fconn.
+            record = await fconn.get_dir_record(path)
     return record
 
 @router_api.post("/meta")
@@ -171,7 +171,7 @@ async def update_file_meta(
             new_path = ensure_uri_compnents(new_path)
             logger.info(f"Update path of {path} to {new_path}")
             # will raise duplicate path error if same name path exists in the new path
-            await db.
+            await db.move_dir(path, new_path, user)
 
     return Response(status_code=200, content="OK")
 
@@ -194,7 +194,7 @@ async def count_files(path: str, flat: bool = False, user: UserRecord = Depends(
     path = ensure_uri_compnents(path)
     async with unique_cursor() as conn:
         fconn = FileConn(conn)
-        return { "count": await fconn.
+        return { "count": await fconn.count_dir_files(url = path, flat = flat) }
 @router_api.get("/list-files")
 async def list_files(
     path: str, offset: int = 0, limit: int = 1000,
@@ -205,7 +205,7 @@ async def list_files(
     path = ensure_uri_compnents(path)
     async with unique_cursor() as conn:
         fconn = FileConn(conn)
-        return await fconn.
+        return await fconn.list_dir_files(
             url = path, offset = offset, limit = limit,
             order_by=order_by, order_desc=order_desc,
             flat=flat
```
{lfss-0.11.1 → lfss-0.11.2}/lfss/svc/common_impl.py

```diff
@@ -180,7 +180,7 @@ async def _get_dir_impl(
         else:
             raise HTTPException(status_code=404, detail="User not found")
     else:
-        if await FileConn(cur).
+        if await FileConn(cur).count_dir_files(path, flat=True) > 0:
             return Response(status_code=200)
         else:
             raise HTTPException(status_code=404, detail="Path not found")
@@ -295,7 +295,7 @@ async def delete_impl(path: str, user: UserRecord):
     logger.info(f"DELETE {path}, user: {user.username}")
 
     if path.endswith("/"):
-        res = await db.
+        res = await db.delete_dir(path, user)
     else:
         res = await db.delete_file(path, user)
 
@@ -327,8 +327,8 @@ async def copy_impl(
     else:
         async with unique_cursor() as cur:
             fconn = FileConn(cur)
-            dst_fcount = await fconn.
+            dst_fcount = await fconn.count_dir_files(dst_path, flat=True)
             if dst_fcount > 0:
                 raise HTTPException(status_code=409, detail="Destination exists")
-        await db.
+        await db.copy_dir(src_path, dst_path, op_user)
     return Response(status_code=201, content="OK")
```
{lfss-0.11.1 → lfss-0.11.2}/pyproject.toml

```diff
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "lfss"
-version = "0.11.1"
+version = "0.11.2"
 description = "Lightweight file storage service"
 authors = ["Li, Mengxun <mengxunli@whu.edu.cn>"]
 readme = "Readme.md"
@@ -19,6 +19,7 @@ uvicorn = "0.*"
 stream-zip = "0.*"
 python-multipart = "*"
 pillow = "*"
+rich = "*"
 
 [tool.poetry.dev-dependencies]
 pytest = "*"
@@ -32,6 +33,7 @@ lfss-panel = "lfss.cli.panel:main"
 lfss-cli = "lfss.cli.cli:main"
 lfss-vacuum = "lfss.cli.vacuum:main"
 lfss-balance = "lfss.cli.balance:main"
+lfss-log = "lfss.cli.log:main"
 
 [build-system]
 requires = ["poetry-core>=1.0.0"]
```
All remaining files listed above with +0 -0 are unchanged between 0.11.1 and 0.11.2.