lfss-0.11.1-py3-none-any.whl → lfss-0.11.3-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package. It is provided for informational purposes only and reflects the changes between the versions as published in their respective public registries.
- docs/Enviroment_variables.md +3 -1
- docs/changelog.md +27 -0
- frontend/api.js +66 -4
- frontend/login.js +0 -1
- frontend/popup.js +18 -3
- frontend/scripts.js +8 -5
- frontend/utils.js +4 -5
- lfss/api/connector.py +17 -2
- lfss/cli/cli.py +7 -7
- lfss/cli/log.py +77 -0
- lfss/cli/vacuum.py +10 -3
- lfss/eng/config.py +6 -3
- lfss/eng/database.py +99 -40
- lfss/eng/log.py +91 -21
- lfss/eng/utils.py +1 -2
- lfss/svc/app_base.py +4 -1
- lfss/svc/app_dav.py +7 -7
- lfss/svc/app_native.py +58 -11
- lfss/svc/common_impl.py +4 -4
- {lfss-0.11.1.dist-info → lfss-0.11.3.dist-info}/METADATA +3 -2
- {lfss-0.11.1.dist-info → lfss-0.11.3.dist-info}/RECORD +23 -22
- {lfss-0.11.1.dist-info → lfss-0.11.3.dist-info}/entry_points.txt +1 -0
- {lfss-0.11.1.dist-info → lfss-0.11.3.dist-info}/WHEEL +0 -0
lfss/eng/database.py
CHANGED
```diff
@@ -210,6 +210,10 @@ class FileConn(DBObjectBase):
         return self.parse_record(res)
 
     async def get_file_records(self, urls: list[str]) -> list[FileRecord]:
+        """
+        Get all file records with the given urls, only urls in the database will be returned.
+        If the urls are not in the database, they will be ignored.
+        """
         await self.cur.execute("SELECT * FROM fmeta WHERE url IN ({})".format(','.join(['?'] * len(urls))), urls)
         res = await self.cur.fetchall()
         if res is None:
@@ -225,12 +229,12 @@ class FileConn(DBObjectBase):
             await self.cur.execute("SELECT username FROM user")
             res = await self.cur.fetchall()
             dirnames = [u[0] + '/' for u in res]
-            dirs = [await self.
+            dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
             return dirs
         else:
             # list specific users
             dirnames = [uname + '/' for uname in usernames]
-            dirs = [await self.
+            dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
             return dirs
 
     async def count_path_dirs(self, url: str):
@@ -278,11 +282,11 @@ class FileConn(DBObjectBase):
             if skim:
                 return DirectoryRecord(dir_url)
             else:
-                return await self.
+                return await self.get_dir_record(dir_url)
         dirs = [await get_dir(url + d) for d in dirs_str]
         return dirs
 
-    async def
+    async def count_dir_files(self, url: str, flat: bool = False):
         if not url.endswith('/'): url += '/'
         if url == '/': url = ''
         if flat:
@@ -293,7 +297,7 @@ class FileConn(DBObjectBase):
         assert res is not None, "Error: count_path_files"
         return res[0]
 
-    async def
+    async def list_dir_files(
         self, url: str,
         offset: int = 0, limit: int = 10_000,
         order_by: FileSortKey = '', order_desc: bool = False,
@@ -328,15 +332,15 @@ class FileConn(DBObjectBase):
         """
         MAX_ITEMS = 10_000
         dir_count = await self.count_path_dirs(url)
-        file_count = await self.
+        file_count = await self.count_dir_files(url, flat=False)
         if dir_count + file_count > MAX_ITEMS:
             raise TooManyItemsError("Too many items, please paginate")
         return PathContents(
             dirs = await self.list_path_dirs(url, skim=True, limit=MAX_ITEMS),
-            files = await self.
+            files = await self.list_dir_files(url, flat=False, limit=MAX_ITEMS)
         )
 
-    async def
+    async def get_dir_record(self, url: str) -> DirectoryRecord:
         """
         Get the full record of a directory, including size, create_time, update_time, access_time etc.
         """
```
```diff
@@ -411,8 +415,11 @@ class FileConn(DBObjectBase):
         await self._user_size_inc(owner_id, file_size)
         self.logger.info(f"File {url} created")
 
-    # not tested
     async def copy_file(self, old_url: str, new_url: str, user_id: Optional[int] = None):
+        """
+        Copy file from old_url to new_url,
+        if user_id is None, will not change the owner_id of the file. Otherwise, will change the owner_id to user_id.
+        """
         old = await self.get_file_record(old_url)
         if old is None:
             raise FileNotFoundError(f"File {old_url} not found")
@@ -428,15 +435,15 @@ class FileConn(DBObjectBase):
         await self._user_size_inc(user_id, old.file_size)
         self.logger.info(f"Copied file {old_url} to {new_url}")
 
-    async def
+    async def copy_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
+        """
+        Copy all files under old_url to new_url,
+        if user_id is None, will not change the owner_id of the files. Otherwise, will change the owner_id to user_id.
+        """
         assert old_url.endswith('/'), "Old path must end with /"
         assert new_url.endswith('/'), "New path must end with /"
-
-
-            res = await cursor.fetchall()
-        else:
-            cursor = await self.cur.execute("SELECT * FROM fmeta WHERE url LIKE ? AND owner_id = ?", (old_url + '%', user_id))
-            res = await cursor.fetchall()
+        cursor = await self.cur.execute("SELECT * FROM fmeta WHERE url LIKE ?", (old_url + '%', ))
+        res = await cursor.fetchall()
         for r in res:
             old_record = FileRecord(*r)
             new_r = new_url + old_record.url[len(old_url):]
@@ -461,7 +468,7 @@ class FileConn(DBObjectBase):
         await self.cur.execute("UPDATE fmeta SET url = ?, create_time = CURRENT_TIMESTAMP WHERE url = ?", (new_url, old_url))
         self.logger.info(f"Moved file {old_url} to {new_url}")
 
-    async def
+    async def move_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         assert old_url.endswith('/'), "Old path must end with /"
         assert new_url.endswith('/'), "New path must end with /"
         if user_id is None:
@@ -500,7 +507,7 @@ class FileConn(DBObjectBase):
         self.logger.info(f"Deleted {len(ret)} file records for user {owner_id}") # type: ignore
         return ret
 
-    async def
+    async def delete_records_by_prefix(self, path: str, under_owner_id: Optional[int] = None) -> list[FileRecord]:
         """Delete all records with url starting with path"""
         # update user size
         cursor = await self.cur.execute("SELECT DISTINCT owner_id FROM fmeta WHERE url LIKE ?", (path + '%', ))
```
```diff
@@ -689,7 +696,7 @@ async def delayed_log_access(url: str):
         ])
     ),
 )
-def validate_url(url: str,
+def validate_url(url: str, utype: Literal['file', 'dir'] = 'file'):
     """ Check if a path is valid. The input path is considered url safe """
     if len(url) > 1024:
         raise InvalidPathError(f"URL too long: {url}")
@@ -703,7 +710,7 @@ def validate_url(url: str, is_file = True):
             is_valid = False
             break
 
-    if
+    if utype == 'file': is_valid = is_valid and not url.endswith('/')
     else: is_valid = is_valid and url.endswith('/')
 
     if not is_valid:
```
```diff
@@ -827,6 +834,58 @@ class Database:
             yield blob
         ret = blob_stream()
         return ret
+
+    async def read_files_bulk(
+        self, urls: list[str],
+        skip_content = False,
+        op_user: Optional[UserRecord] = None,
+        ) -> dict[str, Optional[bytes]]:
+        """
+        A frequent use case is to read multiple files at once,
+        this method will read all files in the list and return a dict of url -> blob.
+        if the file is not found, the value will be None.
+        - skip_content: if True, will not read the content of the file, resulting in a dict of url -> b''
+
+        may raise StorageExceededError if the total size of the files exceeds MAX_MEM_FILE_BYTES
+        """
+        for url in urls:
+            validate_url(url)
+
+        async with unique_cursor() as cur:
+            fconn = FileConn(cur)
+            file_records = await fconn.get_file_records(urls)
+
+            if op_user is not None:
+                for r in file_records:
+                    if await check_path_permission(r.url, op_user, cursor=cur) >= AccessLevel.READ:
+                        continue
+                    is_allowed, reason = await check_file_read_permission(op_user, r, cursor=cur)
+                    if not is_allowed:
+                        raise PermissionDeniedError(f"Permission denied: {op_user.username} cannot read file {r.url}: {reason}")
+
+        # first check if the files are too big
+        sum_size = sum([r.file_size for r in file_records])
+        if not skip_content and sum_size > MAX_MEM_FILE_BYTES:
+            raise StorageExceededError(f"Unable to read files at once, total size {sum_size} exceeds {MAX_MEM_FILE_BYTES}")
+
+        self.logger.debug(f"Reading {len(file_records)} files{' (skip content)' if skip_content else ''}, getting {sum_size} bytes, from {urls}")
+        # read the file content
+        async with unique_cursor() as cur:
+            fconn = FileConn(cur)
+            blobs: dict[str, bytes] = {}
+            for r in file_records:
+                if skip_content:
+                    blobs[r.url] = b''
+                    continue
+
+                if r.external:
+                    blob_iter = fconn.get_file_blob_external(r.file_id)
+                    blob = b''.join([chunk async for chunk in blob_iter])
+                else:
+                    blob = await fconn.get_file_blob(r.file_id)
+                blobs[r.url] = blob
+
+        return {url: blobs.get(url, None) for url in urls}
 
     async def delete_file(self, url: str, op_user: Optional[UserRecord] = None) -> Optional[FileRecord]:
         validate_url(url)
```
```diff
@@ -885,9 +944,9 @@ class Database:
             raise PermissionDeniedError(f"Permission denied: {op_user.username} cannot copy file to {new_url}")
         await fconn.copy_file(old_url, new_url, user_id=op_user.id if op_user is not None else None)
 
-    async def
-        validate_url(old_url,
-        validate_url(new_url,
+    async def move_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+        validate_url(old_url, 'dir')
+        validate_url(new_url, 'dir')
 
         if new_url.startswith('/'):
             new_url = new_url[1:]
@@ -906,12 +965,11 @@ class Database:
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            await fconn.
+            await fconn.move_dir(old_url, new_url, op_user.id)
 
-
-
-        validate_url(
-        validate_url(new_url, is_file=False)
+    async def copy_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+        validate_url(old_url, 'dir')
+        validate_url(new_url, 'dir')
 
         if new_url.startswith('/'):
             new_url = new_url[1:]
@@ -930,7 +988,7 @@ class Database:
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            await fconn.
+            await fconn.copy_dir(old_url, new_url, op_user.id)
 
     async def __batch_delete_file_blobs(self, fconn: FileConn, file_records: list[FileRecord], batch_size: int = 512):
         # https://github.com/langchain-ai/langchain/issues/10321
@@ -951,13 +1009,13 @@ class Database:
         await del_internal()
         await del_external()
 
-    async def
-        validate_url(url,
+    async def delete_dir(self, url: str, op_user: Optional[UserRecord] = None) -> Optional[list[FileRecord]]:
+        validate_url(url, 'dir')
         from_owner_id = op_user.id if op_user is not None and not (op_user.is_admin or await check_path_permission(url, op_user) >= AccessLevel.WRITE) else None
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            records = await fconn.
+            records = await fconn.delete_records_by_prefix(url, from_owner_id)
             if not records:
                 return None
             await self.__batch_delete_file_blobs(fconn, records)
@@ -981,14 +1039,15 @@ class Database:
 
             # make sure the user's directory is deleted,
             # may contain admin's files, but delete them all
-            await fconn.
+            await fconn.delete_records_by_prefix(user.username + '/')
 
-    async def
+    async def iter_dir(self, top_url: str, urls: Optional[list[str]]) -> AsyncIterable[tuple[FileRecord, bytes | AsyncIterable[bytes]]]:
+        validate_url(top_url, 'dir')
         async with unique_cursor() as cur:
             fconn = FileConn(cur)
             if urls is None:
-                fcount = await fconn.
-                urls = [r.url for r in (await fconn.
+                fcount = await fconn.count_dir_files(top_url, flat=True)
+                urls = [r.url for r in (await fconn.list_dir_files(top_url, flat=True, limit=fcount))]
 
         for url in urls:
             if not url.startswith(top_url):
@@ -1003,7 +1062,7 @@ class Database:
                 blob = await fconn.get_file_blob(f_id)
             yield r, blob
 
-    async def
+    async def zip_dir_stream(self, top_url: str, op_user: Optional[UserRecord] = None) -> AsyncIterable[bytes]:
         from stat import S_IFREG
         from stream_zip import async_stream_zip, ZIP_64
         if top_url.startswith('/'):
@@ -1015,7 +1074,7 @@ class Database:
 
         # https://stream-zip.docs.trade.gov.uk/async-interface/
         async def data_iter():
-            async for (r, blob) in self.
+            async for (r, blob) in self.iter_dir(top_url, None):
                 rel_path = r.url[len(top_url):]
                 rel_path = decode_uri_compnents(rel_path)
                 b_iter: AsyncIterable[bytes]
@@ -1035,7 +1094,7 @@ class Database:
         return async_stream_zip(data_iter())
 
     @concurrent_wrap()
-    async def
+    async def zip_dir(self, top_url: str, op_user: Optional[UserRecord]) -> io.BytesIO:
         if top_url.startswith('/'):
             top_url = top_url[1:]
 
@@ -1045,7 +1104,7 @@ class Database:
 
         buffer = io.BytesIO()
         with zipfile.ZipFile(buffer, 'w') as zf:
-            async for (r, blob) in self.
+            async for (r, blob) in self.iter_dir(top_url, None):
                 rel_path = r.url[len(top_url):]
                 rel_path = decode_uri_compnents(rel_path)
                 if r.external:
```
lfss/eng/log.py
CHANGED
```diff
@@ -1,8 +1,9 @@
-from .config import
+from .config import LOG_DIR, DISABLE_LOGGING
+import time, sqlite3, dataclasses
 from typing import TypeVar, Callable, Literal, Optional
 from concurrent.futures import ThreadPoolExecutor
 from functools import wraps
-import logging,
+import logging, asyncio
 from logging import handlers
 
 class BCOLORS:
@@ -57,15 +58,81 @@ class BaseLogger(logging.Logger):
     @thread_wrap
     def error(self, *args, **kwargs): super().error(*args, **kwargs)
 
-
+class SQLiteFileHandler(logging.FileHandler):
+    def __init__(self, filename, *args, **kwargs):
+        super().__init__(filename, *args, **kwargs)
+        self._db_file = filename
+        self._buffer: list[logging.LogRecord] = []
+        self._buffer_size = 100
+        self._flush_interval = 10
+        self._last_flush = time.time()
+        conn = sqlite3.connect(self._db_file, check_same_thread=False)
+        conn.execute('PRAGMA journal_mode=WAL')
+        conn.execute('''
+            CREATE TABLE IF NOT EXISTS log (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                created TIMESTAMP,
+                created_epoch FLOAT,
+                name TEXT,
+                levelname VARCHAR(16),
+                level INTEGER,
+                message TEXT
+            )
+        ''')
+        conn.commit()
+        conn.close()
+
+    def flush(self):
+        def format_time(self, record: logging.LogRecord):
+            """ Create a time stamp """
+            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
+        self.acquire()
+        try:
+            conn = sqlite3.connect(self._db_file, check_same_thread=False)
+            conn.executemany('''
+                INSERT INTO log (created, created_epoch, name, levelname, level, message)
+                VALUES (?, ?, ?, ?, ?, ?)
+            ''', [
+                (format_time(self, record), record.created, record.name, record.levelname, record.levelno, record.getMessage())
+                for record in self._buffer
+            ])
+            conn.commit()
+            conn.close()
+            self._buffer.clear()
+            self._last_flush = time.time()
+        finally:
+            self.release()
+
+    def emit(self, record: logging.LogRecord):
+        self._buffer.append(record)
+        if len(self._buffer) > self._buffer_size or time.time() - self._last_flush > self._flush_interval:
+            self.flush()
+
+    def close(self):
+        self.flush()
+        return super().close()
+
+def eval_logline(row: sqlite3.Row):
+    @dataclasses.dataclass
+    class DBLogRecord:
+        id: int
+        created: str
+        created_epoch: float
+        name: str
+        levelname: str
+        level: int
+        message: str
+    return DBLogRecord(*row)
+
+_fh_T = Literal['rotate', 'simple', 'daily', 'sqlite']
 
 __g_logger_dict: dict[str, BaseLogger] = {}
 def get_logger(
     name = 'default',
-    log_home =
+    log_home = LOG_DIR,
     level = 'DEBUG',
     term_level = 'INFO',
-    file_handler_type: _fh_T = '
+    file_handler_type: _fh_T = 'sqlite',
     global_instance = True
     )->BaseLogger:
     if global_instance and name in __g_logger_dict:
@@ -87,22 +154,25 @@ def get_logger(
         if isinstance(color, str) and color.startswith('\033'):
             format_str_plain = format_str_plain.replace(color, '')
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    if not DISABLE_LOGGING:
+        formatter_plain = logging.Formatter(format_str_plain)
+        log_home.mkdir(exist_ok=True)
+        log_file = log_home / f'{name}.log'
+        if file_handler_type == 'simple':
+            file_handler = logging.FileHandler(log_file)
+        elif file_handler_type == 'daily':
+            file_handler = handlers.TimedRotatingFileHandler(
+                log_file, when='midnight', interval=1, backupCount=30
+            )
+        elif file_handler_type == 'rotate':
+            file_handler = handlers.RotatingFileHandler(
+                log_file, maxBytes=1024*1024, backupCount=5
+            )
+        elif file_handler_type == 'sqlite':
+            file_handler = SQLiteFileHandler(log_file if log_file.suffix == '.db' else log_file.with_suffix('.log.db'))
+
+        file_handler.setFormatter(formatter_plain)
+        logger.addHandler(file_handler)
 
     logger = BaseLogger(name)
     setupLogger(logger)
```
lfss/eng/utils.py
CHANGED
```diff
@@ -11,7 +11,6 @@ from concurrent.futures import ThreadPoolExecutor
 from typing import TypeVar, Callable, Awaitable
 from functools import wraps, partial
 from uuid import uuid4
-import os
 
 async def copy_file(source: str|pathlib.Path, destination: str|pathlib.Path):
     async with aiofiles.open(source, mode='rb') as src:
@@ -160,7 +159,7 @@ _g_executor = None
 def get_global_executor():
     global _g_executor
     if _g_executor is None:
-        _g_executor = ThreadPoolExecutor(
+        _g_executor = ThreadPoolExecutor()
     return _g_executor
 def async_wrap(executor=None):
     if executor is None:
```
lfss/svc/app_base.py
CHANGED
```diff
@@ -60,10 +60,13 @@ def handle_exception(fn):
         raise
     return wrapper
 
+env_origins = os.environ.get("LFSS_ORIGINS", "*")
+logger.debug(f"LFSS_ORIGINS: {env_origins}")
+origins = [x.strip() for x in env_origins.split(",") if x.strip()]
 app = FastAPI(docs_url=None, redoc_url=None, lifespan=lifespan)
 app.add_middleware(
     CORSMiddleware,
-    allow_origins=
+    allow_origins=origins,
     allow_credentials=True,
     allow_methods=["*"],
     allow_headers=["*"],
```
lfss/svc/app_dav.py
CHANGED
```diff
@@ -57,9 +57,9 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
     if len(dir_path_sp) > 2:
         async with unique_cursor() as c:
             fconn = FileConn(c)
-            if await fconn.
+            if await fconn.count_dir_files(path, flat=True) == 0:
                 return None, lfss_path, None
-            return "dir", lfss_path, await fconn.
+            return "dir", lfss_path, await fconn.get_dir_record(path)
     else:
         # test if its a user's root directory
         assert len(dir_path_sp) == 2
@@ -85,8 +85,8 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
     async with unique_cursor() as c:
         lfss_path = path + "/"
         fconn = FileConn(c)
-        if await fconn.
-            return "dir", lfss_path, await fconn.
+        if await fconn.count_dir_files(lfss_path) > 0:
+            return "dir", lfss_path, await fconn.get_dir_record(lfss_path)
 
     return None, path, None
 
@@ -235,7 +235,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
     # query root directory content
     async def user_path_record(user_name: str, cur) -> DirectoryRecord:
         try:
-            return await FileConn(cur).
+            return await FileConn(cur).get_dir_record(user_name + "/")
         except PathNotFoundError:
             return DirectoryRecord(user_name + "/", size=0, n_files=0, create_time="1970-01-01 00:00:00", update_time="1970-01-01 00:00:00", access_time="1970-01-01 00:00:00")
 
@@ -253,7 +253,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
     elif path_type == "dir":
         # query directory content
         async with unique_cursor() as c:
-            flist = await FileConn(c).
+            flist = await FileConn(c).list_dir_files(lfss_path, flat = True if depth == "infinity" else False)
         for frecord in flist:
             if frecord.url.endswith(f"/{MKDIR_PLACEHOLDER}"): continue
             file_el = await create_file_xml_element(frecord)
@@ -315,7 +315,7 @@ async def dav_move(request: Request, path: str, user: UserRecord = Depends(regis
     assert ptype == "dir", "Directory path should end with /"
     assert lfss_path.endswith("/"), "Directory path should end with /"
     if not dlfss_path.endswith("/"): dlfss_path += "/" # the header destination may not end with /
-    await db.
+    await db.move_dir(lfss_path, dlfss_path, user)
     return Response(status_code=201)
 
 @router_dav.api_route("/{path:path}", methods=["COPY"])
```