lfss 0.11.1-py3-none-any.whl → 0.11.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
docs/changelog.md CHANGED
@@ -1,3 +1,30 @@
+ ## 0.11
+
+ ### 0.11.2
+ - Improve frontend directory upload feedback.
+ - Set default large file threshold to 1M.
+ - Increase default concurrent threads.
+ - Use sqlite for logging.
+ - Add vacuum logs.
+ - Refactor: use dir for directory path.
+
+ ### 0.11.1
+ - Rename api `get_meta` function.
+ - Frontend supports directory upload.
+ - Fix admin put to non-existent user path.
+
+ ### 0.11.0
+ - Copy file as hard link.
+ - Add vacuum thumb and all.
+ - Thumb database uses file_id as index.
+ - Improve username and URL check with regular expressions.
+
+ ## 0.10
+
+ ### 0.10.0
+ - Inherit permission from path owner for `unset` permission files.
+ - Add timeout and verify options for client api.
+ - Bundle small files in memory.
  
  ## 0.9
  
frontend/login.js CHANGED
@@ -3,7 +3,6 @@ import { createFloatingWindow, showPopup } from "./popup.js";
  
  /**
   * @import { store } from "./state.js";
-  * @import { UserRecord } from "./api.js";
   *
   * Shows the login panel if necessary.
   * @param {store} store - The store object.
frontend/popup.js CHANGED
@@ -109,7 +109,14 @@ export function showPopup(content = '', {
  } = {}){
      const popup = document.createElement("div");
      popup.classList.add("popup-window");
-     popup.innerHTML = showTime? `<span>[${new Date().toLocaleTimeString()}]</span> ${content}` : content;
+     /**
+      * @param {string} c
+      * @returns {void}
+      */
+     function setPopupContent(c){
+         popup.innerHTML = showTime? `<span>[${new Date().toLocaleTimeString()}]</span> ${c}` : c;
+     }
+     setPopupContent(content);
      popup.style.width = width;
      const popupHeight = '1rem';
      popup.style.height = popupHeight;
@@ -132,11 +139,19 @@ export function showPopup(content = '', {
      if (level === "success") popup.style.backgroundColor = "darkgreen";
      document.body.appendChild(popup);
      shownPopups.push(popup);
-     window.setTimeout(() => {
+
+     function closePopup(){
          if (popup.parentNode) document.body.removeChild(popup);
          shownPopups.splice(shownPopups.indexOf(popup), 1);
          for (let i = 0; i < shownPopups.length; i++) {
              shownPopups[i].style.top = `${i * (parseInt(popupHeight) + 2*parseInt(paddingHeight))*1.2 + 0.5}rem`;
          }
-     }, timeout);
+     }
+
+     window.setTimeout(closePopup, timeout);
+     return {
+         elem: popup,
+         setContent: setPopupContent,
+         close: closePopup
+     }
  }
frontend/scripts.js CHANGED
@@ -183,26 +183,29 @@ Are you sure you want to proceed?\
  `)){ return; }
  
      let counter = 0;
+     let totalCount = 0;
+     const uploadPopup = showPopup('Uploading multiple files...', {level: 'info', timeout: 999999});
      async function uploadFileFn(path, file){
-         const this_count = counter;
          try{
              await uploadFile(conn, path, file, {conflict: 'overwrite'});
          }
          catch (err){
              showPopup('Failed to upload file [' + file.name + ']: ' + err, {level: 'error', timeout: 5000});
          }
-         console.log(`[${this_count}/${counter}] Uploaded file: ${path}`);
+         console.log(`[${counter}/${totalCount}] Uploaded file: ${path}`);
+         uploadPopup.setContent(`Uploading multiple files... [${counter}/${totalCount}]`);
      }
  
-     const promises = await forEachFile(e, async (relPath, filePromise) => {
+     const promises = await forEachFile(e, async (relPath, filePromiseFn) => {
          counter += 1;
-         const file = await filePromise;
+         const file = await filePromiseFn();
          await uploadFileFn(dstPath + relPath, file);
      });
+     totalCount = promises.length;
  
-     showPopup('Uploading multiple files...', {level: 'info', timeout: 3000});
      Promise.all(promises).then(
          () => {
+             window.setTimeout(uploadPopup.close, 3000);
              showPopup('Upload success.', {level: 'success', timeout: 3000});
              refreshFileList();
          },
frontend/utils.js CHANGED
@@ -101,7 +101,7 @@ export function asHtmlText(text){
  * using the provided callback with a concurrency limit.
  *
  * @param {Event} e The drop event.
- * @param {(relPath: string, file: Promise<File>) => Promise<void>} callback A function
+ * @param {(relPath: string, file: () => Promise<File>) => Promise<void>} callback A function
  * that receives the relative path and a promise for the File.
  * @param {number} [maxConcurrent=5] Maximum number of concurrent callback executions.
  * @returns {Promise<Promise<void>[]>} A promise resolving to an array of callback promises.
@@ -146,11 +146,10 @@ export async function forEachFile(e, callback, maxConcurrent = 16) {
      async function traverse(entry, path) {
          if (entry.isFile) {
              // Wrap file retrieval in a promise.
-             const filePromise = new Promise((resolve, reject) => {
-                 entry.file(resolve, reject);
-             });
+             const filePromiseFn = () =>
+                 new Promise((resolve, reject) => entry.file(resolve, reject));
              // Use the concurrency barrier for the callback invocation.
-             results.push(runWithLimit(() => callback(path + entry.name, filePromise)));
+             results.push(runWithLimit(() => callback(path + entry.name, filePromiseFn)));
          } else if (entry.isDirectory) {
              const reader = entry.createReader();
  
lfss/cli/log.py ADDED
@@ -0,0 +1,77 @@
+ from typing import Optional
+ import argparse
+ import rich.console
+ import logging
+ import sqlite3
+ from lfss.eng.log import eval_logline
+
+ console = rich.console.Console()
+ def levelstr2int(levelstr: str) -> int:
+     import sys
+     if sys.version_info < (3, 11):
+         return logging.getLevelName(levelstr.upper())
+     else:
+         return logging.getLevelNamesMapping()[levelstr.upper()]
+
+ def view(
+     db_file: str,
+     level: Optional[str] = None,
+     offset: int = 0,
+     limit: int = 1000
+ ):
+     conn = sqlite3.connect(db_file)
+     cursor = conn.cursor()
+     if level is None:
+         cursor.execute("SELECT * FROM log ORDER BY created DESC LIMIT ? OFFSET ?", (limit, offset))
+     else:
+         level_int = levelstr2int(level)
+         cursor.execute("SELECT * FROM log WHERE level >= ? ORDER BY created DESC LIMIT ? OFFSET ?", (level_int, limit, offset))
+     levelname_color = {
+         'DEBUG': 'blue',
+         'INFO': 'green',
+         'WARNING': 'yellow',
+         'ERROR': 'red',
+         'CRITICAL': 'bold red',
+         'FATAL': 'bold red'
+     }
+     for row in cursor.fetchall():
+         log = eval_logline(row)
+         console.print(f"{log.created} [{levelname_color[log.levelname]}][{log.levelname}] [default]{log.message}")
+     conn.close()
+
+ def trim(db_file: str, keep: int = 1000, level: Optional[str] = None):
+     conn = sqlite3.connect(db_file)
+     cursor = conn.cursor()
+     if level is None:
+         cursor.execute("DELETE FROM log WHERE id NOT IN (SELECT id FROM log ORDER BY created DESC LIMIT ?)", (keep,))
+     else:
+         cursor.execute("DELETE FROM log WHERE levelname = ? and id NOT IN (SELECT id FROM log WHERE levelname = ? ORDER BY created DESC LIMIT ?)", (level.upper(), level.upper(), keep))
+     conn.commit()
+     conn.execute("VACUUM")
+     conn.close()
+
+ def main():
+     parser = argparse.ArgumentParser(description="Log operations utility")
+     subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help')
+
+     parser_show = subparsers.add_parser('view', help='Show logs')
+     parser_show.add_argument('db_file', type=str, help='Database file path')
+     parser_show.add_argument('-l', '--level', type=str, required=False, help='Log level')
+     parser_show.add_argument('--offset', type=int, default=0, help='Starting offset')
+     parser_show.add_argument('--limit', type=int, default=1000, help='Maximum number of entries to display')
+     parser_show.set_defaults(func=view)
+
+     parser_trim = subparsers.add_parser('trim', help='Trim logs')
+     parser_trim.add_argument('db_file', type=str, help='Database file path')
+     parser_trim.add_argument('-l', '--level', type=str, required=False, help='Log level')
+     parser_trim.add_argument('--keep', type=int, default=1000, help='Number of entries to keep')
+     parser_trim.set_defaults(func=trim)
+
+     args = parser.parse_args()
+     if hasattr(args, 'func'):
+         kwargs = vars(args)
+         func = kwargs.pop('func')
+         func(**kwargs)
+
+ if __name__ == '__main__':
+     main()
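Since `view` and `trim` are plain functions, the new log database can be driven programmatically as well as through the `lfss-log` console script registered in entry_points.txt below. A minimal sketch; the database path is hypothetical, point it at an actual `*.log.db` file under your data home's `logs/` directory:

```python
# Hedged usage sketch for lfss.cli.log; only view/trim come from this release,
# the db path below is hypothetical.
from lfss.cli.log import view, trim

db = "/srv/lfss/logs/default.log.db"  # hypothetical path
view(db, level="warning", limit=50)   # print the 50 most recent WARNING-and-above rows
trim(db, keep=1000)                   # keep only the 1000 newest rows, then VACUUM
```

The equivalent CLI invocations would be `lfss-log view <db_file> -l warning --limit 50` and `lfss-log trim <db_file> --keep 1000`, matching the subcommands wired up in `main()`.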
lfss/cli/vacuum.py CHANGED
@@ -2,7 +2,7 @@
  Vacuum the database and external storage to ensure that the storage is consistent and minimal.
  """
  
- from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB
+ from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB, LOG_DIR
  import argparse, time, itertools
  from functools import wraps
  from asyncio import Semaphore
@@ -14,6 +14,7 @@ from lfss.eng.database import transaction, unique_cursor
  from lfss.svc.request_log import RequestDB
  from lfss.eng.utils import now_stamp
  from lfss.eng.connection_pool import global_entrance
+ from lfss.cli.log import trim
  
  sem: Semaphore
  
@@ -33,7 +34,7 @@ def barriered(func):
      return wrapper
  
  @global_entrance()
- async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, vacuum_all: bool = False):
+ async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, logs: bool = False, vacuum_all: bool = False):
  
      # check if any file in the Large Blob directory is not in the database
      # the reverse operation is not necessary, because by design, the database should be the source of truth...
@@ -73,6 +74,11 @@ async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = F
          async with unique_cursor(is_write=True) as c:
              await c.execute("VACUUM blobs")
  
+     if logs or vacuum_all:
+         with indicator("VACUUM-logs"):
+             for log_file in LOG_DIR.glob("*.log.db"):
+                 trim(str(log_file), keep=10_000)
+
      if thumbs or vacuum_all:
          try:
              async with transaction() as c:
@@ -123,9 +129,10 @@
      parser.add_argument("-d", "--data", action="store_true", help="Vacuum blobs")
      parser.add_argument("-t", "--thumb", action="store_true", help="Vacuum thumbnails")
      parser.add_argument("-r", "--requests", action="store_true", help="Vacuum request logs to only keep at most recent 1M rows in 7 days")
+     parser.add_argument("-l", "--logs", action="store_true", help="Trim log to keep at most recent 10k rows for each category")
      args = parser.parse_args()
      sem = Semaphore(args.jobs)
-     asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, vacuum_all=args.all))
+     asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, logs = args.logs, vacuum_all=args.all))
  
      if args.requests or args.all:
          asyncio.run(vacuum_requests())
lfss/eng/config.py CHANGED
@@ -11,14 +11,15 @@ if not DATA_HOME.exists():
  DATA_HOME = DATA_HOME.resolve().absolute()
  LARGE_BLOB_DIR = DATA_HOME / 'large_blobs'
  LARGE_BLOB_DIR.mkdir(exist_ok=True)
+ LOG_DIR = DATA_HOME / 'logs'
  
  # https://sqlite.org/fasterthanfs.html
  __env_large_file = os.environ.get('LFSS_LARGE_FILE', None)
  if __env_large_file is not None:
      LARGE_FILE_BYTES = parse_storage_size(__env_large_file)
  else:
-     LARGE_FILE_BYTES = 8 * 1024 * 1024 # 8MB
- MAX_MEM_FILE_BYTES = 128 * 1024 * 1024 # 128MB
+     LARGE_FILE_BYTES = 1 * 1024 * 1024 # 1MB
+ MAX_MEM_FILE_BYTES = 128 * 1024 * 1024 # 128MB
  CHUNK_SIZE = 1024 * 1024 # 1MB chunks for streaming (on large files)
  DEBUG_MODE = os.environ.get('LFSS_DEBUG', '0') == '1'
  
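The default large-file threshold drops from 8MB to 1MB; judging by the names, files at or above `LARGE_FILE_BYTES` are stored under `LARGE_BLOB_DIR` rather than inside the sqlite blob store (see the sqlite.org note above). The `LFSS_LARGE_FILE` override read here can presumably restore the old value; a minimal sketch, where the exact size string accepted by `parse_storage_size` is an assumption:

```python
# Hedged sketch: restore the previous 8MB threshold through the env override.
# The "8m" size string is an assumption about parse_storage_size's syntax.
import os
os.environ["LFSS_LARGE_FILE"] = "8m"  # must be set before lfss.eng.config is imported

from lfss.eng.config import LARGE_FILE_BYTES
assert LARGE_FILE_BYTES == 8 * 1024 * 1024
```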
lfss/eng/database.py CHANGED
@@ -225,12 +225,12 @@ class FileConn(DBObjectBase):
              await self.cur.execute("SELECT username FROM user")
              res = await self.cur.fetchall()
              dirnames = [u[0] + '/' for u in res]
-             dirs = [await self.get_path_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
+             dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
              return dirs
          else:
              # list specific users
              dirnames = [uname + '/' for uname in usernames]
-             dirs = [await self.get_path_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
+             dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
              return dirs
  
      async def count_path_dirs(self, url: str):
@@ -278,11 +278,11 @@
              if skim:
                  return DirectoryRecord(dir_url)
              else:
-                 return await self.get_path_record(dir_url)
+                 return await self.get_dir_record(dir_url)
          dirs = [await get_dir(url + d) for d in dirs_str]
          return dirs
  
-     async def count_path_files(self, url: str, flat: bool = False):
+     async def count_dir_files(self, url: str, flat: bool = False):
          if not url.endswith('/'): url += '/'
          if url == '/': url = ''
          if flat:
@@ -293,7 +293,7 @@
          assert res is not None, "Error: count_path_files"
          return res[0]
  
-     async def list_path_files(
+     async def list_dir_files(
          self, url: str,
          offset: int = 0, limit: int = 10_000,
          order_by: FileSortKey = '', order_desc: bool = False,
@@ -328,15 +328,15 @@
          """
          MAX_ITEMS = 10_000
          dir_count = await self.count_path_dirs(url)
-         file_count = await self.count_path_files(url, flat=False)
+         file_count = await self.count_dir_files(url, flat=False)
          if dir_count + file_count > MAX_ITEMS:
              raise TooManyItemsError("Too many items, please paginate")
          return PathContents(
              dirs = await self.list_path_dirs(url, skim=True, limit=MAX_ITEMS),
-             files = await self.list_path_files(url, flat=False, limit=MAX_ITEMS)
+             files = await self.list_dir_files(url, flat=False, limit=MAX_ITEMS)
          )
  
-     async def get_path_record(self, url: str) -> DirectoryRecord:
+     async def get_dir_record(self, url: str) -> DirectoryRecord:
          """
          Get the full record of a directory, including size, create_time, update_time, access_time etc.
          """
@@ -411,7 +411,6 @@
          await self._user_size_inc(owner_id, file_size)
          self.logger.info(f"File {url} created")
  
-     # not tested
      async def copy_file(self, old_url: str, new_url: str, user_id: Optional[int] = None):
          old = await self.get_file_record(old_url)
          if old is None:
@@ -428,7 +427,7 @@
          await self._user_size_inc(user_id, old.file_size)
          self.logger.info(f"Copied file {old_url} to {new_url}")
  
-     async def copy_path(self, old_url: str, new_url: str, user_id: Optional[int] = None):
+     async def copy_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
          assert old_url.endswith('/'), "Old path must end with /"
          assert new_url.endswith('/'), "New path must end with /"
          if user_id is None:
@@ -461,7 +460,7 @@
          await self.cur.execute("UPDATE fmeta SET url = ?, create_time = CURRENT_TIMESTAMP WHERE url = ?", (new_url, old_url))
          self.logger.info(f"Moved file {old_url} to {new_url}")
  
-     async def move_path(self, old_url: str, new_url: str, user_id: Optional[int] = None):
+     async def move_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
          assert old_url.endswith('/'), "Old path must end with /"
          assert new_url.endswith('/'), "New path must end with /"
          if user_id is None:
@@ -500,7 +499,7 @@
          self.logger.info(f"Deleted {len(ret)} file records for user {owner_id}") # type: ignore
          return ret
  
-     async def delete_path_records(self, path: str, under_owner_id: Optional[int] = None) -> list[FileRecord]:
+     async def delete_records_by_prefix(self, path: str, under_owner_id: Optional[int] = None) -> list[FileRecord]:
          """Delete all records with url starting with path"""
          # update user size
          cursor = await self.cur.execute("SELECT DISTINCT owner_id FROM fmeta WHERE url LIKE ?", (path + '%', ))
@@ -689,7 +688,7 @@ async def delayed_log_access(url: str):
          ])
      ),
  )
- def validate_url(url: str, is_file = True):
+ def validate_url(url: str, utype: Literal['file', 'dir'] = 'file'):
      """ Check if a path is valid. The input path is considered url safe """
      if len(url) > 1024:
          raise InvalidPathError(f"URL too long: {url}")
@@ -703,7 +702,7 @@ def validate_url(url: str, is_file = True):
              is_valid = False
              break
  
-     if is_file: is_valid = is_valid and not url.endswith('/')
+     if utype == 'file': is_valid = is_valid and not url.endswith('/')
      else: is_valid = is_valid and url.endswith('/')
  
      if not is_valid:
@@ -885,9 +884,9 @@ class Database:
              raise PermissionDeniedError(f"Permission denied: {op_user.username} cannot copy file to {new_url}")
          await fconn.copy_file(old_url, new_url, user_id=op_user.id if op_user is not None else None)
  
-     async def move_path(self, old_url: str, new_url: str, op_user: UserRecord):
-         validate_url(old_url, is_file=False)
-         validate_url(new_url, is_file=False)
+     async def move_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+         validate_url(old_url, 'dir')
+         validate_url(new_url, 'dir')
  
          if new_url.startswith('/'):
              new_url = new_url[1:]
@@ -906,12 +905,11 @@
  
          async with transaction() as cur:
              fconn = FileConn(cur)
-             await fconn.move_path(old_url, new_url, op_user.id)
+             await fconn.move_dir(old_url, new_url, op_user.id)
  
-     # not tested
-     async def copy_path(self, old_url: str, new_url: str, op_user: UserRecord):
-         validate_url(old_url, is_file=False)
-         validate_url(new_url, is_file=False)
+     async def copy_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+         validate_url(old_url, 'dir')
+         validate_url(new_url, 'dir')
  
          if new_url.startswith('/'):
              new_url = new_url[1:]
@@ -930,7 +928,7 @@
  
          async with transaction() as cur:
              fconn = FileConn(cur)
-             await fconn.copy_path(old_url, new_url, op_user.id)
+             await fconn.copy_dir(old_url, new_url, op_user.id)
  
      async def __batch_delete_file_blobs(self, fconn: FileConn, file_records: list[FileRecord], batch_size: int = 512):
          # https://github.com/langchain-ai/langchain/issues/10321
@@ -951,13 +949,13 @@
          await del_internal()
          await del_external()
  
-     async def delete_path(self, url: str, op_user: Optional[UserRecord] = None) -> Optional[list[FileRecord]]:
-         validate_url(url, is_file=False)
+     async def delete_dir(self, url: str, op_user: Optional[UserRecord] = None) -> Optional[list[FileRecord]]:
+         validate_url(url, 'dir')
          from_owner_id = op_user.id if op_user is not None and not (op_user.is_admin or await check_path_permission(url, op_user) >= AccessLevel.WRITE) else None
  
          async with transaction() as cur:
              fconn = FileConn(cur)
-             records = await fconn.delete_path_records(url, from_owner_id)
+             records = await fconn.delete_records_by_prefix(url, from_owner_id)
              if not records:
                  return None
              await self.__batch_delete_file_blobs(fconn, records)
@@ -981,14 +979,15 @@
  
          # make sure the user's directory is deleted,
          # may contain admin's files, but delete them all
-         await fconn.delete_path_records(user.username + '/')
+         await fconn.delete_records_by_prefix(user.username + '/')
  
-     async def iter_path(self, top_url: str, urls: Optional[list[str]]) -> AsyncIterable[tuple[FileRecord, bytes | AsyncIterable[bytes]]]:
+     async def iter_dir(self, top_url: str, urls: Optional[list[str]]) -> AsyncIterable[tuple[FileRecord, bytes | AsyncIterable[bytes]]]:
+         validate_url(top_url, 'dir')
          async with unique_cursor() as cur:
              fconn = FileConn(cur)
              if urls is None:
-                 fcount = await fconn.count_path_files(top_url, flat=True)
-                 urls = [r.url for r in (await fconn.list_path_files(top_url, flat=True, limit=fcount))]
+                 fcount = await fconn.count_dir_files(top_url, flat=True)
+                 urls = [r.url for r in (await fconn.list_dir_files(top_url, flat=True, limit=fcount))]
  
          for url in urls:
              if not url.startswith(top_url):
@@ -1003,7 +1002,7 @@
                  blob = await fconn.get_file_blob(f_id)
              yield r, blob
  
-     async def zip_path_stream(self, top_url: str, op_user: Optional[UserRecord] = None) -> AsyncIterable[bytes]:
+     async def zip_dir_stream(self, top_url: str, op_user: Optional[UserRecord] = None) -> AsyncIterable[bytes]:
          from stat import S_IFREG
          from stream_zip import async_stream_zip, ZIP_64
          if top_url.startswith('/'):
@@ -1015,7 +1014,7 @@
  
          # https://stream-zip.docs.trade.gov.uk/async-interface/
          async def data_iter():
-             async for (r, blob) in self.iter_path(top_url, None):
+             async for (r, blob) in self.iter_dir(top_url, None):
                  rel_path = r.url[len(top_url):]
                  rel_path = decode_uri_compnents(rel_path)
                  b_iter: AsyncIterable[bytes]
@@ -1035,7 +1034,7 @@
          return async_stream_zip(data_iter())
  
      @concurrent_wrap()
-     async def zip_path(self, top_url: str, op_user: Optional[UserRecord]) -> io.BytesIO:
+     async def zip_dir(self, top_url: str, op_user: Optional[UserRecord]) -> io.BytesIO:
          if top_url.startswith('/'):
              top_url = top_url[1:]
@@ -1045,7 +1044,7 @@
  
          buffer = io.BytesIO()
          with zipfile.ZipFile(buffer, 'w') as zf:
-             async for (r, blob) in self.iter_path(top_url, None):
+             async for (r, blob) in self.iter_dir(top_url, None):
                  rel_path = r.url[len(top_url):]
                  rel_path = decode_uri_compnents(rel_path)
                  if r.external:
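Alongside the `path` → `dir` renames, `validate_url` trades its `is_file` boolean for an explicit `utype` literal. A minimal sketch of the new contract; the exception raised by the final `if not is_valid:` branch is assumed to be `InvalidPathError`, matching the length check visible above:

```python
from lfss.eng.database import validate_url

validate_url("alice/notes.txt")           # ok: a file url must not end with '/'
validate_url("alice/docs/", utype="dir")  # ok: a dir url must end with '/'
validate_url("alice/docs", utype="dir")   # invalid: assumed to raise InvalidPathError
```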
lfss/eng/log.py CHANGED
@@ -1,4 +1,5 @@
- from .config import DATA_HOME
+ from .config import LOG_DIR
+ import time, sqlite3, dataclasses
  from typing import TypeVar, Callable, Literal, Optional
  from concurrent.futures import ThreadPoolExecutor
  from functools import wraps
@@ -57,15 +58,81 @@ class BaseLogger(logging.Logger):
      @thread_wrap
      def error(self, *args, **kwargs): super().error(*args, **kwargs)
  
- _fh_T = Literal['rotate', 'simple', 'daily']
+ class SQLiteFileHandler(logging.FileHandler):
+     def __init__(self, filename, *args, **kwargs):
+         super().__init__(filename, *args, **kwargs)
+         self._db_file = filename
+         self._buffer: list[logging.LogRecord] = []
+         self._buffer_size = 100
+         self._flush_interval = 10
+         self._last_flush = time.time()
+         conn = sqlite3.connect(self._db_file, check_same_thread=False)
+         conn.execute('PRAGMA journal_mode=WAL')
+         conn.execute('''
+             CREATE TABLE IF NOT EXISTS log (
+                 id INTEGER PRIMARY KEY AUTOINCREMENT,
+                 created TIMESTAMP,
+                 created_epoch FLOAT,
+                 name TEXT,
+                 levelname VARCHAR(16),
+                 level INTEGER,
+                 message TEXT
+             )
+         ''')
+         conn.commit()
+         conn.close()
+
+     def flush(self):
+         def format_time(self, record: logging.LogRecord):
+             """ Create a time stamp """
+             return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
+         self.acquire()
+         try:
+             conn = sqlite3.connect(self._db_file, check_same_thread=False)
+             conn.executemany('''
+                 INSERT INTO log (created, created_epoch, name, levelname, level, message)
+                 VALUES (?, ?, ?, ?, ?, ?)
+             ''', [
+                 (format_time(self, record), record.created, record.name, record.levelname, record.levelno, record.getMessage())
+                 for record in self._buffer
+             ])
+             conn.commit()
+             conn.close()
+             self._buffer.clear()
+             self._last_flush = time.time()
+         finally:
+             self.release()
+
+     def emit(self, record: logging.LogRecord):
+         self._buffer.append(record)
+         if len(self._buffer) > self._buffer_size or time.time() - self._last_flush > self._flush_interval:
+             self.flush()
+
+     def close(self):
+         self.flush()
+         return super().close()
+
+ def eval_logline(row: sqlite3.Row):
+     @dataclasses.dataclass
+     class DBLogRecord:
+         id: int
+         created: str
+         created_epoch: float
+         name: str
+         levelname: str
+         level: int
+         message: str
+     return DBLogRecord(*row)
+
+ _fh_T = Literal['rotate', 'simple', 'daily', 'sqlite']
  
  __g_logger_dict: dict[str, BaseLogger] = {}
  def get_logger(
      name = 'default',
-     log_home = pathlib.Path(DATA_HOME) / 'logs',
+     log_home = LOG_DIR,
      level = 'DEBUG',
      term_level = 'INFO',
-     file_handler_type: _fh_T = 'rotate',
+     file_handler_type: _fh_T = 'sqlite',
      global_instance = True
  )->BaseLogger:
      if global_instance and name in __g_logger_dict:
@@ -100,6 +167,8 @@ def get_logger(
          file_handler = handlers.RotatingFileHandler(
              log_file, maxBytes=1024*1024, backupCount=5
          )
+     elif file_handler_type == 'sqlite':
+         file_handler = SQLiteFileHandler(log_file if log_file.suffix == '.db' else log_file.with_suffix('.log.db'))
  
      file_handler.setFormatter(formatter_plain)
      logger.addHandler(file_handler)
lfss/eng/utils.py CHANGED
@@ -11,7 +11,6 @@ from concurrent.futures import ThreadPoolExecutor
  from typing import TypeVar, Callable, Awaitable
  from functools import wraps, partial
  from uuid import uuid4
- import os
  
  async def copy_file(source: str|pathlib.Path, destination: str|pathlib.Path):
      async with aiofiles.open(source, mode='rb') as src:
@@ -160,7 +159,7 @@ _g_executor = None
  def get_global_executor():
      global _g_executor
      if _g_executor is None:
-         _g_executor = ThreadPoolExecutor(max_workers=4 if (cpu_count:=os.cpu_count()) and cpu_count > 4 else cpu_count)
+         _g_executor = ThreadPoolExecutor()
      return _g_executor
  def async_wrap(executor=None):
      if executor is None:
lfss/svc/app_dav.py CHANGED
@@ -57,9 +57,9 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
      if len(dir_path_sp) > 2:
          async with unique_cursor() as c:
              fconn = FileConn(c)
-             if await fconn.count_path_files(path, flat=True) == 0:
+             if await fconn.count_dir_files(path, flat=True) == 0:
                  return None, lfss_path, None
-             return "dir", lfss_path, await fconn.get_path_record(path)
+             return "dir", lfss_path, await fconn.get_dir_record(path)
      else:
          # test if its a user's root directory
          assert len(dir_path_sp) == 2
@@ -85,8 +85,8 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
      async with unique_cursor() as c:
          lfss_path = path + "/"
          fconn = FileConn(c)
-         if await fconn.count_path_files(lfss_path) > 0:
-             return "dir", lfss_path, await fconn.get_path_record(lfss_path)
+         if await fconn.count_dir_files(lfss_path) > 0:
+             return "dir", lfss_path, await fconn.get_dir_record(lfss_path)
  
      return None, path, None
  
@@ -235,7 +235,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
          # query root directory content
          async def user_path_record(user_name: str, cur) -> DirectoryRecord:
              try:
-                 return await FileConn(cur).get_path_record(user_name + "/")
+                 return await FileConn(cur).get_dir_record(user_name + "/")
              except PathNotFoundError:
                  return DirectoryRecord(user_name + "/", size=0, n_files=0, create_time="1970-01-01 00:00:00", update_time="1970-01-01 00:00:00", access_time="1970-01-01 00:00:00")
  
@@ -253,7 +253,7 @@
      elif path_type == "dir":
          # query directory content
          async with unique_cursor() as c:
-             flist = await FileConn(c).list_path_files(lfss_path, flat = True if depth == "infinity" else False)
+             flist = await FileConn(c).list_dir_files(lfss_path, flat = True if depth == "infinity" else False)
          for frecord in flist:
              if frecord.url.endswith(f"/{MKDIR_PLACEHOLDER}"): continue
              file_el = await create_file_xml_element(frecord)
@@ -315,7 +315,7 @@ async def dav_move(request: Request, path: str, user: UserRecord = Depends(regis
          assert ptype == "dir", "Directory path should end with /"
          assert lfss_path.endswith("/"), "Directory path should end with /"
          if not dlfss_path.endswith("/"): dlfss_path += "/" # the header destination may not end with /
-         await db.move_path(lfss_path, dlfss_path, user)
+         await db.move_dir(lfss_path, dlfss_path, user)
      return Response(status_code=201)
  
  @router_dav.api_route("/{path:path}", methods=["COPY"])
lfss/svc/app_native.py CHANGED
@@ -90,13 +90,13 @@ async def bundle_files(path: str, user: UserRecord = Depends(registered_user)):
          raise HTTPException(status_code=400, detail="Cannot bundle root")
  
      async with unique_cursor() as cur:
-         dir_record = await FileConn(cur).get_path_record(path)
+         dir_record = await FileConn(cur).get_dir_record(path)
  
      pathname = f"{path.split('/')[-2]}"
  
      if dir_record.size < MAX_MEM_FILE_BYTES:
          logger.debug(f"Bundle {path} in memory")
-         dir_bytes = (await db.zip_path(path, op_user=user)).getvalue()
+         dir_bytes = (await db.zip_dir(path, op_user=user)).getvalue()
          return Response(
              content = dir_bytes,
              media_type = "application/zip",
@@ -109,7 +109,7 @@
      else:
          logger.debug(f"Bundle {path} in stream")
          return StreamingResponse(
-             content = await db.zip_path_stream(path, op_user=user),
+             content = await db.zip_dir_stream(path, op_user=user),
              media_type = "application/zip",
              headers = {
                  f"Content-Disposition": f"attachment; filename=bundle-{pathname}.zip",
@@ -134,7 +134,7 @@ async def get_file_meta(path: str, user: UserRecord = Depends(registered_user)):
      else:
          if await check_path_permission(path, user, cursor=cur) < AccessLevel.READ:
              raise HTTPException(status_code=403, detail="Permission denied")
-         record = await fconn.get_path_record(path)
+         record = await fconn.get_dir_record(path)
      return record
  
  @router_api.post("/meta")
@@ -171,7 +171,7 @@ async def update_file_meta(
          new_path = ensure_uri_compnents(new_path)
          logger.info(f"Update path of {path} to {new_path}")
          # will raise duplicate path error if same name path exists in the new path
-         await db.move_path(path, new_path, user)
+         await db.move_dir(path, new_path, user)
  
      return Response(status_code=200, content="OK")
  
@@ -194,7 +194,7 @@ async def count_files(path: str, flat: bool = False, user: UserRecord = Depends(
      path = ensure_uri_compnents(path)
      async with unique_cursor() as conn:
          fconn = FileConn(conn)
-         return { "count": await fconn.count_path_files(url = path, flat = flat) }
+         return { "count": await fconn.count_dir_files(url = path, flat = flat) }
  @router_api.get("/list-files")
  async def list_files(
      path: str, offset: int = 0, limit: int = 1000,
@@ -205,7 +205,7 @@ async def list_files(
      path = ensure_uri_compnents(path)
      async with unique_cursor() as conn:
          fconn = FileConn(conn)
-         return await fconn.list_path_files(
+         return await fconn.list_dir_files(
              url = path, offset = offset, limit = limit,
              order_by=order_by, order_desc=order_desc,
              flat=flat
lfss/svc/common_impl.py CHANGED
@@ -180,7 +180,7 @@ async def _get_dir_impl(
          else:
              raise HTTPException(status_code=404, detail="User not found")
      else:
-         if await FileConn(cur).count_path_files(path, flat=True) > 0:
+         if await FileConn(cur).count_dir_files(path, flat=True) > 0:
              return Response(status_code=200)
          else:
              raise HTTPException(status_code=404, detail="Path not found")
@@ -295,7 +295,7 @@ async def delete_impl(path: str, user: UserRecord):
      logger.info(f"DELETE {path}, user: {user.username}")
  
      if path.endswith("/"):
-         res = await db.delete_path(path, user)
+         res = await db.delete_dir(path, user)
      else:
          res = await db.delete_file(path, user)
  
@@ -327,8 +327,8 @@ async def copy_impl(
      else:
          async with unique_cursor() as cur:
              fconn = FileConn(cur)
-             dst_fcount = await fconn.count_path_files(dst_path, flat=True)
+             dst_fcount = await fconn.count_dir_files(dst_path, flat=True)
              if dst_fcount > 0:
                  raise HTTPException(status_code=409, detail="Destination exists")
-         await db.copy_path(src_path, dst_path, op_user)
+         await db.copy_dir(src_path, dst_path, op_user)
      return Response(status_code=201, content="OK")
lfss-0.11.1.dist-info/METADATA → lfss-0.11.2.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: lfss
- Version: 0.11.1
+ Version: 0.11.2
  Summary: Lightweight file storage service
  Home-page: https://github.com/MenxLi/lfss
  Author: Li, Mengxun
@@ -17,6 +17,7 @@ Requires-Dist: mimesniff (==1.*)
  Requires-Dist: pillow
  Requires-Dist: python-multipart
  Requires-Dist: requests (==2.*)
+ Requires-Dist: rich
  Requires-Dist: stream-zip (==0.*)
  Requires-Dist: uvicorn (==0.*)
  Project-URL: Repository, https://github.com/MenxLi/lfss
lfss-0.11.1.dist-info/RECORD → lfss-0.11.2.dist-info/RECORD CHANGED
@@ -3,49 +3,50 @@ docs/Enviroment_variables.md,sha256=xaL8qBwT8B2Qe11FaOU3xWrRCh1mJ1VyTFCeFbkd0rs,
  docs/Known_issues.md,sha256=ZqETcWP8lzTOel9b2mxEgCnADFF8IxOrEtiVO1NoMAk,251
  docs/Permission.md,sha256=thUJx7YRoU63Pb-eqo5l5450DrZN3QYZ36GCn8r66no,3152
  docs/Webdav.md,sha256=-Ja-BTWSY1BEMAyZycvEMNnkNTPZ49gSPzmf3Lbib70,1547
- docs/changelog.md,sha256=QYej_hmGnv9t8wjFHXBvmrBOvY7aACZ82oa5SVkIyzM,882
+ docs/changelog.md,sha256=fE0rE2IcovbxMhdTeqhnCnknT1vtVr7A860zIh7AEnE,1581
  frontend/api.js,sha256=GlQsNoZFEcy7QUUsLbXv7aP-KxRnIxM37FQHTaakGiQ,19387
  frontend/index.html,sha256=-k0bJ5FRqdl_H-O441D_H9E-iejgRCaL_z5UeYaS2qc,3384
  frontend/info.css,sha256=Ny0N3GywQ3a9q1_Qph_QFEKB4fEnTe_2DJ1Y5OsLLmQ,595
  frontend/info.js,sha256=xGUJPCSrtDhuSu0ELLQZ77PmVWldg-prU1mwQGbdEoA,5797
  frontend/login.css,sha256=VMM0QfbDFYerxKWKSGhMI1yg5IRBXg0TTdLJEEhQZNk,355
- frontend/login.js,sha256=QoO8yKmBHDVP-ZomCMOaV7xVUVIhpl7esJrb6T5aHQE,2466
+ frontend/login.js,sha256=xJkulk8dlvV4BhevADLeUrnZwShiFTWv3Wg2iJFUZlY,2423
  frontend/popup.css,sha256=TJZYFW1ZcdD1IVTlNPYNtMWKPbN6XDbQ4hKBOFK8uLg,1284
- frontend/popup.js,sha256=3PgaGZmxSdV1E-D_MWgcR7aHWkcsHA1BNKSOkmP66tA,5191
- frontend/scripts.js,sha256=OAx6o3Aabx-cE41uBABP62myZM8WbLxY37uXITMl8nY,24204
+ frontend/popup.js,sha256=cyUjtO0wbtqbEodHfwyUsak9iWbcDXeWMGDhpCPbcoE,5453
+ frontend/scripts.js,sha256=T3kMjTxrjOkp93OV4ZMGgCLRRaQgRmNzzxriOMGVeZM,24412
  frontend/state.js,sha256=vbNL5DProRKmSEY7xu9mZH6IY0PBenF8WGxPtGgDnLI,1680
  frontend/styles.css,sha256=xcNLqI3KBsY5TLnku8UIP0Jfr7QLajr1_KNlZj9eheM,4935
  frontend/thumb.css,sha256=rNsx766amYS2DajSQNabhpQ92gdTpNoQKmV69OKvtpI,295
  frontend/thumb.js,sha256=46ViD2TlTTWy0fx6wjoAs_5CQ4ajYB90vVzM7UO2IHw,6182
- frontend/utils.js,sha256=jqAZ7Xhlk8ZI97BRnd1dpFJcW0kPrN216xSFnrTT6zk,6069
+ frontend/utils.js,sha256=XP5hM_mROYaxK5dqn9qZVwv7GdQuiDzByilFskbrnxA,6068
  lfss/api/__init__.py,sha256=zT1JCiUM76wX-GtRrmKhTUzSYYfcmoyI1vYwN0fCcLw,6818
  lfss/api/connector.py,sha256=xl_WrvupplepZSYJs4pN9zN7GDnuZR2A8-pc08ILutI,13231
  lfss/cli/__init__.py,sha256=lPwPmqpa7EXQ4zlU7E7LOe6X2kw_xATGdwoHphUEirA,827
  lfss/cli/balance.py,sha256=fUbKKAUyaDn74f7mmxMfBL4Q4voyBLHu6Lg_g8GfMOQ,4121
  lfss/cli/cli.py,sha256=tPeUgj0BR_M649AGcBYwfsrGioes0qzGc0lghFkrjoo,8086
+ lfss/cli/log.py,sha256=TBlt8mhHMouv8ZBUMHYfGZiV6-0yPdajJQ5mkGHEojI,3016
  lfss/cli/panel.py,sha256=Xq3I_n-ctveym-Gh9LaUpzHiLlvt3a_nuDiwUS-MGrg,1597
  lfss/cli/serve.py,sha256=vTo6_BiD7Dn3VLvHsC5RKRBC3lMu45JVr_0SqpgHdj0,1086
  lfss/cli/user.py,sha256=1mTroQbaKxHjFCPHT67xwd08v-zxH0RZ_OnVc-4MzL0,5364
- lfss/cli/vacuum.py,sha256=SciDsIdy7cfRqrXcCKBAFb9FOLyXriZBZnXlCuy6F5I,6232
+ lfss/cli/vacuum.py,sha256=arEY89kYJKEpzuzjKtf21V7s0QzM1t3QWa1hNghhT0Q,6611
  lfss/eng/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  lfss/eng/bounded_pool.py,sha256=BI1dU-MBf82TMwJBYbjhEty7w1jIUKc5Bn9SnZ_-hoY,1288
- lfss/eng/config.py,sha256=FcTtPL7bOpg54nVL_gX-VTIjfN1cafy423ezoWGvouY,874
+ lfss/eng/config.py,sha256=vP-0h_9TkAfu5626KjowHjCgX-CnVGZajw3sxBs5jtU,902
  lfss/eng/connection_pool.py,sha256=1aq7nSgd7hB9YNV4PjD1RDRyl_moDw3ubBtSLyfgGBs,6320
- lfss/eng/database.py,sha256=-6-IgR6hXe4ouMH8e0Ryeh2gZXJBpna1ech41sZ3UYs,53267
+ lfss/eng/database.py,sha256=huYSOvTO5jES9wVl6Zity2XzNXyJBSQQwuCQHrEVf-Q,53255
  lfss/eng/datatype.py,sha256=27UB7-l9SICy5lAvKjdzpTL_GohZjzstQcr9PtAq7nM,2709
  lfss/eng/error.py,sha256=JGf5NV-f4rL6tNIDSAx5-l9MG8dEj7F2w_MuOjj1d1o,732
- lfss/eng/log.py,sha256=u6WRZZsE7iOx6_CV2NHh1ugea26p408FI4WstZh896A,5139
+ lfss/eng/log.py,sha256=jJKOnC64Lb5EoVJK_oi7vl4iRrH_gtCKM_zjHiIUA-4,7590
  lfss/eng/thumb.py,sha256=AFyWEkkpuCKGWOB9bLlaDwPKzQ9JtCSSmHMhX2Gu3CI,3096
- lfss/eng/utils.py,sha256=WYoXFFi5308UWtFC8VP792gpzrVbHZZHhP3PaFjxIEY,6770
+ lfss/eng/utils.py,sha256=jQUJWWmzOPmXdTCId2Y307m1cZfB4hpzHcTjO0mkOrU,6683
  lfss/sql/init.sql,sha256=FBmVzkNjYUnWjEELRFzf7xb50GngmzmeDVffT1Uk8u8,1625
  lfss/sql/pragma.sql,sha256=uENx7xXjARmro-A3XAK8OM8v5AxDMdCCRj47f86UuXg,206
  lfss/svc/app.py,sha256=r1KUO3sPaaJWbkJF0bcVTD7arPKLs2jFlq52Ixicomo,220
  lfss/svc/app_base.py,sha256=bTQbz945xalyB3UZLlqVBvL6JKGNQ8Fm2KpIvvucPZQ,6850
- lfss/svc/app_dav.py,sha256=D0KSgjtTktPjIhyIKG5eRmBdh5X8HYFYH151E6gzlbc,18245
- lfss/svc/app_native.py,sha256=JbPge-F9irl26tXKAzfA5DfyjCh0Dgttflztqqrvt0A,8890
- lfss/svc/common_impl.py,sha256=5ZRM24zVZpAeipgDtZUVBMFtArkydlAkn17ic_XL7v8,13733
+ lfss/svc/app_dav.py,sha256=H3aL3MEdYaPK1w3FQvTzrGYGaaow4m8LZ7R35MN351A,18238
+ lfss/svc/app_native.py,sha256=_dhcq_R1VoafRCLuuWxXuttuhBAVaFVdlIQ6ep6ZQvs,8883
+ lfss/svc/common_impl.py,sha256=7QflWnxRqghLOSMpDz2UCRqEn49X1GLS3agCb5msia8,13729
  lfss/svc/request_log.py,sha256=v8yXEIzPjaksu76Oh5vgdbUEUrw8Kt4etLAXBWSGie8,3207
- lfss-0.11.1.dist-info/METADATA,sha256=qXJcsBI6dboEavUMZcRuCQFLzQ8i5cUqWg5OJWrTr8k,2712
- lfss-0.11.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
- lfss-0.11.1.dist-info/entry_points.txt,sha256=VJ8svMz7RLtMCgNk99CElx7zo7M-N-z7BWDVw2HA92E,205
- lfss-0.11.1.dist-info/RECORD,,
+ lfss-0.11.2.dist-info/METADATA,sha256=__YXS_WBv6oNQlzcamUPEWayjek6bVsF4zRoGR0iJb8,2732
+ lfss-0.11.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ lfss-0.11.2.dist-info/entry_points.txt,sha256=R4uOP1y6eD0Qp3j1ySA8kRPVMdt6_W_9o-Zj9Ra4D0A,232
+ lfss-0.11.2.dist-info/RECORD,,
lfss-0.11.1.dist-info/entry_points.txt → lfss-0.11.2.dist-info/entry_points.txt CHANGED
@@ -1,6 +1,7 @@
  [console_scripts]
  lfss-balance=lfss.cli.balance:main
  lfss-cli=lfss.cli.cli:main
+ lfss-log=lfss.cli.log:main
  lfss-panel=lfss.cli.panel:main
  lfss-serve=lfss.cli.serve:main
  lfss-user=lfss.cli.user:main
lfss-0.11.1.dist-info/WHEEL → lfss-0.11.2.dist-info/WHEEL: file without changes