lfss 0.11.0__py3-none-any.whl → 0.11.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- docs/changelog.md +27 -0
- frontend/login.js +0 -1
- frontend/popup.js +18 -3
- frontend/scripts.js +46 -39
- frontend/utils.js +98 -1
- lfss/api/__init__.py +1 -1
- lfss/api/connector.py +7 -4
- lfss/cli/cli.py +1 -1
- lfss/cli/log.py +77 -0
- lfss/cli/vacuum.py +10 -3
- lfss/eng/config.py +3 -2
- lfss/eng/database.py +41 -42
- lfss/eng/log.py +73 -4
- lfss/eng/utils.py +1 -2
- lfss/svc/app_dav.py +7 -7
- lfss/svc/app_native.py +7 -7
- lfss/svc/common_impl.py +4 -4
- {lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/METADATA +2 -1
- {lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/RECORD +21 -20
- {lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/entry_points.txt +1 -0
- {lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/WHEEL +0 -0
docs/changelog.md
CHANGED
@@ -1,3 +1,30 @@
+## 0.11
+
+### 0.11.2
+- Improve frontend directory upload feedback.
+- Set default large file threshold to 1M.
+- Increase default concurrent threads.
+- Use sqlite for logging.
+- Add vacuum logs.
+- Refactor: use dir for directory path.
+
+### 0.11.1
+- Rename api `get_meta` function.
+- Frontend support upload directory.
+- Fix admin put to non-exists user path.
+
+### 0.11.0
+- Copy file as hard link.
+- Add vacuum thumb and all.
+- Thumb database use file_id as index.
+- Improve username and url check with regular expression.
+
+## 0.10
+
+### 0.10.0
+- Inherit permission from path owner for `unset` permission files.
+- Add timeout and verify options for client api.
+- Bundle small files in memory.
 
 ## 0.9
 
frontend/login.js
CHANGED
frontend/popup.js
CHANGED
@@ -109,7 +109,14 @@ export function showPopup(content = '', {
 } = {}){
     const popup = document.createElement("div");
     popup.classList.add("popup-window");
-
+    /**
+     * @param {string} c
+     * @returns {void}
+     */
+    function setPopupContent(c){
+        popup.innerHTML = showTime? `<span>[${new Date().toLocaleTimeString()}]</span> ${c}` : c;
+    }
+    setPopupContent(content);
     popup.style.width = width;
     const popupHeight = '1rem';
     popup.style.height = popupHeight;
@@ -132,11 +139,19 @@ export function showPopup(content = '', {
     if (level === "success") popup.style.backgroundColor = "darkgreen";
     document.body.appendChild(popup);
     shownPopups.push(popup);
-
+
+    function closePopup(){
         if (popup.parentNode) document.body.removeChild(popup);
         shownPopups.splice(shownPopups.indexOf(popup), 1);
         for (let i = 0; i < shownPopups.length; i++) {
             shownPopups[i].style.top = `${i * (parseInt(popupHeight) + 2*parseInt(paddingHeight))*1.2 + 0.5}rem`;
         }
-    }
+    }
+
+    window.setTimeout(closePopup, timeout);
+    return {
+        elem: popup,
+        setContent: setPopupContent,
+        close: closePopup
+    }
 }
frontend/scripts.js
CHANGED
@@ -5,6 +5,7 @@ import { showInfoPanel, showDirInfoPanel } from './info.js';
 import { makeThumbHtml } from './thumb.js';
 import { store } from './state.js';
 import { maybeShowLoginPanel } from './login.js';
+import { forEachFile } from './utils.js';
 
 /** @type {import('./api.js').UserRecord}*/
 let userRecord = null;
@@ -158,55 +159,61 @@ uploadFileNameInput.addEventListener('input', debounce(onFileNameInpuChange, 500
     e.preventDefault();
     e.stopPropagation();
 });
-window.addEventListener('drop', (e) => {
+window.addEventListener('drop', async (e) => {
     e.preventDefault();
     e.stopPropagation();
-    const
-    if (
-    uploadFileSelector.files = files;
-    uploadFileNameInput.value = files[0].name;
+    const items = e.dataTransfer.items;
+    if (items.length == 1 && items[0].kind === 'file' && items[0].webkitGetAsEntry().isFile){
+        uploadFileSelector.files = e.dataTransfer.files;
+        uploadFileNameInput.value = e.dataTransfer.files[0].name;
         uploadFileNameInput.focus();
+        return;
     }
-
-
-
-
+
+    /** @type {[string, File][]} */
+    const uploadInputVal = uploadFileNameInput.value? uploadFileNameInput.value : '';
+    let dstPath = store.dirpath + uploadInputVal;
+    if (!dstPath.endsWith('/')){ dstPath += '/'; }
+
+    if (!confirm(`\
 You are trying to upload multiple files at once.
 This will directly upload the files to the [${dstPath}] directory without renaming.
 Note that same name files will be overwritten.
-Are you sure you want to proceed
-
-
-
-
-
-
-
-    }
-    catch (err){
-        showPopup('Failed to upload file [' + file.name + ']: ' + err, {level: 'error', timeout: 5000});
-    }
-    counter += 1;
-    console.log("Uploading file: ", counter, "/", files.length);
+Are you sure you want to proceed?\
+`)){ return; }
+
+    let counter = 0;
+    let totalCount = 0;
+    const uploadPopup = showPopup('Uploading multiple files...', {level: 'info', timeout: 999999});
+    async function uploadFileFn(path, file){
+        try{
+            await uploadFile(conn, path, file, {conflict: 'overwrite'});
         }
-
-
-    for (let i = 0; i < files.length; i++){
-        const file = files[i];
-        const path = dstPath + file.name;
-        promises.push(uploadFileFn(file, path));
+        catch (err){
+            showPopup('Failed to upload file [' + file.name + ']: ' + err, {level: 'error', timeout: 5000});
         }
-
-
-        () => {
-            showPopup('Upload success.', {level: 'success', timeout: 3000});
-            refreshFileList();
-        },
-        (err) => {
-            showPopup('Failed to upload some files: ' + err, {level: 'error', timeout: 5000});
-        }
-    );
+        console.log(`[${counter}/${totalCount}] Uploaded file: ${path}`);
+        uploadPopup.setContent(`Uploading multiple files... [${counter}/${totalCount}]`);
     }
+
+    const promises = await forEachFile(e, async (relPath, filePromiseFn) => {
+        counter += 1;
+        const file = await filePromiseFn();
+        await uploadFileFn(dstPath + relPath, file);
+    });
+    totalCount = promises.length;
+
+    Promise.all(promises).then(
+        () => {
+            window.setTimeout(uploadPopup.close, 3000);
+            showPopup('Upload success.', {level: 'success', timeout: 3000});
+            refreshFileList();
+        },
+        (err) => {
+            showPopup('Failed to upload some files: ' + err, {level: 'error', timeout: 5000});
        }
+    );
+
 });
 }
 
frontend/utils.js
CHANGED
@@ -93,4 +93,101 @@ export function asHtmlText(text){
     anonElem.textContent = text;
     const htmlText = anonElem.innerHTML;
     return htmlText;
-}
+}
+
+/**
+ * Iterates over all files dropped in the event,
+ * including files inside directories, and processes them
+ * using the provided callback with a concurrency limit.
+ *
+ * @param {Event} e The drop event.
+ * @param {(relPath: string, file: () => Promise<File>) => Promise<void>} callback A function
+ * that receives the relative path and a promise for the File.
+ * @param {number} [maxConcurrent=5] Maximum number of concurrent callback executions.
+ * @returns {Promise<Promise<void>[]>} A promise resolving to an array of callback promises.
+ */
+export async function forEachFile(e, callback, maxConcurrent = 16) {
+    const results = []; // to collect callback promises
+
+    // Concurrency barrier variables.
+    let activeCount = 0;
+    const queue = [];
+
+    /**
+     * Runs the given async task when below the concurrency limit.
+     * If at limit, waits until a slot is free.
+     *
+     * @param {() => Promise<any>} task An async function returning a promise.
+     * @returns {Promise<any>}
+     */
+    async function runWithLimit(task) {
+        // If we reached the concurrency limit, wait for a free slot.
+        if (activeCount >= maxConcurrent) {
+            await new Promise(resolve => queue.push(resolve));
+        }
+        activeCount++;
+        try {
+            return await task();
+        } finally {
+            activeCount--;
+            // If there are waiting tasks, allow the next one to run.
+            if (queue.length) {
+                queue.shift()();
+            }
+        }
+    }
+
+    /**
+     * Recursively traverses a file system entry.
+     *
+     * @param {FileSystemEntry} entry The entry (file or directory).
+     * @param {string} path The current relative path.
+     */
+    async function traverse(entry, path) {
+        if (entry.isFile) {
+            // Wrap file retrieval in a promise.
+            const filePromiseFn = () =>
+                new Promise((resolve, reject) => entry.file(resolve, reject));
+            // Use the concurrency barrier for the callback invocation.
+            results.push(runWithLimit(() => callback(path + entry.name, filePromiseFn)));
+        } else if (entry.isDirectory) {
+            const reader = entry.createReader();
+
+            async function readAllEntries(reader) {
+                const entries = [];
+                while (true) {
+                    const chunk = await new Promise((resolve, reject) => {
+                        reader.readEntries(resolve, reject);
+                    });
+                    if (chunk.length === 0) break;
+                    entries.push(...chunk);
+                }
+                return entries;
+            }
+
+            const entries = await readAllEntries(reader);
+            await Promise.all(
+                entries.map(ent => traverse(ent, path + entry.name + '/'))
+            );
+        }
+    }
+
+    // Process using DataTransfer items if available.
+    if (e.dataTransfer && e.dataTransfer.items) {
+        await Promise.all(
+            Array.from(e.dataTransfer.items).map(async item => {
+                const entry = item.webkitGetAsEntry && item.webkitGetAsEntry();
+                if (entry) {
+                    await traverse(entry, '');
+                }
+            })
+        );
+    } else if (e.dataTransfer && e.dataTransfer.files) {
+        // Fallback for browsers that support only dataTransfer.files.
+        Array.from(e.dataTransfer.files).forEach(file => {
+            results.push(runWithLimit(() => callback(file.name, Promise.resolve(file))));
+        });
+    }
+    return results;
+}
+
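Note on the concurrency barrier in `forEachFile` above: it caps the number of in-flight callbacks with a counter plus a queue of resolvers. For comparison, the same bounded-concurrency idea on the Python side of this codebase would normally be expressed with `asyncio.Semaphore`; the sketch below is illustrative only and is not part of lfss:

```python
import asyncio

async def for_each_item(items, callback, max_concurrent: int = 16):
    """Run callback(item) over items, with at most max_concurrent running at once."""
    sem = asyncio.Semaphore(max_concurrent)

    async def run_limited(item):
        async with sem:          # waits here whenever the limit is reached
            return await callback(item)

    # schedule everything; the semaphore throttles actual execution
    return await asyncio.gather(*(run_limited(i) for i in items))

# usage sketch:
# asyncio.run(for_each_item(range(100), some_async_callback))
```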
lfss/api/__init__.py
CHANGED
@@ -113,7 +113,7 @@ def download_file(
         print(f"File {file_path} already exists, skipping download.")
         return True, error_msg
     try:
-        fmeta = connector.
+        fmeta = connector.get_meta(src_url)
         if fmeta is None:
             error_msg = "File not found."
             return False, error_msg
lfss/api/connector.py
CHANGED
@@ -98,7 +98,7 @@ class Connector:
 
         # Skip ahead by checking if the file already exists
         if conflict == 'skip-ahead':
-            exists = self.
+            exists = self.get_meta(path)
             if exists is None:
                 conflict = 'skip'
             else:
@@ -122,7 +122,7 @@ class Connector:
 
         # Skip ahead by checking if the file already exists
         if conflict == 'skip-ahead':
-            exists = self.
+            exists = self.get_meta(path)
             if exists is None:
                 conflict = 'skip'
             else:
@@ -154,7 +154,7 @@ class Connector:
 
         # Skip ahead by checking if the file already exists
         if conflict == 'skip-ahead':
-            exists = self.
+            exists = self.get_meta(path)
             if exists is None:
                 conflict = 'skip'
             else:
@@ -211,7 +211,7 @@ class Connector:
         """Deletes the file at the specified path."""
         self._fetch_factory('DELETE', path)()
 
-    def
+    def get_meta(self, path: str) -> Optional[FileRecord | DirectoryRecord]:
         """Gets the metadata for the file at the specified path."""
         try:
             response = self._fetch_factory('GET', '_api/meta', {'path': path})()
@@ -223,6 +223,9 @@ class Connector:
             if e.response.status_code == 404:
                 return None
             raise e
+    # shorthand methods for type constraints
+    def get_fmeta(self, path: str) -> Optional[FileRecord]: assert (f:=self.get_meta(path)) is None or isinstance(f, FileRecord); return f
+    def get_dmeta(self, path: str) -> Optional[DirectoryRecord]: assert (d:=self.get_meta(path)) is None or isinstance(d, DirectoryRecord); return d
 
     def list_path(self, path: str) -> PathContents:
         """
lfss/cli/cli.py
CHANGED
@@ -126,7 +126,7 @@ def main():
     elif args.command == "query":
         for path in args.path:
             with catch_request_error():
-                res = connector.
+                res = connector.get_meta(path)
                 if res is None:
                     print(f"\033[31mNot found\033[0m ({path})")
                 else:
lfss/cli/log.py
ADDED
@@ -0,0 +1,77 @@
+from typing import Optional
+import argparse
+import rich.console
+import logging
+import sqlite3
+from lfss.eng.log import eval_logline
+
+console = rich.console.Console()
+def levelstr2int(levelstr: str) -> int:
+    import sys
+    if sys.version_info < (3, 11):
+        return logging.getLevelName(levelstr.upper())
+    else:
+        return logging.getLevelNamesMapping()[levelstr.upper()]
+
+def view(
+    db_file: str,
+    level: Optional[str] = None,
+    offset: int = 0,
+    limit: int = 1000
+    ):
+    conn = sqlite3.connect(db_file)
+    cursor = conn.cursor()
+    if level is None:
+        cursor.execute("SELECT * FROM log ORDER BY created DESC LIMIT ? OFFSET ?", (limit, offset))
+    else:
+        level_int = levelstr2int(level)
+        cursor.execute("SELECT * FROM log WHERE level >= ? ORDER BY created DESC LIMIT ? OFFSET ?", (level_int, limit, offset))
+    levelname_color = {
+        'DEBUG': 'blue',
+        'INFO': 'green',
+        'WARNING': 'yellow',
+        'ERROR': 'red',
+        'CRITICAL': 'bold red',
+        'FATAL': 'bold red'
+    }
+    for row in cursor.fetchall():
+        log = eval_logline(row)
+        console.print(f"{log.created} [{levelname_color[log.levelname]}][{log.levelname}] [default]{log.message}")
+    conn.close()
+
+def trim(db_file: str, keep: int = 1000, level: Optional[str] = None):
+    conn = sqlite3.connect(db_file)
+    cursor = conn.cursor()
+    if level is None:
+        cursor.execute("DELETE FROM log WHERE id NOT IN (SELECT id FROM log ORDER BY created DESC LIMIT ?)", (keep,))
+    else:
+        cursor.execute("DELETE FROM log WHERE levelname = ? and id NOT IN (SELECT id FROM log WHERE levelname = ? ORDER BY created DESC LIMIT ?)", (level.upper(), level.upper(), keep))
+    conn.commit()
+    conn.execute("VACUUM")
+    conn.close()
+
+def main():
+    parser = argparse.ArgumentParser(description="Log operations utility")
+    subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help')
+
+    parser_show = subparsers.add_parser('view', help='Show logs')
+    parser_show.add_argument('db_file', type=str, help='Database file path')
+    parser_show.add_argument('-l', '--level', type=str, required=False, help='Log level')
+    parser_show.add_argument('--offset', type=int, default=0, help='Starting offset')
+    parser_show.add_argument('--limit', type=int, default=1000, help='Maximum number of entries to display')
+    parser_show.set_defaults(func=view)
+
+    parser_trim = subparsers.add_parser('trim', help='Trim logs')
+    parser_trim.add_argument('db_file', type=str, help='Database file path')
+    parser_trim.add_argument('-l', '--level', type=str, required=False, help='Log level')
+    parser_trim.add_argument('--keep', type=int, default=1000, help='Number of entries to keep')
+    parser_trim.set_defaults(func=trim)
+
+    args = parser.parse_args()
+    if hasattr(args, 'func'):
+        kwargs = vars(args)
+        func = kwargs.pop('func')
+        func(**kwargs)
+
+if __name__ == '__main__':
+    main()
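The new module doubles as a library and a command-line tool: `main()` wires `view` and `trim` into argparse subcommands (a matching entry point is added to entry_points.txt, though its name is not shown in this diff). A small sketch of calling the two functions directly; the database path is hypothetical and depends on where `LOG_DIR` points:

```python
from lfss.cli.log import view, trim

# print the newest entries at WARNING or above (level names are resolved via logging)
view("data/logs/default.log.db", level="warning", limit=100)

# keep only the newest 10k rows, then VACUUM to reclaim space
trim("data/logs/default.log.db", keep=10_000)
```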
lfss/cli/vacuum.py
CHANGED
@@ -2,7 +2,7 @@
 Vacuum the database and external storage to ensure that the storage is consistent and minimal.
 """
 
-from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB
+from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB, LOG_DIR
 import argparse, time, itertools
 from functools import wraps
 from asyncio import Semaphore
@@ -14,6 +14,7 @@ from lfss.eng.database import transaction, unique_cursor
 from lfss.svc.request_log import RequestDB
 from lfss.eng.utils import now_stamp
 from lfss.eng.connection_pool import global_entrance
+from lfss.cli.log import trim
 
 sem: Semaphore
 
@@ -33,7 +34,7 @@ def barriered(func):
     return wrapper
 
 @global_entrance()
-async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, vacuum_all: bool = False):
+async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, logs: bool = False, vacuum_all: bool = False):
 
     # check if any file in the Large Blob directory is not in the database
     # the reverse operation is not necessary, because by design, the database should be the source of truth...
@@ -73,6 +74,11 @@ async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = F
     async with unique_cursor(is_write=True) as c:
         await c.execute("VACUUM blobs")
 
+    if logs or vacuum_all:
+        with indicator("VACUUM-logs"):
+            for log_file in LOG_DIR.glob("*.log.db"):
+                trim(str(log_file), keep=10_000)
+
     if thumbs or vacuum_all:
         try:
             async with transaction() as c:
@@ -123,9 +129,10 @@ def main():
     parser.add_argument("-d", "--data", action="store_true", help="Vacuum blobs")
     parser.add_argument("-t", "--thumb", action="store_true", help="Vacuum thumbnails")
     parser.add_argument("-r", "--requests", action="store_true", help="Vacuum request logs to only keep at most recent 1M rows in 7 days")
+    parser.add_argument("-l", "--logs", action="store_true", help="Trim log to keep at most recent 10k rows for each category")
     args = parser.parse_args()
     sem = Semaphore(args.jobs)
-    asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, vacuum_all=args.all))
+    asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, logs = args.logs, vacuum_all=args.all))
 
     if args.requests or args.all:
         asyncio.run(vacuum_requests())
lfss/eng/config.py
CHANGED
@@ -11,14 +11,15 @@ if not DATA_HOME.exists():
 DATA_HOME = DATA_HOME.resolve().absolute()
 LARGE_BLOB_DIR = DATA_HOME / 'large_blobs'
 LARGE_BLOB_DIR.mkdir(exist_ok=True)
+LOG_DIR = DATA_HOME / 'logs'
 
 # https://sqlite.org/fasterthanfs.html
 __env_large_file = os.environ.get('LFSS_LARGE_FILE', None)
 if __env_large_file is not None:
     LARGE_FILE_BYTES = parse_storage_size(__env_large_file)
 else:
-    LARGE_FILE_BYTES =
-MAX_MEM_FILE_BYTES = 128 * 1024 * 1024
+    LARGE_FILE_BYTES = 1 * 1024 * 1024   # 1MB
+MAX_MEM_FILE_BYTES = 128 * 1024 * 1024   # 128MB
 CHUNK_SIZE = 1024 * 1024 # 1MB chunks for streaming (on large files)
 DEBUG_MODE = os.environ.get('LFSS_DEBUG', '0') == '1'
 
lfss/eng/database.py
CHANGED
@@ -225,12 +225,12 @@ class FileConn(DBObjectBase):
             await self.cur.execute("SELECT username FROM user")
             res = await self.cur.fetchall()
             dirnames = [u[0] + '/' for u in res]
-            dirs = [await self.
+            dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
             return dirs
         else:
             # list specific users
             dirnames = [uname + '/' for uname in usernames]
-            dirs = [await self.
+            dirs = [await self.get_dir_record(u) for u in dirnames] if not skim else [DirectoryRecord(u) for u in dirnames]
             return dirs
 
     async def count_path_dirs(self, url: str):
@@ -278,11 +278,11 @@ class FileConn(DBObjectBase):
             if skim:
                 return DirectoryRecord(dir_url)
             else:
-                return await self.
+                return await self.get_dir_record(dir_url)
         dirs = [await get_dir(url + d) for d in dirs_str]
         return dirs
 
-    async def
+    async def count_dir_files(self, url: str, flat: bool = False):
         if not url.endswith('/'): url += '/'
         if url == '/': url = ''
         if flat:
@@ -293,7 +293,7 @@ class FileConn(DBObjectBase):
         assert res is not None, "Error: count_path_files"
         return res[0]
 
-    async def
+    async def list_dir_files(
         self, url: str,
         offset: int = 0, limit: int = 10_000,
         order_by: FileSortKey = '', order_desc: bool = False,
@@ -328,15 +328,15 @@ class FileConn(DBObjectBase):
         """
         MAX_ITEMS = 10_000
         dir_count = await self.count_path_dirs(url)
-        file_count = await self.
+        file_count = await self.count_dir_files(url, flat=False)
         if dir_count + file_count > MAX_ITEMS:
             raise TooManyItemsError("Too many items, please paginate")
         return PathContents(
             dirs = await self.list_path_dirs(url, skim=True, limit=MAX_ITEMS),
-            files = await self.
+            files = await self.list_dir_files(url, flat=False, limit=MAX_ITEMS)
         )
 
-    async def
+    async def get_dir_record(self, url: str) -> DirectoryRecord:
         """
         Get the full record of a directory, including size, create_time, update_time, access_time etc.
         """
@@ -411,7 +411,6 @@ class FileConn(DBObjectBase):
         await self._user_size_inc(owner_id, file_size)
         self.logger.info(f"File {url} created")
 
-    # not tested
     async def copy_file(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         old = await self.get_file_record(old_url)
         if old is None:
@@ -428,7 +427,7 @@ class FileConn(DBObjectBase):
         await self._user_size_inc(user_id, old.file_size)
         self.logger.info(f"Copied file {old_url} to {new_url}")
 
-    async def
+    async def copy_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         assert old_url.endswith('/'), "Old path must end with /"
         assert new_url.endswith('/'), "New path must end with /"
         if user_id is None:
@@ -461,7 +460,7 @@ class FileConn(DBObjectBase):
         await self.cur.execute("UPDATE fmeta SET url = ?, create_time = CURRENT_TIMESTAMP WHERE url = ?", (new_url, old_url))
         self.logger.info(f"Moved file {old_url} to {new_url}")
 
-    async def
+    async def move_dir(self, old_url: str, new_url: str, user_id: Optional[int] = None):
         assert old_url.endswith('/'), "Old path must end with /"
         assert new_url.endswith('/'), "New path must end with /"
         if user_id is None:
@@ -500,7 +499,7 @@ class FileConn(DBObjectBase):
         self.logger.info(f"Deleted {len(ret)} file records for user {owner_id}") # type: ignore
         return ret
 
-    async def
+    async def delete_records_by_prefix(self, path: str, under_owner_id: Optional[int] = None) -> list[FileRecord]:
        """Delete all records with url starting with path"""
         # update user size
         cursor = await self.cur.execute("SELECT DISTINCT owner_id FROM fmeta WHERE url LIKE ?", (path + '%', ))
@@ -689,7 +688,7 @@ async def delayed_log_access(url: str):
         ])
     ),
 )
-def validate_url(url: str,
+def validate_url(url: str, utype: Literal['file', 'dir'] = 'file'):
     """ Check if a path is valid. The input path is considered url safe """
     if len(url) > 1024:
         raise InvalidPathError(f"URL too long: {url}")
@@ -703,7 +702,7 @@ def validate_url(url: str, is_file = True):
             is_valid = False
             break
 
-    if
+    if utype == 'file': is_valid = is_valid and not url.endswith('/')
     else: is_valid = is_valid and url.endswith('/')
 
     if not is_valid:
@@ -885,9 +884,9 @@ class Database:
             raise PermissionDeniedError(f"Permission denied: {op_user.username} cannot copy file to {new_url}")
         await fconn.copy_file(old_url, new_url, user_id=op_user.id if op_user is not None else None)
 
-    async def
-        validate_url(old_url,
-        validate_url(new_url,
+    async def move_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+        validate_url(old_url, 'dir')
+        validate_url(new_url, 'dir')
 
         if new_url.startswith('/'):
             new_url = new_url[1:]
@@ -906,12 +905,11 @@ class Database:
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            await fconn.
+            await fconn.move_dir(old_url, new_url, op_user.id)
 
-
-
-        validate_url(
-        validate_url(new_url, is_file=False)
+    async def copy_dir(self, old_url: str, new_url: str, op_user: UserRecord):
+        validate_url(old_url, 'dir')
+        validate_url(new_url, 'dir')
 
         if new_url.startswith('/'):
             new_url = new_url[1:]
@@ -930,7 +928,7 @@ class Database:
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            await fconn.
+            await fconn.copy_dir(old_url, new_url, op_user.id)
 
     async def __batch_delete_file_blobs(self, fconn: FileConn, file_records: list[FileRecord], batch_size: int = 512):
         # https://github.com/langchain-ai/langchain/issues/10321
@@ -951,13 +949,13 @@ class Database:
             await del_internal()
             await del_external()
 
-    async def
-        validate_url(url,
+    async def delete_dir(self, url: str, op_user: Optional[UserRecord] = None) -> Optional[list[FileRecord]]:
+        validate_url(url, 'dir')
         from_owner_id = op_user.id if op_user is not None and not (op_user.is_admin or await check_path_permission(url, op_user) >= AccessLevel.WRITE) else None
 
         async with transaction() as cur:
             fconn = FileConn(cur)
-            records = await fconn.
+            records = await fconn.delete_records_by_prefix(url, from_owner_id)
             if not records:
                 return None
             await self.__batch_delete_file_blobs(fconn, records)
@@ -981,14 +979,15 @@ class Database:
 
         # make sure the user's directory is deleted,
         # may contain admin's files, but delete them all
-        await fconn.
+        await fconn.delete_records_by_prefix(user.username + '/')
 
-    async def
+    async def iter_dir(self, top_url: str, urls: Optional[list[str]]) -> AsyncIterable[tuple[FileRecord, bytes | AsyncIterable[bytes]]]:
+        validate_url(top_url, 'dir')
         async with unique_cursor() as cur:
             fconn = FileConn(cur)
             if urls is None:
-                fcount = await fconn.
-                urls = [r.url for r in (await fconn.
+                fcount = await fconn.count_dir_files(top_url, flat=True)
+                urls = [r.url for r in (await fconn.list_dir_files(top_url, flat=True, limit=fcount))]
 
         for url in urls:
             if not url.startswith(top_url):
@@ -1003,7 +1002,7 @@ class Database:
                 blob = await fconn.get_file_blob(f_id)
             yield r, blob
 
-    async def
+    async def zip_dir_stream(self, top_url: str, op_user: Optional[UserRecord] = None) -> AsyncIterable[bytes]:
         from stat import S_IFREG
         from stream_zip import async_stream_zip, ZIP_64
         if top_url.startswith('/'):
@@ -1015,7 +1014,7 @@ class Database:
 
         # https://stream-zip.docs.trade.gov.uk/async-interface/
         async def data_iter():
-            async for (r, blob) in self.
+            async for (r, blob) in self.iter_dir(top_url, None):
                 rel_path = r.url[len(top_url):]
                 rel_path = decode_uri_compnents(rel_path)
                 b_iter: AsyncIterable[bytes]
@@ -1035,7 +1034,7 @@ class Database:
         return async_stream_zip(data_iter())
 
     @concurrent_wrap()
-    async def
+    async def zip_dir(self, top_url: str, op_user: Optional[UserRecord]) -> io.BytesIO:
         if top_url.startswith('/'):
             top_url = top_url[1:]
 
@@ -1045,7 +1044,7 @@ class Database:
 
         buffer = io.BytesIO()
         with zipfile.ZipFile(buffer, 'w') as zf:
-            async for (r, blob) in self.
+            async for (r, blob) in self.iter_dir(top_url, None):
                 rel_path = r.url[len(top_url):]
                 rel_path = decode_uri_compnents(rel_path)
                 if r.external:
@@ -1062,7 +1061,7 @@ async def _get_path_owner(cur: aiosqlite.Cursor, path: str) -> UserRecord:
     uconn = UserConn(cur)
     path_user = await uconn.get_user(path_username)
     if path_user is None:
-        raise
+        raise InvalidPathError(f"Invalid path: {path_username} is not a valid username")
     return path_user
 
 async def check_file_read_permission(user: UserRecord, file: FileRecord, cursor: Optional[aiosqlite.Cursor] = None) -> tuple[bool, str]:
@@ -1111,12 +1110,6 @@ async def check_path_permission(path: str, user: UserRecord, cursor: Optional[ai
     If the path is a file, the user will have all access if the user is the owner.
     Otherwise, the user will have alias level access w.r.t. the path user.
     """
-    if user.id == 0:
-        return AccessLevel.GUEST
-
-    if user.is_admin:
-        return AccessLevel.ALL
-
     @asynccontextmanager
     async def this_cur():
         if cursor is None:
@@ -1125,10 +1118,16 @@ async def check_path_permission(path: str, user: UserRecord, cursor: Optional[ai
         else:
             yield cursor
 
-    # check if path user exists
+    # check if path user exists, may raise exception
     async with this_cur() as cur:
         path_owner = await _get_path_owner(cur, path)
 
+    if user.id == 0:
+        return AccessLevel.GUEST
+
+    if user.is_admin:
+        return AccessLevel.ALL
+
     # check if user is admin or the owner of the path
     if user.id == path_owner.id:
         return AccessLevel.ALL
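The `validate_url` signature change is the core of the "use dir for directory path" refactor: the old `is_file` flag becomes `utype: Literal['file', 'dir']`, and the directory-level operations (`count_dir_files`, `list_dir_files`, `get_dir_record`, `copy_dir`, `move_dir`, `delete_dir`, `iter_dir`, `zip_dir`) validate with `'dir'` explicitly. A small sketch of the new calling convention; the paths are hypothetical, and the invalid branch presumably raises `InvalidPathError` as elsewhere in this module:

```python
from lfss.eng.database import validate_url

validate_url("alice/docs/report.txt")        # default utype='file': must not end with '/'
validate_url("alice/docs/", utype="dir")     # directory paths must end with '/'

# a trailing slash on a 'file' check (or a missing one on a 'dir' check) is rejected,
# presumably via InvalidPathError as used elsewhere in database.py
```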
lfss/eng/log.py
CHANGED
@@ -1,4 +1,5 @@
-from .config import
+from .config import LOG_DIR
+import time, sqlite3, dataclasses
 from typing import TypeVar, Callable, Literal, Optional
 from concurrent.futures import ThreadPoolExecutor
 from functools import wraps
@@ -57,15 +58,81 @@ class BaseLogger(logging.Logger):
     @thread_wrap
     def error(self, *args, **kwargs): super().error(*args, **kwargs)
 
-
+class SQLiteFileHandler(logging.FileHandler):
+    def __init__(self, filename, *args, **kwargs):
+        super().__init__(filename, *args, **kwargs)
+        self._db_file = filename
+        self._buffer: list[logging.LogRecord] = []
+        self._buffer_size = 100
+        self._flush_interval = 10
+        self._last_flush = time.time()
+        conn = sqlite3.connect(self._db_file, check_same_thread=False)
+        conn.execute('PRAGMA journal_mode=WAL')
+        conn.execute('''
+            CREATE TABLE IF NOT EXISTS log (
+                id INTEGER PRIMARY KEY AUTOINCREMENT,
+                created TIMESTAMP,
+                created_epoch FLOAT,
+                name TEXT,
+                levelname VARCHAR(16),
+                level INTEGER,
+                message TEXT
+            )
+        ''')
+        conn.commit()
+        conn.close()
+
+    def flush(self):
+        def format_time(self, record: logging.LogRecord):
+            """ Create a time stamp """
+            return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(record.created))
+        self.acquire()
+        try:
+            conn = sqlite3.connect(self._db_file, check_same_thread=False)
+            conn.executemany('''
+                INSERT INTO log (created, created_epoch, name, levelname, level, message)
+                VALUES (?, ?, ?, ?, ?, ?)
+            ''', [
+                (format_time(self, record), record.created, record.name, record.levelname, record.levelno, record.getMessage())
+                for record in self._buffer
+            ])
+            conn.commit()
+            conn.close()
+            self._buffer.clear()
+            self._last_flush = time.time()
+        finally:
+            self.release()
+
+    def emit(self, record: logging.LogRecord):
+        self._buffer.append(record)
+        if len(self._buffer) > self._buffer_size or time.time() - self._last_flush > self._flush_interval:
+            self.flush()
+
+    def close(self):
+        self.flush()
+        return super().close()
+
+def eval_logline(row: sqlite3.Row):
+    @dataclasses.dataclass
+    class DBLogRecord:
+        id: int
+        created: str
+        created_epoch: float
+        name: str
+        levelname: str
+        level: int
+        message: str
+    return DBLogRecord(*row)
+
+_fh_T = Literal['rotate', 'simple', 'daily', 'sqlite']
 
 __g_logger_dict: dict[str, BaseLogger] = {}
 def get_logger(
     name = 'default',
-    log_home =
+    log_home = LOG_DIR,
     level = 'DEBUG',
     term_level = 'INFO',
-    file_handler_type: _fh_T = '
+    file_handler_type: _fh_T = 'sqlite',
     global_instance = True
 )->BaseLogger:
     if global_instance and name in __g_logger_dict:
@@ -100,6 +167,8 @@ def get_logger(
         file_handler = handlers.RotatingFileHandler(
             log_file, maxBytes=1024*1024, backupCount=5
         )
+    elif file_handler_type == 'sqlite':
+        file_handler = SQLiteFileHandler(log_file if log_file.suffix == '.db' else log_file.with_suffix('.log.db'))
 
     file_handler.setFormatter(formatter_plain)
     logger.addHandler(file_handler)
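With `file_handler_type` defaulting to `'sqlite'` and `log_home` to the new `LOG_DIR`, each logger now writes into a `.log.db` SQLite database with the `log` table created above. A short sketch of writing and then reading such a database; the exact file location depends on `LOG_DIR`, so the path below is an assumption, and note that the handler buffers records (flushing roughly every 100 records or 10 seconds), so fresh entries may not be visible immediately:

```python
import sqlite3
from lfss.eng.log import get_logger, eval_logline

logger = get_logger("demo")                   # hypothetical logger name
logger.info("hello from the sqlite handler")  # buffered, flushed in batches

# rows can be read back with plain sqlite3; eval_logline maps a row onto a small record
conn = sqlite3.connect("data/logs/demo.log.db")  # assumption: path under LOG_DIR
for row in conn.execute("SELECT * FROM log ORDER BY created DESC LIMIT 5"):
    rec = eval_logline(row)
    print(rec.created, rec.levelname, rec.message)
conn.close()
```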
lfss/eng/utils.py
CHANGED
@@ -11,7 +11,6 @@ from concurrent.futures import ThreadPoolExecutor
 from typing import TypeVar, Callable, Awaitable
 from functools import wraps, partial
 from uuid import uuid4
-import os
 
 async def copy_file(source: str|pathlib.Path, destination: str|pathlib.Path):
     async with aiofiles.open(source, mode='rb') as src:
@@ -160,7 +159,7 @@ _g_executor = None
 def get_global_executor():
     global _g_executor
     if _g_executor is None:
-        _g_executor = ThreadPoolExecutor(
+        _g_executor = ThreadPoolExecutor()
     return _g_executor
 def async_wrap(executor=None):
     if executor is None:
lfss/svc/app_dav.py
CHANGED
@@ -57,9 +57,9 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
     if len(dir_path_sp) > 2:
         async with unique_cursor() as c:
             fconn = FileConn(c)
-            if await fconn.
+            if await fconn.count_dir_files(path, flat=True) == 0:
                 return None, lfss_path, None
-            return "dir", lfss_path, await fconn.
+            return "dir", lfss_path, await fconn.get_dir_record(path)
     else:
         # test if its a user's root directory
         assert len(dir_path_sp) == 2
@@ -85,8 +85,8 @@ async def eval_path(path: str) -> tuple[ptype, str, Optional[FileRecord | Direct
         async with unique_cursor() as c:
             lfss_path = path + "/"
             fconn = FileConn(c)
-            if await fconn.
-                return "dir", lfss_path, await fconn.
+            if await fconn.count_dir_files(lfss_path) > 0:
+                return "dir", lfss_path, await fconn.get_dir_record(lfss_path)
 
     return None, path, None
 
@@ -235,7 +235,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
         # query root directory content
         async def user_path_record(user_name: str, cur) -> DirectoryRecord:
             try:
-                return await FileConn(cur).
+                return await FileConn(cur).get_dir_record(user_name + "/")
             except PathNotFoundError:
                 return DirectoryRecord(user_name + "/", size=0, n_files=0, create_time="1970-01-01 00:00:00", update_time="1970-01-01 00:00:00", access_time="1970-01-01 00:00:00")
 
@@ -253,7 +253,7 @@ async def dav_propfind(request: Request, path: str, user: UserRecord = Depends(r
     elif path_type == "dir":
         # query directory content
         async with unique_cursor() as c:
-            flist = await FileConn(c).
+            flist = await FileConn(c).list_dir_files(lfss_path, flat = True if depth == "infinity" else False)
             for frecord in flist:
                 if frecord.url.endswith(f"/{MKDIR_PLACEHOLDER}"): continue
                 file_el = await create_file_xml_element(frecord)
@@ -315,7 +315,7 @@ async def dav_move(request: Request, path: str, user: UserRecord = Depends(regis
         assert ptype == "dir", "Directory path should end with /"
         assert lfss_path.endswith("/"), "Directory path should end with /"
         if not dlfss_path.endswith("/"): dlfss_path += "/" # the header destination may not end with /
-        await db.
+        await db.move_dir(lfss_path, dlfss_path, user)
         return Response(status_code=201)
 
 @router_dav.api_route("/{path:path}", methods=["COPY"])
lfss/svc/app_native.py
CHANGED
@@ -90,13 +90,13 @@ async def bundle_files(path: str, user: UserRecord = Depends(registered_user)):
         raise HTTPException(status_code=400, detail="Cannot bundle root")
 
     async with unique_cursor() as cur:
-        dir_record = await FileConn(cur).
+        dir_record = await FileConn(cur).get_dir_record(path)
 
     pathname = f"{path.split('/')[-2]}"
 
     if dir_record.size < MAX_MEM_FILE_BYTES:
         logger.debug(f"Bundle {path} in memory")
-        dir_bytes = (await db.
+        dir_bytes = (await db.zip_dir(path, op_user=user)).getvalue()
         return Response(
             content = dir_bytes,
             media_type = "application/zip",
@@ -109,7 +109,7 @@ async def bundle_files(path: str, user: UserRecord = Depends(registered_user)):
     else:
         logger.debug(f"Bundle {path} in stream")
         return StreamingResponse(
-            content = await db.
+            content = await db.zip_dir_stream(path, op_user=user),
             media_type = "application/zip",
             headers = {
                 f"Content-Disposition": f"attachment; filename=bundle-{pathname}.zip",
@@ -134,7 +134,7 @@ async def get_file_meta(path: str, user: UserRecord = Depends(registered_user)):
         else:
             if await check_path_permission(path, user, cursor=cur) < AccessLevel.READ:
                 raise HTTPException(status_code=403, detail="Permission denied")
-            record = await fconn.
+            record = await fconn.get_dir_record(path)
     return record
 
 @router_api.post("/meta")
@@ -171,7 +171,7 @@ async def update_file_meta(
         new_path = ensure_uri_compnents(new_path)
         logger.info(f"Update path of {path} to {new_path}")
         # will raise duplicate path error if same name path exists in the new path
-        await db.
+        await db.move_dir(path, new_path, user)
 
     return Response(status_code=200, content="OK")
 
@@ -194,7 +194,7 @@ async def count_files(path: str, flat: bool = False, user: UserRecord = Depends(
     path = ensure_uri_compnents(path)
     async with unique_cursor() as conn:
         fconn = FileConn(conn)
-        return { "count": await fconn.
+        return { "count": await fconn.count_dir_files(url = path, flat = flat) }
 @router_api.get("/list-files")
 async def list_files(
     path: str, offset: int = 0, limit: int = 1000,
@@ -205,7 +205,7 @@ async def list_files(
     path = ensure_uri_compnents(path)
     async with unique_cursor() as conn:
         fconn = FileConn(conn)
-        return await fconn.
+        return await fconn.list_dir_files(
             url = path, offset = offset, limit = limit,
             order_by=order_by, order_desc=order_desc,
             flat=flat
lfss/svc/common_impl.py
CHANGED
@@ -180,7 +180,7 @@ async def _get_dir_impl(
         else:
             raise HTTPException(status_code=404, detail="User not found")
     else:
-        if await FileConn(cur).
+        if await FileConn(cur).count_dir_files(path, flat=True) > 0:
             return Response(status_code=200)
         else:
             raise HTTPException(status_code=404, detail="Path not found")
@@ -295,7 +295,7 @@ async def delete_impl(path: str, user: UserRecord):
     logger.info(f"DELETE {path}, user: {user.username}")
 
     if path.endswith("/"):
-        res = await db.
+        res = await db.delete_dir(path, user)
     else:
         res = await db.delete_file(path, user)
 
@@ -327,8 +327,8 @@ async def copy_impl(
     else:
         async with unique_cursor() as cur:
             fconn = FileConn(cur)
-            dst_fcount = await fconn.
+            dst_fcount = await fconn.count_dir_files(dst_path, flat=True)
             if dst_fcount > 0:
                 raise HTTPException(status_code=409, detail="Destination exists")
-            await db.
+            await db.copy_dir(src_path, dst_path, op_user)
     return Response(status_code=201, content="OK")
{lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lfss
-Version: 0.11.
+Version: 0.11.2
 Summary: Lightweight file storage service
 Home-page: https://github.com/MenxLi/lfss
 Author: Li, Mengxun
@@ -17,6 +17,7 @@ Requires-Dist: mimesniff (==1.*)
 Requires-Dist: pillow
 Requires-Dist: python-multipart
 Requires-Dist: requests (==2.*)
+Requires-Dist: rich
 Requires-Dist: stream-zip (==0.*)
 Requires-Dist: uvicorn (==0.*)
 Project-URL: Repository, https://github.com/MenxLi/lfss
{lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/RECORD
CHANGED
@@ -3,49 +3,50 @@ docs/Enviroment_variables.md,sha256=xaL8qBwT8B2Qe11FaOU3xWrRCh1mJ1VyTFCeFbkd0rs,
 docs/Known_issues.md,sha256=ZqETcWP8lzTOel9b2mxEgCnADFF8IxOrEtiVO1NoMAk,251
 docs/Permission.md,sha256=thUJx7YRoU63Pb-eqo5l5450DrZN3QYZ36GCn8r66no,3152
 docs/Webdav.md,sha256=-Ja-BTWSY1BEMAyZycvEMNnkNTPZ49gSPzmf3Lbib70,1547
-docs/changelog.md,sha256=
+docs/changelog.md,sha256=fE0rE2IcovbxMhdTeqhnCnknT1vtVr7A860zIh7AEnE,1581
 frontend/api.js,sha256=GlQsNoZFEcy7QUUsLbXv7aP-KxRnIxM37FQHTaakGiQ,19387
 frontend/index.html,sha256=-k0bJ5FRqdl_H-O441D_H9E-iejgRCaL_z5UeYaS2qc,3384
 frontend/info.css,sha256=Ny0N3GywQ3a9q1_Qph_QFEKB4fEnTe_2DJ1Y5OsLLmQ,595
 frontend/info.js,sha256=xGUJPCSrtDhuSu0ELLQZ77PmVWldg-prU1mwQGbdEoA,5797
 frontend/login.css,sha256=VMM0QfbDFYerxKWKSGhMI1yg5IRBXg0TTdLJEEhQZNk,355
-frontend/login.js,sha256=
+frontend/login.js,sha256=xJkulk8dlvV4BhevADLeUrnZwShiFTWv3Wg2iJFUZlY,2423
 frontend/popup.css,sha256=TJZYFW1ZcdD1IVTlNPYNtMWKPbN6XDbQ4hKBOFK8uLg,1284
-frontend/popup.js,sha256=
-frontend/scripts.js,sha256=
+frontend/popup.js,sha256=cyUjtO0wbtqbEodHfwyUsak9iWbcDXeWMGDhpCPbcoE,5453
+frontend/scripts.js,sha256=T3kMjTxrjOkp93OV4ZMGgCLRRaQgRmNzzxriOMGVeZM,24412
 frontend/state.js,sha256=vbNL5DProRKmSEY7xu9mZH6IY0PBenF8WGxPtGgDnLI,1680
 frontend/styles.css,sha256=xcNLqI3KBsY5TLnku8UIP0Jfr7QLajr1_KNlZj9eheM,4935
 frontend/thumb.css,sha256=rNsx766amYS2DajSQNabhpQ92gdTpNoQKmV69OKvtpI,295
 frontend/thumb.js,sha256=46ViD2TlTTWy0fx6wjoAs_5CQ4ajYB90vVzM7UO2IHw,6182
-frontend/utils.js,sha256=
-lfss/api/__init__.py,sha256=
-lfss/api/connector.py,sha256=
+frontend/utils.js,sha256=XP5hM_mROYaxK5dqn9qZVwv7GdQuiDzByilFskbrnxA,6068
+lfss/api/__init__.py,sha256=zT1JCiUM76wX-GtRrmKhTUzSYYfcmoyI1vYwN0fCcLw,6818
+lfss/api/connector.py,sha256=xl_WrvupplepZSYJs4pN9zN7GDnuZR2A8-pc08ILutI,13231
 lfss/cli/__init__.py,sha256=lPwPmqpa7EXQ4zlU7E7LOe6X2kw_xATGdwoHphUEirA,827
 lfss/cli/balance.py,sha256=fUbKKAUyaDn74f7mmxMfBL4Q4voyBLHu6Lg_g8GfMOQ,4121
-lfss/cli/cli.py,sha256=
+lfss/cli/cli.py,sha256=tPeUgj0BR_M649AGcBYwfsrGioes0qzGc0lghFkrjoo,8086
+lfss/cli/log.py,sha256=TBlt8mhHMouv8ZBUMHYfGZiV6-0yPdajJQ5mkGHEojI,3016
 lfss/cli/panel.py,sha256=Xq3I_n-ctveym-Gh9LaUpzHiLlvt3a_nuDiwUS-MGrg,1597
 lfss/cli/serve.py,sha256=vTo6_BiD7Dn3VLvHsC5RKRBC3lMu45JVr_0SqpgHdj0,1086
 lfss/cli/user.py,sha256=1mTroQbaKxHjFCPHT67xwd08v-zxH0RZ_OnVc-4MzL0,5364
-lfss/cli/vacuum.py,sha256=
+lfss/cli/vacuum.py,sha256=arEY89kYJKEpzuzjKtf21V7s0QzM1t3QWa1hNghhT0Q,6611
 lfss/eng/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 lfss/eng/bounded_pool.py,sha256=BI1dU-MBf82TMwJBYbjhEty7w1jIUKc5Bn9SnZ_-hoY,1288
-lfss/eng/config.py,sha256=
+lfss/eng/config.py,sha256=vP-0h_9TkAfu5626KjowHjCgX-CnVGZajw3sxBs5jtU,902
 lfss/eng/connection_pool.py,sha256=1aq7nSgd7hB9YNV4PjD1RDRyl_moDw3ubBtSLyfgGBs,6320
-lfss/eng/database.py,sha256=
+lfss/eng/database.py,sha256=huYSOvTO5jES9wVl6Zity2XzNXyJBSQQwuCQHrEVf-Q,53255
 lfss/eng/datatype.py,sha256=27UB7-l9SICy5lAvKjdzpTL_GohZjzstQcr9PtAq7nM,2709
 lfss/eng/error.py,sha256=JGf5NV-f4rL6tNIDSAx5-l9MG8dEj7F2w_MuOjj1d1o,732
-lfss/eng/log.py,sha256=
+lfss/eng/log.py,sha256=jJKOnC64Lb5EoVJK_oi7vl4iRrH_gtCKM_zjHiIUA-4,7590
 lfss/eng/thumb.py,sha256=AFyWEkkpuCKGWOB9bLlaDwPKzQ9JtCSSmHMhX2Gu3CI,3096
-lfss/eng/utils.py,sha256=
+lfss/eng/utils.py,sha256=jQUJWWmzOPmXdTCId2Y307m1cZfB4hpzHcTjO0mkOrU,6683
 lfss/sql/init.sql,sha256=FBmVzkNjYUnWjEELRFzf7xb50GngmzmeDVffT1Uk8u8,1625
 lfss/sql/pragma.sql,sha256=uENx7xXjARmro-A3XAK8OM8v5AxDMdCCRj47f86UuXg,206
 lfss/svc/app.py,sha256=r1KUO3sPaaJWbkJF0bcVTD7arPKLs2jFlq52Ixicomo,220
 lfss/svc/app_base.py,sha256=bTQbz945xalyB3UZLlqVBvL6JKGNQ8Fm2KpIvvucPZQ,6850
-lfss/svc/app_dav.py,sha256=
-lfss/svc/app_native.py,sha256=
-lfss/svc/common_impl.py,sha256=
+lfss/svc/app_dav.py,sha256=H3aL3MEdYaPK1w3FQvTzrGYGaaow4m8LZ7R35MN351A,18238
+lfss/svc/app_native.py,sha256=_dhcq_R1VoafRCLuuWxXuttuhBAVaFVdlIQ6ep6ZQvs,8883
+lfss/svc/common_impl.py,sha256=7QflWnxRqghLOSMpDz2UCRqEn49X1GLS3agCb5msia8,13729
 lfss/svc/request_log.py,sha256=v8yXEIzPjaksu76Oh5vgdbUEUrw8Kt4etLAXBWSGie8,3207
-lfss-0.11.
-lfss-0.11.
-lfss-0.11.
-lfss-0.11.
+lfss-0.11.2.dist-info/METADATA,sha256=__YXS_WBv6oNQlzcamUPEWayjek6bVsF4zRoGR0iJb8,2732
+lfss-0.11.2.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+lfss-0.11.2.dist-info/entry_points.txt,sha256=R4uOP1y6eD0Qp3j1ySA8kRPVMdt6_W_9o-Zj9Ra4D0A,232
+lfss-0.11.2.dist-info/RECORD,,
{lfss-0.11.0.dist-info → lfss-0.11.2.dist-info}/WHEEL
File without changes