lfss 0.8.4__tar.gz → 0.9.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lfss-0.8.4 → lfss-0.9.1}/PKG-INFO +9 -4
- {lfss-0.8.4 → lfss-0.9.1}/Readme.md +6 -1
- lfss-0.9.1/docs/Permission.md +58 -0
- lfss-0.9.1/docs/Webdav.md +22 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/api.js +21 -10
- {lfss-0.8.4 → lfss-0.9.1}/lfss/api/__init__.py +3 -3
- {lfss-0.8.4 → lfss-0.9.1}/lfss/api/connector.py +13 -7
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/balance.py +3 -3
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/cli.py +5 -10
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/panel.py +8 -0
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/serve.py +4 -2
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/user.py +31 -4
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/vacuum.py +5 -5
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/config.py +1 -0
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/connection_pool.py +22 -3
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/database.py +280 -67
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/datatype.py +7 -0
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/error.py +7 -0
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/thumb.py +10 -9
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/utils.py +18 -7
- {lfss-0.8.4 → lfss-0.9.1}/lfss/sql/init.sql +9 -0
- lfss-0.9.1/lfss/svc/app.py +9 -0
- lfss-0.9.1/lfss/svc/app_base.py +152 -0
- lfss-0.9.1/lfss/svc/app_dav.py +374 -0
- lfss-0.9.1/lfss/svc/app_native.py +247 -0
- lfss-0.9.1/lfss/svc/common_impl.py +270 -0
- lfss-0.8.4/lfss/src/stat.py → lfss-0.9.1/lfss/svc/request_log.py +2 -2
- {lfss-0.8.4 → lfss-0.9.1}/pyproject.toml +2 -2
- lfss-0.8.4/docs/Permission.md +0 -46
- lfss-0.8.4/lfss/src/server.py +0 -621
- {lfss-0.8.4 → lfss-0.9.1}/docs/Known_issues.md +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/index.html +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/info.css +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/info.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/login.css +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/login.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/popup.css +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/popup.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/scripts.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/state.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/styles.css +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/thumb.css +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/thumb.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/frontend/utils.js +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/lfss/cli/__init__.py +0 -0
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/__init__.py +0 -0
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/bounded_pool.py +0 -0
- {lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/log.py +0 -0
- {lfss-0.8.4 → lfss-0.9.1}/lfss/sql/pragma.sql +0 -0
{lfss-0.8.4 → lfss-0.9.1}/PKG-INFO

@@ -1,10 +1,10 @@
 Metadata-Version: 2.1
 Name: lfss
-Version: 0.8.4
+Version: 0.9.1
 Summary: Lightweight file storage service
 Home-page: https://github.com/MenxLi/lfss
-Author:
-Author-email: limengxun45@
+Author: li_mengxun
+Author-email: limengxun45@outlookc.com
 Requires-Python: >=3.10
 Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
@@ -32,6 +32,7 @@ My experiment on a lightweight and high-performance file/object storage service.
 - Pagination and sorted file listing for vast number of files.
 - High performance: high concurrency, near-native speed on stress tests.
 - Support range requests, so you can stream large files / resume download.
+- WebDAV compatible ([NOTE](./docs/Webdav.md)).
 
 It stores small files and metadata in sqlite, large files in the filesystem.
 Tested on 2 million files, and it is still fast.

@@ -53,7 +54,11 @@ lfss-panel --open
 Or, you can start a web server at `/frontend` and open `index.html` in your browser.
 
 The API usage is simple, just `GET`, `PUT`, `DELETE` to the `/<username>/file/url` path.
-
+The authentication can be acheived through one of the following methods:
+1. `Authorization` header with the value `Bearer sha256(<username><password>)`.
+2. `token` query parameter with the value `sha256(<username><password>)`.
+3. HTTP Basic Authentication with the username and password.
+
 You can refer to `frontend` as an application example, `lfss/api/connector.py` for more APIs.
 
 By default, the service exposes all files to the public for `GET` requests,
{lfss-0.8.4 → lfss-0.9.1}/Readme.md

@@ -9,6 +9,7 @@ My experiment on a lightweight and high-performance file/object storage service.
 - Pagination and sorted file listing for vast number of files.
 - High performance: high concurrency, near-native speed on stress tests.
 - Support range requests, so you can stream large files / resume download.
+- WebDAV compatible ([NOTE](./docs/Webdav.md)).
 
 It stores small files and metadata in sqlite, large files in the filesystem.
 Tested on 2 million files, and it is still fast.

@@ -30,7 +31,11 @@ lfss-panel --open
 Or, you can start a web server at `/frontend` and open `index.html` in your browser.
 
 The API usage is simple, just `GET`, `PUT`, `DELETE` to the `/<username>/file/url` path.
-
+The authentication can be acheived through one of the following methods:
+1. `Authorization` header with the value `Bearer sha256(<username><password>)`.
+2. `token` query parameter with the value `sha256(<username><password>)`.
+3. HTTP Basic Authentication with the username and password.
+
 You can refer to `frontend` as an application example, `lfss/api/connector.py` for more APIs.
 
 By default, the service exposes all files to the public for `GET` requests,
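As a reading aid for the authentication section added above: the token is described as `sha256(<username><password>)`. The sketch below is not from the package; it assumes the token is the hex digest of the concatenated username and password, and the username, password, and file path are placeholders.

```python
import hashlib
import requests  # third-party HTTP client, used here only for illustration

def make_token(username: str, password: str) -> str:
    # Assumption: sha256(<username><password>) means the hex digest of the
    # plain concatenation of the two strings.
    return hashlib.sha256(f"{username}{password}".encode()).hexdigest()

token = make_token("alice", "secret")
url = "http://localhost:8000/alice/notes/todo.txt"  # /<username>/file/url

# Method 1: Authorization header
r1 = requests.get(url, headers={"Authorization": f"Bearer {token}"})

# Method 2: token query parameter
r2 = requests.get(url, params={"token": token})

# Method 3: HTTP Basic Authentication
r3 = requests.get(url, auth=("alice", "secret"))

print(r1.status_code, r2.status_code, r3.status_code)
```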
lfss-0.9.1/docs/Permission.md

@@ -0,0 +1,58 @@
+
+# Permission System
+There are two user roles in the system: Admin and Normal User ("users" are like "buckets" to some extent).
+A user have all permissions of the files and subpaths under its path (starting with `/<user>/`).
+Admins have all permissions of all files and paths.
+
+> **path** ends with `/` and **file** does not end with `/`.
+
+## Peers
+The user can have multiple peer users. The peer user can have read or write access to the user's path, depending on the access level set when adding the peer user.
+The peer user can list the files under the user's path.
+If the peer user only has read access (peer-r), then the peer user can only `GET` files under the user's path.
+If the peer user has write access (peer-w), then the peer user can `GET`/`PUT`/`POST`/`DELETE` files under the user's path.
+
+## Ownership
+A file is owned by the user who created it, may not necessarily be the user under whose path the file is stored (admin/write-peer can create files under any user's path).
+
+# Non-peer and public access
+
+**NOTE:** below discussion is based on the assumption that the user is not a peer of the path owner, or is guest user (public access).
+
+## File access with `GET` permission
+
+### File access
+For accessing file content, the user must have `GET` permission of the file, which is determined by the `permission` field of both the owner and the file.
+
+There are four types of permissions: `unset`, `public`, `protected`, `private`.
+Non-admin users can access files based on:
+
+- If the file is `public`, then all users can access it.
+- If the file is `protected`, then only the logged-in user can access it.
+- If the file is `private`, then only the owner can access it.
+- If the file is `unset`, then the file's permission is inherited from the owner's permission.
+- If both the owner and the file have `unset` permission, then the file is `public`.
+
+## File creation with `PUT`/`POST` permission
+`PUT`/`POST` permission is not allowed for non-peer users.
+
+## File `DELETE` and moving permissions
+- Non-login user don't have `DELETE`/move permission.
+- Every user can have `DELETE` permission that they own.
+- User can move files if they have write access to the destination path.
+
+## Path-listing
+Path-listing is not allowed for these users.
+
+# Summary
+
+| Permission | Admin | User | Peer-r | Peer-w | Owner (not the user) | Non-peer user / Guest |
+|------------|-------|------|--------|--------|----------------------|-----------------------|
+| GET        | Yes   | Yes  | Yes    | Yes    | Yes                  | Depends on file       |
+| PUT/POST   | Yes   | Yes  | No     | Yes    | Yes                  | No                    |
+| DELETE file| Yes   | Yes  | No     | Yes    | Yes                  | No                    |
+| DELETE path| Yes   | Yes  | No     | Yes    | N/A                  | No                    |
+| move       | Yes   | Yes  | No     | Yes    | Dep. on destination  | No                    |
+| list       | Yes   | Yes  | Yes    | Yes    | No if not peer       | No                    |
+
+> Capitilized methods are HTTP methods, N/A means not applicable.
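As a reading aid for the `GET` rules quoted in the new Permission.md, here is a small illustrative sketch of the fallback chain for a non-admin, non-peer requester. It is not code from the package; the `Permission` enum values and the `can_get` helper are hypothetical names.

```python
from enum import Enum

class Permission(Enum):
    UNSET = 0
    PUBLIC = 1
    PROTECTED = 2
    PRIVATE = 3

def can_get(file_perm: Permission, owner_perm: Permission,
            requester: str | None, owner: str) -> bool:
    # Per the rules above: an unset file permission falls back to the owner's
    # permission, and if both are unset the file is treated as public.
    effective = file_perm if file_perm != Permission.UNSET else owner_perm
    if effective == Permission.UNSET:
        effective = Permission.PUBLIC
    if effective == Permission.PUBLIC:
        return True
    if effective == Permission.PROTECTED:
        return requester is not None   # any logged-in user
    if effective == Permission.PRIVATE:
        return requester == owner      # owner only
    return False
```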
lfss-0.9.1/docs/Webdav.md

@@ -0,0 +1,22 @@
+# WebDAV
+
+It is convinient to make LFSS WebDAV compatible, because they both use HTTP `GET`, `PUT`, `DELETE` methods to interact with files.
+
+However, WebDAV utilize more HTTP methods,
+which are disabled by default in LFSS, because they may not be supported by many middlewares or clients.
+
+The WebDAV support can be enabled by setting the `LFSS_WEBDAV` environment variable to `1`.
+i.e.
+```sh
+LFSS_WEBDAV=1 lfss-serve
+```
+Please note:
+1. **WebDAV support is experimental, and is currently not well-tested.**
+2. LFSS not allow creating files in the root directory, however some client such as [Finder](https://sabre.io/dav/clients/finder/) will try to create files in the root directory. Thus, it is safer to mount the user directory only, e.g. `http://localhost:8000/<username>/`.
+3. LFSS not allow directory creation, instead it creates directoy implicitly when a file is uploaded to a non-exist directory.
+i.e. `PUT http://localhost:8000/<username>/dir/file.txt` will create the `dir` directory if it does not exist.
+However, the WebDAV `MKCOL` method requires the directory to be created explicitly, so WebDAV `MKCOL` method instead create a decoy file on the path (`.lfss-keep`), and hide the file from the file listing by `PROPFIND` method.
+This leads to:
+1) You may see a `.lfss-keep` file in the directory with native file listing (e.g. `/_api/list-files`), but it is hidden in WebDAV clients.
+2) The directory may be deleted if there is no file in it and the `.lfss-keep` file is not created by WebDAV client.
+
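A quick way to check whether the WebDAV methods described above are actually enabled is to issue a `PROPFIND` against a user directory. The sketch below is not part of the package; host, credentials, and the mounted path are placeholders.

```python
import requests

# PROPFIND is one of the extra WebDAV methods that LFSS only serves
# when started with LFSS_WEBDAV=1.
resp = requests.request(
    "PROPFIND",
    "http://localhost:8000/alice/",   # mount the user directory, not the root
    headers={"Depth": "1"},
    auth=("alice", "secret"),         # HTTP Basic, as listed in the readme
)
# 207 Multi-Status is the usual WebDAV listing response when the feature is on.
print(resp.status_code)
```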
{lfss-0.8.4 → lfss-0.9.1}/frontend/api.js

@@ -45,6 +45,17 @@ export const permMap = {
     3: 'private'
 }
 
+async function fmtFailedResponse(res){
+    const raw = await res.text();
+    const json = raw ? JSON.parse(raw) : {};
+    const txt = JSON.stringify(json.detail || json || "No message");
+    const maxWords = 32;
+    if (txt.length > maxWords){
+        return txt.slice(0, maxWords) + '...';
+    }
+    return txt;
+}
+
 export default class Connector {
 
     constructor(){
@@ -79,7 +90,7 @@ export default class Connector {
             body: fileBytes
         });
         if (res.status != 200 && res.status != 201){
-            throw new Error(`Failed to upload file, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to upload file, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return (await res.json()).url;
     }

@@ -111,7 +122,7 @@ export default class Connector {
         });
 
         if (res.status != 200 && res.status != 201){
-            throw new Error(`Failed to upload file, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to upload file, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return (await res.json()).url;
     }

@@ -133,7 +144,7 @@ export default class Connector {
             body: JSON.stringify(data)
         });
         if (res.status != 200 && res.status != 201){
-            throw new Error(`Failed to upload object, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to upload object, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return (await res.json()).url;
     }

@@ -147,7 +158,7 @@ export default class Connector {
             },
         });
         if (res.status == 200) return;
-        throw new Error(`Failed to delete file, status code: ${res.status}, message: ${await res
+        throw new Error(`Failed to delete file, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
     }
 
     /**

@@ -211,7 +222,7 @@ export default class Connector {
             },
         });
         if (res.status != 200){
-            throw new Error(`Failed to count files, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to count files, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return (await res.json()).count;
     }

@@ -250,7 +261,7 @@ export default class Connector {
             },
         });
         if (res.status != 200){
-            throw new Error(`Failed to list files, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to list files, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return await res.json();
     }

@@ -270,7 +281,7 @@ export default class Connector {
             },
         });
         if (res.status != 200){
-            throw new Error(`Failed to count directories, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to count directories, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return (await res.json()).count;
     }

@@ -309,7 +320,7 @@ export default class Connector {
             },
         });
         if (res.status != 200){
-            throw new Error(`Failed to list directories, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to list directories, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
         return await res.json();
     }

@@ -347,7 +358,7 @@ export default class Connector {
             },
         });
         if (res.status != 200){
-            throw new Error(`Failed to set permission, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to set permission, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
     }
 

@@ -369,7 +380,7 @@ export default class Connector {
             },
         });
         if (res.status != 200){
-            throw new Error(`Failed to move file, status code: ${res.status}, message: ${await res
+            throw new Error(`Failed to move file, status code: ${res.status}, message: ${await fmtFailedResponse(res)}`);
         }
     }
 
{lfss-0.8.4 → lfss-0.9.1}/lfss/api/__init__.py

@@ -1,9 +1,9 @@
 import os, time, pathlib
 from threading import Lock
 from .connector import Connector
-from ..
-from ..
-from ..
+from ..eng.datatype import FileRecord
+from ..eng.utils import decode_uri_compnents
+from ..eng.bounded_pool import BoundedThreadPoolExecutor
 
 def upload_file(
     connector: Connector,
{lfss-0.8.4 → lfss-0.9.1}/lfss/api/connector.py

@@ -5,26 +5,32 @@ import requests
 import requests.adapters
 import urllib.parse
 from tempfile import SpooledTemporaryFile
-from lfss.
-from lfss.
+from lfss.eng.error import PathNotFoundError
+from lfss.eng.datatype import (
     FileReadPermission, FileRecord, DirectoryRecord, UserRecord, PathContents,
     FileSortKey, DirSortKey
 )
-from lfss.
+from lfss.eng.utils import ensure_uri_compnents
 
 _default_endpoint = os.environ.get('LFSS_ENDPOINT', 'http://localhost:8000')
 _default_token = os.environ.get('LFSS_TOKEN', '')
 
 class Connector:
     class Session:
-        def __init__(
+        def __init__(
+            self, connector: Connector, pool_size: int = 10,
+            retry: int = 1, backoff_factor: float = 0.5, status_forcelist: list[int] = [503]
+        ):
             self.connector = connector
             self.pool_size = pool_size
+            self.retry_adapter = requests.adapters.Retry(
+                total=retry, backoff_factor=backoff_factor, status_forcelist=status_forcelist,
+            )
         def open(self):
             self.close()
             if self.connector._session is None:
                 s = requests.Session()
-                adapter = requests.adapters.HTTPAdapter(pool_connections=self.pool_size, pool_maxsize=self.pool_size)
+                adapter = requests.adapters.HTTPAdapter(pool_connections=self.pool_size, pool_maxsize=self.pool_size, max_retries=self.retry_adapter)
                 s.mount('http://', adapter)
                 s.mount('https://', adapter)
                 self.connector._session = s

@@ -48,9 +54,9 @@ class Connector:
         }
         self._session: Optional[requests.Session] = None
 
-    def session(self, pool_size: int = 10):
+    def session( self, pool_size: int = 10, **kwargs):
         """ avoid creating a new session for each request. """
-        return self.Session(self, pool_size)
+        return self.Session(self, pool_size, **kwargs)
 
     def _fetch_factory(
         self, method: Literal['GET', 'POST', 'PUT', 'DELETE'],
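Going by the new `Session.__init__` signature and the `**kwargs` forwarding in `session()`, retry behaviour can now be tuned per session. The sketch below is an assumption-laden usage example: it presumes `Connector()` falls back to the `LFSS_ENDPOINT`/`LFSS_TOKEN` environment defaults shown above, and it calls `open()`/`close()` directly since the hunk does not show whether `Session` is also a context manager.

```python
from lfss.api.connector import Connector

conn = Connector()  # assumed to pick up LFSS_ENDPOINT / LFSS_TOKEN from the environment

# **kwargs passed to session() are forwarded to Connector.Session, so the retry
# knobs added in 0.9.1 (retry, backoff_factor, status_forcelist) can be set here.
sess = conn.session(pool_size=10, retry=3, backoff_factor=0.5, status_forcelist=[503])
sess.open()   # mounts an HTTPAdapter with max_retries on the shared requests.Session
try:
    ...       # issue requests through `conn` as usual; they reuse the pooled session
finally:
    sess.close()
```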
{lfss-0.8.4 → lfss-0.9.1}/lfss/cli/balance.py

@@ -2,14 +2,14 @@
 Balance the storage by ensuring that large file thresholds are met.
 """
 
-from lfss.
+from lfss.eng.config import LARGE_BLOB_DIR, LARGE_FILE_BYTES
 import argparse, time, itertools
 from functools import wraps
 from asyncio import Semaphore
 import aiofiles, asyncio
 import aiofiles.os
-from lfss.
-from lfss.
+from lfss.eng.database import transaction, unique_cursor
+from lfss.eng.connection_pool import global_entrance
 
 sem: Semaphore
 
{lfss-0.8.4 → lfss-0.9.1}/lfss/cli/cli.py

@@ -1,19 +1,14 @@
 from pathlib import Path
 import argparse, typing
 from lfss.api import Connector, upload_directory, upload_file, download_file, download_directory
-from lfss.
-from lfss.
+from lfss.eng.datatype import FileReadPermission, FileSortKey, DirSortKey
+from lfss.eng.utils import decode_uri_compnents
 from . import catch_request_error, line_sep
 
 def parse_permission(s: str) -> FileReadPermission:
-
-
-
-        return FileReadPermission.PROTECTED
-    if s.lower() == "private":
-        return FileReadPermission.PRIVATE
-    if s.lower() == "unset":
-        return FileReadPermission.UNSET
+    for p in FileReadPermission:
+        if p.name.lower() == s.lower():
+            return p
     raise ValueError(f"Invalid permission {s}")
 
 def parse_arguments():
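The refactored `parse_permission` above replaces a hard-coded if-chain with a case-insensitive scan over the enum names. A standalone sketch of that behaviour (the enum definition here is illustrative; the numeric values are not necessarily the ones lfss uses):

```python
from enum import Enum

class FileReadPermission(Enum):
    # Names mirror the four permissions listed in docs/Permission.md.
    UNSET = 0
    PUBLIC = 1
    PROTECTED = 2
    PRIVATE = 3

def parse_permission(s: str) -> FileReadPermission:
    # Same pattern as the refactored CLI helper: match on the enum member name.
    for p in FileReadPermission:
        if p.name.lower() == s.lower():
            return p
    raise ValueError(f"Invalid permission {s}")

assert parse_permission("protected") is FileReadPermission.PROTECTED
assert parse_permission("UNSET") is FileReadPermission.UNSET
```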
{lfss-0.8.4 → lfss-0.9.1}/lfss/cli/panel.py

@@ -2,6 +2,7 @@
 import uvicorn
 from fastapi import FastAPI
 from fastapi.staticfiles import StaticFiles
+from fastapi.middleware.cors import CORSMiddleware
 
 import argparse
 from contextlib import asynccontextmanager

@@ -27,6 +28,13 @@ assert (__frontend_dir / "index.html").exists(), "Frontend panel not found"
 
 app = FastAPI(lifespan=app_lifespan)
 app.mount("/", StaticFiles(directory=__frontend_dir, html=True), name="static")
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=["*"],
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
 
 def main():
     parser = argparse.ArgumentParser(description="Serve frontend panel")
{lfss-0.8.4 → lfss-0.9.1}/lfss/cli/serve.py

@@ -1,7 +1,9 @@
 import argparse
 from uvicorn import Config, Server
 from uvicorn.config import LOGGING_CONFIG
-from ..
+from ..eng.config import DEBUG_MODE
+from ..svc.app_base import logger
+from ..svc.app import app
 
 def main():
     parser = argparse.ArgumentParser()

@@ -19,7 +21,7 @@ def main():
         app=app,
         host=args.host,
         port=args.port,
-        access_log=False,
+        access_log=True if DEBUG_MODE else False,
         workers=args.workers,
         log_config=default_logging_config
     )
{lfss-0.8.4 → lfss-0.9.1}/lfss/cli/user.py

@@ -1,9 +1,16 @@
 import argparse, asyncio, os
 from contextlib import asynccontextmanager
 from .cli import parse_permission, FileReadPermission
-from ..
-from ..
-from ..
+from ..eng.utils import parse_storage_size, fmt_storage_size
+from ..eng.datatype import AccessLevel
+from ..eng.database import Database, FileReadPermission, transaction, UserConn, unique_cursor, FileConn
+from ..eng.connection_pool import global_entrance
+
+def parse_access_level(s: str) -> AccessLevel:
+    for p in AccessLevel:
+        if p.name.lower() == s.lower():
+            return p
+    raise ValueError(f"Invalid access level {s}")
 
 @global_entrance(1)
 async def _main():

@@ -31,11 +38,16 @@ async def _main():
     sp_set.add_argument('-a', '--admin', type=parse_bool, default=None)
     sp_set.add_argument('--permission', type=parse_permission, default=None)
     sp_set.add_argument('--max-storage', type=parse_storage_size, default=None)
-
+
     sp_list = sp.add_parser('list')
     sp_list.add_argument("username", nargs='*', type=str, default=None)
     sp_list.add_argument("-l", "--long", action="store_true")
 
+    sp_peer = sp.add_parser('set-peer')
+    sp_peer.add_argument('src_username', type=str)
+    sp_peer.add_argument('dst_username', type=str)
+    sp_peer.add_argument('--level', type=parse_access_level, default=AccessLevel.READ, help="Access level")
+
     args = parser.parse_args()
     db = await Database().init()
 

@@ -72,6 +84,16 @@ async def _main():
         assert user is not None
         print('User updated, credential:', user.credential)
 
+    if args.subparser_name == 'set-peer':
+        async with get_uconn() as uconn:
+            src_user = await uconn.get_user(args.src_username)
+            dst_user = await uconn.get_user(args.dst_username)
+            if src_user is None or dst_user is None:
+                print('User not found')
+                exit(1)
+            await uconn.set_peer_level(src_user.id, dst_user.id, args.level)
+            print(f"Peer set: [{src_user.username}] now have [{args.level.name}] access to [{dst_user.username}]")
+
     if args.subparser_name == 'list':
         async with get_uconn() as uconn:
             term_width = os.get_terminal_size().columns

@@ -86,6 +108,11 @@ async def _main():
             user_size_used = await fconn.user_size(user.id)
             print('- Credential: ', user.credential)
             print(f'- Storage: {fmt_storage_size(user_size_used)} / {fmt_storage_size(user.max_storage)}')
+            for p in AccessLevel:
+                if p > AccessLevel.NONE:
+                    usernames = [x.username for x in await uconn.list_peer_users(user.id, p)]
+                    if usernames:
+                        print(f'- Peers [{p.name}]: {", ".join(usernames)}')
 
 def main():
     asyncio.run(_main())
{lfss-0.8.4 → lfss-0.9.1}/lfss/cli/vacuum.py

@@ -2,17 +2,17 @@
 Vacuum the database and external storage to ensure that the storage is consistent and minimal.
 """
 
-from lfss.
+from lfss.eng.config import LARGE_BLOB_DIR
 import argparse, time
 from functools import wraps
 from asyncio import Semaphore
 import aiofiles, asyncio
 import aiofiles.os
 from contextlib import contextmanager
-from lfss.
-from lfss.
-from lfss.
-from lfss.
+from lfss.eng.database import transaction, unique_cursor
+from lfss.svc.request_log import RequestDB
+from lfss.eng.utils import now_stamp
+from lfss.eng.connection_pool import global_entrance
 
 sem: Semaphore
 
{lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/config.py

@@ -21,6 +21,7 @@ else:
 MAX_MEM_FILE_BYTES = 128 * 1024 * 1024 # 128MB
 MAX_BUNDLE_BYTES = 512 * 1024 * 1024 # 512MB
 CHUNK_SIZE = 1024 * 1024 # 1MB chunks for streaming (on large files)
+DEBUG_MODE = os.environ.get('LFSS_DEBUG', '0') == '1'
 
 THUMB_DB = DATA_HOME / 'thumbs.db'
 THUMB_SIZE = (48, 48)
{lfss-0.8.4/lfss/src → lfss-0.9.1/lfss/eng}/connection_pool.py

@@ -8,6 +8,7 @@ from functools import wraps
 from typing import Callable, Awaitable
 
 from .log import get_logger
+from .error import DatabaseLockedError
 from .config import DATA_HOME
 
 async def execute_sql(conn: aiosqlite.Connection | aiosqlite.Cursor, name: str):

@@ -28,7 +29,7 @@ async def get_connection(read_only: bool = False) -> aiosqlite.Connection:
 
     conn = await aiosqlite.connect(
         get_db_uri(DATA_HOME / 'index.db', read_only=read_only),
-        timeout =
+        timeout = 20, uri = True
     )
     async with conn.cursor() as c:
         await c.execute(

@@ -46,7 +47,7 @@ class SqlConnection:
 
 class SqlConnectionPool:
     _r_sem: Semaphore
-    _w_sem: Lock
+    _w_sem: Lock
     def __init__(self):
         self._readers: list[SqlConnection] = []
         self._writer: None | SqlConnection = None

@@ -65,6 +66,17 @@ class SqlConnectionPool:
             self._readers.append(SqlConnection(conn))
         self._r_sem = Semaphore(n_read)
 
+    def status(self): # debug
+        assert self._writer
+        assert len(self._readers) == self.n_read
+        n_free_readers = sum([1 for c in self._readers if c.is_available])
+        n_free_writers = 1 if self._writer.is_available else 0
+        n_free_r_sem = self._r_sem._value
+        n_free_w_sem = 1 - self._w_sem.locked()
+        assert n_free_readers == n_free_r_sem, f"{n_free_readers} != {n_free_r_sem}"
+        assert n_free_writers == n_free_w_sem, f"{n_free_writers} != {n_free_w_sem}"
+        return f"Readers: {n_free_readers}/{self.n_read}, Writers: {n_free_writers}/{1}"
+
     @property
     def n_read(self):
         return len(self._readers)

@@ -142,6 +154,10 @@ async def unique_cursor(is_write: bool = False):
         connection_obj = await g_pool.get()
         try:
             yield await connection_obj.conn.cursor()
+        except Exception as e:
+            if 'database is locked' in str(e):
+                raise DatabaseLockedError from e
+            raise e
         finally:
             await g_pool.release(connection_obj)
     else:

@@ -149,10 +165,13 @@ async def unique_cursor(is_write: bool = False):
         connection_obj = await g_pool.get(w=True)
         try:
             yield await connection_obj.conn.cursor()
+        except Exception as e:
+            if 'database is locked' in str(e):
+                raise DatabaseLockedError from e
+            raise e
        finally:
             await g_pool.release(connection_obj)
 
-# todo: add exclusive transaction option
 @asynccontextmanager
 async def transaction():
     async with unique_cursor(is_write=True) as cur: