lfss-0.7.0.tar.gz → lfss-0.7.1.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lfss-0.7.0 → lfss-0.7.1}/PKG-INFO +1 -1
- {lfss-0.7.0 → lfss-0.7.1}/frontend/scripts.js +1 -1
- {lfss-0.7.0 → lfss-0.7.1}/lfss/cli/balance.py +10 -7
- {lfss-0.7.0 → lfss-0.7.1}/lfss/cli/user.py +34 -32
- {lfss-0.7.0 → lfss-0.7.1}/lfss/sql/init.sql +0 -6
- lfss-0.7.1/lfss/src/connection_pool.py +151 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/database.py +253 -279
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/server.py +69 -75
- {lfss-0.7.0 → lfss-0.7.1}/pyproject.toml +1 -1
- {lfss-0.7.0 → lfss-0.7.1}/Readme.md +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/docs/Known_issues.md +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/docs/Permission.md +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/frontend/api.js +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/frontend/index.html +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/frontend/popup.css +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/frontend/popup.js +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/frontend/styles.css +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/frontend/utils.js +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/cli/cli.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/cli/panel.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/cli/serve.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/client/__init__.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/client/api.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/sql/pragma.sql +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/__init__.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/config.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/datatype.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/error.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/log.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/stat.py +0 -0
- {lfss-0.7.0 → lfss-0.7.1}/lfss/src/utils.py +0 -0
{lfss-0.7.0 → lfss-0.7.1}/frontend/scripts.js

```diff
@@ -132,7 +132,7 @@ uploadButton.addEventListener('click', () => {
     }
     path = path + fileName;
     showPopup('Uploading...', {level: 'info', timeout: 3000});
-    conn.put(path, file)
+    conn.put(path, file, {'conflict': 'overwrite'})
         .then(() => {
             refreshFileList();
             uploadFileNameInput.value = '';
```
{lfss-0.7.0 → lfss-0.7.1}/lfss/cli/balance.py

```diff
@@ -2,14 +2,15 @@
 Balance the storage by ensuring that large file thresholds are met.
 """
 
-from lfss.src.config import DATA_HOME, LARGE_BLOB_DIR, LARGE_FILE_BYTES
+from lfss.src.config import LARGE_BLOB_DIR, LARGE_FILE_BYTES
 import argparse, time
 from functools import wraps
 from asyncio import Semaphore
-import aiosqlite, aiofiles, asyncio
+import aiofiles, asyncio
+from lfss.src.database import transaction, unique_cursor
+from lfss.src.connection_pool import global_entrance
 
 sem = Semaphore(1)
-db_file = DATA_HOME / 'lfss.db'
 
 def _get_sem():
     return sem
@@ -23,7 +24,8 @@ def barriered(func):
 
 @barriered
 async def move_to_external(f_id: str, flag: str = ''):
-    async with aiosqlite.connect(db_file, timeout = 60) as c:
+    # async with aiosqlite.connect(db_file, timeout = 60) as c:
+    async with transaction() as c:
         async with c.execute( "SELECT data FROM blobs.fdata WHERE file_id = ?", (f_id,)) as cursor:
             blob_row = await cursor.fetchone()
             if blob_row is None:
@@ -47,7 +49,7 @@ async def move_to_external(f_id: str, flag: str = ''):
 
 @barriered
 async def move_to_internal(f_id: str, flag: str = ''):
-    async with aiosqlite.connect(db_file, timeout = 60) as c:
+    async with transaction() as c:
         if not (LARGE_BLOB_DIR / f_id).exists():
             print(f"{flag}File {f_id} not found in external storage")
             return
@@ -68,6 +70,7 @@ async def move_to_internal(f_id: str, flag: str = ''):
         raise e
 
 
+@global_entrance()
 async def _main(batch_size: int = 10000):
 
     tasks = []
@@ -76,7 +79,7 @@ async def _main(batch_size: int = 10000):
     e_cout = 0
     batch_count = 0
     while True:
-        async with aiosqlite.connect(db_file, timeout = 60) as conn:
+        async with unique_cursor() as conn:
             exceeded_rows = list(await (await conn.execute(
                 "SELECT file_id FROM fmeta WHERE file_size > ? AND external = 0 LIMIT ? OFFSET ?",
                 (LARGE_FILE_BYTES, batch_size, batch_size * batch_count)
@@ -93,7 +96,7 @@ async def _main(batch_size: int = 10000):
     i_count = 0
     batch_count = 0
     while True:
-        async with aiosqlite.connect(db_file, timeout = 60) as conn:
+        async with unique_cursor() as conn:
             under_rows = list(await (await conn.execute(
                 "SELECT file_id, file_size, external FROM fmeta WHERE file_size <= ? AND external = 1 LIMIT ? OFFSET ?",
                 (LARGE_FILE_BYTES, batch_size, batch_size * batch_count)
```
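The hunks above decorate `_main` with `@global_entrance()` and route reads through `unique_cursor()`, while the move routines take the write path via `transaction()`; all of it remains serialized through the module-level `Semaphore` by `@barriered`. The `barriered` decorator itself is unchanged and therefore elided from this diff; a minimal self-contained sketch of what such a semaphore gate looks like (the `move`/`main` names are illustrative, not from the package):

```python
import asyncio
from functools import wraps
from asyncio import Semaphore

sem = Semaphore(1)

def barriered(func):
    @wraps(func)
    async def wrapper(*args, **kwargs):
        async with sem:                 # only one decorated coroutine runs at a time
            return await func(*args, **kwargs)
    return wrapper

@barriered
async def move(i: int):
    print("moving", i)

async def main():
    # both calls are queued on the same semaphore, so they run one after another
    await asyncio.gather(move(1), move(2))

asyncio.run(main())
```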
{lfss-0.7.0 → lfss-0.7.1}/lfss/cli/user.py

```diff
@@ -1,5 +1,7 @@
 import argparse, asyncio
-from …
+from contextlib import asynccontextmanager
+from ..src.database import Database, FileReadPermission, transaction, UserConn
+from ..src.connection_pool import global_entrance
 
 def parse_storage_size(s: str) -> int:
     if s[-1] in 'Kk':
@@ -12,6 +14,7 @@ def parse_storage_size(s: str) -> int:
         return int(s[:-1]) * 1024 * 1024 * 1024 * 1024
     return int(s)
 
+@global_entrance(1)
 async def _main():
     parser = argparse.ArgumentParser()
     sp = parser.add_subparsers(dest='subparser_name', required=True)
@@ -42,49 +45,48 @@ async def _main():
     sp_list.add_argument("-l", "--long", action="store_true")
 
     args = parser.parse_args()
-
+
+    @asynccontextmanager
+    async def get_uconn():
+        async with transaction() as conn:
+            yield UserConn(conn)
 
-
-
-            await …
-            user = await …
+    if args.subparser_name == 'add':
+        async with get_uconn() as uconn:
+            await uconn.create_user(args.username, args.password, args.admin, max_storage=args.max_storage, permission=args.permission)
+            user = await uconn.get_user(args.username)
             assert user is not None
             print('User created, credential:', user.credential)
-    … (old lines 53–64; content not captured in the source view)
+
+    if args.subparser_name == 'delete':
+        async with get_uconn() as uconn:
+            user = await uconn.get_user(args.username)
+            if user is None:
+                print('User not found')
+                exit(1)
+            else:
+                db = await Database().init()
+                await db.delete_user(user.id)
+                print('User deleted')
+
+    if args.subparser_name == 'set':
+        async with get_uconn() as uconn:
+            user = await uconn.get_user(args.username)
             if user is None:
                 print('User not found')
                 exit(1)
-            await …
-            user = await …
+            await uconn.update_user(user.username, args.password, args.admin, max_storage=args.max_storage, permission=args.permission)
+            user = await uconn.get_user(args.username)
             assert user is not None
             print('User updated, credential:', user.credential)
-
-
-
+
+    if args.subparser_name == 'list':
+        async with get_uconn() as uconn:
+            async for user in uconn.all():
                 print(user)
                 if args.long:
                     print(' ', user.credential)
 
-        await conn.commit()
-
-    except Exception as e:
-        conn.logger.error(f'Error: {e}')
-        await conn.rollback()
-
-    finally:
-        await conn.close()
-
 def main():
     asyncio.run(_main())
 
```
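The user.py rewrite drops the old manual commit/rollback/close bookkeeping in favor of a local `get_uconn()` context manager that opens a `transaction()` and yields a `UserConn` bound to its cursor. A toy, self-contained sketch of that wrapping pattern; `raw_conn` and `UserAccessor` are hypothetical stand-ins for `transaction()` and `UserConn`, not names from the package:

```python
import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def raw_conn():
    # stands in for transaction(): yields the low-level resource
    yield {"users": []}

class UserAccessor:
    # stands in for UserConn: a typed wrapper over the raw resource
    def __init__(self, raw):
        self.raw = raw
    async def create_user(self, name: str):
        self.raw["users"].append(name)

@asynccontextmanager
async def get_accessor():
    # compose: enter the inner context, re-yield it wrapped
    async with raw_conn() as raw:
        yield UserAccessor(raw)

async def main():
    async with get_accessor() as acc:
        await acc.create_user("alice")
        print(acc.raw["users"])

asyncio.run(main())
```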
lfss-0.7.1/lfss/src/connection_pool.py (new file)

```diff
@@ -0,0 +1,151 @@
+import os
+from pathlib import Path
+import aiosqlite, aiofiles
+from contextlib import asynccontextmanager
+from dataclasses import dataclass
+from asyncio import Semaphore, Lock
+from functools import wraps
+
+from .log import get_logger
+from .config import DATA_HOME
+
+async def execute_sql(conn: aiosqlite.Connection | aiosqlite.Cursor, name: str):
+    this_dir = Path(__file__).parent
+    sql_dir = this_dir.parent / 'sql'
+    async with aiofiles.open(sql_dir / name, 'r') as f:
+        sql = await f.read()
+    sql = sql.split(';')
+    for s in sql:
+        await conn.execute(s)
+
+async def get_connection() -> aiosqlite.Connection:
+    if not os.environ.get('SQLITE_TEMPDIR'):
+        os.environ['SQLITE_TEMPDIR'] = str(DATA_HOME)
+    # large blobs are stored in a separate database, should be more efficient
+    conn = await aiosqlite.connect(DATA_HOME / 'index.db', timeout = 60)
+    async with conn.cursor() as c:
+        await c.execute(f"ATTACH DATABASE ? AS blobs", (str(DATA_HOME/'blobs.db'), ))
+    await execute_sql(conn, 'pragma.sql')
+    return conn
+
+
+@dataclass
+class SqlConnection:
+    conn: aiosqlite.Connection
+    is_available: bool = True
+
+class SqlConnectionPool:
+    _sem: Semaphore
+    _w_sem: Semaphore
+    def __init__(self):
+        self._connections: list[SqlConnection] = []
+        self._w_connection: None | SqlConnection = None
+        self._lock = Lock()
+
+    async def init(self, n_read: int):
+        await self.close()
+        self._connections = []
+        for _ in range(n_read):
+            conn = await get_connection()
+            self._connections.append(SqlConnection(conn))
+        self._w_connection = SqlConnection(await get_connection())
+        self._sem = Semaphore(n_read)
+        self._w_sem = Semaphore(1)
+
+    @property
+    def n_read(self):
+        return len(self._connections)
+    @property
+    def sem(self):
+        return self._sem
+    @property
+    def w_sem(self):
+        return self._w_sem
+
+    async def get(self, w: bool = False) -> SqlConnection:
+        if len(self._connections) == 0:
+            raise Exception("No available connections, please init the pool first")
+
+        if w:
+            assert self._w_connection
+            if self._w_connection.is_available:
+                self._w_connection.is_available = False
+                return self._w_connection
+            raise Exception("Write connection is not available")
+
+        async with self._lock:
+            for c in self._connections:
+                if c.is_available:
+                    c.is_available = False
+                    return c
+        raise Exception("No available connections, impossible?")
+
+    async def release(self, conn: SqlConnection):
+        if conn == self._w_connection:
+            conn.is_available = True
+            return
+
+        async with self._lock:
+            if not conn in self._connections:
+                raise Exception("Connection not in pool")
+            conn.is_available = True
+
+    async def close(self):
+        for c in self._connections:
+            await c.conn.close()
+        if self._w_connection:
+            await self._w_connection.conn.close()
+
+# these two functions shold be called before and after the event loop
+g_pool = SqlConnectionPool()
+async def global_connection_init(n_read: int = 1):
+    await g_pool.init(n_read)
+
+async def global_connection_close():
+    await g_pool.close()
+
+@asynccontextmanager
+async def global_connection(n_read: int = 1):
+    await global_connection_init(n_read)
+    try:
+        yield g_pool
+    finally:
+        await global_connection_close()
+
+def global_entrance(n_read: int = 1):
+    def decorator(func):
+        @wraps(func)
+        async def wrapper(*args, **kwargs):
+            async with global_connection(n_read):
+                return await func(*args, **kwargs)
+        return wrapper
+    return decorator
+
+@asynccontextmanager
+async def unique_cursor(is_write: bool = False):
+    if not is_write:
+        async with g_pool.sem:
+            connection_obj = await g_pool.get()
+            try:
+                yield await connection_obj.conn.cursor()
+            finally:
+                await g_pool.release(connection_obj)
+    else:
+        async with g_pool.w_sem:
+            connection_obj = await g_pool.get(w=True)
+            try:
+                yield await connection_obj.conn.cursor()
+            finally:
+                await g_pool.release(connection_obj)
+
+@asynccontextmanager
+async def transaction():
+    async with unique_cursor(is_write=True) as cur:
+        try:
+            await cur.execute('BEGIN')
+            yield cur
+            await cur.execute('COMMIT')
+        except Exception as e:
+            get_logger('database', global_instance=True).error(f"Error in transaction: {e}, rollback.")
+            await cur.execute('ROLLBACK')
+            raise e
```