lfss 0.11.0__tar.gz → 0.11.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (51)
  1. {lfss-0.11.0 → lfss-0.11.2}/PKG-INFO +2 -1
  2. {lfss-0.11.0 → lfss-0.11.2}/docs/changelog.md +27 -0
  3. {lfss-0.11.0 → lfss-0.11.2}/frontend/login.js +0 -1
  4. {lfss-0.11.0 → lfss-0.11.2}/frontend/popup.js +18 -3
  5. {lfss-0.11.0 → lfss-0.11.2}/frontend/scripts.js +46 -39
  6. lfss-0.11.2/frontend/utils.js +193 -0
  7. {lfss-0.11.0 → lfss-0.11.2}/lfss/api/__init__.py +1 -1
  8. {lfss-0.11.0 → lfss-0.11.2}/lfss/api/connector.py +7 -4
  9. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/cli.py +1 -1
  10. lfss-0.11.2/lfss/cli/log.py +77 -0
  11. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/vacuum.py +10 -3
  12. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/config.py +3 -2
  13. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/database.py +41 -42
  14. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/log.py +73 -4
  15. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/utils.py +1 -2
  16. {lfss-0.11.0 → lfss-0.11.2}/lfss/svc/app_dav.py +7 -7
  17. {lfss-0.11.0 → lfss-0.11.2}/lfss/svc/app_native.py +7 -7
  18. {lfss-0.11.0 → lfss-0.11.2}/lfss/svc/common_impl.py +4 -4
  19. {lfss-0.11.0 → lfss-0.11.2}/pyproject.toml +3 -1
  20. lfss-0.11.0/frontend/utils.js +0 -96
  21. {lfss-0.11.0 → lfss-0.11.2}/Readme.md +0 -0
  22. {lfss-0.11.0 → lfss-0.11.2}/docs/Enviroment_variables.md +0 -0
  23. {lfss-0.11.0 → lfss-0.11.2}/docs/Known_issues.md +0 -0
  24. {lfss-0.11.0 → lfss-0.11.2}/docs/Permission.md +0 -0
  25. {lfss-0.11.0 → lfss-0.11.2}/docs/Webdav.md +0 -0
  26. {lfss-0.11.0 → lfss-0.11.2}/frontend/api.js +0 -0
  27. {lfss-0.11.0 → lfss-0.11.2}/frontend/index.html +0 -0
  28. {lfss-0.11.0 → lfss-0.11.2}/frontend/info.css +0 -0
  29. {lfss-0.11.0 → lfss-0.11.2}/frontend/info.js +0 -0
  30. {lfss-0.11.0 → lfss-0.11.2}/frontend/login.css +0 -0
  31. {lfss-0.11.0 → lfss-0.11.2}/frontend/popup.css +0 -0
  32. {lfss-0.11.0 → lfss-0.11.2}/frontend/state.js +0 -0
  33. {lfss-0.11.0 → lfss-0.11.2}/frontend/styles.css +0 -0
  34. {lfss-0.11.0 → lfss-0.11.2}/frontend/thumb.css +0 -0
  35. {lfss-0.11.0 → lfss-0.11.2}/frontend/thumb.js +0 -0
  36. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/__init__.py +0 -0
  37. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/balance.py +0 -0
  38. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/panel.py +0 -0
  39. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/serve.py +0 -0
  40. {lfss-0.11.0 → lfss-0.11.2}/lfss/cli/user.py +0 -0
  41. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/__init__.py +0 -0
  42. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/bounded_pool.py +0 -0
  43. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/connection_pool.py +0 -0
  44. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/datatype.py +0 -0
  45. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/error.py +0 -0
  46. {lfss-0.11.0 → lfss-0.11.2}/lfss/eng/thumb.py +0 -0
  47. {lfss-0.11.0 → lfss-0.11.2}/lfss/sql/init.sql +0 -0
  48. {lfss-0.11.0 → lfss-0.11.2}/lfss/sql/pragma.sql +0 -0
  49. {lfss-0.11.0 → lfss-0.11.2}/lfss/svc/app.py +0 -0
  50. {lfss-0.11.0 → lfss-0.11.2}/lfss/svc/app_base.py +0 -0
  51. {lfss-0.11.0 → lfss-0.11.2}/lfss/svc/request_log.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lfss
-Version: 0.11.0
+Version: 0.11.2
 Summary: Lightweight file storage service
 Home-page: https://github.com/MenxLi/lfss
 Author: Li, Mengxun
@@ -17,6 +17,7 @@ Requires-Dist: mimesniff (==1.*)
 Requires-Dist: pillow
 Requires-Dist: python-multipart
 Requires-Dist: requests (==2.*)
+Requires-Dist: rich
 Requires-Dist: stream-zip (==0.*)
 Requires-Dist: uvicorn (==0.*)
 Project-URL: Repository, https://github.com/MenxLi/lfss
@@ -1,3 +1,30 @@
+## 0.11
+
+### 0.11.2
+- Improve frontend directory upload feedback.
+- Set default large file threshold to 1M.
+- Increase default concurrent threads.
+- Use sqlite for logging.
+- Add vacuum logs.
+- Refactor: use dir for directory path.
+
+### 0.11.1
+- Rename api `get_metadata` function to `get_meta`.
+- Frontend supports directory upload.
+- Fix admin put to non-existent user path.
+
+### 0.11.0
+- Copy file as hard link.
+- Add vacuum thumb and all.
+- Thumb database uses file_id as index.
+- Improve username and url check with regular expression.
+
+## 0.10
+
+### 0.10.0
+- Inherit permission from path owner for `unset` permission files.
+- Add timeout and verify options for client api.
+- Bundle small files in memory.
 
 ## 0.9
 
@@ -3,7 +3,6 @@ import { createFloatingWindow, showPopup } from "./popup.js";
 
 /**
  * @import { store } from "./state.js";
- * @import { UserRecord } from "./api.js";
  *
  * Shows the login panel if necessary.
  * @param {store} store - The store object.
@@ -109,7 +109,14 @@ export function showPopup(content = '', {
 } = {}){
     const popup = document.createElement("div");
     popup.classList.add("popup-window");
-    popup.innerHTML = showTime? `<span>[${new Date().toLocaleTimeString()}]</span> ${content}` : content;
+    /**
+     * @param {string} c
+     * @returns {void}
+     */
+    function setPopupContent(c){
+        popup.innerHTML = showTime? `<span>[${new Date().toLocaleTimeString()}]</span> ${c}` : c;
+    }
+    setPopupContent(content);
     popup.style.width = width;
     const popupHeight = '1rem';
     popup.style.height = popupHeight;
@@ -132,11 +139,19 @@ export function showPopup(content = '', {
     if (level === "success") popup.style.backgroundColor = "darkgreen";
     document.body.appendChild(popup);
     shownPopups.push(popup);
-    window.setTimeout(() => {
+
+    function closePopup(){
         if (popup.parentNode) document.body.removeChild(popup);
         shownPopups.splice(shownPopups.indexOf(popup), 1);
         for (let i = 0; i < shownPopups.length; i++) {
             shownPopups[i].style.top = `${i * (parseInt(popupHeight) + 2*parseInt(paddingHeight))*1.2 + 0.5}rem`;
         }
-    }, timeout);
+    }
+
+    window.setTimeout(closePopup, timeout);
+    return {
+        elem: popup,
+        setContent: setPopupContent,
+        close: closePopup
+    }
 }
@@ -5,6 +5,7 @@ import { showInfoPanel, showDirInfoPanel } from './info.js';
 import { makeThumbHtml } from './thumb.js';
 import { store } from './state.js';
 import { maybeShowLoginPanel } from './login.js';
+import { forEachFile } from './utils.js';
 
 /** @type {import('./api.js').UserRecord}*/
 let userRecord = null;
@@ -158,55 +159,61 @@ uploadFileNameInput.addEventListener('input', debounce(onFileNameInpuChange, 500
         e.preventDefault();
         e.stopPropagation();
     });
-    window.addEventListener('drop', (e) => {
+    window.addEventListener('drop', async (e) => {
         e.preventDefault();
         e.stopPropagation();
-        const files = e.dataTransfer.files;
-        if (files.length == 1){
-            uploadFileSelector.files = files;
-            uploadFileNameInput.value = files[0].name;
+        const items = e.dataTransfer.items;
+        if (items.length == 1 && items[0].kind === 'file' && items[0].webkitGetAsEntry().isFile){
+            uploadFileSelector.files = e.dataTransfer.files;
+            uploadFileNameInput.value = e.dataTransfer.files[0].name;
             uploadFileNameInput.focus();
+            return;
         }
-        else if (files.length > 1){
-            let dstPath = store.dirpath + uploadFileNameInput.value;
-            if (!dstPath.endsWith('/')){ dstPath += '/'; }
-            if (!confirm(`
+
+        /** @type {[string, File][]} */
+        const uploadInputVal = uploadFileNameInput.value? uploadFileNameInput.value : '';
+        let dstPath = store.dirpath + uploadInputVal;
+        if (!dstPath.endsWith('/')){ dstPath += '/'; }
+
+        if (!confirm(`\
 You are trying to upload multiple files at once.
 This will directly upload the files to the [${dstPath}] directory without renaming.
 Note that same name files will be overwritten.
-Are you sure you want to proceed?
-`)){ return; }
-
-            let counter = 0;
-            async function uploadFileFn(...args){
-                const [file, path] = args;
-                try{
-                    await uploadFile(conn, path, file, {conflict: 'overwrite'});
-                }
-                catch (err){
-                    showPopup('Failed to upload file [' + file.name + ']: ' + err, {level: 'error', timeout: 5000});
-                }
-                counter += 1;
-                console.log("Uploading file: ", counter, "/", files.length);
+Are you sure you want to proceed?\
+`)){ return; }
+
+        let counter = 0;
+        let totalCount = 0;
+        const uploadPopup = showPopup('Uploading multiple files...', {level: 'info', timeout: 999999});
+        async function uploadFileFn(path, file){
+            try{
+                await uploadFile(conn, path, file, {conflict: 'overwrite'});
            }
-
-            let promises = [];
-            for (let i = 0; i < files.length; i++){
-                const file = files[i];
-                const path = dstPath + file.name;
-                promises.push(uploadFileFn(file, path));
+            catch (err){
+                showPopup('Failed to upload file [' + file.name + ']: ' + err, {level: 'error', timeout: 5000});
            }
-            showPopup('Uploading multiple files...', {level: 'info', timeout: 3000});
-            Promise.all(promises).then(
-                () => {
-                    showPopup('Upload success.', {level: 'success', timeout: 3000});
-                    refreshFileList();
-                },
-                (err) => {
-                    showPopup('Failed to upload some files: ' + err, {level: 'error', timeout: 5000});
-                }
-            );
+            console.log(`[${counter}/${totalCount}] Uploaded file: ${path}`);
+            uploadPopup.setContent(`Uploading multiple files... [${counter}/${totalCount}]`);
        }
+
+        const promises = await forEachFile(e, async (relPath, filePromiseFn) => {
+            counter += 1;
+            const file = await filePromiseFn();
+            await uploadFileFn(dstPath + relPath, file);
+        });
+        totalCount = promises.length;
+
+        Promise.all(promises).then(
+            () => {
+                window.setTimeout(uploadPopup.close, 3000);
+                showPopup('Upload success.', {level: 'success', timeout: 3000});
+                refreshFileList();
+            },
+            (err) => {
+                showPopup('Failed to upload some files: ' + err, {level: 'error', timeout: 5000});
+            }
+        );
+
     });
 }
 
@@ -0,0 +1,193 @@
+
+export function formatSize(size){
+    if (size < 0){
+        return '';
+    }
+    const sizeInKb = size / 1024;
+    const sizeInMb = sizeInKb / 1024;
+    const sizeInGb = sizeInMb / 1024;
+    if (sizeInGb > 1){
+        return sizeInGb.toFixed(2) + ' GB';
+    }
+    else if (sizeInMb > 1){
+        return sizeInMb.toFixed(2) + ' MB';
+    }
+    else if (sizeInKb > 1){
+        return sizeInKb.toFixed(2) + ' KB';
+    }
+    else {
+        return size + ' B';
+    }
+}
+
+export function copyToClipboard(text){
+    function secureCopy(text){
+        navigator.clipboard.writeText(text);
+    }
+    function unsecureCopy(text){
+        const el = document.createElement('textarea');
+        el.value = text;
+        document.body.appendChild(el);
+        el.select();
+        document.execCommand('copy');
+        document.body.removeChild(el);
+    }
+    if (navigator.clipboard){
+        secureCopy(text);
+    }
+    else {
+        unsecureCopy(text);
+    }
+}
+
+export function encodePathURI(path){
+    return path.split('/').map(encodeURIComponent).join('/');
+}
+
+export function decodePathURI(path){
+    return path.split('/').map(decodeURIComponent).join('/');
+}
+
+export function ensurePathURI(path){
+    return encodePathURI(decodePathURI(path));
+}
+
+export function getRandomString(n, additionalCharset='0123456789_-(=)[]{}'){
+    let result = '';
+    let charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ';
+    const firstChar = charset[Math.floor(Math.random() * charset.length)];
+    const lastChar = charset[Math.floor(Math.random() * charset.length)];
+    result += firstChar;
+    charset += additionalCharset;
+    for (let i = 0; i < n-2; i++){
+        result += charset[Math.floor(Math.random() * charset.length)];
+    }
+    result += lastChar;
+    return result;
+};
+
+/**
+ * @param {string} dateStr
+ * @returns {string}
+ */
+export function cvtGMT2Local(dateStr){
+    if (!dateStr || dateStr === 'N/A'){
+        return '';
+    }
+    const gmtdate = new Date(dateStr);
+    const localdate = new Date(gmtdate.getTime() + gmtdate.getTimezoneOffset() * 60000);
+    return localdate.toISOString().slice(0, 19).replace('T', ' ');
+}
+
+export function debounce(fn, wait){
+    let timeout;
+    return function(...args){
+        const context = this;
+        if (timeout) clearTimeout(timeout);
+        timeout = setTimeout(() => fn.apply(context, args), wait);
+    }
+}
+
+export function asHtmlText(text){
+    const anonElem = document.createElement('div');
+    anonElem.textContent = text;
+    const htmlText = anonElem.innerHTML;
+    return htmlText;
+}
+
+/**
+ * Iterates over all files dropped in the event,
+ * including files inside directories, and processes them
+ * using the provided callback with a concurrency limit.
+ *
+ * @param {Event} e The drop event.
+ * @param {(relPath: string, file: () => Promise<File>) => Promise<void>} callback A function
+ *   that receives the relative path and a function returning a promise for the File.
+ * @param {number} [maxConcurrent=16] Maximum number of concurrent callback executions.
+ * @returns {Promise<Promise<void>[]>} A promise resolving to an array of callback promises.
+ */
+export async function forEachFile(e, callback, maxConcurrent = 16) {
+    const results = [];    // to collect callback promises
+
+    // Concurrency barrier variables.
+    let activeCount = 0;
+    const queue = [];
+
+    /**
+     * Runs the given async task when below the concurrency limit.
+     * If at limit, waits until a slot is free.
+     *
+     * @param {() => Promise<any>} task An async function returning a promise.
+     * @returns {Promise<any>}
+     */
+    async function runWithLimit(task) {
+        // If we reached the concurrency limit, wait for a free slot.
+        if (activeCount >= maxConcurrent) {
+            await new Promise(resolve => queue.push(resolve));
+        }
+        activeCount++;
+        try {
+            return await task();
+        } finally {
+            activeCount--;
+            // If there are waiting tasks, allow the next one to run.
+            if (queue.length) {
+                queue.shift()();
+            }
+        }
+    }
+
+    /**
+     * Recursively traverses a file system entry.
+     *
+     * @param {FileSystemEntry} entry The entry (file or directory).
+     * @param {string} path The current relative path.
+     */
+    async function traverse(entry, path) {
+        if (entry.isFile) {
+            // Wrap file retrieval in a promise.
+            const filePromiseFn = () =>
+                new Promise((resolve, reject) => entry.file(resolve, reject));
+            // Use the concurrency barrier for the callback invocation.
+            results.push(runWithLimit(() => callback(path + entry.name, filePromiseFn)));
+        } else if (entry.isDirectory) {
+            const reader = entry.createReader();
+
+            async function readAllEntries(reader) {
+                const entries = [];
+                while (true) {
+                    const chunk = await new Promise((resolve, reject) => {
+                        reader.readEntries(resolve, reject);
+                    });
+                    if (chunk.length === 0) break;
+                    entries.push(...chunk);
+                }
+                return entries;
+            }
+
+            const entries = await readAllEntries(reader);
+            await Promise.all(
+                entries.map(ent => traverse(ent, path + entry.name + '/'))
+            );
+        }
+    }
+
+    // Process using DataTransfer items if available.
+    if (e.dataTransfer && e.dataTransfer.items) {
+        await Promise.all(
+            Array.from(e.dataTransfer.items).map(async item => {
+                const entry = item.webkitGetAsEntry && item.webkitGetAsEntry();
+                if (entry) {
+                    await traverse(entry, '');
+                }
+            })
+        );
+    } else if (e.dataTransfer && e.dataTransfer.files) {
+        // Fallback for browsers that support only dataTransfer.files.
+        Array.from(e.dataTransfer.files).forEach(file => {
+            results.push(runWithLimit(() => callback(file.name, () => Promise.resolve(file))));
+        });
+    }
+    return results;
+}
+
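The `forEachFile` helper above is the core of the new directory upload: it walks dropped `FileSystemEntry` trees and throttles callback execution with a hand-rolled concurrency barrier (a counter plus a queue of resolve functions). For readers more at home on the server side of lfss, here is the same bounded-concurrency idea sketched in Python with `asyncio.Semaphore`; this is illustrative only, not code from the package, and the names (`for_each_path`, `upload_one`) are hypothetical:

```python
# Illustrative sketch: the bounded-concurrency pattern used by forEachFile,
# expressed with asyncio. At most max_concurrent callbacks run at a time.
import asyncio

async def for_each_path(paths, callback, max_concurrent=16):
    sem = asyncio.Semaphore(max_concurrent)

    async def run_with_limit(p):
        async with sem:           # wait for a free slot; release on exit
            return await callback(p)

    # Schedule everything at once; the semaphore throttles execution.
    return await asyncio.gather(*(run_with_limit(p) for p in paths))

async def main():
    async def upload_one(p):
        await asyncio.sleep(0.01)  # stand-in for an actual upload
        print("uploaded", p)
    await for_each_path([f"file_{i}.txt" for i in range(50)], upload_one)

asyncio.run(main())
```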
@@ -113,7 +113,7 @@ def download_file(
         print(f"File {file_path} already exists, skipping download.")
         return True, error_msg
     try:
-        fmeta = connector.get_metadata(src_url)
+        fmeta = connector.get_meta(src_url)
         if fmeta is None:
             error_msg = "File not found."
             return False, error_msg
@@ -98,7 +98,7 @@ class Connector:
 
         # Skip ahead by checking if the file already exists
         if conflict == 'skip-ahead':
-            exists = self.get_metadata(path)
+            exists = self.get_meta(path)
             if exists is None:
                 conflict = 'skip'
             else:
@@ -122,7 +122,7 @@ class Connector:
 
         # Skip ahead by checking if the file already exists
         if conflict == 'skip-ahead':
-            exists = self.get_metadata(path)
+            exists = self.get_meta(path)
             if exists is None:
                 conflict = 'skip'
             else:
@@ -154,7 +154,7 @@ class Connector:
 
         # Skip ahead by checking if the file already exists
         if conflict == 'skip-ahead':
-            exists = self.get_metadata(path)
+            exists = self.get_meta(path)
             if exists is None:
                 conflict = 'skip'
             else:
@@ -211,7 +211,7 @@ class Connector:
         """Deletes the file at the specified path."""
         self._fetch_factory('DELETE', path)()
 
-    def get_metadata(self, path: str) -> Optional[FileRecord | DirectoryRecord]:
+    def get_meta(self, path: str) -> Optional[FileRecord | DirectoryRecord]:
         """Gets the metadata for the file at the specified path."""
         try:
             response = self._fetch_factory('GET', '_api/meta', {'path': path})()
@@ -223,6 +223,9 @@ class Connector:
             if e.response.status_code == 404:
                 return None
             raise e
+    # shorthand methods for type constraints
+    def get_fmeta(self, path: str) -> Optional[FileRecord]: assert (f:=self.get_meta(path)) is None or isinstance(f, FileRecord); return f
+    def get_dmeta(self, path: str) -> Optional[DirectoryRecord]: assert (d:=self.get_meta(path)) is None or isinstance(d, DirectoryRecord); return d
 
     def list_path(self, path: str) -> PathContents:
         """
@@ -126,7 +126,7 @@ def main():
     elif args.command == "query":
         for path in args.path:
             with catch_request_error():
-                res = connector.get_metadata(path)
+                res = connector.get_meta(path)
                 if res is None:
                     print(f"\033[31mNot found\033[0m ({path})")
                 else:
@@ -0,0 +1,77 @@
+from typing import Optional
+import argparse
+import rich.console
+import logging
+import sqlite3
+from lfss.eng.log import eval_logline
+
+console = rich.console.Console()
+def levelstr2int(levelstr: str) -> int:
+    import sys
+    if sys.version_info < (3, 11):
+        return logging.getLevelName(levelstr.upper())
+    else:
+        return logging.getLevelNamesMapping()[levelstr.upper()]
+
+def view(
+    db_file: str,
+    level: Optional[str] = None,
+    offset: int = 0,
+    limit: int = 1000
+    ):
+    conn = sqlite3.connect(db_file)
+    cursor = conn.cursor()
+    if level is None:
+        cursor.execute("SELECT * FROM log ORDER BY created DESC LIMIT ? OFFSET ?", (limit, offset))
+    else:
+        level_int = levelstr2int(level)
+        cursor.execute("SELECT * FROM log WHERE level >= ? ORDER BY created DESC LIMIT ? OFFSET ?", (level_int, limit, offset))
+    levelname_color = {
+        'DEBUG': 'blue',
+        'INFO': 'green',
+        'WARNING': 'yellow',
+        'ERROR': 'red',
+        'CRITICAL': 'bold red',
+        'FATAL': 'bold red'
+    }
+    for row in cursor.fetchall():
+        log = eval_logline(row)
+        console.print(f"{log.created} [{levelname_color[log.levelname]}][{log.levelname}] [default]{log.message}")
+    conn.close()
+
+def trim(db_file: str, keep: int = 1000, level: Optional[str] = None):
+    conn = sqlite3.connect(db_file)
+    cursor = conn.cursor()
+    if level is None:
+        cursor.execute("DELETE FROM log WHERE id NOT IN (SELECT id FROM log ORDER BY created DESC LIMIT ?)", (keep,))
+    else:
+        cursor.execute("DELETE FROM log WHERE levelname = ? and id NOT IN (SELECT id FROM log WHERE levelname = ? ORDER BY created DESC LIMIT ?)", (level.upper(), level.upper(), keep))
+    conn.commit()
+    conn.execute("VACUUM")
+    conn.close()
+
+def main():
+    parser = argparse.ArgumentParser(description="Log operations utility")
+    subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help')
+
+    parser_show = subparsers.add_parser('view', help='Show logs')
+    parser_show.add_argument('db_file', type=str, help='Database file path')
+    parser_show.add_argument('-l', '--level', type=str, required=False, help='Log level')
+    parser_show.add_argument('--offset', type=int, default=0, help='Starting offset')
+    parser_show.add_argument('--limit', type=int, default=1000, help='Maximum number of entries to display')
+    parser_show.set_defaults(func=view)
+
+    parser_trim = subparsers.add_parser('trim', help='Trim logs')
+    parser_trim.add_argument('db_file', type=str, help='Database file path')
+    parser_trim.add_argument('-l', '--level', type=str, required=False, help='Log level')
+    parser_trim.add_argument('--keep', type=int, default=1000, help='Number of entries to keep')
+    parser_trim.set_defaults(func=trim)
+
+    args = parser.parse_args()
+    if hasattr(args, 'func'):
+        kwargs = vars(args)
+        func = kwargs.pop('func')
+        func(**kwargs)
+
+if __name__ == '__main__':
+    main()
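Since `view` and `trim` are plain functions that `main()` merely wires to argparse, they can also be driven programmatically. A sketch under the assumption that the log databases live under the new `LOG_DIR` with the `*.log.db` naming scheme the vacuum hunk below globs for; the filename itself is a hypothetical example:

```python
# Illustrative only; "default.log.db" is a hypothetical log database name.
from lfss.cli.log import view, trim

db = "/path/to/data/logs/default.log.db"
view(db, level="warning", limit=50)  # print the 50 newest entries at WARNING or above
trim(db, keep=10_000)                # keep the newest 10k rows, then VACUUM
```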
@@ -2,7 +2,7 @@
 Vacuum the database and external storage to ensure that the storage is consistent and minimal.
 """
 
-from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB
+from lfss.eng.config import LARGE_BLOB_DIR, THUMB_DB, LOG_DIR
 import argparse, time, itertools
 from functools import wraps
 from asyncio import Semaphore
@@ -14,6 +14,7 @@ from lfss.eng.database import transaction, unique_cursor
 from lfss.svc.request_log import RequestDB
 from lfss.eng.utils import now_stamp
 from lfss.eng.connection_pool import global_entrance
+from lfss.cli.log import trim
 
 sem: Semaphore
 
@@ -33,7 +34,7 @@ def barriered(func):
     return wrapper
 
 @global_entrance()
-async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, vacuum_all: bool = False):
+async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = False, logs: bool = False, vacuum_all: bool = False):
 
     # check if any file in the Large Blob directory is not in the database
     # the reverse operation is not necessary, because by design, the database should be the source of truth...
@@ -73,6 +74,11 @@ async def vacuum_main(index: bool = False, blobs: bool = False, thumbs: bool = F
         async with unique_cursor(is_write=True) as c:
             await c.execute("VACUUM blobs")
 
+    if logs or vacuum_all:
+        with indicator("VACUUM-logs"):
+            for log_file in LOG_DIR.glob("*.log.db"):
+                trim(str(log_file), keep=10_000)
+
     if thumbs or vacuum_all:
         try:
             async with transaction() as c:
@@ -123,9 +129,10 @@ def main():
     parser.add_argument("-d", "--data", action="store_true", help="Vacuum blobs")
     parser.add_argument("-t", "--thumb", action="store_true", help="Vacuum thumbnails")
    parser.add_argument("-r", "--requests", action="store_true", help="Vacuum request logs to only keep at most recent 1M rows in 7 days")
+    parser.add_argument("-l", "--logs", action="store_true", help="Trim log to keep at most recent 10k rows for each category")
     args = parser.parse_args()
     sem = Semaphore(args.jobs)
-    asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, vacuum_all=args.all))
+    asyncio.run(vacuum_main(index=args.metadata, blobs=args.data, thumbs=args.thumb, logs=args.logs, vacuum_all=args.all))
 
     if args.requests or args.all:
         asyncio.run(vacuum_requests())
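The new `-l/--logs` flag folds log trimming into the vacuum pass. Its effect is equivalent to the small loop below, lifted directly from the hunk above, which can also be run standalone:

```python
# Trim every per-category log database to its 10k newest rows,
# mirroring what `vacuum_main(logs=True)` does.
from lfss.eng.config import LOG_DIR
from lfss.cli.log import trim

for log_file in LOG_DIR.glob("*.log.db"):
    trim(str(log_file), keep=10_000)
```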
@@ -11,14 +11,15 @@ if not DATA_HOME.exists():
 DATA_HOME = DATA_HOME.resolve().absolute()
 LARGE_BLOB_DIR = DATA_HOME / 'large_blobs'
 LARGE_BLOB_DIR.mkdir(exist_ok=True)
+LOG_DIR = DATA_HOME / 'logs'
 
 # https://sqlite.org/fasterthanfs.html
 __env_large_file = os.environ.get('LFSS_LARGE_FILE', None)
 if __env_large_file is not None:
     LARGE_FILE_BYTES = parse_storage_size(__env_large_file)
 else:
-    LARGE_FILE_BYTES = 8 * 1024 * 1024 # 8MB
-MAX_MEM_FILE_BYTES = 128 * 1024 * 1024 # 128MB
+    LARGE_FILE_BYTES = 1 * 1024 * 1024 # 1MB
+MAX_MEM_FILE_BYTES = 128 * 1024 * 1024 # 128MB
 CHUNK_SIZE = 1024 * 1024 # 1MB chunks for streaming (on large files)
 DEBUG_MODE = os.environ.get('LFSS_DEBUG', '0') == '1'
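The default large-file threshold drops from 8 MB to 1 MB, so more uploads bypass the sqlite blob table in favor of `LARGE_BLOB_DIR`. Deployments preferring the old behaviour can restore it through the `LFSS_LARGE_FILE` environment variable; the accepted size syntax is whatever `parse_storage_size` understands, so the "8m" string below is an assumption, not confirmed by this diff:

```python
# Illustrative only: set the override before lfss.eng.config is imported.
import os
os.environ["LFSS_LARGE_FILE"] = "8m"  # assumed size syntax for parse_storage_size

from lfss.eng.config import LARGE_FILE_BYTES
print(LARGE_FILE_BYTES)  # expected: 8 * 1024 * 1024
```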