@zenfs/core 2.4.4 → 2.5.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +13 -0
- package/dist/backends/cow.d.ts +5 -4
- package/dist/backends/cow.js +7 -3
- package/dist/backends/passthrough.js +1 -2
- package/dist/backends/store/fs.d.ts +2 -0
- package/dist/backends/store/fs.js +98 -24
- package/dist/config.d.ts +20 -0
- package/dist/config.js +142 -0
- package/dist/internal/filesystem.d.ts +1 -0
- package/dist/internal/filesystem.js +5 -0
- package/dist/mixins/mutexed.d.ts +1 -0
- package/dist/mixins/mutexed.js +3 -0
- package/package.json +1 -1
- package/tests/common/config.test.ts +57 -0
package/README.md
CHANGED
@@ -163,6 +163,19 @@ fs.umount('/mnt/zip'); // finished using the zip
 
 ZenFS includes support for device files. These are designed to follow Linux's device file behavior, for consistency and ease of use. Check out the [Devices and Device Drivers](https://zenfs.dev/core/documents/Devices_and_Device_Drivers) documentation for more information.
 
+## `node:*` emulation
+
+ZenFS also includes emulation of some other `node:` modules for various reasons, importable from `@zenfs/core/<name>`:
+
+- `node:path`
+- `node:readline`
+
+For example:
+
+```ts
+import * as path from '@zenfs/core/path';
+```
+
 ## Bundling
 
 ZenFS exports a drop-in for Node's `fs` module, so you can use it for your bundler of preference using the default export.
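A minimal sketch of the emulated module in use, assuming `@zenfs/core/path` mirrors the standard `node:path` API surface (the `join`/`basename` calls are not shown in the README excerpt above):

```ts
// Sketch only: assumes the emulated module exposes the usual node:path functions.
import * as path from '@zenfs/core/path';

const file = path.join('/data', 'logs', 'app.log');
console.log(path.basename(file)); // 'app.log'
```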
package/dist/backends/cow.d.ts
CHANGED
@@ -1,5 +1,6 @@
 import type { CreationOptions, StreamOptions, UsageInfo } from '../internal/filesystem.js';
 import type { InodeLike } from '../internal/inode.js';
+import type { Backend } from './backend.js';
 import { EventEmitter } from 'eventemitter3';
 import { type MountConfiguration } from '../config.js';
 import { FileSystem } from '../internal/filesystem.js';
@@ -9,18 +10,17 @@ import { FileSystem } from '../internal/filesystem.js';
  */
 export interface CopyOnWriteOptions {
     /** The file system that initially populates this file system. */
-    readable: MountConfiguration<
+    readable: MountConfiguration<Backend>;
     /** The file system to write modified files to. */
-    writable: MountConfiguration<
+    writable: MountConfiguration<Backend>;
     /** @see {@link Journal} */
     journal?: Journal;
 }
-declare const journalOperations: readonly ["delete"];
 /**
  * @category Internals
  * @internal
  */
-export type JournalOperation =
+export type JournalOperation = 'delete';
 /**
  * @category Internals
  * @internal
@@ -61,6 +61,7 @@ export declare class CopyOnWriteFS extends FileSystem {
     /** The journal to use for persisting deletions */
     readonly journal: Journal;
     ready(): Promise<void>;
+    readySync(): void;
     constructor(
     /** The file system that initially populates this file system. */
     readable: FileSystem,
package/dist/backends/cow.js
CHANGED
@@ -6,12 +6,12 @@ import { resolveMountConfig } from '../config.js';
 import { FileSystem } from '../internal/filesystem.js';
 import { isDirectory } from '../internal/inode.js';
 import { dirname, join } from '../path.js';
-const journalOperations = ['delete'];
+const journalOperations = new Set(['delete']);
 /** Because TS doesn't work right w/o it */
 function isJournalOp(op) {
-    return journalOperations.
+    return journalOperations.has(op);
 }
-const maxOpLength = Math.max(...journalOperations.map(op => op.length));
+const maxOpLength = Math.max(...journalOperations.values().map(op => op.length));
 const journalMagicString = '#journal@v0\n';
 /**
  * Tracks various operations for the CoW backend
@@ -79,6 +79,10 @@ export class CopyOnWriteFS extends FileSystem {
         await this.readable.ready();
         await this.writable.ready();
     }
+    readySync() {
+        this.readable.readySync();
+        this.writable.readySync();
+    }
     constructor(
     /** The file system that initially populates this file system. */
     readable,
package/dist/backends/passthrough.js
CHANGED
@@ -54,7 +54,6 @@ var __disposeResources = (this && this.__disposeResources) || (function (Suppres
 import { err, warn } from 'kerium/log';
 import { FileSystem } from '../internal/filesystem.js';
 import { isDirectory } from '../internal/inode.js';
-import { resolve } from '../path.js';
 export class PassthroughFS extends FileSystem {
     nodeFS;
     prefix;
@@ -306,7 +305,7 @@ const _Passthrough = {
         prefix: { type: 'string', required: true },
     },
     create({ fs, prefix }) {
-        return new PassthroughFS(fs,
+        return new PassthroughFS(fs, prefix);
     },
 };
 /**
package/dist/backends/store/fs.d.ts
CHANGED
@@ -40,6 +40,7 @@ export declare class StoreFS<T extends Store = Store> extends FileSystem {
     _move(from: string, to: string): void;
     protected _initialized: boolean;
     ready(): Promise<void>;
+    readySync(): void;
     constructor(store: T);
     /**
      * @experimental
@@ -107,6 +108,7 @@ export declare class StoreFS<T extends Store = Store> extends FileSystem {
      * Populates the `_ids` and `_paths` maps with all existing files stored in the underlying `Store`.
      */
     private _populate;
+    private _populateSync;
     /**
      * Find an inode without using the ID tables
      */
package/dist/backends/store/fs.js
CHANGED
@@ -144,6 +144,16 @@ export class StoreFS extends FileSystem {
         await this._populate();
         this._initialized = true;
     }
+    readySync() {
+        if (this._initialized)
+            return;
+        if (!this.attributes.has('no_async_preload')) {
+            this.checkRootSync();
+        }
+        this.checkRootSync();
+        this._populateSync();
+        this._initialized = true;
+    }
     constructor(store) {
         super(store.type ?? 0x6b766673, store.name);
         this.store = store;
@@ -781,6 +791,70 @@ export class StoreFS extends FileSystem {
             await result_11;
         }
     }
+    _populateSync() {
+        const env_22 = { stack: [], error: void 0, hasError: false };
+        try {
+            if (this._initialized) {
+                warn('Attempted to populate tables after initialization');
+                return;
+            }
+            debug('Populating tables with existing store metadata');
+            const tx = __addDisposableResource(env_22, this.transaction(), false);
+            const rootData = tx.getSync(rootIno);
+            if (!rootData) {
+                notice('Store does not have a root inode');
+                const inode = new Inode({ ino: rootIno, data: 1, mode: 0o777 | S_IFDIR });
+                tx.setSync(inode.data, encodeUTF8('{}'));
+                this._add(rootIno, '/');
+                tx.setSync(rootIno, inode);
+                tx.commitSync();
+                return;
+            }
+            if (rootData.length < sizeof(Inode)) {
+                crit('Store contains an invalid root inode. Refusing to populate tables');
+                return;
+            }
+            const visitedDirectories = new Set();
+            let i = 0;
+            const queue = [['/', rootIno]];
+            while (queue.length > 0) {
+                i++;
+                const [path, ino] = queue.shift();
+                this._add(ino, path);
+                const inodeData = tx.getSync(ino);
+                if (!inodeData) {
+                    warn('Store is missing data for inode: ' + ino);
+                    continue;
+                }
+                if (inodeData.length < sizeof(Inode)) {
+                    warn(`Invalid inode size for ino ${ino}: ${inodeData.length}`);
+                    continue;
+                }
+                const inode = new Inode(inodeData);
+                if ((inode.mode & S_IFDIR) != S_IFDIR || visitedDirectories.has(ino)) {
+                    continue;
+                }
+                visitedDirectories.add(ino);
+                const dirData = tx.getSync(inode.data);
+                if (!dirData) {
+                    warn('Store is missing directory data: ' + inode.data);
+                    continue;
+                }
+                const dirListing = decodeDirListing(dirData);
+                for (const [entryName, childIno] of Object.entries(dirListing)) {
+                    queue.push([join(path, entryName), childIno]);
+                }
+            }
+            debug(`Added ${i} existing inode(s) from store`);
+        }
+        catch (e_22) {
+            env_22.error = e_22;
+            env_22.hasError = true;
+        }
+        finally {
+            __disposeResources(env_22);
+        }
+    }
     /**
      * Find an inode without using the ID tables
      */
@@ -859,7 +933,7 @@ export class StoreFS extends FileSystem {
      * @param data The data to store at the file's data node.
      */
     async commitNew(path, options, data) {
-        const
+        const env_23 = { stack: [], error: void 0, hasError: false };
         try {
             /*
                 The root always exists.
@@ -868,7 +942,7 @@ export class StoreFS extends FileSystem {
             */
             if (path == '/')
                 throw withErrno('EEXIST');
-            const tx = __addDisposableResource(
+            const tx = __addDisposableResource(env_23, this.transaction(), true);
             const { dir: parentPath, base: fname } = parse(path);
             const parent = await this.findInode(tx, parentPath);
             const listing = decodeDirListing((await tx.get(parent.data)) ?? _throw(withErrno('ENOENT')));
@@ -892,12 +966,12 @@ export class StoreFS extends FileSystem {
             await tx.commit();
             return inode;
         }
-        catch (
-
-
+        catch (e_23) {
+            env_23.error = e_23;
+            env_23.hasError = true;
         }
         finally {
-            const result_12 = __disposeResources(
+            const result_12 = __disposeResources(env_23);
             if (result_12)
                 await result_12;
         }
@@ -911,7 +985,7 @@ export class StoreFS extends FileSystem {
      * @return The Inode for the new file.
      */
     commitNewSync(path, options, data) {
-        const
+        const env_24 = { stack: [], error: void 0, hasError: false };
         try {
             /*
                 The root always exists.
@@ -920,7 +994,7 @@ export class StoreFS extends FileSystem {
             */
             if (path == '/')
                 throw withErrno('EEXIST');
-            const tx = __addDisposableResource(
+            const tx = __addDisposableResource(env_24, this.transaction(), false);
             const { dir: parentPath, base: fname } = parse(path);
             const parent = this.findInodeSync(tx, parentPath);
             const listing = decodeDirListing(tx.getSync(parent.data) ?? _throw(withErrno('ENOENT')));
@@ -942,12 +1016,12 @@ export class StoreFS extends FileSystem {
             tx.commitSync();
             return inode;
         }
-        catch (
-
-
+        catch (e_24) {
+            env_24.error = e_24;
+            env_24.hasError = true;
         }
         finally {
-            __disposeResources(
+            __disposeResources(env_24);
         }
     }
     /**
@@ -956,9 +1030,9 @@ export class StoreFS extends FileSystem {
      * @param isDir Does the path belong to a directory, or a file?
      */
     async remove(path, isDir) {
-        const
+        const env_25 = { stack: [], error: void 0, hasError: false };
         try {
-            const tx = __addDisposableResource(
+            const tx = __addDisposableResource(env_25, this.transaction(), true);
             const { dir: parent, base: fileName } = parse(path), parentNode = await this.findInode(tx, parent), listing = decodeDirListing((await tx.get(parentNode.data)) ?? _throw(withErrno('ENOENT')));
             if (!listing[fileName])
                 throw withErrno('ENOENT');
@@ -979,12 +1053,12 @@ export class StoreFS extends FileSystem {
             }
             await tx.commit();
         }
-        catch (
-
-
+        catch (e_25) {
+            env_25.error = e_25;
+            env_25.hasError = true;
         }
         finally {
-            const result_13 = __disposeResources(
+            const result_13 = __disposeResources(env_25);
             if (result_13)
                 await result_13;
         }
@@ -995,9 +1069,9 @@ export class StoreFS extends FileSystem {
      * @param isDir Does the path belong to a directory, or a file?
      */
     removeSync(path, isDir) {
-        const
+        const env_26 = { stack: [], error: void 0, hasError: false };
         try {
-            const tx = __addDisposableResource(
+            const tx = __addDisposableResource(env_26, this.transaction(), false);
             const { dir: parent, base: fileName } = parse(path), parentNode = this.findInodeSync(tx, parent), listing = decodeDirListing(tx.getSync(parentNode.data) ?? _throw(withErrno('ENOENT'))), ino = listing[fileName];
             if (!ino)
                 throw withErrno('ENOENT');
@@ -1017,12 +1091,12 @@ export class StoreFS extends FileSystem {
             }
             tx.commitSync();
         }
-        catch (
-
-
+        catch (e_26) {
+            env_26.error = e_26;
+            env_26.hasError = true;
         }
         finally {
-            __disposeResources(
+            __disposeResources(env_26);
         }
     }
 }
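The new `readySync`/`_populateSync` path lets a store-backed file system initialize without awaiting, provided the underlying store supports synchronous transactions. A sketch using `InMemory.create` as in the new tests further below:

```ts
import { InMemory } from '@zenfs/core';

// Sketch: InMemory.create returns a StoreFS over an in-memory store, so
// readySync can check the root inode and fill the inode/path tables synchronously.
const scratch = InMemory.create({ label: 'scratch' });
scratch.readySync();
```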
package/dist/config.d.ts
CHANGED
@@ -18,6 +18,13 @@ export type MountConfiguration<T extends Backend> = FilesystemOf<T> | BackendCon
  * @see MountConfiguration
  */
 export declare function resolveMountConfig<T extends Backend>(configuration: MountConfiguration<T>, _depth?: number): Promise<FilesystemOf<T>>;
+/**
+ * @experimental
+ * Retrieve a file system with `configuration`.
+ * @category Backends and Configuration
+ * @see MountConfiguration
+ */
+export declare function resolveMountConfigSync<T extends Backend>(configuration: MountConfiguration<T>, _depth?: number): FilesystemOf<T>;
 /**
  * An object mapping mount points to backends
  * @category Backends and Configuration
@@ -82,6 +89,12 @@ export interface Configuration<T extends ConfigMounts> extends SharedConfig {
  * @category Backends and Configuration
  */
 export declare function configureSingle<T extends Backend>(configuration: MountConfiguration<T>): Promise<void>;
+/**
+ * @experimental
+ * Configures ZenFS with single mount point /
+ * @category Backends and Configuration
+ */
+export declare function configureSingleSync<T extends Backend>(configuration: MountConfiguration<T>): void;
 /**
  * @category Backends and Configuration
  */
@@ -92,4 +105,11 @@ export declare function addDevice(driver: DeviceDriver, options?: object): Devic
  * @see Configuration
  */
 export declare function configure<T extends ConfigMounts>(configuration: Partial<Configuration<T>>): Promise<void>;
+/**
+ * @experimental
+ * Configures ZenFS with `configuration`
+ * @category Backends and Configuration
+ * @see Configuration
+ */
+export declare function configureSync<T extends ConfigMounts>(configuration: Partial<Configuration<T>>): void;
 export declare function sync(): Promise<void>;
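A sketch of the new synchronous entry points declared above, mirroring the asynchronous `configureSingle` and matching the usage exercised in the new test file at the end of this diff:

```ts
import { configureSingleSync, fs, InMemory } from '@zenfs/core';

// Sketch: mounts an in-memory backend at / without awaiting anything.
configureSingleSync({ backend: InMemory, label: 'root' });
fs.writeFileSync('/hello.txt', 'hi');
```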
package/dist/config.js
CHANGED
@@ -5,6 +5,7 @@ import { createCredentials } from './internal/credentials.js';
 import { DeviceFS } from './internal/devices.js';
 import { FileSystem } from './internal/filesystem.js';
 import { exists, mkdir, stat } from './node/promises.js';
+import { existsSync, mkdirSync, statSync } from './node/sync.js';
 import { _setAccessChecks } from './vfs/config.js';
 import { mount, mounts, umount } from './vfs/shared.js';
 /**
@@ -20,6 +21,9 @@ export function configureFileSystem(fs, config) {
 function isMountConfig(arg) {
     return isBackendConfig(arg) || isBackend(arg) || arg instanceof FileSystem;
 }
+function isThenable(value) {
+    return typeof value?.then == 'function';
+}
 /**
  * Retrieve a file system with `configuration`.
  * @category Backends and Configuration
@@ -60,6 +64,57 @@ export async function resolveMountConfig(configuration, _depth = 0) {
     await mount.ready();
     return mount;
 }
+/**
+ * @experimental
+ * Retrieve a file system with `configuration`.
+ * @category Backends and Configuration
+ * @see MountConfiguration
+ */
+export function resolveMountConfigSync(configuration, _depth = 0) {
+    if (typeof configuration !== 'object' || configuration == null) {
+        throw log.err(withErrno('EINVAL', 'Invalid options on mount configuration'));
+    }
+    if (!isMountConfig(configuration)) {
+        throw log.err(withErrno('EINVAL', 'Invalid mount configuration'));
+    }
+    if (configuration instanceof FileSystem) {
+        configuration.readySync();
+        return configuration;
+    }
+    if (isBackend(configuration)) {
+        configuration = { backend: configuration };
+    }
+    for (const [key, value] of Object.entries(configuration)) {
+        if (key == 'backend')
+            continue;
+        if (!isMountConfig(value))
+            continue;
+        log.info('Resolving nested mount configuration: ' + key);
+        if (_depth > 10) {
+            throw log.err(withErrno('EINVAL', 'Invalid configuration, too deep and possibly infinite'));
+        }
+        configuration[key] = resolveMountConfigSync(value, ++_depth);
+    }
+    const { backend } = configuration;
+    if (typeof backend.isAvailable == 'function') {
+        const available = backend.isAvailable(configuration);
+        if (isThenable(available)) {
+            throw log.err(withErrno('EAGAIN', 'Backend availability check would block: ' + backend.name));
+        }
+        if (!available) {
+            throw log.err(withErrno('EPERM', 'Backend not available: ' + backend.name));
+        }
+    }
+    checkOptions(backend, configuration);
+    const mountFs = backend.create(configuration);
+    if (isThenable(mountFs)) {
+        throw log.err(withErrno('EAGAIN', 'Backend initialization would block: ' + backend.name));
+    }
+    const resolved = mountFs;
+    configureFileSystem(resolved, configuration);
+    resolved.readySync();
+    return resolved;
+}
 /**
  * Configures ZenFS with single mount point /
  * @category Backends and Configuration
@@ -72,6 +127,19 @@ export async function configureSingle(configuration) {
     umount('/');
     mount('/', resolved);
 }
+/**
+ * @experimental
+ * Configures ZenFS with single mount point /
+ * @category Backends and Configuration
+ */
+export function configureSingleSync(configuration) {
+    if (!isMountConfig(configuration)) {
+        throw new TypeError('Invalid single mount point configuration');
+    }
+    const resolved = resolveMountConfigSync(configuration);
+    umount('/');
+    mount('/', resolved);
+}
 /**
  * Like `fs.mount`, but it also creates missing directories.
  * @privateRemarks
@@ -92,6 +160,33 @@ async function mountWithMkdir(path, fs) {
     }
     mount(path, fs);
 }
+/**
+ * Like `fs.mount`, but it also creates missing directories.
+ * @privateRemarks
+ * This is implemented as a separate function to avoid a circular dependency between vfs/shared.ts and other vfs layer files.
+ * @internal
+ */
+function mountWithMkdirSync(path, fs) {
+    if (path == '/') {
+        mount(path, fs);
+        return;
+    }
+    let stats = null;
+    try {
+        stats = statSync(path);
+    }
+    catch (error) {
+        if (error?.code != 'ENOENT')
+            throw error;
+    }
+    if (!stats) {
+        mkdirSync(path, { recursive: true });
+    }
+    else if (!stats.isDirectory()) {
+        throw withErrno('ENOTDIR', 'Missing directory at mount point: ' + path);
+    }
+    mount(path, fs);
+}
 /**
  * @category Backends and Configuration
  */
@@ -149,6 +244,53 @@ export async function configure(configuration) {
         }
     }
 }
+/**
+ * @experimental
+ * Configures ZenFS with `configuration`
+ * @category Backends and Configuration
+ * @see Configuration
+ */
+export function configureSync(configuration) {
+    Object.assign(defaultContext.credentials, createCredentials({
+        uid: configuration.uid || 0,
+        gid: configuration.gid || 0,
+    }));
+    _setAccessChecks(!configuration.disableAccessChecks);
+    if (configuration.log)
+        log.configure(configuration.log);
+    if (configuration.mounts) {
+        for (const [_point, mountConfig] of Object.entries(configuration.mounts).sort(([a], [b]) => (a.length > b.length ? 1 : -1))) {
+            const point = _point.startsWith('/') ? _point : '/' + _point;
+            if (isBackendConfig(mountConfig)) {
+                mountConfig.disableAsyncCache ??= configuration.disableAsyncCache || false;
+                mountConfig.caseFold ??= configuration.caseFold;
+            }
+            if (point == '/')
+                umount('/');
+            mountWithMkdirSync(point, resolveMountConfigSync(mountConfig));
+        }
+    }
+    for (const fs of mounts.values()) {
+        configureFileSystem(fs, configuration);
+    }
+    if (configuration.addDevices && !mounts.has('/dev')) {
+        const devfs = new DeviceFS();
+        devfs.addDefaults();
+        devfs.readySync();
+        mountWithMkdirSync('/dev', devfs);
+    }
+    if (configuration.defaultDirectories) {
+        for (const dir of _defaultDirectories) {
+            if (existsSync(dir)) {
+                const stats = statSync(dir);
+                if (!stats.isDirectory())
+                    log.warn('Default directory exists but is not a directory: ' + dir);
+                continue;
+            }
+            mkdirSync(dir);
+        }
+    }
+}
 export async function sync() {
     for (const fs of mounts.values())
         await fs.sync();
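As the implementation above shows, `configureSync` resolves every mount synchronously and throws `EAGAIN` if a backend's `isAvailable()` or `create()` returns a promise. A sketch of a multi-mount configuration that stays on the synchronous path:

```ts
import { configureSync, InMemory } from '@zenfs/core';

// Sketch: both mounts use a synchronous backend, so nothing would block
// and no EAGAIN is thrown.
configureSync({
    mounts: {
        '/tmp': { backend: InMemory, label: 'tmp' },
        '/home': { backend: InMemory, label: 'home' },
    },
});
```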
package/dist/internal/filesystem.d.ts
CHANGED
@@ -171,6 +171,7 @@ export declare abstract class FileSystem {
      */
     usage(): UsageInfo;
     ready(): Promise<void>;
+    readySync(): void;
     abstract rename(oldPath: string, newPath: string): Promise<void>;
     abstract renameSync(oldPath: string, newPath: string): void;
     abstract stat(path: string): Promise<InodeLike>;
package/dist/internal/filesystem.js
CHANGED
@@ -1,3 +1,4 @@
+import { withErrno } from 'kerium';
 const _chunkSize = 0x1000;
 /**
  * Provides a consistent and easy to use internal API.
@@ -61,6 +62,10 @@ export class FileSystem {
         };
     }
     async ready() { }
+    readySync() {
+        if (this.ready !== FileSystem.prototype.ready)
+            throw withErrno('EAGAIN');
+    }
     /**
      * Test whether or not `path` exists.
      */
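The base `readySync` is a no-op unless `ready()` has been overridden without a synchronous counterpart, in which case it throws `EAGAIN`; callers can therefore use it to probe whether a file system supports synchronous initialization. A sketch:

```ts
import { InMemory, type FileSystem } from '@zenfs/core';

// Sketch: succeeds for file systems with no async setup (or a readySync override),
// and throws EAGAIN when only an asynchronous ready() is available.
function initSynchronously(fs: FileSystem): FileSystem {
    fs.readySync();
    return fs;
}

initSynchronously(InMemory.create({ label: 'sync-ok' }));
```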
package/dist/mixins/mutexed.d.ts
CHANGED
package/dist/mixins/mutexed.js
CHANGED
package/package.json
CHANGED

package/tests/common/config.test.ts
ADDED
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: LGPL-3.0-or-later
+import { configureSingleSync, configureSync, fs, InMemory, mounts, SingleBuffer, type Backend } from '@zenfs/core';
+import { Errno } from 'kerium';
+import assert from 'node:assert/strict';
+import { suite, test } from 'node:test';
+
+const AsyncBackend = {
+    name: 'AsyncBackend',
+    options: {},
+    async create() {
+        await Promise.resolve();
+        return InMemory.create({ label: 'async-backend' });
+    },
+} satisfies Backend;
+
+suite('Sync configuration', () => {
+    test('configureSingleSync mounts root synchronously', () => {
+        configureSingleSync({ backend: InMemory, label: 'sync-root' });
+        assert.equal(mounts.get('/')?.label, 'sync-root');
+
+        fs.writeFileSync('/sync-file', 'sync');
+        assert.equal(fs.readFileSync('/sync-file', 'utf8'), 'sync');
+    });
+
+    test('configureSync mounts additional directories', () => {
+        configureSync({
+            mounts: {
+                tmp: { backend: InMemory, label: 'sync-tmp' },
+            },
+        });
+
+        assert.ok(mounts.has('/tmp'));
+        fs.writeFileSync('/tmp/sync.txt', 'ok');
+        assert.equal(fs.readFileSync('/tmp/sync.txt', 'utf8'), 'ok');
+
+        fs.umount('/tmp');
+        fs.rmSync('/tmp', { recursive: true, force: true });
+    });
+
+    test('configureSync rejects asynchronous backends', () => {
+        assert.throws(
+            () =>
+                configureSync({
+                    mounts: { '/': { backend: AsyncBackend } },
+                }),
+            { errno: Errno.EAGAIN }
+        );
+    });
+
+    test('configureSingleSync works with SingleBuffer', () => {
+        const buffer = new ArrayBuffer(0x20000);
+        configureSingleSync({ backend: SingleBuffer, buffer });
+
+        fs.writeFileSync('/sb.txt', 'single-buffer');
+        assert.equal(fs.readFileSync('/sb.txt', 'utf8'), 'single-buffer');
+    });
+});