@peers-app/peers-sdk 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/context/data-context.d.ts +31 -0
- package/dist/context/data-context.js +56 -0
- package/dist/context/index.d.ts +3 -0
- package/dist/context/index.js +19 -0
- package/dist/context/user-context-singleton.d.ts +11 -0
- package/dist/context/user-context-singleton.js +121 -0
- package/dist/context/user-context.d.ts +55 -0
- package/dist/context/user-context.js +205 -0
- package/dist/data/assistants.d.ts +68 -0
- package/dist/data/assistants.js +64 -0
- package/dist/data/change-tracking.d.ts +219 -0
- package/dist/data/change-tracking.js +119 -0
- package/dist/data/channels.d.ts +29 -0
- package/dist/data/channels.js +25 -0
- package/dist/data/data-locks.d.ts +37 -0
- package/dist/data/data-locks.js +180 -0
- package/dist/data/data-locks.test.d.ts +1 -0
- package/dist/data/data-locks.test.js +456 -0
- package/dist/data/device-sync-info.d.ts +19 -0
- package/dist/data/device-sync-info.js +24 -0
- package/dist/data/devices.d.ts +51 -0
- package/dist/data/devices.js +36 -0
- package/dist/data/embeddings.d.ts +47 -0
- package/dist/data/embeddings.js +36 -0
- package/dist/data/files/file-read-stream.d.ts +27 -0
- package/dist/data/files/file-read-stream.js +195 -0
- package/dist/data/files/file-write-stream.d.ts +20 -0
- package/dist/data/files/file-write-stream.js +113 -0
- package/dist/data/files/file.types.d.ts +47 -0
- package/dist/data/files/file.types.js +55 -0
- package/dist/data/files/files.d.ts +28 -0
- package/dist/data/files/files.js +127 -0
- package/dist/data/files/files.test.d.ts +1 -0
- package/dist/data/files/files.test.js +728 -0
- package/dist/data/files/index.d.ts +4 -0
- package/dist/data/files/index.js +23 -0
- package/dist/data/group-member-roles.d.ts +9 -0
- package/dist/data/group-member-roles.js +25 -0
- package/dist/data/group-members.d.ts +39 -0
- package/dist/data/group-members.js +68 -0
- package/dist/data/group-members.test.d.ts +1 -0
- package/dist/data/group-members.test.js +287 -0
- package/dist/data/group-permissions.d.ts +8 -0
- package/dist/data/group-permissions.js +73 -0
- package/dist/data/group-share.d.ts +50 -0
- package/dist/data/group-share.js +196 -0
- package/dist/data/groups.d.ts +50 -0
- package/dist/data/groups.js +73 -0
- package/dist/data/groups.test.d.ts +1 -0
- package/dist/data/groups.test.js +153 -0
- package/dist/data/index.d.ts +31 -0
- package/dist/data/index.js +47 -0
- package/dist/data/knowledge/knowledge-frames.d.ts +34 -0
- package/dist/data/knowledge/knowledge-frames.js +34 -0
- package/dist/data/knowledge/knowledge-links.d.ts +30 -0
- package/dist/data/knowledge/knowledge-links.js +25 -0
- package/dist/data/knowledge/knowledge-values.d.ts +35 -0
- package/dist/data/knowledge/knowledge-values.js +35 -0
- package/dist/data/knowledge/peer-types.d.ts +112 -0
- package/dist/data/knowledge/peer-types.js +27 -0
- package/dist/data/knowledge/predicates.d.ts +34 -0
- package/dist/data/knowledge/predicates.js +27 -0
- package/dist/data/messages.d.ts +57 -0
- package/dist/data/messages.js +97 -0
- package/dist/data/orm/client-proxy.data-source.d.ts +27 -0
- package/dist/data/orm/client-proxy.data-source.js +65 -0
- package/dist/data/orm/cursor.d.ts +25 -0
- package/dist/data/orm/cursor.js +47 -0
- package/dist/data/orm/cursor.test.d.ts +1 -0
- package/dist/data/orm/cursor.test.js +315 -0
- package/dist/data/orm/data-query.d.ts +96 -0
- package/dist/data/orm/data-query.js +208 -0
- package/dist/data/orm/data-query.mongo.d.ts +17 -0
- package/dist/data/orm/data-query.mongo.js +267 -0
- package/dist/data/orm/data-query.mongo.test.d.ts +1 -0
- package/dist/data/orm/data-query.mongo.test.js +398 -0
- package/dist/data/orm/data-query.sqlite.d.ts +14 -0
- package/dist/data/orm/data-query.sqlite.js +297 -0
- package/dist/data/orm/data-query.sqlite.test.d.ts +1 -0
- package/dist/data/orm/data-query.sqlite.test.js +377 -0
- package/dist/data/orm/data-query.test.d.ts +1 -0
- package/dist/data/orm/data-query.test.js +553 -0
- package/dist/data/orm/decorators.d.ts +6 -0
- package/dist/data/orm/decorators.js +21 -0
- package/dist/data/orm/dependency-injection.test.d.ts +1 -0
- package/dist/data/orm/dependency-injection.test.js +171 -0
- package/dist/data/orm/doc.d.ts +26 -0
- package/dist/data/orm/doc.js +124 -0
- package/dist/data/orm/event-registry.d.ts +24 -0
- package/dist/data/orm/event-registry.js +40 -0
- package/dist/data/orm/event-registry.test.d.ts +1 -0
- package/dist/data/orm/event-registry.test.js +44 -0
- package/dist/data/orm/factory.d.ts +8 -0
- package/dist/data/orm/factory.js +147 -0
- package/dist/data/orm/index.d.ts +16 -0
- package/dist/data/orm/index.js +32 -0
- package/dist/data/orm/multi-cursors.d.ts +11 -0
- package/dist/data/orm/multi-cursors.js +146 -0
- package/dist/data/orm/multi-cursors.test.d.ts +1 -0
- package/dist/data/orm/multi-cursors.test.js +455 -0
- package/dist/data/orm/sql-db.d.ts +6 -0
- package/dist/data/orm/sql-db.js +2 -0
- package/dist/data/orm/sql.data-source.d.ts +38 -0
- package/dist/data/orm/sql.data-source.js +379 -0
- package/dist/data/orm/sql.data-source.test.d.ts +1 -0
- package/dist/data/orm/sql.data-source.test.js +406 -0
- package/dist/data/orm/subscribable.data-source.d.ts +25 -0
- package/dist/data/orm/subscribable.data-source.js +72 -0
- package/dist/data/orm/table-container-events.test.d.ts +1 -0
- package/dist/data/orm/table-container-events.test.js +93 -0
- package/dist/data/orm/table-container.d.ts +39 -0
- package/dist/data/orm/table-container.js +96 -0
- package/dist/data/orm/table-definitions.system.d.ts +9 -0
- package/dist/data/orm/table-definitions.system.js +29 -0
- package/dist/data/orm/table-definitions.type.d.ts +19 -0
- package/dist/data/orm/table-definitions.type.js +2 -0
- package/dist/data/orm/table-dependencies.d.ts +32 -0
- package/dist/data/orm/table-dependencies.js +2 -0
- package/dist/data/orm/table.d.ts +42 -0
- package/dist/data/orm/table.event-source.test.d.ts +1 -0
- package/dist/data/orm/table.event-source.test.js +341 -0
- package/dist/data/orm/table.js +244 -0
- package/dist/data/orm/types.d.ts +20 -0
- package/dist/data/orm/types.js +115 -0
- package/dist/data/orm/types.test.d.ts +1 -0
- package/dist/data/orm/types.test.js +71 -0
- package/dist/data/package-permissions.d.ts +7 -0
- package/dist/data/package-permissions.js +18 -0
- package/dist/data/packages.d.ts +92 -0
- package/dist/data/packages.js +90 -0
- package/dist/data/peer-events/peer-event-handlers.d.ts +21 -0
- package/dist/data/peer-events/peer-event-handlers.js +28 -0
- package/dist/data/peer-events/peer-event-types.d.ts +119 -0
- package/dist/data/peer-events/peer-event-types.js +29 -0
- package/dist/data/peer-events/peer-events.d.ts +41 -0
- package/dist/data/peer-events/peer-events.js +102 -0
- package/dist/data/persistent-vars.d.ts +87 -0
- package/dist/data/persistent-vars.js +230 -0
- package/dist/data/tool-tests.d.ts +37 -0
- package/dist/data/tool-tests.js +27 -0
- package/dist/data/tools.d.ts +358 -0
- package/dist/data/tools.js +48 -0
- package/dist/data/user-permissions.d.ts +15 -0
- package/dist/data/user-permissions.js +39 -0
- package/dist/data/user-permissions.test.d.ts +1 -0
- package/dist/data/user-permissions.test.js +252 -0
- package/dist/data/users.d.ts +38 -0
- package/dist/data/users.js +73 -0
- package/dist/data/workflow-logs.d.ts +106 -0
- package/dist/data/workflow-logs.js +67 -0
- package/dist/data/workflow-runs.d.ts +103 -0
- package/dist/data/workflow-runs.js +313 -0
- package/dist/data/workflows.d.ts +16 -0
- package/dist/data/workflows.js +21 -0
- package/dist/device/connection.d.ts +41 -0
- package/dist/device/connection.js +249 -0
- package/dist/device/connection.test.d.ts +1 -0
- package/dist/device/connection.test.js +292 -0
- package/dist/device/device-election.d.ts +36 -0
- package/dist/device/device-election.js +137 -0
- package/dist/device/device.d.ts +22 -0
- package/dist/device/device.js +110 -0
- package/dist/device/device.test.d.ts +1 -0
- package/dist/device/device.test.js +203 -0
- package/dist/device/get-trust-level.d.ts +3 -0
- package/dist/device/get-trust-level.js +87 -0
- package/dist/device/socket.type.d.ts +20 -0
- package/dist/device/socket.type.js +15 -0
- package/dist/device/streamed-socket.d.ts +27 -0
- package/dist/device/streamed-socket.js +154 -0
- package/dist/device/streamed-socket.test.d.ts +1 -0
- package/dist/device/streamed-socket.test.js +44 -0
- package/dist/events.d.ts +35 -0
- package/dist/events.js +128 -0
- package/dist/index.d.ts +33 -0
- package/dist/index.js +50 -0
- package/dist/keys.d.ts +51 -0
- package/dist/keys.js +234 -0
- package/dist/keys.test.d.ts +1 -0
- package/dist/keys.test.js +215 -0
- package/dist/mentions.d.ts +9 -0
- package/dist/mentions.js +46 -0
- package/dist/observable.d.ts +19 -0
- package/dist/observable.js +112 -0
- package/dist/observable.test.d.ts +1 -0
- package/dist/observable.test.js +183 -0
- package/dist/package-loader/get-require.d.ts +10 -0
- package/dist/package-loader/get-require.js +31 -0
- package/dist/package-loader/index.d.ts +1 -0
- package/dist/package-loader/index.js +17 -0
- package/dist/package-loader/package-loader.d.ts +16 -0
- package/dist/package-loader/package-loader.js +102 -0
- package/dist/peers-ui/peers-ui.d.ts +15 -0
- package/dist/peers-ui/peers-ui.js +23 -0
- package/dist/peers-ui/peers-ui.types.d.ts +35 -0
- package/dist/peers-ui/peers-ui.types.js +3 -0
- package/dist/rpc-types.d.ts +45 -0
- package/dist/rpc-types.js +47 -0
- package/dist/serial-json.d.ts +5 -0
- package/dist/serial-json.js +186 -0
- package/dist/serial-json.test.d.ts +1 -0
- package/dist/serial-json.test.js +86 -0
- package/dist/system-ids.d.ts +6 -0
- package/dist/system-ids.js +10 -0
- package/dist/tools/index.d.ts +1 -0
- package/dist/tools/index.js +17 -0
- package/dist/tools/tools-factory.d.ts +5 -0
- package/dist/tools/tools-factory.js +34 -0
- package/dist/types/app-nav.d.ts +18 -0
- package/dist/types/app-nav.js +10 -0
- package/dist/types/assistant-runner-args.d.ts +9 -0
- package/dist/types/assistant-runner-args.js +2 -0
- package/dist/types/field-type.d.ts +37 -0
- package/dist/types/field-type.js +26 -0
- package/dist/types/peer-device.d.ts +40 -0
- package/dist/types/peer-device.js +14 -0
- package/dist/types/peers-package.d.ts +23 -0
- package/dist/types/peers-package.js +2 -0
- package/dist/types/workflow-logger.d.ts +2 -0
- package/dist/types/workflow-logger.js +2 -0
- package/dist/types/workflow-run-context.d.ts +12 -0
- package/dist/types/workflow-run-context.js +2 -0
- package/dist/types/workflow.d.ts +72 -0
- package/dist/types/workflow.js +24 -0
- package/dist/types/zod-types.d.ts +7 -0
- package/dist/types/zod-types.js +12 -0
- package/dist/users.query.d.ts +13 -0
- package/dist/users.query.js +134 -0
- package/dist/utils.d.ts +39 -0
- package/dist/utils.js +240 -0
- package/dist/utils.test.d.ts +1 -0
- package/dist/utils.test.js +140 -0
- package/package.json +50 -0

package/dist/data/files/file-read-stream.d.ts
@@ -0,0 +1,27 @@
+import { IFile } from './file.types';
+import type { FilesTable } from './files';
+export declare class FileReadStream {
+    private fileRecord;
+    private fileTable;
+    private preloadChunksCount;
+    private chunkHashes;
+    private currentChunkIndex;
+    private currentChunkBuffer;
+    private chunkPromises;
+    private positionInChunk;
+    private totalPosition;
+    private eof;
+    constructor(fileRecord: IFile, fileTable: FilesTable, preloadChunksCount?: number);
+    private loadChunkHashes;
+    private loadChunkByIndex;
+    private preloadChunks;
+    private loadCurrentChunk;
+    read(size?: number): Promise<Uint8Array | null>;
+    seek(position: number): Promise<void>;
+    getMetadata(): IFile;
+    getPosition(): number;
+    getFileSize(): number;
+    isEOF(): boolean;
+    getBytesRemaining(): number;
+    readAll(): Promise<Uint8Array | null>;
+}
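
For orientation, a minimal usage sketch of this read API (not part of the diff): it assumes a FileOps backend has already been registered via setFileOps, uses a hypothetical fileId, and imports Files (declared in files.d.ts further down) via one plausible deep path into the built package.

import { Files } from '@peers-app/peers-sdk/dist/data/files/files'; // import path is an assumption

async function readFirstKilobyte(fileId: string): Promise<Uint8Array | null> {
    const stream = await Files().openReadStream(fileId);
    if (!stream) return null;              // no file record with this id
    const head = await stream.read(1024);  // up to 1 KiB from position 0
    console.log(stream.getPosition(), 'of', stream.getFileSize(), 'bytes read');
    return head;
}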

package/dist/data/files/file-read-stream.js
@@ -0,0 +1,195 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FileReadStream = void 0;
+const keys_1 = require("../../keys");
+const file_types_1 = require("./file.types");
+class FileReadStream {
+    fileRecord;
+    fileTable;
+    preloadChunksCount;
+    chunkHashes = null;
+    currentChunkIndex = 0;
+    currentChunkBuffer = null;
+    chunkPromises = new Map();
+    positionInChunk = 0;
+    totalPosition = 0;
+    eof = false;
+    constructor(fileRecord, fileTable, preloadChunksCount = 2) {
+        this.fileRecord = fileRecord;
+        this.fileTable = fileTable;
+        this.preloadChunksCount = preloadChunksCount;
+    }
+    async loadChunkHashes() {
+        if (this.chunkHashes !== null) {
+            return; // Already loaded
+        }
+        if (this.fileRecord.chunkHashes) {
+            // Small file: chunk hashes stored directly in database
+            this.chunkHashes = this.fileRecord.chunkHashes;
+        }
+        else if (this.fileRecord.indexFileId) {
+            // Large file: chunk hashes stored in separate index file (recursively)
+            this.chunkHashes = await this.fileTable.loadChunkHashesRecursively(this.fileRecord.indexFileId);
+        }
+        else {
+            throw new Error(`File ${this.fileRecord.fileId} has neither chunkHashes nor indexFileId`);
+        }
+    }
+    async loadChunkByIndex(chunkIndex) {
+        await this.loadChunkHashes();
+        if (chunkIndex >= this.chunkHashes.length) {
+            throw new Error(`Chunk index ${chunkIndex} out of bounds`);
+        }
+        const chunkHash = this.chunkHashes[chunkIndex];
+        const ops = await (0, file_types_1.getFileOps)();
+        const chunkPath = `${file_types_1.CHUNKS_DIR}/${chunkHash}`;
+        let chunk;
+        if (await ops.fileExists(chunkPath)) {
+            // Read from local storage
+            chunk = await ops.readFile(chunkPath);
+        }
+        else {
+            // Download from peer by hash
+            chunk = (await ops.downloadFileChunk(chunkHash));
+            if (!chunk) {
+                throw new Error(`Chunk ${chunkHash} not found in storage or peers`);
+            }
+        }
+        // Verify chunk integrity
+        const actualChunkHash = (0, keys_1.hashBytes)(chunk);
+        if (actualChunkHash !== chunkHash) {
+            throw new Error(`Chunk integrity check failed: expected ${chunkHash}, got ${actualChunkHash}`);
+        }
+        return chunk;
+    }
+    async preloadChunks() {
+        await this.loadChunkHashes();
+        // Start from current chunk and preload up to preloadChunksCount ahead
+        for (let offset = 0; offset < this.preloadChunksCount; offset++) {
+            const chunkIndex = this.currentChunkIndex + offset;
+            if (chunkIndex >= this.chunkHashes.length) {
+                break; // No more chunks to preload
+            }
+            if (!this.chunkPromises.has(chunkIndex)) {
+                // Start preload but don't await it - let it happen in background
+                // Add error handling to prevent unhandled promise rejections
+                const safeChunkPromise = this.loadChunkByIndex(chunkIndex).catch(error => {
+                    // Re-throw so error is available when promise is consumed
+                    throw error;
+                });
+                // Add no-op catch to prevent unhandled promise rejection warnings
+                safeChunkPromise.catch(() => { });
+                this.chunkPromises.set(chunkIndex, safeChunkPromise);
+            }
+        }
+    }
+    async loadCurrentChunk() {
+        await this.preloadChunks();
+        if (this.currentChunkIndex >= this.chunkHashes.length) {
+            this.eof = true;
+            this.currentChunkBuffer = null;
+            this.chunkPromises.clear();
+            return;
+        }
+        const chunkPromise = this.chunkPromises.get(this.currentChunkIndex);
+        this.currentChunkBuffer = await chunkPromise;
+        this.chunkPromises.delete(this.currentChunkIndex);
+    }
+    async read(size) {
+        if (this.eof) {
+            return null;
+        }
+        // Default to reading one full chunk if no size specified
+        if (size === undefined) {
+            size = file_types_1.FILE_CHUNK_SIZE;
+        }
+        const chunks = [];
+        let remainingBytes = size;
+        while (remainingBytes > 0 && !this.eof) {
+            // Load current chunk if needed
+            if (!this.currentChunkBuffer) {
+                await this.loadCurrentChunk();
+                if (this.eof) {
+                    break;
+                }
+            }
+            // Read from current chunk
+            const availableInChunk = this.currentChunkBuffer.length - this.positionInChunk;
+            const bytesToRead = Math.min(remainingBytes, availableInChunk);
+            const chunkPortion = this.currentChunkBuffer.subarray(this.positionInChunk, this.positionInChunk + bytesToRead);
+            chunks.push(chunkPortion);
+            remainingBytes -= bytesToRead;
+            this.positionInChunk += bytesToRead;
+            this.totalPosition += bytesToRead;
+            // If we've consumed the entire chunk, move to next one
+            if (this.positionInChunk >= this.currentChunkBuffer.length) {
+                this.currentChunkIndex++;
+                this.currentChunkBuffer = null;
+                this.positionInChunk = 0;
+            }
+        }
+        await this.preloadChunks();
+        return chunks.length > 0 ? new Uint8Array(Buffer.concat(chunks)) : null;
+    }
+    async seek(position) {
+        if (position < 0) {
+            throw new Error('Seek position cannot be negative');
+        }
+        if (position > this.fileRecord.fileSize) {
+            throw new Error('Seek position beyond file size');
+        }
+        await this.loadChunkHashes();
+        // Calculate which chunk contains the target position
+        const targetChunkIndex = Math.floor(position / file_types_1.FILE_CHUNK_SIZE);
+        const positionInTargetChunk = position % file_types_1.FILE_CHUNK_SIZE;
+        this.currentChunkIndex = targetChunkIndex;
+        this.positionInChunk = positionInTargetChunk;
+        this.totalPosition = position;
+        this.currentChunkBuffer = null; // Will be loaded on next read
+        // Only set EOF if we're seeking beyond the last chunk OR at the very end of the file
+        this.eof = (targetChunkIndex >= this.chunkHashes.length) || (position >= this.fileRecord.fileSize);
+        this.chunkPromises.clear();
+        await this.preloadChunks();
+    }
+    getMetadata() {
+        return { ...this.fileRecord };
+    }
+    getPosition() {
+        return this.totalPosition;
+    }
+    getFileSize() {
+        return this.fileRecord.fileSize;
+    }
+    isEOF() {
+        return this.eof;
+    }
+    getBytesRemaining() {
+        return this.fileRecord.fileSize - this.totalPosition;
+    }
+    // Read the entire file (for compatibility with existing getFile API)
+    async readAll() {
+        await this.seek(0); // Start from beginning
+        const chunks = [];
+        let chunk;
+        try {
+            while ((chunk = await this.read()) !== null) {
+                chunks.push(chunk);
+            }
+            return new Uint8Array(Buffer.concat(chunks));
+        }
+        catch (error) {
+            // Only return null for chunk-specific errors (not found, download failed)
+            // Rethrow other errors like index file issues
+            if (error instanceof Error &&
+                (error.message.includes('Chunk') &&
+                    (error.message.includes('not found') || error.message.includes('integrity check failed')))) {
+                return null;
+            }
+            throw error;
+        }
+        finally {
+            this.chunkPromises.clear();
+        }
+    }
+}
+exports.FileReadStream = FileReadStream;
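
One detail worth noting in preloadChunks above: each chunk promise is stored for later consumption, and a no-op .catch() is attached so that a rejection occurring before anyone awaits it does not raise an unhandled-rejection warning; the stored promise still rejects normally when loadCurrentChunk awaits it. The same pattern in isolation (a sketch, with a generic loader standing in for loadChunkByIndex):

const pending = new Map<number, Promise<Uint8Array>>();

function schedulePreload(index: number, load: (i: number) => Promise<Uint8Array>): void {
    if (pending.has(index)) return;
    const p = load(index); // kicks off in the background; intentionally not awaited here
    p.catch(() => { });    // suppresses the unhandled-rejection warning only
    pending.set(index, p); // the stored promise still rejects when finally awaited
}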

package/dist/data/files/file-write-stream.d.ts
@@ -0,0 +1,20 @@
+import { IFile, IFileInput } from './file.types';
+import type { FilesTable } from './files';
+export declare class FileWriteStream {
+    private metadata;
+    private fileTable;
+    private buffer;
+    private chunkHashes;
+    private bytesWritten;
+    private finalized;
+    private aborted;
+    constructor(metadata: IFileInput, fileTable: FilesTable);
+    write(chunk: Uint8Array): Promise<void>;
+    private processCompleteChunks;
+    finalize(): Promise<IFile>;
+    abort(): Promise<void>;
+    getBytesWritten(): number;
+    getChunkCount(): number;
+    isFinalized(): boolean;
+    isAborted(): boolean;
+}
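
A sketch of incremental writing against this interface (the id, name, and error handling are illustrative; a FileOps backend must already be registered, and the import path is an assumption):

import { Files } from '@peers-app/peers-sdk/dist/data/files/files'; // import path is an assumption

async function writeInParts(fileId: string, parts: Uint8Array[]) {
    const stream = await Files().createWriteStream({
        fileId,
        name: 'example.bin',
        fileSize: 0, // placeholder; finalize() records the actual bytesWritten
    });
    try {
        for (const part of parts) {
            await stream.write(part); // full 1 MiB chunks are hashed and flushed as they fill
        }
        return await stream.finalize(); // writes the tail chunk and saves the file record
    }
    catch (err) {
        await stream.abort(); // marks the stream dead; written chunks are left for garbage collection
        throw err;
    }
}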

package/dist/data/files/file-write-stream.js
@@ -0,0 +1,113 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FileWriteStream = void 0;
+const keys_1 = require("../../keys");
+const file_types_1 = require("./file.types");
+class FileWriteStream {
+    metadata;
+    fileTable;
+    buffer = new Uint8Array(0);
+    chunkHashes = [];
+    bytesWritten = 0;
+    finalized = false;
+    aborted = false;
+    constructor(metadata, fileTable) {
+        this.metadata = metadata;
+        this.fileTable = fileTable;
+    }
+    async write(chunk) {
+        if (this.finalized) {
+            throw new Error('Cannot write to finalized stream');
+        }
+        if (this.aborted) {
+            throw new Error('Cannot write to aborted stream');
+        }
+        // Add chunk to buffer
+        this.buffer = new Uint8Array(Buffer.concat([this.buffer, chunk]));
+        this.bytesWritten += chunk.length;
+        // Process complete chunks
+        await this.processCompleteChunks();
+    }
+    async processCompleteChunks() {
+        const ops = await (0, file_types_1.getFileOps)();
+        while (this.buffer.length >= file_types_1.FILE_CHUNK_SIZE) {
+            // Extract one chunk
+            const chunkData = this.buffer.subarray(0, file_types_1.FILE_CHUNK_SIZE);
+            this.buffer = this.buffer.subarray(file_types_1.FILE_CHUNK_SIZE);
+            // Hash and store the chunk
+            const chunkHash = (0, keys_1.hashBytes)(chunkData);
+            this.chunkHashes.push(chunkHash);
+            const chunkPath = `${file_types_1.CHUNKS_DIR}/${chunkHash}`;
+            await ops.writeFile(chunkPath, chunkData);
+        }
+    }
+    async finalize() {
+        if (this.finalized) {
+            throw new Error('Stream already finalized');
+        }
+        if (this.aborted) {
+            throw new Error('Cannot finalize aborted stream');
+        }
+        // First, process any complete chunks in the buffer
+        await this.processCompleteChunks();
+        // Then process any remaining partial data as final chunk
+        if (this.buffer.length > 0) {
+            const ops = await (0, file_types_1.getFileOps)();
+            const finalChunkHash = (0, keys_1.hashBytes)(this.buffer);
+            this.chunkHashes.push(finalChunkHash);
+            const chunkPath = `${file_types_1.CHUNKS_DIR}/${finalChunkHash}`;
+            await ops.writeFile(chunkPath, this.buffer);
+            this.buffer = new Uint8Array(0);
+        }
+        // Create the file record using the same logic as FileTable.saveFile
+        const chunkHashesString = JSON.stringify(this.chunkHashes);
+        const fileHash = (0, keys_1.hashBytes)(Buffer.from(chunkHashesString, 'utf8'));
+        let fileRecord;
+        if (this.chunkHashes.length > file_types_1.CHUNK_INDEX_THRESHOLD) {
+            // Large file: use recursive index file
+            const indexFileId = await this.fileTable.createIndexFileRecursively(this.chunkHashes);
+            fileRecord = {
+                ...this.metadata,
+                fileSize: this.bytesWritten,
+                fileHash,
+                indexFileId
+            };
+        }
+        else {
+            // Small file: store chunk hashes directly
+            fileRecord = {
+                ...this.metadata,
+                fileSize: this.bytesWritten,
+                fileHash,
+                chunkHashes: this.chunkHashes
+            };
+        }
+        // Save to database
+        await this.fileTable.saveFileRecord(fileRecord);
+        this.finalized = true;
+        return fileRecord;
+    }
+    async abort() {
+        if (this.finalized) {
+            throw new Error('Cannot abort finalized stream');
+        }
+        this.aborted = true;
+        this.buffer = Buffer.alloc(0);
+        this.chunkHashes = [];
+        // Note: We don't delete chunks since they may be shared with other files
+        // Chunk cleanup should be handled by a separate garbage collection process
+    }
+    getBytesWritten() {
+        return this.bytesWritten;
+    }
+    getChunkCount() {
+        return this.chunkHashes.length;
+    }
+    isFinalized() {
+        return this.finalized;
+    }
+    isAborted() {
+        return this.aborted;
+    }
+}
+exports.FileWriteStream = FileWriteStream;
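
Because chunks are stored under their own hash (file_chunks/<hash>), the store is content-addressed: writing the same bytes twice lands on the same path, so identical data at chunk granularity is stored once. A sketch of that property (hypothetical ids; assumes a registered FileOps and the same assumed import path as above):

import { Files } from '@peers-app/peers-sdk/dist/data/files/files'; // import path is an assumption

async function demoDedup() {
    const data = new Uint8Array(3 * 1024 * 1024); // 3 MiB of zeros -> exactly 3 identical chunks

    const a = await Files().saveFile({ fileId: 'id-a', name: 'a.bin', fileSize: data.length }, data);
    const b = await Files().saveFile({ fileId: 'id-b', name: 'b.bin', fileSize: data.length }, data);

    // Same bytes -> same chunk hashes -> same fileHash; both records point at
    // a single 1 MiB chunk file on disk, since all three chunks hash alike.
    console.assert(a.fileHash === b.fileHash);
}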

package/dist/data/files/file.types.d.ts
@@ -0,0 +1,47 @@
+import { z } from "zod";
+import { ITableMetaData } from "../orm/types";
+export declare const fileSchema: z.ZodObject<{
+    fileId: z.ZodString;
+    name: z.ZodString;
+    fileSize: z.ZodNumber;
+    fileHash: z.ZodString;
+    mimeType: z.ZodOptional<z.ZodString>;
+    chunkHashes: z.ZodOptional<z.ZodArray<z.ZodString, "many">>;
+    isIndexFile: z.ZodOptional<z.ZodBoolean>;
+    indexFileId: z.ZodOptional<z.ZodString>;
+}, "strip", z.ZodTypeAny, {
+    name: string;
+    fileId: string;
+    fileSize: number;
+    fileHash: string;
+    mimeType?: string | undefined;
+    chunkHashes?: string[] | undefined;
+    isIndexFile?: boolean | undefined;
+    indexFileId?: string | undefined;
+}, {
+    name: string;
+    fileId: string;
+    fileSize: number;
+    fileHash: string;
+    mimeType?: string | undefined;
+    chunkHashes?: string[] | undefined;
+    isIndexFile?: boolean | undefined;
+    indexFileId?: string | undefined;
+}>;
+export type IFile = z.infer<typeof fileSchema>;
+export type IFileInput = Pick<IFile, 'fileId' | 'name' | 'fileSize' | 'mimeType' | 'isIndexFile'>;
+export declare const filesMetaData: ITableMetaData;
+export declare const FILE_CHUNK_SIZE: number;
+export declare const CHUNKS_DIR = "file_chunks";
+export declare let CHUNK_INDEX_THRESHOLD: number;
+export declare function setChunkIndexThreshold(threshold: number): void;
+export interface FileOps {
+    downloadFileChunk(chunkHash: string): Promise<Uint8Array | null>;
+    fileExists(path: string): Promise<boolean>;
+    readFile(path: string): Promise<Uint8Array>;
+    writeFile(path: string, data: Uint8Array): Promise<void>;
+    deletePath(path: string): Promise<void>;
+}
+export declare function setFileOps(ops: FileOps): void;
+export declare function getFileOps(): Promise<FileOps>;
+export declare function resetFileOps(): void;
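
The FileOps interface is the seam between the SDK and the host platform. A minimal in-memory implementation, e.g. for tests (a sketch; a real host would back this with disk I/O and peer transport, and the import path is an assumption):

import { FileOps, setFileOps } from '@peers-app/peers-sdk/dist/data/files/file.types'; // path is an assumption

const store = new Map<string, Uint8Array>();

const memoryOps: FileOps = {
    async fileExists(path) { return store.has(path); },
    async readFile(path) {
        const data = store.get(path);
        if (!data) throw new Error(`not found: ${path}`);
        return data;
    },
    async writeFile(path, data) { store.set(path, data); },
    async deletePath(path) { store.delete(path); },
    async downloadFileChunk(_chunkHash) { return null; }, // no peers in this sketch
};

setFileOps(memoryOps); // resolves any getFileOps() callers that were awaiting a backend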

package/dist/data/files/file.types.js
@@ -0,0 +1,55 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.CHUNK_INDEX_THRESHOLD = exports.CHUNKS_DIR = exports.FILE_CHUNK_SIZE = exports.filesMetaData = exports.fileSchema = void 0;
+exports.setChunkIndexThreshold = setChunkIndexThreshold;
+exports.setFileOps = setFileOps;
+exports.getFileOps = getFileOps;
+exports.resetFileOps = resetFileOps;
+const zod_1 = require("zod");
+const types_1 = require("../orm/types");
+exports.fileSchema = zod_1.z.object({
+    fileId: zod_1.z.string(),
+    name: zod_1.z.string().describe('The name of the file'),
+    fileSize: zod_1.z.number().describe('The size of the file in bytes'),
+    fileHash: zod_1.z.string().describe('Hash of the chunk hashes array for integrity verification'),
+    mimeType: zod_1.z.string().optional().describe('The MIME type of the file'),
+    chunkHashes: zod_1.z.array(zod_1.z.string()).optional().describe('SHA-256 hashes of each chunk in order (for small files)'),
+    isIndexFile: zod_1.z.boolean().optional().describe('True if this is an index file for another large file'),
+    indexFileId: zod_1.z.string().optional().describe('FileId of index file (for large files)'),
+});
+exports.filesMetaData = {
+    name: 'Files',
+    description: 'Files stored in the chunked file system for peer sharing',
+    primaryKeyName: 'fileId',
+    fields: (0, types_1.schemaToFields)(exports.fileSchema),
+};
+// File storage configuration
+exports.FILE_CHUNK_SIZE = 1024 * 1024; // 1MB chunks
+exports.CHUNKS_DIR = 'file_chunks';
+exports.CHUNK_INDEX_THRESHOLD = 1000; // Use chunk index file for files with >1000 chunks (~1GB)
+// For testing - allow modifying the threshold
+function setChunkIndexThreshold(threshold) {
+    exports.CHUNK_INDEX_THRESHOLD = threshold;
+}
+let fileOps = null;
+let fileOpsReady = null;
+function setFileOps(ops) {
+    fileOps = ops;
+    if (fileOpsReady) {
+        fileOpsReady(ops);
+        fileOpsReady = null; // Reset the promise if it was waiting
+    }
+}
+function getFileOps() {
+    if (fileOps) {
+        return Promise.resolve(fileOps);
+    }
+    return new Promise((resolve) => {
+        fileOpsReady = resolve;
+    });
+}
+// For testing - reset the global state
+function resetFileOps() {
+    fileOps = null;
+    fileOpsReady = null;
+}
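
The constants above pin down when the index-file path kicks in. A quick sanity check of the arithmetic (a sketch; the 67-bytes-per-hash figure assumes 64-character hex hashes plus JSON quotes and a comma, which is not stated in the diff):

const FILE_CHUNK_SIZE = 1024 * 1024; // 1 MiB, as above
const CHUNK_INDEX_THRESHOLD = 1000;  // > 1000 chunks -> index file

const fileSize = 1.5 * 1024 ** 3;                         // a 1.5 GiB file
const chunkCount = Math.ceil(fileSize / FILE_CHUNK_SIZE); // 1536 chunks
const usesIndexFile = chunkCount > CHUNK_INDEX_THRESHOLD; // true

// The index file is just the JSON array of chunk hashes: roughly
// 1536 * 67 bytes, about 103 KB, which fits in a single chunk,
// so the recursion terminates after one level here.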

package/dist/data/files/files.d.ts
@@ -0,0 +1,28 @@
+import { Table } from "../orm";
+import { FileReadStream } from "./file-read-stream";
+import { FileWriteStream } from "./file-write-stream";
+import { IFile, IFileInput } from "./file.types";
+import type { DataContext } from "../../context/data-context";
+export declare class FilesTable extends Table<IFile> {
+    /** @deprecated Direct inserts forbidden; use safeFile() or saveFileRecord() */
+    insert(..._args: Parameters<Table<any>['insert']>): never;
+    /** @deprecated Direct updates forbidden; use safeFile() or saveFileRecord() */
+    update(..._args: Parameters<Table<any>['update']>): never;
+    /** @deprecated Direct deletes forbidden; use deleteFile() */
+    delete(..._args: Parameters<Table<any>['delete']>): never;
+    createWriteStream(metadata: IFileInput): Promise<FileWriteStream>;
+    /**
+     * Note: Use `saveFile` instead for direct use. This method is intended for internal use
+     * to insert a file record into the database as part of streaming operations.
+     * @param fileRecord The file record to insert
+     * @returns The inserted file record
+     */
+    saveFileRecord(fileRecord: IFile): Promise<IFile>;
+    openReadStream(fileId: string, preloadChunksCount?: number): Promise<FileReadStream | null>;
+    saveFile(metaData: IFileInput, data: Uint8Array | string): Promise<IFile>;
+    getFile(fileId: string): Promise<Uint8Array | null>;
+    deleteFile(fileId: string): Promise<void>;
+    createIndexFileRecursively(chunkHashes: string[]): Promise<string>;
+    loadChunkHashesRecursively(indexFileId: string): Promise<string[]>;
+}
+export declare function Files(dataContext?: DataContext): FilesTable;
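
A round trip through the high-level API (hypothetical id and content; string data is UTF-8 encoded before chunking, per saveFile in files.js below, and the import path is an assumption):

import { Files } from '@peers-app/peers-sdk/dist/data/files/files'; // import path is an assumption

async function roundTrip(): Promise<string | null> {
    const saved = await Files().saveFile(
        { fileId: 'readme-1', name: 'README.md', fileSize: 0, mimeType: 'text/markdown' }, // fileSize is recomputed from the data
        '# Hello'
    );
    const bytes = await Files().getFile(saved.fileId);
    return bytes && Buffer.from(bytes).toString('utf8'); // '# Hello'
}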

package/dist/data/files/files.js
@@ -0,0 +1,127 @@
+"use strict";
+Object.defineProperty(exports, "__esModule", { value: true });
+exports.FilesTable = void 0;
+exports.Files = Files;
+const rpc_types_1 = require("../../rpc-types");
+const utils_1 = require("../../utils");
+const orm_1 = require("../orm");
+const user_context_singleton_1 = require("../../context/user-context-singleton");
+const table_definitions_system_1 = require("../orm/table-definitions.system");
+const file_read_stream_1 = require("./file-read-stream");
+const file_write_stream_1 = require("./file-write-stream");
+const file_types_1 = require("./file.types");
+class FilesTable extends orm_1.Table {
+    /** @deprecated Direct inserts forbidden; use safeFile() or saveFileRecord() */
+    insert(..._args) {
+        throw new Error('Direct inserts forbidden; use safeFile() or saveFileRecord()');
+    }
+    /** @deprecated Direct updates forbidden; use safeFile() or saveFileRecord() */
+    update(..._args) {
+        throw new Error('Direct updates forbidden; use safeFile() or saveFileRecord()');
+    }
+    /** @deprecated Direct deletes forbidden; use deleteFile() */
+    delete(..._args) {
+        throw new Error('Direct deletes forbidden; use deleteFile()');
+    }
+    async createWriteStream(metadata) {
+        return new file_write_stream_1.FileWriteStream(metadata, this);
+    }
+    /**
+     * Note: Use `saveFile` instead for direct use. This method is intended for internal use
+     * to insert a file record into the database as part of streaming operations.
+     * @param fileRecord The file record to insert
+     * @returns The inserted file record
+     */
+    async saveFileRecord(fileRecord) {
+        return await super.save(fileRecord);
+    }
+    async openReadStream(fileId, preloadChunksCount) {
+        const fileRecord = await this.get(fileId);
+        if (!fileRecord) {
+            return null;
+        }
+        return new file_read_stream_1.FileReadStream(fileRecord, this, preloadChunksCount);
+    }
+    async saveFile(metaData, data) {
+        // Use FileWriteStream internally to ensure consistent chunking logic
+        const writeStream = await this.createWriteStream({
+            ...metaData,
+            fileSize: data.length // Set the actual file size
+        });
+        // Write the entire data to the stream
+        if (typeof data === 'string') {
+            data = new Uint8Array(Buffer.from(data, 'utf8'));
+        }
+        await writeStream.write(data);
+        // Finalize and return the result
+        return await writeStream.finalize();
+    }
+    async getFile(fileId) {
+        // Use FileReadStream internally to ensure consistent chunk reading logic
+        const readStream = await this.openReadStream(fileId);
+        if (!readStream) {
+            return null;
+        }
+        // Read the entire file using the streaming implementation
+        return await readStream.readAll();
+    }
+    async deleteFile(fileId) {
+        // Get file metadata from database
+        const fileRecord = await super.get(fileId);
+        if (!fileRecord) {
+            return; // File doesn't exist
+        }
+        // If this file has an index file, recursively delete it first
+        if (fileRecord.indexFileId) {
+            await this.deleteFile(fileRecord.indexFileId);
+        }
+        // Note: We don't delete chunks since they may be shared with other files
+        // Chunk cleanup could be implemented as a separate garbage collection process
+        // that removes chunks not referenced by any files
+        // Delete from database
+        await super.delete(fileId);
+    }
+    // Create an index file recursively for large files
+    async createIndexFileRecursively(chunkHashes) {
+        // Create JSON content with chunk hashes
+        const indexContent = JSON.stringify(chunkHashes);
+        const indexBuffer = Buffer.from(indexContent, 'utf8');
+        // Generate a new file ID for the index file
+        const indexFileId = (0, utils_1.newid)();
+        // Create metadata for the index file
+        const indexMetadata = {
+            fileId: indexFileId,
+            name: `index-${indexFileId}.json`,
+            fileSize: indexBuffer.length,
+            mimeType: 'application/json',
+            isIndexFile: true
+        };
+        // Recursively save the index file (this will chunk it if it's too large)
+        await this.saveFile(indexMetadata, indexBuffer);
+        return indexFileId;
+    }
+    // Load chunk hashes recursively from an index file
+    async loadChunkHashesRecursively(indexFileId) {
+        // Recursively load the index file
+        const indexBuffer = await this.getFile(indexFileId);
+        if (!indexBuffer) {
+            throw new Error(`Index file not found: ${indexFileId}`);
+        }
+        // Parse chunk hashes from JSON
+        const indexContent = Buffer.from(indexBuffer).toString('utf8');
+        return JSON.parse(indexContent);
+    }
+}
+exports.FilesTable = FilesTable;
+(0, table_definitions_system_1.registerSystemTableDefinition)(file_types_1.filesMetaData, file_types_1.fileSchema, FilesTable);
+function Files(dataContext) {
+    return (0, user_context_singleton_1.getTableContainer)(dataContext).getTable(file_types_1.filesMetaData, file_types_1.fileSchema);
+}
+// TODO implement permissions check for file access
+rpc_types_1.rpcServerCalls.getFileContents = async (fileId, encoding = 'utf8') => {
+    const data = await Files().getFile(fileId);
+    if (data === null) {
+        throw new Error(`File not found: ${fileId}`);
+    }
+    return Buffer.from(data).toString(encoding);
+};
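
createIndexFileRecursively and loadChunkHashesRecursively above let the chunk-hash list itself be chunked and indexed again when it outgrows the threshold. The recursion stays shallow in practice; a sketch of the depth arithmetic (the 67-bytes-per-hash figure is the same assumption as earlier):

function indexDepth(fileSize: number): number {
    const CHUNK = 1024 * 1024;      // FILE_CHUNK_SIZE
    const THRESHOLD = 1000;         // CHUNK_INDEX_THRESHOLD
    const JSON_BYTES_PER_HASH = 67; // quoted 64-char hex hash plus comma (an assumption)
    let chunks = Math.ceil(fileSize / CHUNK);
    let depth = 0;
    while (chunks > THRESHOLD) {
        // each level stores the hash list as JSON, which is itself chunked
        chunks = Math.ceil((chunks * JSON_BYTES_PER_HASH) / CHUNK);
        depth++;
    }
    return depth;
}
// indexDepth(1000 * 1024 ** 2) === 0 and indexDepth(1024 ** 4) === 1:
// even a 1 TiB file needs only one level of index files under these assumptions.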

package/dist/data/files/files.test.d.ts
@@ -0,0 +1 @@
+export {};