@peers-app/peers-sdk 0.1.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -0
- package/dist/context/data-context.d.ts +31 -0
- package/dist/context/data-context.js +56 -0
- package/dist/context/index.d.ts +3 -0
- package/dist/context/index.js +19 -0
- package/dist/context/user-context-singleton.d.ts +11 -0
- package/dist/context/user-context-singleton.js +121 -0
- package/dist/context/user-context.d.ts +55 -0
- package/dist/context/user-context.js +205 -0
- package/dist/data/assistants.d.ts +68 -0
- package/dist/data/assistants.js +64 -0
- package/dist/data/change-tracking.d.ts +219 -0
- package/dist/data/change-tracking.js +119 -0
- package/dist/data/channels.d.ts +29 -0
- package/dist/data/channels.js +25 -0
- package/dist/data/data-locks.d.ts +37 -0
- package/dist/data/data-locks.js +180 -0
- package/dist/data/data-locks.test.d.ts +1 -0
- package/dist/data/data-locks.test.js +456 -0
- package/dist/data/device-sync-info.d.ts +19 -0
- package/dist/data/device-sync-info.js +24 -0
- package/dist/data/devices.d.ts +51 -0
- package/dist/data/devices.js +36 -0
- package/dist/data/embeddings.d.ts +47 -0
- package/dist/data/embeddings.js +36 -0
- package/dist/data/files/file-read-stream.d.ts +27 -0
- package/dist/data/files/file-read-stream.js +195 -0
- package/dist/data/files/file-write-stream.d.ts +20 -0
- package/dist/data/files/file-write-stream.js +113 -0
- package/dist/data/files/file.types.d.ts +47 -0
- package/dist/data/files/file.types.js +55 -0
- package/dist/data/files/files.d.ts +28 -0
- package/dist/data/files/files.js +127 -0
- package/dist/data/files/files.test.d.ts +1 -0
- package/dist/data/files/files.test.js +728 -0
- package/dist/data/files/index.d.ts +4 -0
- package/dist/data/files/index.js +23 -0
- package/dist/data/group-member-roles.d.ts +9 -0
- package/dist/data/group-member-roles.js +25 -0
- package/dist/data/group-members.d.ts +39 -0
- package/dist/data/group-members.js +68 -0
- package/dist/data/group-members.test.d.ts +1 -0
- package/dist/data/group-members.test.js +287 -0
- package/dist/data/group-permissions.d.ts +8 -0
- package/dist/data/group-permissions.js +73 -0
- package/dist/data/group-share.d.ts +50 -0
- package/dist/data/group-share.js +196 -0
- package/dist/data/groups.d.ts +50 -0
- package/dist/data/groups.js +73 -0
- package/dist/data/groups.test.d.ts +1 -0
- package/dist/data/groups.test.js +153 -0
- package/dist/data/index.d.ts +31 -0
- package/dist/data/index.js +47 -0
- package/dist/data/knowledge/knowledge-frames.d.ts +34 -0
- package/dist/data/knowledge/knowledge-frames.js +34 -0
- package/dist/data/knowledge/knowledge-links.d.ts +30 -0
- package/dist/data/knowledge/knowledge-links.js +25 -0
- package/dist/data/knowledge/knowledge-values.d.ts +35 -0
- package/dist/data/knowledge/knowledge-values.js +35 -0
- package/dist/data/knowledge/peer-types.d.ts +112 -0
- package/dist/data/knowledge/peer-types.js +27 -0
- package/dist/data/knowledge/predicates.d.ts +34 -0
- package/dist/data/knowledge/predicates.js +27 -0
- package/dist/data/messages.d.ts +57 -0
- package/dist/data/messages.js +97 -0
- package/dist/data/orm/client-proxy.data-source.d.ts +27 -0
- package/dist/data/orm/client-proxy.data-source.js +65 -0
- package/dist/data/orm/cursor.d.ts +25 -0
- package/dist/data/orm/cursor.js +47 -0
- package/dist/data/orm/cursor.test.d.ts +1 -0
- package/dist/data/orm/cursor.test.js +315 -0
- package/dist/data/orm/data-query.d.ts +96 -0
- package/dist/data/orm/data-query.js +208 -0
- package/dist/data/orm/data-query.mongo.d.ts +17 -0
- package/dist/data/orm/data-query.mongo.js +267 -0
- package/dist/data/orm/data-query.mongo.test.d.ts +1 -0
- package/dist/data/orm/data-query.mongo.test.js +398 -0
- package/dist/data/orm/data-query.sqlite.d.ts +14 -0
- package/dist/data/orm/data-query.sqlite.js +297 -0
- package/dist/data/orm/data-query.sqlite.test.d.ts +1 -0
- package/dist/data/orm/data-query.sqlite.test.js +377 -0
- package/dist/data/orm/data-query.test.d.ts +1 -0
- package/dist/data/orm/data-query.test.js +553 -0
- package/dist/data/orm/decorators.d.ts +6 -0
- package/dist/data/orm/decorators.js +21 -0
- package/dist/data/orm/dependency-injection.test.d.ts +1 -0
- package/dist/data/orm/dependency-injection.test.js +171 -0
- package/dist/data/orm/doc.d.ts +26 -0
- package/dist/data/orm/doc.js +124 -0
- package/dist/data/orm/event-registry.d.ts +24 -0
- package/dist/data/orm/event-registry.js +40 -0
- package/dist/data/orm/event-registry.test.d.ts +1 -0
- package/dist/data/orm/event-registry.test.js +44 -0
- package/dist/data/orm/factory.d.ts +8 -0
- package/dist/data/orm/factory.js +147 -0
- package/dist/data/orm/index.d.ts +16 -0
- package/dist/data/orm/index.js +32 -0
- package/dist/data/orm/multi-cursors.d.ts +11 -0
- package/dist/data/orm/multi-cursors.js +146 -0
- package/dist/data/orm/multi-cursors.test.d.ts +1 -0
- package/dist/data/orm/multi-cursors.test.js +455 -0
- package/dist/data/orm/sql-db.d.ts +6 -0
- package/dist/data/orm/sql-db.js +2 -0
- package/dist/data/orm/sql.data-source.d.ts +38 -0
- package/dist/data/orm/sql.data-source.js +379 -0
- package/dist/data/orm/sql.data-source.test.d.ts +1 -0
- package/dist/data/orm/sql.data-source.test.js +406 -0
- package/dist/data/orm/subscribable.data-source.d.ts +25 -0
- package/dist/data/orm/subscribable.data-source.js +72 -0
- package/dist/data/orm/table-container-events.test.d.ts +1 -0
- package/dist/data/orm/table-container-events.test.js +93 -0
- package/dist/data/orm/table-container.d.ts +39 -0
- package/dist/data/orm/table-container.js +96 -0
- package/dist/data/orm/table-definitions.system.d.ts +9 -0
- package/dist/data/orm/table-definitions.system.js +29 -0
- package/dist/data/orm/table-definitions.type.d.ts +19 -0
- package/dist/data/orm/table-definitions.type.js +2 -0
- package/dist/data/orm/table-dependencies.d.ts +32 -0
- package/dist/data/orm/table-dependencies.js +2 -0
- package/dist/data/orm/table.d.ts +42 -0
- package/dist/data/orm/table.event-source.test.d.ts +1 -0
- package/dist/data/orm/table.event-source.test.js +341 -0
- package/dist/data/orm/table.js +244 -0
- package/dist/data/orm/types.d.ts +20 -0
- package/dist/data/orm/types.js +115 -0
- package/dist/data/orm/types.test.d.ts +1 -0
- package/dist/data/orm/types.test.js +71 -0
- package/dist/data/package-permissions.d.ts +7 -0
- package/dist/data/package-permissions.js +18 -0
- package/dist/data/packages.d.ts +92 -0
- package/dist/data/packages.js +90 -0
- package/dist/data/peer-events/peer-event-handlers.d.ts +21 -0
- package/dist/data/peer-events/peer-event-handlers.js +28 -0
- package/dist/data/peer-events/peer-event-types.d.ts +119 -0
- package/dist/data/peer-events/peer-event-types.js +29 -0
- package/dist/data/peer-events/peer-events.d.ts +41 -0
- package/dist/data/peer-events/peer-events.js +102 -0
- package/dist/data/persistent-vars.d.ts +87 -0
- package/dist/data/persistent-vars.js +230 -0
- package/dist/data/tool-tests.d.ts +37 -0
- package/dist/data/tool-tests.js +27 -0
- package/dist/data/tools.d.ts +358 -0
- package/dist/data/tools.js +48 -0
- package/dist/data/user-permissions.d.ts +15 -0
- package/dist/data/user-permissions.js +39 -0
- package/dist/data/user-permissions.test.d.ts +1 -0
- package/dist/data/user-permissions.test.js +252 -0
- package/dist/data/users.d.ts +38 -0
- package/dist/data/users.js +73 -0
- package/dist/data/workflow-logs.d.ts +106 -0
- package/dist/data/workflow-logs.js +67 -0
- package/dist/data/workflow-runs.d.ts +103 -0
- package/dist/data/workflow-runs.js +313 -0
- package/dist/data/workflows.d.ts +16 -0
- package/dist/data/workflows.js +21 -0
- package/dist/device/connection.d.ts +41 -0
- package/dist/device/connection.js +249 -0
- package/dist/device/connection.test.d.ts +1 -0
- package/dist/device/connection.test.js +292 -0
- package/dist/device/device-election.d.ts +36 -0
- package/dist/device/device-election.js +137 -0
- package/dist/device/device.d.ts +22 -0
- package/dist/device/device.js +110 -0
- package/dist/device/device.test.d.ts +1 -0
- package/dist/device/device.test.js +203 -0
- package/dist/device/get-trust-level.d.ts +3 -0
- package/dist/device/get-trust-level.js +87 -0
- package/dist/device/socket.type.d.ts +20 -0
- package/dist/device/socket.type.js +15 -0
- package/dist/device/streamed-socket.d.ts +27 -0
- package/dist/device/streamed-socket.js +154 -0
- package/dist/device/streamed-socket.test.d.ts +1 -0
- package/dist/device/streamed-socket.test.js +44 -0
- package/dist/events.d.ts +35 -0
- package/dist/events.js +128 -0
- package/dist/index.d.ts +33 -0
- package/dist/index.js +50 -0
- package/dist/keys.d.ts +51 -0
- package/dist/keys.js +234 -0
- package/dist/keys.test.d.ts +1 -0
- package/dist/keys.test.js +215 -0
- package/dist/mentions.d.ts +9 -0
- package/dist/mentions.js +46 -0
- package/dist/observable.d.ts +19 -0
- package/dist/observable.js +112 -0
- package/dist/observable.test.d.ts +1 -0
- package/dist/observable.test.js +183 -0
- package/dist/package-loader/get-require.d.ts +10 -0
- package/dist/package-loader/get-require.js +31 -0
- package/dist/package-loader/index.d.ts +1 -0
- package/dist/package-loader/index.js +17 -0
- package/dist/package-loader/package-loader.d.ts +16 -0
- package/dist/package-loader/package-loader.js +102 -0
- package/dist/peers-ui/peers-ui.d.ts +15 -0
- package/dist/peers-ui/peers-ui.js +23 -0
- package/dist/peers-ui/peers-ui.types.d.ts +35 -0
- package/dist/peers-ui/peers-ui.types.js +3 -0
- package/dist/rpc-types.d.ts +45 -0
- package/dist/rpc-types.js +47 -0
- package/dist/serial-json.d.ts +5 -0
- package/dist/serial-json.js +186 -0
- package/dist/serial-json.test.d.ts +1 -0
- package/dist/serial-json.test.js +86 -0
- package/dist/system-ids.d.ts +6 -0
- package/dist/system-ids.js +10 -0
- package/dist/tools/index.d.ts +1 -0
- package/dist/tools/index.js +17 -0
- package/dist/tools/tools-factory.d.ts +5 -0
- package/dist/tools/tools-factory.js +34 -0
- package/dist/types/app-nav.d.ts +18 -0
- package/dist/types/app-nav.js +10 -0
- package/dist/types/assistant-runner-args.d.ts +9 -0
- package/dist/types/assistant-runner-args.js +2 -0
- package/dist/types/field-type.d.ts +37 -0
- package/dist/types/field-type.js +26 -0
- package/dist/types/peer-device.d.ts +40 -0
- package/dist/types/peer-device.js +14 -0
- package/dist/types/peers-package.d.ts +23 -0
- package/dist/types/peers-package.js +2 -0
- package/dist/types/workflow-logger.d.ts +2 -0
- package/dist/types/workflow-logger.js +2 -0
- package/dist/types/workflow-run-context.d.ts +12 -0
- package/dist/types/workflow-run-context.js +2 -0
- package/dist/types/workflow.d.ts +72 -0
- package/dist/types/workflow.js +24 -0
- package/dist/types/zod-types.d.ts +7 -0
- package/dist/types/zod-types.js +12 -0
- package/dist/users.query.d.ts +13 -0
- package/dist/users.query.js +134 -0
- package/dist/utils.d.ts +39 -0
- package/dist/utils.js +240 -0
- package/dist/utils.test.d.ts +1 -0
- package/dist/utils.test.js +140 -0
- package/package.json +50 -0
package/dist/data/files/files.test.js
@@ -0,0 +1,728 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const field_type_1 = require("../../types/field-type");
const utils_1 = require("../../utils");
const file_types_1 = require("./file.types");
const files_1 = require("./files");
// Mock data source for testing
class MockDataSource {
    tableName;
    primaryKeyName;
    data = new Map();
    constructor(tableName, primaryKeyName = 'fileId', initialData = []) {
        this.tableName = tableName;
        this.primaryKeyName = primaryKeyName;
        initialData.forEach(item => {
            this.data.set(item[primaryKeyName], item);
        });
    }
    async get(id) {
        return this.data.get(id);
    }
    async list() {
        return Array.from(this.data.values());
    }
    async save(record) {
        this.data.set(record[this.primaryKeyName], record);
        return record;
    }
    async insert(record) {
        this.data.set(record[this.primaryKeyName], record);
        return record;
    }
    async update(record) {
        this.data.set(record[this.primaryKeyName], record);
        return record;
    }
    async delete(idOrRecord) {
        const id = typeof idOrRecord === 'string' ? idOrRecord : idOrRecord[this.primaryKeyName];
        this.data.delete(id);
    }
    async count() {
        return this.data.size;
    }
}
// Mock FileOps for testing
class MockFileOps {
    files = new Map();
    async downloadFileChunk(chunkHash) {
        // For testing, just return the chunk if it exists locally
        const chunkPath = `file_chunks/${chunkHash}`;
        const data = this.files.get(chunkPath);
        if (!data) {
            throw new Error(`Chunk not found: ${chunkPath}`);
        }
        return data;
    }
    async fileExists(path) {
        return this.files.has(path);
    }
    async readFile(path) {
        const data = this.files.get(path);
        if (!data) {
            throw new Error(`File not found: ${path}`);
        }
        return data;
    }
    async writeFile(path, data) {
        this.files.set(path, data);
    }
    async deletePath(path) {
        this.files.delete(path);
    }
    // Helper method for testing
    getStoredFiles() {
        return Array.from(this.files.keys());
    }
}
describe('FileTable', () => {
    let fileTable;
    let mockFileOps;
    beforeEach(() => {
        // Create mock data source
        const mockDataSource = new MockDataSource('Files', 'fileId');
        // Use direct instantiation since factory requires setup
        const metaData = {
            name: 'Files',
            description: 'Files stored in the chunked file system for peer sharing',
            primaryKeyName: 'fileId',
            fields: [
                { name: 'fileId', type: field_type_1.FieldType.string },
                { name: 'name', type: field_type_1.FieldType.string },
                { name: 'fileSize', type: field_type_1.FieldType.number },
                { name: 'fileHash', type: field_type_1.FieldType.string },
                { name: 'mimeType', type: field_type_1.FieldType.string, optional: true },
                { name: 'chunkHashes', type: field_type_1.FieldType.string, isArray: true, optional: true },
                { name: 'isIndexFile', type: field_type_1.FieldType.boolean, optional: true },
                { name: 'indexFileId', type: field_type_1.FieldType.string, optional: true },
            ]
        };
        // Create mock data context
        const mockDataContextForRegistry = {
            dataContextId: 'files-test'
        };
        const { EventRegistry } = require('../orm/event-registry');
        const eventRegistry = new EventRegistry(mockDataContextForRegistry);
        const mockDataContext = {
            dataSourceFactory: () => mockDataSource,
            eventRegistry: eventRegistry
        };
        // Create FileTable instance directly with mock dataSource
        const deps = {
            dataSource: mockDataSource,
            eventRegistry,
            schema: file_types_1.fileSchema
        };
        fileTable = new files_1.FilesTable(metaData, deps);
        // Set up mock file operations
        mockFileOps = new MockFileOps();
        (0, file_types_1.setFileOps)(mockFileOps);
    });
    afterEach(() => {
        // Clean up global state to prevent Jest from hanging
        (0, file_types_1.resetFileOps)();
    });
    describe('saveFile', () => {
        it('should save a small file in a single chunk', async () => {
            const fileId = (0, utils_1.newid)();
            const data = new Uint8Array(Buffer.from('Hello, World!', 'utf8'));
            const metadata = {
                fileId,
                name: 'test.txt',
                fileSize: data.length,
                mimeType: 'text/plain',
            };
            const result = await fileTable.saveFile(metadata, data);
            expect(result.fileId).toBe(fileId);
            expect(result.chunkHashes).toHaveLength(1);
            // Check that chunk was written by hash
            const chunkHash = result.chunkHashes[0];
            const chunkPath = `file_chunks/${chunkHash}`;
            expect(await mockFileOps.fileExists(chunkPath)).toBe(true);
            const storedChunk = await mockFileOps.readFile(chunkPath);
            expect(storedChunk).toBeInstanceOf(Uint8Array);
            expect(storedChunk).toEqual(data);
        });
        it('should save a large file in multiple chunks', async () => {
            const fileId = (0, utils_1.newid)();
            const largeData = new Uint8Array(Buffer.alloc(file_types_1.FILE_CHUNK_SIZE + 1000, 'A')); // Slightly larger than one chunk
            const metadata = {
                fileId,
                name: 'large.txt',
                fileSize: largeData.length,
                mimeType: 'text/plain',
            };
            const result = await fileTable.saveFile(metadata, largeData);
            expect(result.chunkHashes).toHaveLength(2);
            // Check that both chunks were written by hash
            const chunk0Hash = result.chunkHashes[0];
            const chunk1Hash = result.chunkHashes[1];
            expect(await mockFileOps.fileExists(`file_chunks/${chunk0Hash}`)).toBe(true);
            expect(await mockFileOps.fileExists(`file_chunks/${chunk1Hash}`)).toBe(true);
            // Verify chunk sizes
            const chunk0 = await mockFileOps.readFile(`file_chunks/${chunk0Hash}`);
            const chunk1 = await mockFileOps.readFile(`file_chunks/${chunk1Hash}`);
            expect(chunk0.length).toBe(file_types_1.FILE_CHUNK_SIZE);
            expect(chunk1.length).toBe(1000);
        });
    });
    describe('getFile', () => {
        it('should retrieve a single-chunk file', async () => {
            const fileId = (0, utils_1.newid)();
            const originalData = new Uint8Array(Buffer.from('Test content', 'utf8'));
            const metadata = {
                fileId,
                name: 'test.txt',
                fileSize: originalData.length,
                mimeType: 'text/plain',
            };
            // Save file first
            await fileTable.saveFile(metadata, originalData);
            // Retrieve file
            const retrievedData = await fileTable.getFile(fileId);
            expect(new Uint8Array(retrievedData)).toEqual(originalData);
        });
        it('should retrieve a multi-chunk file', async () => {
            const fileId = (0, utils_1.newid)();
            const originalData = new Uint8Array(Buffer.alloc(file_types_1.FILE_CHUNK_SIZE + 500, 'B'));
            const metadata = {
                fileId,
                name: 'large.txt',
                fileSize: originalData.length,
                mimeType: 'text/plain',
            };
            // Save file first
            await fileTable.saveFile(metadata, originalData);
            // Retrieve file
            const retrievedData = await fileTable.getFile(fileId);
            expect(new Uint8Array(retrievedData)).toEqual(originalData);
        });
        it('should return null for non-existent file', async () => {
            const result = await fileTable.getFile('non-existent');
            expect(result).toBeNull();
        });
        it('should return null when chunk is missing', async () => {
            const fileId = (0, utils_1.newid)();
            const metadata = {
                fileId,
                name: 'test.txt',
                fileSize: 100,
                fileHash: 'hash123',
                chunkHashes: ['hash1', 'hash2'],
            };
            // Insert metadata directly without saving chunks
            await fileTable.dataSource.insert(metadata);
            const result = await fileTable.getFile(fileId);
            expect(result).toBeNull();
        });
        it('should return null when chunk is missing during read', async () => {
            const result = await fileTable.getFile('non-existent');
            expect(result).toBeNull();
        });
    });
    describe('deleteFile', () => {
        it('should delete file from database but preserve chunks', async () => {
            const fileId = (0, utils_1.newid)();
            const data = new Uint8Array(Buffer.from('Delete me', 'utf8'));
            const metadata = {
                fileId,
                name: 'delete.txt',
                fileSize: data.length,
                mimeType: 'text/plain',
            };
            // Save file first
            const saved = await fileTable.saveFile(metadata, data);
            // Verify file exists
            const fileData = await fileTable.getFile(fileId);
            expect(new Uint8Array(fileData)).toEqual(data);
            const chunkHash = saved.chunkHashes[0];
            expect(await mockFileOps.fileExists(`file_chunks/${chunkHash}`)).toBe(true);
            // Delete file
            await fileTable.deleteFile(fileId);
            // Verify file metadata is gone but chunk remains (for potential sharing)
            expect(await fileTable.getFile(fileId)).toBeNull();
            expect(await mockFileOps.fileExists(`file_chunks/${chunkHash}`)).toBe(true);
        });
        it('should handle deleting non-existent file gracefully', async () => {
            // Should not throw
            await fileTable.deleteFile('non-existent');
        });
        it('should delete multi-chunk file from database but preserve chunks', async () => {
            const fileId = (0, utils_1.newid)();
            const data = new Uint8Array(Buffer.alloc(file_types_1.FILE_CHUNK_SIZE + 100, 'C'));
            const metadata = {
                fileId,
                name: 'multi.txt',
                fileSize: data.length,
                mimeType: 'text/plain',
            };
            // Save file first
            const saved = await fileTable.saveFile(metadata, data);
            // Verify chunks exist by hash
            const chunk0Hash = saved.chunkHashes[0];
            const chunk1Hash = saved.chunkHashes[1];
            expect(await mockFileOps.fileExists(`file_chunks/${chunk0Hash}`)).toBe(true);
            expect(await mockFileOps.fileExists(`file_chunks/${chunk1Hash}`)).toBe(true);
            // Delete file
            await fileTable.deleteFile(fileId);
            // Verify file metadata is gone but chunks remain (for potential sharing)
            expect(await fileTable.getFile(fileId)).toBeNull();
            expect(await mockFileOps.fileExists(`file_chunks/${chunk0Hash}`)).toBe(true);
            expect(await mockFileOps.fileExists(`file_chunks/${chunk1Hash}`)).toBe(true);
        });
    });
    describe('chunk deduplication', () => {
        it('should deduplicate identical chunks across different files', async () => {
            const fileId1 = (0, utils_1.newid)();
            const fileId2 = (0, utils_1.newid)();
            // Create two files with identical content
            const data = new Uint8Array(Buffer.from('Identical content for deduplication test', 'utf8'));
            const metadata1 = {
                fileId: fileId1,
                name: 'file1.txt',
                fileSize: data.length,
                mimeType: 'text/plain',
            };
            const metadata2 = {
                fileId: fileId2,
                name: 'file2.txt',
                fileSize: data.length,
                mimeType: 'text/plain',
            };
            // Save both files
            const saved1 = await fileTable.saveFile(metadata1, data);
            const saved2 = await fileTable.saveFile(metadata2, data);
            // Both files should have the same chunk hash (deduplication)
            expect(saved1.chunkHashes).toEqual(saved2.chunkHashes);
            // Only one chunk should be stored physically
            const chunkHash = saved1.chunkHashes[0];
            const chunkPath = `file_chunks/${chunkHash}`;
            expect(await mockFileOps.fileExists(chunkPath)).toBe(true);
            expect(mockFileOps.getStoredFiles().filter(path => path.includes(chunkHash))).toHaveLength(1);
            // Both files should retrieve the same content
            const file1Data = await fileTable.getFile(fileId1);
            const file2Data = await fileTable.getFile(fileId2);
            expect(new Uint8Array(file1Data)).toEqual(data);
            expect(new Uint8Array(file2Data)).toEqual(data);
        });
    });
    describe('Merkle tree (chunk index files)', () => {
        it('should use direct chunk hashes for files under threshold', async () => {
            const fileId = (0, utils_1.newid)();
            // Create a small file that's well under the 1000 chunk threshold
            const smallData = new Uint8Array(Buffer.from('Small file content for testing', 'utf8'));
            const metadata = {
                fileId,
                name: 'small.bin',
                fileSize: smallData.length,
                mimeType: 'application/octet-stream',
            };
            // Save small file
            const result = await fileTable.saveFile(metadata, smallData);
            // Small file should use direct chunk hashes, not index file
            expect(result.chunkHashes).toBeTruthy();
            expect(result.chunkHashes).toHaveLength(1);
            expect(result.indexFileId).toBeUndefined();
            // Should be able to retrieve the file
            const retrievedData = await fileTable.getFile(fileId);
            expect(new Uint8Array(retrievedData)).toEqual(smallData);
        });
        it('should use recursive index files for very large files', async () => {
            // Temporarily lower the threshold to test recursive behavior with smaller data
            const originalThreshold = file_types_1.CHUNK_INDEX_THRESHOLD;
            (0, file_types_1.setChunkIndexThreshold)(3); // Use index file for files with >3 chunks
            try {
                const fileId = (0, utils_1.newid)();
                // Create a file with 5 chunks (exceeds threshold of 3)
                const largeData = new Uint8Array(Buffer.alloc(file_types_1.FILE_CHUNK_SIZE * 4 + 1000, 'X')); // 4 full chunks + partial chunk = 5 chunks
                const metadata = {
                    fileId,
                    name: 'large.bin',
                    fileSize: largeData.length,
                    mimeType: 'application/octet-stream',
                };
                // Save large file - should create recursive index file
                const result = await fileTable.saveFile(metadata, largeData);
                // Large file should use index file, not direct chunk hashes
                expect(result.chunkHashes).toBeUndefined();
                expect(result.indexFileId).toBeTruthy();
                // Should be able to retrieve the file
                const retrievedData = await fileTable.getFile(fileId);
                expect(retrievedData).toBeTruthy();
                expect(new Uint8Array(retrievedData)).toEqual(largeData);
                // The index file should be marked as an index file
                const indexFile = await fileTable.dataSource.get(result.indexFileId);
                expect(indexFile).toBeTruthy();
                expect(indexFile.isIndexFile).toBe(true);
                // The index file should contain JSON data
                expect(indexFile.mimeType).toBe('application/json');
                expect(indexFile.name).toMatch(/^index-.*\.json$/);
            }
            finally {
                // Restore original threshold
                (0, file_types_1.setChunkIndexThreshold)(originalThreshold);
            }
        });
        it('should handle missing index file gracefully', async () => {
            const fileId = (0, utils_1.newid)();
            const metadata = {
                fileId,
                name: 'test.txt',
                fileSize: 100,
                fileHash: 'hash123',
                indexFileId: 'missing_index_file_id',
            };
            // Insert metadata directly without creating index file
            await fileTable.dataSource.insert(metadata);
            await expect(fileTable.getFile(fileId)).rejects.toThrow('Index file not found: missing_index_file_id');
        });
    });
    describe('Streaming API', () => {
        describe('FileWriteStream', () => {
            it('should create a write stream and write small file chunk by chunk', async () => {
                const fileId = (0, utils_1.newid)();
                const metadata = {
                    fileId,
                    name: 'stream-test.txt',
                    fileSize: 0, // Will be calculated
                    mimeType: 'text/plain',
                };
                const writeStream = await fileTable.createWriteStream(metadata);
                // Write data in chunks
                await writeStream.write(new Uint8Array(Buffer.from('Hello, ', 'utf8')));
                await writeStream.write(new Uint8Array(Buffer.from('streaming ', 'utf8')));
                await writeStream.write(new Uint8Array(Buffer.from('world!', 'utf8')));
                // Finalize the stream
                const result = await writeStream.finalize();
                expect(result.fileId).toBe(fileId);
                expect(result.name).toBe('stream-test.txt');
                expect(result.fileSize).toBe(23); // Total bytes written: 'Hello, streaming world!'
                expect(result.chunkHashes).toHaveLength(1); // Small file, single chunk
                // Verify we can read the file back
                const retrievedData = await fileTable.getFile(fileId);
                expect(Buffer.from(retrievedData).toString('utf8')).toBe('Hello, streaming world!');
            });
            it('should handle large file streaming across multiple chunks', async () => {
                const fileId = (0, utils_1.newid)();
                const metadata = {
                    fileId,
                    name: 'large-stream.bin',
                    fileSize: 0,
                    mimeType: 'application/octet-stream',
                };
                const writeStream = await fileTable.createWriteStream(metadata);
                // Write data larger than one chunk
                const chunkSize = file_types_1.FILE_CHUNK_SIZE;
                const chunk1 = new Uint8Array(Buffer.alloc(chunkSize, 'A'));
                const chunk2 = new Uint8Array(Buffer.alloc(500, 'B')); // Partial second chunk
                await writeStream.write(chunk1);
                await writeStream.write(chunk2);
                const result = await writeStream.finalize();
                expect(result.fileSize).toBe(chunkSize + 500);
                expect(result.chunkHashes).toHaveLength(2); // Two chunks
                // Verify content
                const retrievedData = await fileTable.getFile(fileId);
                expect(retrievedData?.length).toBe(chunkSize + 500);
                expect(new Uint8Array(retrievedData).subarray(0, chunkSize).every(b => b === 65)).toBe(true); // All 'A's
                expect(new Uint8Array(retrievedData).subarray(chunkSize).every(b => b === 66)).toBe(true); // All 'B's
            });
            it('should support aborting a write stream', async () => {
                const fileId = (0, utils_1.newid)();
                const metadata = {
                    fileId,
                    name: 'aborted.txt',
                    fileSize: 0,
                    mimeType: 'text/plain',
                };
                const writeStream = await fileTable.createWriteStream(metadata);
                await writeStream.write(new Uint8Array(Buffer.from('Some data', 'utf8')));
                expect(writeStream.getBytesWritten()).toBe(9);
                // Abort the stream
                await writeStream.abort();
                expect(writeStream.isAborted()).toBe(true);
                // Should not be able to write after abort
                await expect(writeStream.write(new Uint8Array(Buffer.from('More data', 'utf8'))))
                    .rejects.toThrow('Cannot write to aborted stream');
                // Should not be able to finalize after abort
                await expect(writeStream.finalize())
                    .rejects.toThrow('Cannot finalize aborted stream');
            });
            it('should track progress during streaming', async () => {
                const fileId = (0, utils_1.newid)();
                const metadata = {
                    fileId,
                    name: 'progress.txt',
                    fileSize: 0,
                    mimeType: 'text/plain',
                };
                const writeStream = await fileTable.createWriteStream(metadata);
                expect(writeStream.getBytesWritten()).toBe(0);
                expect(writeStream.getChunkCount()).toBe(0);
                await writeStream.write(new Uint8Array(Buffer.from('First chunk', 'utf8')));
                expect(writeStream.getBytesWritten()).toBe(11);
                expect(writeStream.getChunkCount()).toBe(0); // Not complete chunk yet
                await writeStream.write(new Uint8Array(Buffer.from(' Second part', 'utf8')));
                expect(writeStream.getBytesWritten()).toBe(23);
                await writeStream.finalize();
                expect(writeStream.isFinalized()).toBe(true);
            });
            it('should handle finalize with multiple complete chunks in buffer', async () => {
                const fileId = (0, utils_1.newid)();
                const metadata = {
                    fileId,
                    name: 'buffered-chunks.bin',
                    fileSize: 0,
                    mimeType: 'application/octet-stream',
                };
                const writeStream = await fileTable.createWriteStream(metadata);
                // Write data that will create multiple complete chunks but don't trigger processCompleteChunks
                // This simulates the edge case where finalize() needs to process multiple chunks
                const chunkSize = file_types_1.FILE_CHUNK_SIZE;
                const buffer = Buffer.alloc(chunkSize * 2 + 500); // 2 full chunks + partial chunk
                buffer.fill(65); // Fill with 'A'
                const data = new Uint8Array(buffer);
                // Write as one large chunk to buffer (avoiding automatic processing)
                await writeStream.write(data);
                // At this point the buffer should have 2+ MB of data
                expect(writeStream.getBytesWritten()).toBe(data.length);
                // Finalize should process all complete chunks + final partial chunk
                const result = await writeStream.finalize();
                expect(result.fileSize).toBe(data.length);
                expect(result.chunkHashes).toHaveLength(3); // 2 complete + 1 partial chunk
                // Verify we can read it back correctly
                const retrievedData = await fileTable.getFile(fileId);
                expect(retrievedData?.length).toBe(data.length);
                expect(new Uint8Array(retrievedData)).toEqual(data);
            });
        });
        describe('FileReadStream', () => {
            it('should read file data in chunks using read stream', async () => {
                // First save a file using traditional method
                const fileId = (0, utils_1.newid)();
                const originalData = new Uint8Array(Buffer.from('This is test data for streaming read operations!', 'utf8'));
                const metadata = {
                    fileId,
                    name: 'read-test.txt',
                    fileSize: originalData.length,
                    mimeType: 'text/plain',
                };
                await fileTable.saveFile(metadata, originalData);
                // Now open for streaming read
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                // Read entire file
                const readData = await readStream.readAll();
                expect(readData).not.toBeNull();
                expect(new Uint8Array(readData)).toEqual(originalData);
                // Check metadata
                const meta = readStream.getMetadata();
                expect(meta.fileId).toBe(fileId);
                expect(meta.name).toBe('read-test.txt');
                expect(meta.fileSize).toBe(originalData.length);
            });
            it('should support seeking within file', async () => {
                const fileId = (0, utils_1.newid)();
                const originalData = new Uint8Array(Buffer.from('0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'utf8'));
                const metadata = {
                    fileId,
                    name: 'seek-test.txt',
                    fileSize: originalData.length,
                    mimeType: 'text/plain',
                };
                await fileTable.saveFile(metadata, originalData);
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                // Seek to position 10 (should be at 'A')
                await readStream.seek(10);
                expect(readStream.getPosition()).toBe(10);
                // Read 5 bytes from position 10 (should get 'ABCDE')
                const chunk = await readStream.read(5);
                expect(Buffer.from(chunk).toString('utf8')).toBe('ABCDE');
                expect(readStream.getPosition()).toBe(15);
                // Seek back to beginning
                await readStream.seek(0);
                expect(readStream.getPosition()).toBe(0);
                const firstChunk = await readStream.read(10);
                expect(Buffer.from(firstChunk).toString('utf8')).toBe('0123456789');
                expect(readStream.getPosition()).toBe(10);
                // Seek to middle of data
                await readStream.seek(20);
                expect(readStream.getPosition()).toBe(20);
                const middleChunk = await readStream.read(5);
                expect(Buffer.from(middleChunk).toString('utf8')).toBe('KLMNO');
                expect(readStream.getPosition()).toBe(25);
            });
            it('should handle seeking across chunk boundaries in large files', async () => {
                // Create a file that spans multiple chunks
                const fileId = (0, utils_1.newid)();
                const chunkSize = file_types_1.FILE_CHUNK_SIZE;
                const data1 = Buffer.alloc(chunkSize, 'A'); // First chunk: all A's
                const data2 = Buffer.alloc(chunkSize, 'B'); // Second chunk: all B's
                const data3 = Buffer.alloc(500, 'C'); // Third partial chunk: all C's
                const largeData = new Uint8Array(Buffer.concat([data1, data2, data3]));
                const metadata = {
                    fileId,
                    name: 'large-seek-test.bin',
                    fileSize: largeData.length,
                    mimeType: 'application/octet-stream',
                };
                await fileTable.saveFile(metadata, largeData);
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                // Seek to end of first chunk
                await readStream.seek(chunkSize - 5);
                const endOfFirst = await readStream.read(10); // Should span chunk boundary
                expect(endOfFirst?.length).toBe(10);
                expect(endOfFirst?.subarray(0, 5).every(b => b === 65)).toBe(true); // First 5 bytes: A's
                expect(endOfFirst?.subarray(5, 10).every(b => b === 66)).toBe(true); // Next 5 bytes: B's
                // Seek to middle of second chunk
                await readStream.seek(chunkSize + 1000);
                const middleOfSecond = await readStream.read(100);
                expect(middleOfSecond?.length).toBe(100);
                expect(middleOfSecond?.every(b => b === 66)).toBe(true); // All B's
                // Seek to third chunk
                await readStream.seek(chunkSize * 2 + 100);
                const inThirdChunk = await readStream.read(200);
                expect(inThirdChunk?.length).toBe(200);
                expect(inThirdChunk?.every(b => b === 67)).toBe(true); // All C's
            });
            it('should handle seek error conditions', async () => {
                const fileId = (0, utils_1.newid)();
                const data = new Uint8Array(Buffer.from('Short test data', 'utf8'));
                const metadata = {
                    fileId,
                    name: 'error-test.txt',
                    fileSize: data.length,
                    mimeType: 'text/plain',
                };
                await fileTable.saveFile(metadata, data);
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                // Test negative seek position
                await expect(readStream.seek(-1)).rejects.toThrow('Seek position cannot be negative');
                // Test seeking beyond file size
                await expect(readStream.seek(data.length + 1)).rejects.toThrow('Seek position beyond file size');
                // Test seeking to exact end of file
                await readStream.seek(data.length);
                expect(readStream.getPosition()).toBe(data.length);
                expect(readStream.getBytesRemaining()).toBe(0);
                // Should return null when reading at EOF
                const atEof = await readStream.read();
                expect(atEof).toBeNull();
            });
            it('should handle large file streaming reads', async () => {
                // Create a large file with known pattern
                const fileId = (0, utils_1.newid)();
                const chunkSize = file_types_1.FILE_CHUNK_SIZE;
                const buffer = Buffer.alloc(chunkSize + 1000);
                // Fill with pattern: first chunk with 'A', remainder with 'B'
                buffer.fill(65, 0, chunkSize); // 'A'
                buffer.fill(66, chunkSize); // 'B'
                const largeData = new Uint8Array(buffer);
                const metadata = {
                    fileId,
                    name: 'large-read.bin',
                    fileSize: largeData.length,
                    mimeType: 'application/octet-stream',
                };
                await fileTable.saveFile(metadata, largeData);
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                // Read first chunk
                const firstChunk = await readStream.read(chunkSize);
                expect(firstChunk?.length).toBe(chunkSize);
                expect(firstChunk?.every(b => b === 65)).toBe(true); // All 'A's
                // Read remainder
                const remainder = await readStream.read();
                expect(remainder?.length).toBe(1000);
                expect(remainder?.every(b => b === 66)).toBe(true); // All 'B's
                // Should be at EOF
                const eof = await readStream.read();
                expect(eof).toBeNull();
                expect(readStream.isEOF()).toBe(true);
            });
            it('should return null for non-existent file', async () => {
                const readStream = await fileTable.openReadStream('non-existent-file');
                expect(readStream).toBeNull();
            });
            it('should handle partial reads correctly', async () => {
                const fileId = (0, utils_1.newid)();
                const data = new Uint8Array(Buffer.from('ABCDEFGHIJKLMNOPQRSTUVWXYZ', 'utf8'));
                const metadata = {
                    fileId,
                    name: 'partial-read.txt',
                    fileSize: data.length,
                    mimeType: 'text/plain',
                };
                await fileTable.saveFile(metadata, data);
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                // Read in small chunks
                const chunk1 = await readStream.read(5);
                expect(Buffer.from(chunk1).toString('utf8')).toBe('ABCDE');
                expect(readStream.getPosition()).toBe(5);
                expect(readStream.getBytesRemaining()).toBe(21);
                const chunk2 = await readStream.read(10);
                expect(Buffer.from(chunk2).toString('utf8')).toBe('FGHIJKLMNO');
                expect(readStream.getPosition()).toBe(15);
                const chunk3 = await readStream.read(20); // More than remaining
                expect(Buffer.from(chunk3).toString('utf8')).toBe('PQRSTUVWXYZ');
                expect(readStream.getPosition()).toBe(26);
                expect(readStream.getBytesRemaining()).toBe(0);
                // Should be EOF now
                const eof = await readStream.read();
                expect(eof).toBeNull();
            });
            it('should return null when chunk is unavailable', async () => {
                const fileId = (0, utils_1.newid)();
                const metadata = {
                    fileId,
                    name: 'missing-chunk.txt',
                    fileSize: 50,
                    fileHash: 'hash123',
                    chunkHashes: ['missing_chunk_hash'],
                };
                // Insert file metadata without storing the actual chunk
                await fileTable.dataSource.insert(metadata);
                // This should return null when the chunk can't be found
                // instead of throwing an error or hanging
                const result = await fileTable.getFile(fileId);
                expect(result).toBeNull();
            });
        });
        describe('Round-trip streaming', () => {
            it('should write and read back identical data using streams', async () => {
                const fileId = (0, utils_1.newid)();
                const testData = 'This is a round-trip test with streaming! '.repeat(1000);
                const originalBuffer = new Uint8Array(Buffer.from(testData, 'utf8'));
                const metadata = {
                    fileId,
                    name: 'roundtrip.txt',
                    fileSize: 0,
                    mimeType: 'text/plain',
                };
                // Write using stream
                const writeStream = await fileTable.createWriteStream(metadata);
                // Write in chunks
                let offset = 0;
                const writeChunkSize = 1000;
                while (offset < originalBuffer.length) {
                    const end = Math.min(offset + writeChunkSize, originalBuffer.length);
                    const chunk = originalBuffer.subarray(offset, end);
                    await writeStream.write(chunk);
                    offset = end;
                }
                const savedFile = await writeStream.finalize();
                expect(savedFile.fileSize).toBe(originalBuffer.length);
                // Read back using stream
                const readStream = await fileTable.openReadStream(fileId);
                expect(readStream).toBeTruthy();
                const readBuffer = await readStream.readAll();
                // Verify identical
                expect(readBuffer).not.toBeNull();
                expect(new Uint8Array(readBuffer)).toEqual(originalBuffer);
                expect(Buffer.from(readBuffer).toString('utf8')).toBe(testData);
            });
        });
    });
});
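
Distilled from the test file above, the sketch below shows the FilesTable surface these tests exercise: whole-file saveFile/getFile, plus the streaming createWriteStream/openReadStream calls. It is illustrative only; how an application obtains a FilesTable instance is not shown in this diff (the tests construct one directly against mocks), so the filesTable parameter and the literal file ids here are assumptions rather than published API, and the tests themselves generate ids with newid() from the SDK's utils module.

// Hedged usage sketch based solely on calls appearing in files.test.js.
// `filesTable` is assumed to be an already-constructed FilesTable instance.
async function filesTableExample(filesTable) {
    // Whole-file write and read-back
    const data = new Uint8Array(Buffer.from('Hello, World!', 'utf8'));
    const saved = await filesTable.saveFile(
        { fileId: 'example-file-id', name: 'hello.txt', fileSize: data.length, mimeType: 'text/plain' },
        data
    );
    // Small files carry content-addressed chunkHashes; very large files carry an indexFileId instead.
    console.log(saved.chunkHashes ?? saved.indexFileId);
    const roundTrip = await filesTable.getFile('example-file-id'); // data, or null if chunks are missing

    // Streaming write, then streaming read with seek
    const writeStream = await filesTable.createWriteStream(
        { fileId: 'example-stream-id', name: 'stream.txt', fileSize: 0, mimeType: 'text/plain' }
    );
    await writeStream.write(new Uint8Array(Buffer.from('part one ', 'utf8')));
    await writeStream.write(new Uint8Array(Buffer.from('part two', 'utf8')));
    const streamed = await writeStream.finalize();

    const readStream = await filesTable.openReadStream(streamed.fileId);
    await readStream.seek(5);                 // position within the file
    const slice = await readStream.read(4);   // partial read from that position
    const rest = await readStream.readAll();  // remainder of the file
    return { roundTrip, slice, rest };
}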