@lifestreamdynamics/vault-cli 1.3.9 → 1.3.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/commands/ai.js +24 -0
- package/dist/commands/audit.js +24 -4
- package/dist/commands/links.js +30 -2
- package/dist/commands/publish.js +5 -2
- package/dist/commands/sync.js +9 -5
- package/dist/commands/vaults.js +5 -1
- package/dist/commands/versions.js +27 -1
- package/dist/sync/engine.d.ts +7 -2
- package/dist/sync/engine.js +46 -10
- package/package.json +1 -1
package/dist/commands/ai.js
CHANGED

@@ -5,6 +5,30 @@ import { createOutput, handleError } from '../utils/output.js';
 export function registerAiCommands(program) {
     const ai = program.command('ai').description('AI chat and document summarization');
     const sessions = ai.command('sessions').description('AI chat session management');
+    addGlobalFlags(sessions.command('create')
+        .description('Create a new AI chat session')
+        .option('--title <title>', 'Session title')
+        .option('--vault <vaultId>', 'Vault ID to scope the session'))
+        .action(async (_opts) => {
+        const flags = resolveFlags(_opts);
+        const out = createOutput(flags);
+        out.startSpinner('Creating AI session...');
+        try {
+            const client = await getClientAsync();
+            const session = await client.ai.createSession({
+                title: _opts.title ? String(_opts.title) : undefined,
+                vaultId: _opts.vault ? String(_opts.vault) : undefined,
+            });
+            out.success(`Session created: ${session.id}`, {
+                id: session.id,
+                title: session.title ?? 'Untitled',
+                vaultId: session.vaultId ?? null,
+            });
+        }
+        catch (err) {
+            handleError(out, err, 'Failed to create AI session');
+        }
+    });
     addGlobalFlags(sessions.command('list')
         .description('List AI chat sessions'))
         .action(async (_opts) => {
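
1.3.11 adds a `sessions create` subcommand beside the existing `list`, reusing the same resolveFlags/createOutput/handleError pattern. Assuming the lsvault binary name that appears in this package's own help text, an invocation would look like this (the printed session ID is whatever the API returns):

    lsvault ai sessions create --title "Release notes" --vault vlt_123
    Session created: <session.id>

Both options are optional; an untitled session falls back to 'Untitled' in the success record.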
package/dist/commands/audit.js
CHANGED

@@ -91,8 +91,8 @@ EXAMPLES
         }
     });
     addGlobalFlags(audit.command('export')
-        .description('Export audit log entries to a CSV file or stdout')
-        .option('--format <format>', 'Export format (csv)', 'csv')
+        .description('Export audit log entries to a CSV or JSON file or stdout')
+        .option('--format <format>', 'Export format (csv or json)', 'csv')
         .option('--file <file>', 'Output file path')
         .option('--status <code>', 'Filter by HTTP status code', parseInt)
         .option('--since <date>', 'Show entries since date (ISO 8601)')

@@ -102,8 +102,8 @@ EXAMPLES
         const flags = resolveFlags(_opts);
         const out = createOutput(flags);
         try {
-            if (_opts.format !== 'csv') {
-                out.error(`Unsupported format: ${String(_opts.format)}.
+            if (_opts.format !== 'csv' && _opts.format !== 'json') {
+                out.error(`Unsupported format: ${String(_opts.format)}. Supported: csv, json`);
                 process.exitCode = 2;
                 return;
             }

@@ -141,6 +141,26 @@ EXAMPLES
                 out.status('No audit log entries to export.');
                 return;
             }
+            if (_opts.format === 'json') {
+                const jsonOutput = JSON.stringify(entries, null, 2);
+                if (_opts.file) {
+                    const outputPath = String(_opts.file);
+                    const outputDir = path.dirname(outputPath);
+                    if (!fs.existsSync(outputDir)) {
+                        fs.mkdirSync(outputDir, { recursive: true });
+                    }
+                    fs.writeFileSync(outputPath, jsonOutput, 'utf-8');
+                    out.success(`Exported ${entries.length} entries to ${outputPath}`, {
+                        entries: entries.length,
+                        path: outputPath,
+                        format: 'json',
+                    });
+                }
+                else {
+                    out.raw(jsonOutput + '\n');
+                }
+                return;
+            }
             const csv = logger.exportCsv(entries);
             if (_opts.file) {
                 const outputPath = String(_opts.file);
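
The export path now branches on format before the CSV serializer runs: JSON output comes from JSON.stringify(entries, null, 2), and when --file is given the parent directory is created on demand via fs.mkdirSync with recursive: true. A plausible invocation, with an illustrative output path:

    lsvault audit export --format json --file exports/audit.json

Without --file, the pretty-printed JSON streams to stdout through out.raw, so it can be piped straight into tools like jq.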
package/dist/commands/links.js
CHANGED

@@ -100,9 +100,37 @@ export function registerLinkCommands(program) {
             process.stdout.write(JSON.stringify({ nodes: graph.nodes, edges: graph.edges }) + '\n');
         }
         else {
-            process.stdout.write(chalk.bold(`Nodes: ${graph.nodes.length} Edges: ${graph.edges.length}\n`));
+            process.stdout.write(chalk.bold(`Nodes: ${graph.nodes.length} Edges: ${graph.edges.length}\n\n`));
+            // Most connected nodes (top 5)
+            const connectionCounts = new Map();
             for (const node of graph.nodes) {
-
+                connectionCounts.set(node.id, 0);
+            }
+            for (const edge of graph.edges) {
+                connectionCounts.set(edge.source, (connectionCounts.get(edge.source) ?? 0) + 1);
+                connectionCounts.set(edge.target, (connectionCounts.get(edge.target) ?? 0) + 1);
+            }
+            const sorted = [...connectionCounts.entries()].sort((a, b) => b[1] - a[1]);
+            const topConnected = sorted.slice(0, 5).filter(([, count]) => count > 0);
+            if (topConnected.length > 0) {
+                process.stdout.write(chalk.bold('Most connected:\n'));
+                for (const [nodeId, count] of topConnected) {
+                    const node = graph.nodes.find((n) => n.id === nodeId);
+                    process.stdout.write(`  ${chalk.cyan(String(node?.path ?? nodeId))} (${count} links)\n`);
+                }
+                process.stdout.write('\n');
+            }
+            // Orphan nodes (no connections)
+            const orphans = sorted.filter(([, count]) => count === 0);
+            if (orphans.length > 0) {
+                process.stdout.write(chalk.bold(`Orphan nodes (${orphans.length}):\n`));
+                for (const [nodeId] of orphans.slice(0, 10)) {
+                    const node = graph.nodes.find((n) => n.id === nodeId);
+                    process.stdout.write(`  ${chalk.dim(String(node?.path ?? nodeId))}\n`);
+                }
+                if (orphans.length > 10) {
+                    process.stdout.write(chalk.dim(`  ... and ${orphans.length - 10} more\n`));
+                }
             }
         }
     }
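
The human-readable graph view now derives an undirected degree per node: every edge increments the count of both its endpoints, so sorting descending surfaces hubs, and a count of zero marks an orphan. A standalone sketch of the same technique, with node and edge shapes inferred from the diff above:

    // Degree counting over a link graph; shapes mirror graph.nodes / graph.edges.
    const graph = {
        nodes: [{ id: 'a', path: 'index.md' }, { id: 'b', path: 'notes.md' }, { id: 'c', path: 'draft.md' }],
        edges: [{ source: 'a', target: 'b' }],
    };
    const counts = new Map(graph.nodes.map((n) => [n.id, 0]));
    for (const { source, target } of graph.edges) {
        counts.set(source, (counts.get(source) ?? 0) + 1);
        counts.set(target, (counts.get(target) ?? 0) + 1);
    }
    const sorted = [...counts.entries()].sort((a, b) => b[1] - a[1]);
    console.log(sorted.filter(([, c]) => c > 0));   // hubs: [['a', 1], ['b', 1]]
    console.log(sorted.filter(([, c]) => c === 0)); // orphans: [['c', 0]]

A self-referencing edge would count twice for its node, and the per-node graph.nodes.find lookups in the command are linear scans, which is fine at the top-5/top-10 display sizes used here.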
package/dist/commands/publish.js
CHANGED

@@ -164,13 +164,16 @@ export function registerPublishCommands(program) {
             const result = await client.publish.getSubdomain(vaultId);
             out.stopSpinner();
             if (flags.output === 'json') {
-                out.
+                out.raw(JSON.stringify(result, null, 2) + '\n');
             }
             else if (result.subdomain == null) {
                 out.status('No subdomain configured.');
             }
             else {
-                out.record({
+                out.record({
+                    subdomain: result.subdomain,
+                    url: `https://${result.subdomain}.lifestreamdynamics.com`,
+                });
             }
         }
         catch (err) {
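
In 1.3.11 the JSON branch emits the raw getSubdomain result, and the human-readable branch expands the record with a URL derived from the subdomain. For a vault whose subdomain is docs (illustrative), the record would resolve to something like the following; the exact key/value rendering depends on out.record:

    subdomain: docs
    url: https://docs.lifestreamdynamics.com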
package/dist/commands/sync.js
CHANGED

@@ -9,7 +9,7 @@ import { formatUptime } from '../utils/format.js';
 import { loadSyncConfigs, createSyncConfig, deleteSyncConfig, getSyncConfig, } from '../sync/config.js';
 import { deleteSyncState, loadSyncState, saveSyncState, hashFileContent, buildRemoteFileState } from '../sync/state.js';
 import { resolveIgnorePatterns } from '../sync/ignore.js';
-import { scanLocalFiles, scanRemoteFiles, executePull, executePush, computePullDiff, computePushDiff, } from '../sync/engine.js';
+import { scanLocalFiles, scanRemoteFiles, executePull, executePush, computePullDiff, computePushDiff, resolveConcurrency, } from '../sync/engine.js';
 import { formatDiff } from '../sync/diff.js';
 import { createWatcher } from '../sync/watcher.js';
 import { createRemotePoller } from '../sync/remote-poller.js';

@@ -153,7 +153,8 @@ Sync modes:
     // sync pull <syncId>
     addGlobalFlags(sync.command('pull')
         .description('Pull remote changes to local directory')
-        .argument('<syncId>', 'Sync configuration ID'))
+        .argument('<syncId>', 'Sync configuration ID')
+        .option('--concurrency <n>', 'Max concurrent file transfers (1-16, default 4)', (v) => parseInt(v, 10)))
         .action(async (syncId, _opts) => {
         const flags = resolveFlags(_opts);
         const out = createOutput(flags);

@@ -211,12 +212,13 @@ Sync modes:
         if (flags.verbose) {
             out.status(formatDiff(diff));
         }
+        const concurrency = resolveConcurrency(_opts.concurrency);
         out.startSpinner(`Pulling ${totalOps} file(s)...`);
         const result = await executePull(client, config, diff, (progress) => {
             if (progress.phase === 'transferring' && progress.currentFile) {
                 out.startSpinner(`[${progress.current}/${progress.total}] ${progress.currentFile}`);
             }
-        });
+        }, concurrency);
         if (result.errors.length > 0) {
             out.failSpinner(`Pull completed with ${result.errors.length} error(s)`);
             for (const err of result.errors) {

@@ -241,7 +243,8 @@ Sync modes:
     // sync push <syncId>
     addGlobalFlags(sync.command('push')
         .description('Push local changes to remote vault')
-        .argument('<syncId>', 'Sync configuration ID'))
+        .argument('<syncId>', 'Sync configuration ID')
+        .option('--concurrency <n>', 'Max concurrent file transfers (1-16, default 4)', (v) => parseInt(v, 10)))
         .action(async (syncId, _opts) => {
         const flags = resolveFlags(_opts);
         const out = createOutput(flags);

@@ -299,12 +302,13 @@ Sync modes:
         if (flags.verbose) {
             out.status(formatDiff(diff));
         }
+        const concurrency = resolveConcurrency(_opts.concurrency);
        out.startSpinner(`Pushing ${totalOps} file(s)...`);
         const result = await executePush(client, config, diff, (progress) => {
             if (progress.phase === 'transferring' && progress.currentFile) {
                 out.startSpinner(`[${progress.current}/${progress.total}] ${progress.currentFile}`);
             }
-        });
+        }, concurrency);
         if (result.errors.length > 0) {
             out.failSpinner(`Push completed with ${result.errors.length} error(s)`);
             for (const err of result.errors) {
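
Both pull and push validate the new flag through the shared resolveConcurrency before any transfer starts, so an out-of-range value fails fast rather than mid-sync; omitting the flag moves the effective default from the old one-at-a-time loop to four concurrent transfers. An illustrative invocation (sync ID hypothetical):

    lsvault sync pull 0f3a9c --concurrency 8

Non-integers and values outside 1-16 raise "--concurrency must be an integer between 1 and 16 (got ...)" from engine.js.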
package/dist/commands/vaults.js
CHANGED

@@ -169,10 +169,12 @@ EXAMPLES
     // vault tree
     addGlobalFlags(vaults.command('tree')
         .description('Show vault file tree')
-        .argument('<vaultId>', 'Vault ID or slug'))
+        .argument('<vaultId>', 'Vault ID or slug')
+        .option('--depth <n>', 'Maximum display depth (0 = root only)', parseInt))
         .action(async (vaultId, _opts) => {
         const flags = resolveFlags(_opts);
         const out = createOutput(flags);
+        const maxDepth = _opts.depth;
         out.startSpinner('Fetching vault tree...');
         try {
             vaultId = await resolveVaultId(vaultId);

@@ -184,6 +186,8 @@ EXAMPLES
             }
             else {
                 function printNode(node, depth) {
+                    if (maxDepth !== undefined && depth > maxDepth)
+                        return;
                     const indent = ' '.repeat(depth);
                     const icon = node.type === 'directory' ? chalk.yellow('📁') : chalk.cyan('📄');
                     process.stdout.write(`${indent}${icon} ${node.name}\n`);
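
The guard prunes at render time only; the full tree is still fetched, and printNode simply stops once depth exceeds maxDepth. Taking the `// vault tree` comment at face value (vault ID illustrative), limiting output to the top two levels might look like:

    lsvault vault tree my-vault --depth 1

With --depth 0 only root-level entries print, matching the option's help text.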
package/dist/commands/versions.js
CHANGED

@@ -146,7 +146,8 @@ EXAMPLES
         .argument('<version>', 'Version number to restore')
         .addHelpText('after', `
 EXAMPLES
-  lsvault versions restore abc123 notes/todo.md 2`))
+  lsvault versions restore abc123 notes/todo.md 2
+  lsvault versions restore abc123 notes/todo.md 2 --dry-run`))
         .action(async (vaultId, docPath, versionStr, _opts) => {
         const flags = resolveFlags(_opts);
         const out = createOutput(flags);

@@ -156,6 +157,31 @@ EXAMPLES
             process.exitCode = 1;
             return;
         }
+        if (flags.dryRun) {
+            out.startSpinner(`Fetching version ${versionNum} preview...`);
+            try {
+                vaultId = await resolveVaultId(vaultId);
+                const client = await getClientAsync();
+                const version = await client.documents.getVersion(vaultId, docPath, versionNum);
+                out.stopSpinner();
+                if (flags.output === 'json') {
+                    out.raw(JSON.stringify({ dryRun: true, version: { version: version.versionNum, createdAt: version.createdAt, size: version.content?.length ?? 0 } }, null, 2) + '\n');
+                }
+                else {
+                    process.stdout.write(chalk.bold('Dry run — no changes made\n\n'));
+                    process.stdout.write(`Version: ${version.versionNum}\n`);
+                    process.stdout.write(`Created: ${version.createdAt}\n`);
+                    if (version.content) {
+                        const preview = version.content.slice(0, 200);
+                        process.stdout.write(`Content preview:\n${chalk.dim(preview)}${version.content.length > 200 ? '...' : ''}\n`);
+                    }
+                }
+            }
+            catch (err) {
+                handleError(out, err, 'Failed to preview version');
+            }
+            return;
+        }
         out.startSpinner(`Restoring to version ${versionNum}...`);
         try {
             vaultId = await resolveVaultId(vaultId);
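
The new branch makes restore previewable, and the second help-text example is runnable as-is:

    lsvault versions restore abc123 notes/todo.md 2 --dry-run

It fetches version 2's metadata plus a 200-character content preview and returns without writing anything; drop --dry-run and the original restore path runs unchanged.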
package/dist/sync/engine.d.ts
CHANGED

@@ -30,12 +30,17 @@ export declare function scanLocalFiles(localPath: string, ignorePatterns: string
  * Returns a map of doc paths -> FileState.
  */
 export declare function scanRemoteFiles(client: LifestreamVaultClient, vaultId: string, ignorePatterns: string[]): Promise<Record<string, FileState>>;
+/**
+ * Validates and clamps a user-supplied concurrency value. Throws on invalid
+ * values so the CLI can surface a clear error before kicking off any I/O.
+ */
+export declare function resolveConcurrency(value: number | undefined): number;
 /**
  * Execute a pull operation: download remote changes to local.
  */
-export declare function executePull(client: LifestreamVaultClient, config: SyncConfig, diff: SyncDiff, onProgress?: ProgressCallback): Promise<SyncResult>;
+export declare function executePull(client: LifestreamVaultClient, config: SyncConfig, diff: SyncDiff, onProgress?: ProgressCallback, concurrency?: number): Promise<SyncResult>;
 /**
  * Execute a push operation: upload local changes to remote.
  */
-export declare function executePush(client: LifestreamVaultClient, config: SyncConfig, diff: SyncDiff, onProgress?: ProgressCallback): Promise<SyncResult>;
+export declare function executePush(client: LifestreamVaultClient, config: SyncConfig, diff: SyncDiff, onProgress?: ProgressCallback, concurrency?: number): Promise<SyncResult>;
 export { computePullDiff, computePushDiff, type SyncDiff, type SyncDiffEntry };
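
Because concurrency is a trailing optional parameter, the declarations stay source-compatible: every pre-1.3.11 call site still type-checks, and the default of 4 lives in engine.js rather than in the declaration. A call sketch, with the client/config/diff variables assumed in scope:

    // Both forms satisfy the new signatures.
    const a = await executePull(client, config, diff, onProgress);    // pool of 4 (engine.js default)
    const b = await executePull(client, config, diff, onProgress, 8); // explicit pool size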
package/dist/sync/engine.js
CHANGED

@@ -54,7 +54,7 @@ export async function scanRemoteFiles(client, vaultId, ignorePatterns) {
         if (!shouldIgnore(doc.path, ignorePatterns)) {
             files[doc.path] = {
                 path: doc.path,
-                hash:
+                hash: doc.contentHash,
                 mtime: doc.fileModifiedAt,
                 size: doc.sizeBytes,
             };

@@ -71,12 +71,31 @@ function atomicWriteFileSync(targetPath, content, encoding = 'utf-8') {
     fs.writeFileSync(tmpFile, content, encoding);
     fs.renameSync(tmpFile, targetPath);
 }
+/**
+ * Default in-flight transfer count. A small number flattens the load1 spike
+ * a full-vault `sync pull` causes on the API host without making single
+ * transfers measurably slower.
+ */
+const DEFAULT_TRANSFER_CONCURRENCY = 4;
+const MAX_TRANSFER_CONCURRENCY = 16;
+/**
+ * Validates and clamps a user-supplied concurrency value. Throws on invalid
+ * values so the CLI can surface a clear error before kicking off any I/O.
+ */
+export function resolveConcurrency(value) {
+    if (value === undefined)
+        return DEFAULT_TRANSFER_CONCURRENCY;
+    if (!Number.isInteger(value) || value < 1 || value > MAX_TRANSFER_CONCURRENCY) {
+        throw new Error(`--concurrency must be an integer between 1 and ${MAX_TRANSFER_CONCURRENCY} (got ${value})`);
+    }
+    return value;
+}
 /**
  * Shared sync operation executor used by both pull and push.
  * Handles result initialization, state loading, progress callbacks,
  * quota error handling, state saving, and lastSync update.
  */
-async function executeSyncOperation(config, diff, handlers, onProgress) {
+async function executeSyncOperation(config, diff, handlers, onProgress, concurrency = DEFAULT_TRANSFER_CONCURRENCY) {
     const result = {
         filesUploaded: 0,
         filesDownloaded: 0,

@@ -87,7 +106,12 @@ async function executeSyncOperation(config, diff, handlers, onProgress) {
     const state = loadSyncState(config.id);
     const allOps = [...handlers.transfers, ...handlers.deletes];
     let current = 0;
-
+    // Once a quota error is hit anywhere in the pool we stop submitting new
+    // work but let in-flight transfers drain to keep state consistent.
+    let stopSubmitting = false;
+    async function runOne(entry) {
+        if (stopSubmitting)
+            return;
         current++;
         onProgress?.({
             phase: 'transferring',

@@ -112,13 +136,25 @@ async function executeSyncOperation(config, diff, handlers, onProgress) {
         }
         catch (err) {
            const message = err instanceof Error ? err.message : String(err);
+            result.errors.push({ path: entry.path, error: message });
             if (isQuotaError(message)) {
-
-                break; // Stop immediately on quota errors
+                stopSubmitting = true;
             }
-            result.errors.push({ path: entry.path, error: message });
         }
     }
+    // Bounded async pool. Workers race for entries off the queue tail; once
+    // the queue is empty (or stopSubmitting is set), each worker exits and
+    // Promise.all resolves only after every in-flight transfer has settled.
+    const queue = handlers.transfers.slice();
+    const poolSize = Math.min(Math.max(1, concurrency), Math.max(1, queue.length));
+    await Promise.all(Array.from({ length: poolSize }, async () => {
+        while (!stopSubmitting) {
+            const entry = queue.shift();
+            if (!entry)
+                return;
+            await runOne(entry);
+        }
+    }));
     for (const entry of handlers.deletes) {
         current++;
         onProgress?.({

@@ -154,7 +190,7 @@ async function executeSyncOperation(config, diff, handlers, onProgress) {
 /**
  * Execute a pull operation: download remote changes to local.
  */
-export async function executePull(client, config, diff, onProgress) {
+export async function executePull(client, config, diff, onProgress, concurrency) {
     return executeSyncOperation(config, diff, {
         transfers: diff.downloads,
         deletes: diff.deletes,

@@ -175,12 +211,12 @@ export async function executePull(client, config, diff, onProgress) {
                 fs.unlinkSync(localFile);
             }
         },
-    }, onProgress);
+    }, onProgress, concurrency);
 }
 /**
  * Execute a push operation: upload local changes to remote.
  */
-export async function executePush(client, config, diff, onProgress) {
+export async function executePush(client, config, diff, onProgress, concurrency) {
     return executeSyncOperation(config, diff, {
         transfers: diff.uploads,
         deletes: diff.deletes,

@@ -194,7 +230,7 @@ export async function executePush(client, config, diff, onProgress) {
         async deleteFile(entry, cfg) {
             await retryWithBackoff(() => client.documents.delete(cfg.vaultId, entry.path));
         },
-    }, onProgress);
+    }, onProgress, concurrency);
 }
 /**
  * Retry a function with exponential backoff (max 3 retries).
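
The structural change replaces the old sequential transfer loop, which could only break on quota errors, with a pull-based worker pool: poolSize workers each shift the next entry off a shared queue and await runOne. Because queue.shift() runs synchronously between awaits on Node's single-threaded event loop, no two workers can claim the same entry, and Promise.all resolves only after every in-flight transfer settles; deletes still run sequentially afterwards. A self-contained sketch of the pattern (helper names illustrative, not exports of this package):

    // Bounded worker pool over a shared queue, mirroring executeSyncOperation.
    async function runPool(items, worker, size = 4) {
        const queue = items.slice();
        let stop = false; // set on fatal errors: drain in-flight work, submit no more
        const poolSize = Math.min(Math.max(1, size), Math.max(1, queue.length));
        await Promise.all(Array.from({ length: poolSize }, async () => {
            while (!stop) {
                const item = queue.shift(); // synchronous between awaits: no double-claiming
                if (!item)
                    return; // queue drained, worker exits
                try {
                    await worker(item);
                }
                catch {
                    stop = true; // the real code records the error and stops only on quota errors
                }
            }
        }));
    }

    // Example: ten fake transfers, at most four in flight at once.
    await runPool(
        Array.from({ length: 10 }, (_, i) => ({ path: `doc-${i}.md` })),
        async (entry) => { console.log('transfer', entry.path); },
    );

One subtlety the pool relies on: a falsy queue entry is read as end-of-queue, which is safe here because transfer entries are always objects.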