@uploadista/client-core 0.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +5 -0
- package/LICENSE +21 -0
- package/README.md +100 -0
- package/dist/auth/auth-http-client.d.ts +50 -0
- package/dist/auth/auth-http-client.d.ts.map +1 -0
- package/dist/auth/auth-http-client.js +110 -0
- package/dist/auth/direct-auth.d.ts +38 -0
- package/dist/auth/direct-auth.d.ts.map +1 -0
- package/dist/auth/direct-auth.js +95 -0
- package/dist/auth/index.d.ts +6 -0
- package/dist/auth/index.d.ts.map +1 -0
- package/dist/auth/index.js +5 -0
- package/dist/auth/no-auth.d.ts +26 -0
- package/dist/auth/no-auth.d.ts.map +1 -0
- package/dist/auth/no-auth.js +33 -0
- package/dist/auth/saas-auth.d.ts +80 -0
- package/dist/auth/saas-auth.d.ts.map +1 -0
- package/dist/auth/saas-auth.js +167 -0
- package/dist/auth/types.d.ts +101 -0
- package/dist/auth/types.d.ts.map +1 -0
- package/dist/auth/types.js +8 -0
- package/dist/chunk-buffer.d.ts +209 -0
- package/dist/chunk-buffer.d.ts.map +1 -0
- package/dist/chunk-buffer.js +236 -0
- package/dist/client/create-uploadista-client.d.ts +369 -0
- package/dist/client/create-uploadista-client.d.ts.map +1 -0
- package/dist/client/create-uploadista-client.js +518 -0
- package/dist/client/index.d.ts +4 -0
- package/dist/client/index.d.ts.map +1 -0
- package/dist/client/index.js +3 -0
- package/dist/client/uploadista-api.d.ts +284 -0
- package/dist/client/uploadista-api.d.ts.map +1 -0
- package/dist/client/uploadista-api.js +444 -0
- package/dist/client/uploadista-websocket-manager.d.ts +110 -0
- package/dist/client/uploadista-websocket-manager.d.ts.map +1 -0
- package/dist/client/uploadista-websocket-manager.js +207 -0
- package/dist/error.d.ts +106 -0
- package/dist/error.d.ts.map +1 -0
- package/dist/error.js +69 -0
- package/dist/index.d.ts +9 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +12 -0
- package/dist/logger.d.ts +70 -0
- package/dist/logger.d.ts.map +1 -0
- package/dist/logger.js +59 -0
- package/dist/mock-data-store.d.ts +30 -0
- package/dist/mock-data-store.d.ts.map +1 -0
- package/dist/mock-data-store.js +88 -0
- package/dist/network-monitor.d.ts +262 -0
- package/dist/network-monitor.d.ts.map +1 -0
- package/dist/network-monitor.js +291 -0
- package/dist/services/abort-controller-service.d.ts +19 -0
- package/dist/services/abort-controller-service.d.ts.map +1 -0
- package/dist/services/abort-controller-service.js +4 -0
- package/dist/services/checksum-service.d.ts +4 -0
- package/dist/services/checksum-service.d.ts.map +1 -0
- package/dist/services/checksum-service.js +1 -0
- package/dist/services/file-reader-service.d.ts +38 -0
- package/dist/services/file-reader-service.d.ts.map +1 -0
- package/dist/services/file-reader-service.js +4 -0
- package/dist/services/fingerprint-service.d.ts +4 -0
- package/dist/services/fingerprint-service.d.ts.map +1 -0
- package/dist/services/fingerprint-service.js +1 -0
- package/dist/services/http-client.d.ts +182 -0
- package/dist/services/http-client.d.ts.map +1 -0
- package/dist/services/http-client.js +1 -0
- package/dist/services/id-generation-service.d.ts +10 -0
- package/dist/services/id-generation-service.d.ts.map +1 -0
- package/dist/services/id-generation-service.js +1 -0
- package/dist/services/index.d.ts +11 -0
- package/dist/services/index.d.ts.map +1 -0
- package/dist/services/index.js +10 -0
- package/dist/services/platform-service.d.ts +48 -0
- package/dist/services/platform-service.d.ts.map +1 -0
- package/dist/services/platform-service.js +10 -0
- package/dist/services/service-container.d.ts +25 -0
- package/dist/services/service-container.d.ts.map +1 -0
- package/dist/services/service-container.js +1 -0
- package/dist/services/storage-service.d.ts +26 -0
- package/dist/services/storage-service.d.ts.map +1 -0
- package/dist/services/storage-service.js +1 -0
- package/dist/services/websocket-service.d.ts +36 -0
- package/dist/services/websocket-service.d.ts.map +1 -0
- package/dist/services/websocket-service.js +4 -0
- package/dist/smart-chunker.d.ts +72 -0
- package/dist/smart-chunker.d.ts.map +1 -0
- package/dist/smart-chunker.js +317 -0
- package/dist/storage/client-storage.d.ts +148 -0
- package/dist/storage/client-storage.d.ts.map +1 -0
- package/dist/storage/client-storage.js +62 -0
- package/dist/storage/in-memory-storage-service.d.ts +7 -0
- package/dist/storage/in-memory-storage-service.d.ts.map +1 -0
- package/dist/storage/in-memory-storage-service.js +24 -0
- package/dist/storage/index.d.ts +3 -0
- package/dist/storage/index.d.ts.map +1 -0
- package/dist/storage/index.js +2 -0
- package/dist/types/buffered-chunk.d.ts +6 -0
- package/dist/types/buffered-chunk.d.ts.map +1 -0
- package/dist/types/buffered-chunk.js +1 -0
- package/dist/types/chunk-metrics.d.ts +12 -0
- package/dist/types/chunk-metrics.d.ts.map +1 -0
- package/dist/types/chunk-metrics.js +1 -0
- package/dist/types/flow-result.d.ts +11 -0
- package/dist/types/flow-result.d.ts.map +1 -0
- package/dist/types/flow-result.js +1 -0
- package/dist/types/flow-upload-config.d.ts +54 -0
- package/dist/types/flow-upload-config.d.ts.map +1 -0
- package/dist/types/flow-upload-config.js +1 -0
- package/dist/types/flow-upload-item.d.ts +16 -0
- package/dist/types/flow-upload-item.d.ts.map +1 -0
- package/dist/types/flow-upload-item.js +1 -0
- package/dist/types/flow-upload-options.d.ts +41 -0
- package/dist/types/flow-upload-options.d.ts.map +1 -0
- package/dist/types/flow-upload-options.js +1 -0
- package/dist/types/index.d.ts +14 -0
- package/dist/types/index.d.ts.map +1 -0
- package/dist/types/index.js +13 -0
- package/dist/types/multi-flow-upload-options.d.ts +33 -0
- package/dist/types/multi-flow-upload-options.d.ts.map +1 -0
- package/dist/types/multi-flow-upload-options.js +1 -0
- package/dist/types/multi-flow-upload-state.d.ts +9 -0
- package/dist/types/multi-flow-upload-state.d.ts.map +1 -0
- package/dist/types/multi-flow-upload-state.js +1 -0
- package/dist/types/performance-insights.d.ts +11 -0
- package/dist/types/performance-insights.d.ts.map +1 -0
- package/dist/types/performance-insights.js +1 -0
- package/dist/types/previous-upload.d.ts +20 -0
- package/dist/types/previous-upload.d.ts.map +1 -0
- package/dist/types/previous-upload.js +9 -0
- package/dist/types/upload-options.d.ts +40 -0
- package/dist/types/upload-options.d.ts.map +1 -0
- package/dist/types/upload-options.js +1 -0
- package/dist/types/upload-response.d.ts +6 -0
- package/dist/types/upload-response.d.ts.map +1 -0
- package/dist/types/upload-response.js +1 -0
- package/dist/types/upload-result.d.ts +57 -0
- package/dist/types/upload-result.d.ts.map +1 -0
- package/dist/types/upload-result.js +1 -0
- package/dist/types/upload-session-metrics.d.ts +16 -0
- package/dist/types/upload-session-metrics.d.ts.map +1 -0
- package/dist/types/upload-session-metrics.js +1 -0
- package/dist/upload/chunk-upload.d.ts +40 -0
- package/dist/upload/chunk-upload.d.ts.map +1 -0
- package/dist/upload/chunk-upload.js +82 -0
- package/dist/upload/flow-upload.d.ts +48 -0
- package/dist/upload/flow-upload.d.ts.map +1 -0
- package/dist/upload/flow-upload.js +240 -0
- package/dist/upload/index.d.ts +3 -0
- package/dist/upload/index.d.ts.map +1 -0
- package/dist/upload/index.js +2 -0
- package/dist/upload/parallel-upload.d.ts +65 -0
- package/dist/upload/parallel-upload.d.ts.map +1 -0
- package/dist/upload/parallel-upload.js +231 -0
- package/dist/upload/single-upload.d.ts +118 -0
- package/dist/upload/single-upload.d.ts.map +1 -0
- package/dist/upload/single-upload.js +332 -0
- package/dist/upload/upload-manager.d.ts +30 -0
- package/dist/upload/upload-manager.d.ts.map +1 -0
- package/dist/upload/upload-manager.js +57 -0
- package/dist/upload/upload-metrics.d.ts +37 -0
- package/dist/upload/upload-metrics.d.ts.map +1 -0
- package/dist/upload/upload-metrics.js +236 -0
- package/dist/upload/upload-storage.d.ts +32 -0
- package/dist/upload/upload-storage.d.ts.map +1 -0
- package/dist/upload/upload-storage.js +46 -0
- package/dist/upload/upload-strategy.d.ts +66 -0
- package/dist/upload/upload-strategy.d.ts.map +1 -0
- package/dist/upload/upload-strategy.js +171 -0
- package/dist/upload/upload-utils.d.ts +26 -0
- package/dist/upload/upload-utils.d.ts.map +1 -0
- package/dist/upload/upload-utils.js +80 -0
- package/package.json +29 -0
- package/src/__tests__/smart-chunking.test.ts +399 -0
- package/src/auth/__tests__/auth-http-client.test.ts +327 -0
- package/src/auth/__tests__/direct-auth.test.ts +135 -0
- package/src/auth/__tests__/no-auth.test.ts +40 -0
- package/src/auth/__tests__/saas-auth.test.ts +337 -0
- package/src/auth/auth-http-client.ts +150 -0
- package/src/auth/direct-auth.ts +121 -0
- package/src/auth/index.ts +5 -0
- package/src/auth/no-auth.ts +39 -0
- package/src/auth/saas-auth.ts +218 -0
- package/src/auth/types.ts +105 -0
- package/src/chunk-buffer.ts +287 -0
- package/src/client/create-uploadista-client.ts +901 -0
- package/src/client/index.ts +3 -0
- package/src/client/uploadista-api.ts +857 -0
- package/src/client/uploadista-websocket-manager.ts +275 -0
- package/src/error.ts +149 -0
- package/src/index.ts +13 -0
- package/src/logger.ts +104 -0
- package/src/mock-data-store.ts +97 -0
- package/src/network-monitor.ts +445 -0
- package/src/services/abort-controller-service.ts +21 -0
- package/src/services/checksum-service.ts +3 -0
- package/src/services/file-reader-service.ts +44 -0
- package/src/services/fingerprint-service.ts +6 -0
- package/src/services/http-client.ts +229 -0
- package/src/services/id-generation-service.ts +9 -0
- package/src/services/index.ts +10 -0
- package/src/services/platform-service.ts +65 -0
- package/src/services/service-container.ts +24 -0
- package/src/services/storage-service.ts +29 -0
- package/src/services/websocket-service.ts +33 -0
- package/src/smart-chunker.ts +451 -0
- package/src/storage/client-storage.ts +186 -0
- package/src/storage/in-memory-storage-service.ts +33 -0
- package/src/storage/index.ts +2 -0
- package/src/types/buffered-chunk.ts +5 -0
- package/src/types/chunk-metrics.ts +11 -0
- package/src/types/flow-result.ts +14 -0
- package/src/types/flow-upload-config.ts +56 -0
- package/src/types/flow-upload-item.ts +16 -0
- package/src/types/flow-upload-options.ts +56 -0
- package/src/types/index.ts +13 -0
- package/src/types/multi-flow-upload-options.ts +39 -0
- package/src/types/multi-flow-upload-state.ts +9 -0
- package/src/types/performance-insights.ts +7 -0
- package/src/types/previous-upload.ts +22 -0
- package/src/types/upload-options.ts +56 -0
- package/src/types/upload-response.ts +6 -0
- package/src/types/upload-result.ts +60 -0
- package/src/types/upload-session-metrics.ts +15 -0
- package/src/upload/chunk-upload.ts +151 -0
- package/src/upload/flow-upload.ts +367 -0
- package/src/upload/index.ts +2 -0
- package/src/upload/parallel-upload.ts +387 -0
- package/src/upload/single-upload.ts +554 -0
- package/src/upload/upload-manager.ts +106 -0
- package/src/upload/upload-metrics.ts +340 -0
- package/src/upload/upload-storage.ts +87 -0
- package/src/upload/upload-strategy.ts +296 -0
- package/src/upload/upload-utils.ts +114 -0
- package/tsconfig.json +23 -0
- package/tsconfig.tsbuildinfo +1 -0
package/dist/upload/flow-upload.js
@@ -0,0 +1,240 @@
import { UploadistaError } from "../error";
import { shouldRetry } from "./chunk-upload";
import { inStatusCategory } from "./upload-utils";
/**
 * Start a flow-based upload by initializing the streaming input node
 */
export async function startFlowUpload({ source, flowConfig, uploadistaApi, logger, platformService, openWebSocket, closeWebSocket, ...callbacks }) {
    const { flowId, storageId } = flowConfig;
    // Get the flow to find the streaming input node
    const { flow } = await uploadistaApi.getFlow(flowId);
    // Find the streaming-input-node in the flow
    const inputNode = flow.nodes.find((node) => node.type === "input");
    if (!inputNode) {
        const error = new UploadistaError({
            name: "FLOW_INCOMPATIBLE",
            message: `Flow ${flowId} does not have a streaming input node. The flow must contain a node with type "input" to support flow uploads.`,
        });
        callbacks.onError?.(error);
        throw error;
    }
    const inputNodeId = inputNode.id;
    // Step 1: Initialize the flow with init operation
    const metadata = {
        originalName: source.name ?? "unknown",
        mimeType: source.type ?? "application/octet-stream",
        size: source.size ?? 0,
        ...flowConfig.metadata,
    };
    logger.log(`Starting flow upload for flow ${flowId}, node ${inputNodeId}`);
    const { status, job } = await uploadistaApi.runFlow(flowId, storageId, {
        [inputNodeId]: {
            operation: "init",
            storageId,
            metadata,
        },
    });
    const jobId = job.id;
    if (!inStatusCategory(status, 200) || !jobId) {
        const error = new UploadistaError({
            name: "FLOW_INIT_FAILED",
            message: "Failed to initialize flow upload",
        });
        callbacks.onError?.(error);
        throw error;
    }
    callbacks.onJobStart?.(jobId);
    logger.log(`Flow job ${jobId} created, opening WebSocket`);
    // Open WebSocket to listen for flow events
    // Events are buffered in the Durable Object until connection is established
    openWebSocket(jobId);
    logger.log(`Waiting for upload ID from node`);
    // Step 2: Wait for the streaming-input-node to pause and return the upload file
    // Poll job status until paused (with timeout)
    const maxAttempts = 60; // 30 seconds total
    const pollInterval = 500; // 0.5 second
    let attempts = 0;
    let jobStatus = await uploadistaApi.getJobStatus(jobId);
    while (jobStatus.status !== "paused" && attempts < maxAttempts) {
        await new Promise((resolve) => platformService.setTimeout(resolve, pollInterval));
        jobStatus = await uploadistaApi.getJobStatus(jobId);
        attempts++;
    }
    if (jobStatus.status !== "paused") {
        const error = new UploadistaError({
            name: "FLOW_TIMEOUT",
            message: `Flow did not pause after init (status: ${jobStatus.status})`,
        });
        callbacks.onError?.(error);
        throw error;
    }
    // Get the upload file from streaming input node task result
    const streamingInputTask = jobStatus.tasks.find((task) => task.nodeId === inputNodeId);
    const uploadFile = streamingInputTask?.result;
    if (!uploadFile?.id) {
        const error = new UploadistaError({
            name: "FLOW_NO_UPLOAD_ID",
            message: "Flow did not return upload ID after init",
        });
        callbacks.onError?.(error);
        throw error;
    }
    logger.log(`Upload ID received: ${uploadFile.id}`);
    callbacks.onStart?.({
        uploadId: uploadFile.id,
        size: source.size ?? null,
    });
    return { jobId, uploadFile, inputNodeId };
}
/**
 * Upload chunks directly to the upload API (not through continueFlow)
 * This is more efficient and reuses the existing upload infrastructure
 */
export async function performFlowUpload({ jobId, uploadFile, inputNodeId, offset, source, retryAttempt = 0, abortController, retryDelays, smartChunker, uploadistaApi, logger, smartChunking, metrics, platformService, onRetry, ...callbacks }) {
    let offsetBeforeRetry = offset;
    let currentOffset = offset;
    try {
        // Get optimal chunk size
        const remainingBytes = source.size ? source.size - offset : undefined;
        const chunkSizeDecision = smartChunker.getNextChunkSize(remainingBytes);
        const chunkSize = chunkSizeDecision.size;
        const endByte = Math.min(offset + chunkSize, source.size ?? 0);
        const sliceResult = await source.slice(offset, endByte);
        if (!sliceResult || !sliceResult.value) {
            throw new UploadistaError({
                name: "NETWORK_ERROR",
                message: "Failed to read chunk from file",
            });
        }
        const chunkData = sliceResult.value;
        // Upload chunk directly to upload API (bypassing flow)
        const startTime = Date.now();
        const res = await uploadistaApi.uploadChunk(uploadFile.id, chunkData, {
            abortController,
        });
        const duration = Date.now() - startTime;
        if (!res.upload) {
            throw new UploadistaError({
                name: "UPLOAD_CHUNK_FAILED",
                message: "Upload chunk response missing upload data",
            });
        }
        currentOffset = res.upload.offset;
        callbacks.onProgress?.(uploadFile.id, currentOffset, source.size ?? 0);
        callbacks.onChunkComplete?.(currentOffset - offset, offset, source.size ?? 0);
        // Record detailed chunk metrics
        if (smartChunking?.enabled !== false) {
            const chunkIndex = Math.floor(offset / chunkSize);
            metrics.recordChunk({
                chunkIndex,
                size: chunkSize,
                duration,
                speed: chunkSize / (duration / 1000),
                success: true,
                retryCount: retryAttempt,
                networkCondition: smartChunker.getLastDecision()?.networkCondition?.type,
                chunkingStrategy: smartChunker.getLastDecision()?.strategy,
            });
            // Update smart chunker with connection metrics
            const connectionMetrics = uploadistaApi.getConnectionMetrics();
            smartChunker.updateConnectionMetrics(connectionMetrics);
        }
        // Check if upload is complete after uploading the chunk
        if (currentOffset >= (source.size ?? 0)) {
            if (source)
                source.close();
            // Complete metrics session
            if (smartChunking?.enabled !== false) {
                const sessionMetrics = metrics.endSession();
                if (sessionMetrics) {
                    logger.log(`Flow upload completed: ${sessionMetrics.totalSize} bytes in ${sessionMetrics.totalDuration}ms, avg speed: ${Math.round(sessionMetrics.averageSpeed / 1024)}KB/s`);
                }
            }
            // Upload is complete - finalize the flow
            logger.log(`Finalizing flow upload for job ${jobId}`);
            try {
                await uploadistaApi.continueFlow(jobId, inputNodeId, {
                    operation: "finalize",
                    uploadId: uploadFile.id,
                }, { contentType: "application/json" });
            }
            catch (err) {
                // Finalization errors should not trigger chunk retry logic
                const error = new UploadistaError({
                    name: "FLOW_FINALIZE_FAILED",
                    message: `Failed to finalize flow upload for job ${jobId}`,
                    cause: err,
                });
                callbacks.onError?.(error);
                throw error;
            }
            return;
        }
        // Continue uploading next chunk
        await performFlowUpload({
            jobId,
            uploadFile,
            inputNodeId,
            offset: currentOffset,
            source,
            platformService,
            retryDelays,
            smartChunker,
            uploadistaApi,
            logger,
            smartChunking,
            metrics,
            onRetry,
            abortController,
            ...callbacks,
        });
    }
    catch (err) {
        // Retry logic similar to single-upload
        if (retryDelays != null) {
            const shouldResetDelays = offset != null && currentOffset > offsetBeforeRetry;
            if (shouldResetDelays) {
                retryAttempt = 0;
            }
            const castedErr = !(err instanceof UploadistaError)
                ? new UploadistaError({
                    name: "NETWORK_ERROR",
                    message: "Network error during flow upload",
                    cause: err,
                })
                : err;
            if (shouldRetry(platformService, castedErr, retryAttempt, retryDelays, callbacks.onShouldRetry)) {
                const delay = retryDelays[retryAttempt];
                offsetBeforeRetry = offset;
                const timeout = platformService.setTimeout(async () => {
                    await performFlowUpload({
                        jobId,
                        uploadFile,
                        inputNodeId,
                        offset,
                        source,
                        retryAttempt: retryAttempt + 1,
                        retryDelays,
                        smartChunker,
                        uploadistaApi,
                        logger,
                        smartChunking,
                        metrics,
                        platformService,
                        onRetry,
                        abortController,
                        ...callbacks,
                    });
                }, delay);
                onRetry?.(timeout);
            }
            else {
                throw new UploadistaError({
                    name: "UPLOAD_CHUNK_FAILED",
                    message: `Failed to upload chunk for job ${jobId} at offset ${offset}`,
                    cause: err,
                });
            }
        }
    }
}
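
The two exports above form a two-phase client protocol: startFlowUpload runs the flow's init operation and polls until the streaming input node pauses with an upload file, and performFlowUpload then streams chunks straight to the upload API before finalizing through continueFlow. A minimal calling sketch under stated assumptions — the deps bundle (uploadistaApi, logger, platformService, smartChunker, metrics, and the WebSocket helpers) is assembled elsewhere in this package by createUploadistaClient, and the literal option values are illustrative, not defaults from this diff:

import { startFlowUpload, performFlowUpload } from "./upload/flow-upload";

async function uploadViaFlow(deps, source, flowConfig) {
    // Phase 1: init the flow; resolves once the input node pauses with an upload ID.
    const { jobId, uploadFile, inputNodeId } = await startFlowUpload({
        source,
        flowConfig, // { flowId, storageId, metadata? }
        ...deps,
        onJobStart: (id) => deps.logger.log(`flow job ${id} started`),
    });
    // Phase 2: stream chunks from offset 0, then finalize the flow.
    await performFlowUpload({
        jobId,
        uploadFile,
        inputNodeId,
        offset: 0,
        source,
        abortController: new AbortController(), // assumed to satisfy AbortControllerLike
        retryDelays: [0, 1000, 3000], // illustrative backoff schedule
        ...deps,
        onProgress: (_id, sent, total) => deps.logger.log(`${sent}/${total} bytes`),
    });
}
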
package/dist/upload/index.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../../src/upload/index.ts"],"names":[],"mappings":"AAAA,cAAc,kBAAkB,CAAC;AACjC,cAAc,gBAAgB,CAAC"}
package/dist/upload/parallel-upload.d.ts
@@ -0,0 +1,65 @@
import type { UploadistaApi } from "../client/uploadista-api";
import type { Logger } from "../logger";
import type { AbortControllerFactory, AbortControllerLike } from "../services/abort-controller-service";
import type { ChecksumService } from "../services/checksum-service";
import type { FileSource } from "../services/file-reader-service";
import type { IdGenerationService } from "../services/id-generation-service";
import type { PlatformService, Timeout } from "../services/platform-service";
import type { WebSocketLike } from "../services/websocket-service";
import type { SmartChunker, SmartChunkerConfig } from "../smart-chunker";
import type { ClientStorage } from "../storage/client-storage";
import { type Callbacks } from "./single-upload";
import type { UploadMetrics } from "./upload-metrics";
export type ParallelUploadSegment = {
    uploadId: string;
    uploadIdStorageKey: string | undefined;
    segmentIndex: number;
    startByte: number;
    endByte: number;
    offset: number;
    abortController: AbortControllerLike;
    retryTimeout: Timeout | null;
};
export type ParallelUploadState = {
    segments: ParallelUploadSegment[];
    totalProgress: number;
    completed: boolean;
    failed: boolean;
    error?: Error;
};
export type ParallelUploadResult = {
    parallelState: ParallelUploadState;
    abort: () => Promise<void>;
};
/**
 * Initiate the uploading procedure for a parallelized upload, where one file is split into
 * multiple requests which are run in parallel.
 */
export declare function startParallelUpload({ source, storageId, fingerprint, uploadLengthDeferred, parallelUploads, parallelChunkSize, retryDelays, smartChunker, uploadistaApi, logger, checksumService, smartChunking, metrics, clientStorage, generateId, storeFingerprintForResuming, openWebSocket, closeWebSocket, terminate, abortControllerFactory, platformService, ...callbacks }: {
    source: FileSource;
    storageId: string;
    fingerprint: string;
    uploadLengthDeferred: boolean | undefined;
    parallelUploads: number;
    parallelChunkSize?: number;
    retryDelays?: number[];
    smartChunker: SmartChunker;
    uploadistaApi: UploadistaApi;
    checksumService: ChecksumService;
    logger: Logger;
    smartChunking?: SmartChunkerConfig;
    metrics: UploadMetrics;
    clientStorage: ClientStorage;
    generateId: IdGenerationService;
    storeFingerprintForResuming: boolean;
    openWebSocket: (uploadId: string) => WebSocketLike;
    closeWebSocket: (uploadId: string) => void;
    terminate: (uploadId: string) => Promise<void>;
    abortControllerFactory: AbortControllerFactory;
    platformService: PlatformService;
} & Callbacks): Promise<ParallelUploadResult | undefined>;
/**
 * Abort a parallel upload by cleaning up all segments
 */
export declare function abortParallelUpload(state: ParallelUploadState, logger: Logger, terminate: (uploadId: string) => Promise<void>, closeWebSocket: (uploadId: string) => void, platformService: PlatformService): Promise<void>;
//# sourceMappingURL=parallel-upload.d.ts.map
package/dist/upload/parallel-upload.d.ts.map
@@ -0,0 +1 @@
{"version":3,"file":"parallel-upload.d.ts","sourceRoot":"","sources":["../../src/upload/parallel-upload.ts"],"names":[],"mappings":"AACA,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,0BAA0B,CAAC;AAE9D,OAAO,KAAK,EAAE,MAAM,EAAE,MAAM,WAAW,CAAC;AACxC,OAAO,KAAK,EACV,sBAAsB,EACtB,mBAAmB,EACpB,MAAM,sCAAsC,CAAC;AAC9C,OAAO,KAAK,EAAE,eAAe,EAAE,MAAM,8BAA8B,CAAC;AACpE,OAAO,KAAK,EAAE,UAAU,EAAE,MAAM,iCAAiC,CAAC;AAClE,OAAO,KAAK,EAAE,mBAAmB,EAAE,MAAM,mCAAmC,CAAC;AAC7E,OAAO,KAAK,EAAE,eAAe,EAAE,OAAO,EAAE,MAAM,8BAA8B,CAAC;AAC7E,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,+BAA+B,CAAC;AACnE,OAAO,KAAK,EAAE,YAAY,EAAE,kBAAkB,EAAE,MAAM,kBAAkB,CAAC;AACzE,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,2BAA2B,CAAC;AAC/D,OAAO,EAAE,KAAK,SAAS,EAA+B,MAAM,iBAAiB,CAAC;AAC9E,OAAO,KAAK,EAAE,aAAa,EAAE,MAAM,kBAAkB,CAAC;AAGtD,MAAM,MAAM,qBAAqB,GAAG;IAClC,QAAQ,EAAE,MAAM,CAAC;IACjB,kBAAkB,EAAE,MAAM,GAAG,SAAS,CAAC;IACvC,YAAY,EAAE,MAAM,CAAC;IACrB,SAAS,EAAE,MAAM,CAAC;IAClB,OAAO,EAAE,MAAM,CAAC;IAChB,MAAM,EAAE,MAAM,CAAC;IACf,eAAe,EAAE,mBAAmB,CAAC;IACrC,YAAY,EAAE,OAAO,GAAG,IAAI,CAAC;CAC9B,CAAC;AAEF,MAAM,MAAM,mBAAmB,GAAG;IAChC,QAAQ,EAAE,qBAAqB,EAAE,CAAC;IAClC,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,OAAO,CAAC;IACnB,MAAM,EAAE,OAAO,CAAC;IAChB,KAAK,CAAC,EAAE,KAAK,CAAC;CACf,CAAC;AAEF,MAAM,MAAM,oBAAoB,GAAG;IACjC,aAAa,EAAE,mBAAmB,CAAC;IACnC,KAAK,EAAE,MAAM,OAAO,CAAC,IAAI,CAAC,CAAC;CAC5B,CAAC;AAEF;;;GAGG;AACH,wBAAsB,mBAAmB,CAAC,EACxC,MAAM,EACN,SAAS,EACT,WAAW,EACX,oBAAoB,EACpB,eAAe,EACf,iBAAiB,EACjB,WAAW,EACX,YAAY,EACZ,aAAa,EACb,MAAM,EACN,eAAe,EACf,aAAa,EACb,OAAO,EACP,aAAa,EACb,UAAU,EACV,2BAA2B,EAC3B,aAAa,EACb,cAAc,EACd,SAAS,EACT,sBAAsB,EACtB,eAAe,EACf,GAAG,SAAS,EACb,EAAE;IACD,MAAM,EAAE,UAAU,CAAC;IACnB,SAAS,EAAE,MAAM,CAAC;IAClB,WAAW,EAAE,MAAM,CAAC;IACpB,oBAAoB,EAAE,OAAO,GAAG,SAAS,CAAC;IAC1C,eAAe,EAAE,MAAM,CAAC;IACxB,iBAAiB,CAAC,EAAE,MAAM,CAAC;IAC3B,WAAW,CAAC,EAAE,MAAM,EAAE,CAAC;IACvB,YAAY,EAAE,YAAY,CAAC;IAC3B,aAAa,EAAE,aAAa,CAAC;IAC7B,eAAe,EAAE,eAAe,CAAC;IACjC,MAAM,EAAE,MAAM,CAAC;IACf,aAAa,CAAC,EAAE,kBAAkB,CAAC;IACnC,OAAO,EAAE,aAAa,CAAC;IACvB,aAAa,EAAE,aAAa,CAAC;IAC7B,UAAU,EAAE,mBAAmB,CAAC;IAChC,2BAA2B,EAAE,OAAO,CAAC;IACrC,aAAa,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,aAAa,CAAC;IACnD,cAAc,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,CAAC;IAC3C,SAAS,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;IAC/C,sBAAsB,EAAE,sBAAsB,CAAC;IAC/C,eAAe,EAAE,eAAe,CAAC;CAClC,GAAG,SAAS,GAAG,OAAO,CAAC,oBAAoB,GAAG,SAAS,CAAC,CA+PxD;AAED;;GAEG;AACH,wBAAsB,mBAAmB,CACvC,KAAK,EAAE,mBAAmB,EAC1B,MAAM,EAAE,MAAM,EACd,SAAS,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,OAAO,CAAC,IAAI,CAAC,EAC9C,cAAc,EAAE,CAAC,QAAQ,EAAE,MAAM,KAAK,IAAI,EAC1C,eAAe,EAAE,eAAe,GAC/B,OAAO,CAAC,IAAI,CAAC,CA4Bf"}
package/dist/upload/parallel-upload.js
@@ -0,0 +1,231 @@
import { UploadistaError } from "../error";
import { createUpload, performUpload } from "./single-upload";
import { calculateSegments } from "./upload-utils";
/**
 * Initiate the uploading procedure for a parallelized upload, where one file is split into
 * multiple requests which are run in parallel.
 */
export async function startParallelUpload({ source, storageId, fingerprint, uploadLengthDeferred, parallelUploads, parallelChunkSize, retryDelays, smartChunker, uploadistaApi, logger, checksumService, smartChunking, metrics, clientStorage, generateId, storeFingerprintForResuming, openWebSocket, closeWebSocket, terminate, abortControllerFactory, platformService, ...callbacks }) {
    if (!source.size || source.size === 0) {
        callbacks.onError?.(new UploadistaError({
            name: "UPLOAD_SIZE_NOT_SPECIFIED",
            message: "Parallel upload requires a known file size",
        }));
        return;
    }
    // Calculate segments for parallel upload
    const segments = calculateSegments(source.size, parallelUploads, parallelChunkSize);
    logger.log(`Starting parallel upload with ${segments.length} segments`);
    // Initialize parallel upload state
    const parallelState = {
        segments: [],
        totalProgress: 0,
        completed: false,
        failed: false,
    };
    // Progress tracking for aggregation
    const segmentProgress = new Map();
    const segmentTotals = new Map();
    const updateTotalProgress = () => {
        const totalBytes = Array.from(segmentTotals.values()).reduce((sum, size) => sum + size, 0);
        const progressBytes = Array.from(segmentProgress.values()).reduce((sum, progress) => sum + progress, 0);
        parallelState.totalProgress =
            totalBytes > 0 ? progressBytes / totalBytes : 0;
        // Aggregate progress callback
        if (callbacks.onProgress && totalBytes > 0) {
            callbacks.onProgress(`parallel-upload`, progressBytes, totalBytes);
        }
    };
    try {
        // Create upload sessions for each segment
        const segmentUploads = await Promise.all(segments.map(async (segment) => {
            // Create a segmented source for this chunk
            const segmentSource = {
                ...source,
                size: segment.endByte - segment.startByte,
                async slice(start, end) {
                    // Adjust slice to segment boundaries
                    const actualStart = segment.startByte + (start ?? 0);
                    const actualEnd = Math.min(segment.startByte + (end ?? segment.endByte - segment.startByte), segment.endByte);
                    return await source.slice(actualStart, actualEnd);
                },
            };
            const createResult = await createUpload({
                fingerprint: `${fingerprint}-segment-${segment.segmentIndex}`,
                storageId,
                source: segmentSource,
                uploadLengthDeferred,
                platformService,
                metadata: {
                    parallelUpload: "true",
                    segmentIndex: segment.segmentIndex.toString(),
                    totalSegments: segments.length.toString(),
                    parentFingerprint: fingerprint,
                },
                checksumService,
                uploadistaApi,
                logger,
                clientStorage,
                generateId,
                storeFingerprintForResuming,
                openWebSocket,
                closeWebSocket,
                onSuccess: () => { },
                onError: (error) => logger.log(`Segment ${segment.segmentIndex} creation error: ${error}`),
                onStart: (info) => {
                    segmentTotals.set(segment.segmentIndex, info.size ?? 0);
                    updateTotalProgress();
                },
            });
            if (!createResult) {
                throw new UploadistaError({
                    name: "PARALLEL_SEGMENT_CREATION_FAILED",
                    message: `Failed to create upload segment ${segment.segmentIndex}`,
                });
            }
            const parallelSegment = {
                uploadId: createResult.uploadId,
                uploadIdStorageKey: createResult.uploadIdStorageKey,
                segmentIndex: segment.segmentIndex,
                startByte: segment.startByte,
                endByte: segment.endByte,
                offset: createResult.offset,
                abortController: abortControllerFactory.create(),
                retryTimeout: null,
            };
            return {
                segment: parallelSegment,
                source: segmentSource,
            };
        }));
        // Store segments in state
        parallelState.segments = segmentUploads.map((upload) => upload.segment);
        // Notify start with combined upload info
        callbacks.onStart?.({
            uploadId: `parallel-${parallelState.segments.map((s) => s.uploadId).join(",")}`,
            size: source.size,
        });
        // Start parallel upload for each segment
        const uploadPromises = segmentUploads.map(async ({ segment, source: segmentSource }) => {
            try {
                await performUpload({
                    uploadId: segment.uploadId,
                    offset: segment.offset,
                    source: segmentSource,
                    uploadLengthDeferred,
                    abortController: segment.abortController,
                    retryDelays,
                    smartChunker,
                    uploadistaApi,
                    platformService,
                    logger,
                    smartChunking,
                    metrics,
                    onProgress: (_, bytes, total) => {
                        segmentProgress.set(segment.segmentIndex, bytes);
                        if (total)
                            segmentTotals.set(segment.segmentIndex, total);
                        updateTotalProgress();
                    },
                    onChunkComplete: (chunkSize, bytesAccepted, bytesTotal) => {
                        if (callbacks.onChunkComplete) {
                            callbacks.onChunkComplete(chunkSize, bytesAccepted, bytesTotal);
                        }
                    },
                    onSuccess: (_uploadFile) => {
                        logger.log(`Segment ${segment.segmentIndex} completed successfully`);
                        // Mark this segment as completed
                        segmentProgress.set(segment.segmentIndex, segmentTotals.get(segment.segmentIndex) ?? 0);
                        updateTotalProgress();
                    },
                    onShouldRetry: (error, retryAttempt) => {
                        logger.log(`Segment ${segment.segmentIndex} retry attempt ${retryAttempt}: ${error}`);
                        return retryAttempt < (retryDelays?.length ?? 0);
                    },
                    onRetry: (timeout) => {
                        segment.retryTimeout = timeout;
                    },
                    onError: (error) => {
                        logger.log(`Segment ${segment.segmentIndex} failed: ${error}`);
                        throw error;
                    },
                });
            }
            catch (error) {
                logger.log(`Segment ${segment.segmentIndex} upload failed: ${error}`);
                throw new UploadistaError({
                    name: "PARALLEL_SEGMENT_UPLOAD_FAILED",
                    message: `Segment ${segment.segmentIndex} upload failed`,
                    cause: error,
                });
            }
        });
        // Wait for all segments to complete
        await Promise.all(uploadPromises);
        // Mark as completed
        parallelState.completed = true;
        logger.log("All parallel upload segments completed successfully");
        // Call success callback with aggregated result
        if (callbacks.onSuccess) {
            const aggregatedResult = {
                id: `parallel-${parallelState.segments.map((s) => s.uploadId).join(",")}`,
                offset: source.size,
                size: source.size,
                storage: {
                    id: storageId,
                    type: "parallel-upload",
                },
                metadata: {
                    parallelUpload: "true",
                    totalSegments: segments.length.toString(),
                    fingerprint,
                },
            };
            callbacks.onSuccess(aggregatedResult);
        }
        // Close all sources
        for (const upload of segmentUploads) {
            upload.source.close?.();
        }
        return {
            parallelState,
            abort: async () => {
                await abortParallelUpload(parallelState, logger, terminate, closeWebSocket, platformService);
            },
        };
    }
    catch (error) {
        parallelState.failed = true;
        parallelState.error = error;
        // Clean up any created segments
        await abortParallelUpload(parallelState, logger, terminate, closeWebSocket, platformService);
        callbacks.onError?.(error);
        throw error;
    }
}
/**
 * Abort a parallel upload by cleaning up all segments
 */
export async function abortParallelUpload(state, logger, terminate, closeWebSocket, platformService) {
    logger.log("Aborting parallel upload...");
    // Abort all segment controllers
    for (const segment of state.segments) {
        segment.abortController.abort();
        if (segment.retryTimeout) {
            platformService.clearTimeout(segment.retryTimeout);
            segment.retryTimeout = null;
        }
        // Attempt to terminate the upload on the server
        try {
            await terminate(segment.uploadId);
        }
        catch (error) {
            logger.log(`Failed to terminate segment ${segment.segmentIndex}: ${error}`);
        }
        // Close websockets
        closeWebSocket(segment.uploadId);
    }
    state.completed = false;
    state.failed = true;
    logger.log("Parallel upload aborted");
}
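
Segment progress is aggregated as a plain byte ratio: updateTotalProgress sums the per-segment maps and sets totalProgress = progressBytes / totalBytes, so a 100 MB file split into four 25 MB segments reports 0.25 once any combination of segments has moved 25 MB in total. A hedged usage sketch of the fan-out, assuming the same externally assembled deps bundle as before (only the option names come from the declarations earlier in this diff; the literal values are illustrative):

const result = await startParallelUpload({
    source,
    storageId: "default",           // illustrative storage id
    fingerprint: "file-abc123",     // per-file key; segments derive "<fingerprint>-segment-<n>"
    uploadLengthDeferred: false,    // parallel uploads require a known size up front
    parallelUploads: 4,             // one upload session per segment
    retryDelays: [0, 1000, 3000],
    storeFingerprintForResuming: true,
    ...deps,
    onProgress: (_id, bytes, total) => deps.logger.log(`progress ${(bytes / total).toFixed(2)}`),
});
// The returned handle exposes aggregate state plus a cleanup hook that aborts
// every segment controller, terminates the server-side uploads, and closes
// their WebSockets.
if (result && !result.parallelState.completed) {
    await result.abort();
}
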
package/dist/upload/single-upload.d.ts
@@ -0,0 +1,118 @@
import type { UploadFile } from "@uploadista/core/types";
import type { UploadistaApi } from "../client/uploadista-api";
import { UploadistaError } from "../error";
import type { Logger } from "../logger";
import type { AbortControllerLike } from "../services/abort-controller-service";
import type { ChecksumService } from "../services/checksum-service";
import type { FileSource } from "../services/file-reader-service";
import type { IdGenerationService } from "../services/id-generation-service";
import type { PlatformService, Timeout } from "../services/platform-service";
import type { WebSocketLike } from "../services/websocket-service";
import type { SmartChunker, SmartChunkerConfig } from "../smart-chunker";
import type { ClientStorage } from "../storage/client-storage";
import { type OnProgress, type OnShouldRetry } from "./chunk-upload";
import type { UploadMetrics } from "./upload-metrics";
export type Callbacks = {
    onProgress?: OnProgress;
    onChunkComplete?: (chunkSize: number, bytesAccepted: number, bytesTotal: number | null) => void;
    onSuccess?: (payload: UploadFile) => void;
    onError?: (error: Error | UploadistaError) => void;
    onStart?: (file: {
        uploadId: string;
        size: number | null;
    }) => void;
    onJobStart?: (jobId: string) => void;
    onShouldRetry?: OnShouldRetry;
};
export type SingleUploadResult = {
    uploadIdStorageKey: string | undefined;
    uploadId: string;
    offset: number;
};
/**
 * Start uploading the file using PATCH requests. The file will be divided
 * into chunks as specified in the chunkSize option. During the upload
 * the onProgress event handler may be invoked multiple times.
 */
export declare function performUpload({ uploadId, offset, source, uploadLengthDeferred, retryAttempt, abortController, retryDelays, smartChunker, uploadistaApi, logger, smartChunking, metrics, platformService, onRetry, ...callbacks }: {
    uploadId: string;
    offset: number;
    retryAttempt?: number;
    source: FileSource;
    abortController: AbortControllerLike;
    uploadLengthDeferred: boolean | undefined;
    retryDelays: number[] | undefined;
    smartChunker: SmartChunker;
    uploadistaApi: UploadistaApi;
    logger: Logger;
    smartChunking?: SmartChunkerConfig;
    metrics: UploadMetrics;
    platformService: PlatformService;
    onRetry?: (timeout: Timeout) => void;
} & Callbacks): Promise<void>;
/**
 * Create a new upload using the creation extension by sending a POST
 * request to the endpoint. After successful creation the file will be
 * uploaded.
 */
export declare function createUpload({ fingerprint, storageId, source, uploadLengthDeferred, metadata, uploadistaApi, logger, checksumService, clientStorage, generateId, storeFingerprintForResuming, openWebSocket, closeWebSocket, computeChecksum, checksumAlgorithm, platformService, ...callbacks }: {
    fingerprint: string;
    storageId: string;
    source: FileSource;
    uploadLengthDeferred: boolean | undefined;
    metadata: Record<string, string>;
    uploadistaApi: UploadistaApi;
    logger: Logger;
    clientStorage: ClientStorage;
    generateId: IdGenerationService;
    storeFingerprintForResuming: boolean;
    openWebSocket: (uploadId: string) => WebSocketLike;
    closeWebSocket: (uploadId: string) => void;
    checksumService: ChecksumService;
    computeChecksum?: boolean;
    checksumAlgorithm?: string;
    platformService: PlatformService;
} & Callbacks): Promise<SingleUploadResult | undefined>;
/**
 * Try to resume an existing upload. First a HEAD request will be sent
 * to retrieve the offset. If the request fails a new upload will be
 * created. In the case of a successful response the file will be uploaded.
 */
export declare function resumeUpload({ uploadId, storageId, uploadIdStorageKey, fingerprint, source, uploadLengthDeferred, uploadistaApi, logger, platformService, checksumService, clientStorage, generateId, storeFingerprintForResuming, openWebSocket, ...callbacks }: {
    uploadId: string;
    storageId: string;
    uploadIdStorageKey: string;
    fingerprint: string;
    platformService: PlatformService;
    source: FileSource;
    uploadLengthDeferred: boolean | undefined;
    uploadistaApi: UploadistaApi;
    checksumService: ChecksumService;
    logger: Logger;
    clientStorage: ClientStorage;
    generateId: IdGenerationService;
    storeFingerprintForResuming: boolean;
    openWebSocket: (uploadId: string) => WebSocketLike;
} & Callbacks): Promise<SingleUploadResult | undefined>;
/**
 * Initiate the uploading procedure for a non-parallel upload. Here the entire file is
 * uploaded in a sequential manner.
 */
export declare function startSingleUpload({ source, uploadId, uploadIdStorageKey, storageId, fingerprint, platformService, uploadLengthDeferred, uploadistaApi, checksumService, logger, clientStorage, generateId, storeFingerprintForResuming, openWebSocket, closeWebSocket, ...callbacks }: {
    source: FileSource;
    uploadId: string | null;
    uploadIdStorageKey: string | null;
    storageId: string;
    fingerprint: string;
    platformService: PlatformService;
    uploadLengthDeferred: boolean | undefined;
    uploadistaApi: UploadistaApi;
    checksumService: ChecksumService;
    logger: Logger;
    clientStorage: ClientStorage;
    generateId: IdGenerationService;
    storeFingerprintForResuming: boolean;
    openWebSocket: (uploadId: string) => WebSocketLike;
    closeWebSocket: (uploadId: string) => void;
} & Callbacks): Promise<SingleUploadResult | undefined>;
//# sourceMappingURL=single-upload.d.ts.map
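
Judging from the declarations above, startSingleUpload accepts both a fresh and a resumed session: uploadId is typed string | null, and the doc comments describe a createUpload path (POST) alongside resumeUpload's HEAD-based offset recovery. A sketch under stated assumptions — the create-vs-resume dispatch on null is an inference from the types, deps is the hypothetical services bundle, and the literal values are illustrative:

const result = await startSingleUpload({
    source,
    uploadId: null,            // presumably null for a fresh upload; pass a stored id to resume
    uploadIdStorageKey: null,
    storageId: "default",
    fingerprint: "file-abc123",
    uploadLengthDeferred: source.size == null,
    storeFingerprintForResuming: true, // persist the id so a later session can resume
    ...deps,
    onStart: ({ uploadId, size }) => deps.logger.log(`upload ${uploadId}, ${size ?? "?"} bytes`),
    onProgress: (_id, sent, total) => deps.logger.log(`${sent}/${total ?? "?"}`),
    onSuccess: (file) => deps.logger.log(`completed: ${file.id}`),
});
// SingleUploadResult carries what a caller would need to resume later.
if (result) {
    const { uploadId, uploadIdStorageKey, offset } = result;
}
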