@uploadista/flow-documents-nodes 0.0.16-beta.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.turbo/turbo-build.log +16 -0
- package/LICENSE +21 -0
- package/README.md +57 -0
- package/dist/index.d.mts +1177 -0
- package/dist/index.d.mts.map +1 -0
- package/dist/index.mjs +396 -0
- package/dist/index.mjs.map +1 -0
- package/package.json +32 -0
- package/src/convert-to-markdown-node.ts +156 -0
- package/src/describe-document-node.ts +92 -0
- package/src/extract-text-node.ts +90 -0
- package/src/index.ts +27 -0
- package/src/merge-pdf-node.ts +144 -0
- package/src/ocr-node.ts +111 -0
- package/src/split-pdf-node.ts +176 -0
- package/tsconfig.json +14 -0
package/dist/index.d.mts.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.d.mts","names":[],"sources":["../src/convert-to-markdown-node.ts","../src/describe-document-node.ts","../src/extract-text-node.ts","../src/merge-pdf-node.ts","../src/ocr-node.ts","../src/split-pdf-node.ts"],"sourcesContent":[],"mappings":"(machine-generated VLQ mappings omitted)"}

package/dist/index.mjs
ADDED
@@ -0,0 +1,396 @@
import { UploadistaError } from "@uploadista/core/errors";
import { DocumentAiPlugin, DocumentPlugin, NodeType, completeNodeExecution, createFlowNode, resolveUploadMetadata } from "@uploadista/core/flow";
import { uploadFileSchema } from "@uploadista/core/types";
import { UploadServer } from "@uploadista/core/upload";
import { Effect, Either } from "effect";
import { z } from "zod";

//#region src/convert-to-markdown-node.ts
function createConvertToMarkdownNode(id, params = {}) {
	return Effect.gen(function* () {
		const documentService = yield* DocumentPlugin;
		const documentAiService = yield* DocumentAiPlugin;
		const uploadServer = yield* UploadServer;
		return yield* createFlowNode({
			id,
			name: "Convert to Markdown",
			description: "Convert documents to Markdown format (intelligently uses OCR if needed)",
			type: NodeType.process,
			inputSchema: uploadFileSchema,
			outputSchema: uploadFileSchema,
			run: ({ data: file, flowId, jobId, clientId }) => {
				return Effect.gen(function* () {
					const flow = {
						flowId,
						nodeId: id,
						jobId
					};
					yield* Effect.logInfo(`Converting file ${file.id} to Markdown`);
					const fileBytes = yield* uploadServer.read(file.id, clientId);
					const extractResult = yield* documentService.extractText(fileBytes).pipe(Effect.either);
					let markdown;
					let markdownSource;
					if (Either.isRight(extractResult) && extractResult.right.trim().length > 0) {
						const text = extractResult.right;
						yield* Effect.logInfo(`Successfully extracted ${text.length} characters from searchable PDF`);
						markdown = text.split("\n\n").map((para) => para.trim()).filter((para) => para.length > 0).join("\n\n");
						markdownSource = "text";
						yield* Effect.logInfo(`Converted text to Markdown (${markdown.length} characters)`);
					} else {
						yield* Effect.logInfo("Text extraction failed or returned empty, falling back to OCR");
						const fileUrl = file.url;
						if (!fileUrl) return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: "URL is required for OCR-based markdown conversion" }).toEffect();
						const context = {
							clientId,
							credentialId: params.credentialId
						};
						markdown = (yield* documentAiService.performOCR(fileUrl, {
							taskType: "convertToMarkdown",
							resolution: params.resolution || "gundam"
						}, context).pipe(Effect.catchAll((error) => Effect.gen(function* () {
							yield* Effect.logError("Failed to perform OCR", error);
							return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: error instanceof Error ? error.message : "Failed to perform OCR for markdown conversion" }).toEffect();
						})))).extractedText;
						markdownSource = "ocr";
						yield* Effect.logInfo(`Successfully converted scanned document to Markdown using OCR (${markdown.length} characters)`);
					}
					const { metadata } = resolveUploadMetadata(file.metadata);
					const newMetadata = {
						...file.metadata,
						...metadata,
						markdown,
						markdownSource
					};
					yield* Effect.logInfo(`Successfully converted file ${file.id} to Markdown via ${markdownSource}`);
					return completeNodeExecution({
						...file,
						metadata: newMetadata,
						flow
					});
				});
			}
		});
	});
}

//#endregion
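// Sketch of the Effect.either pattern used above, in isolation (plain effect
// v3 APIs; a standalone illustration, not part of the package source):
//
//   import { Effect, Either } from "effect";
//
//   // Effect.either moves the error channel into the success value, so the
//   // generator can branch on a failure instead of failing the whole fiber.
//   const safe = Effect.fail(new Error("no text layer")).pipe(Effect.either);
//   const program = Effect.gen(function* () {
//     const result = yield* safe;
//     return Either.isRight(result) ? result.right : "fall back to OCR";
//   });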
//#region src/describe-document-node.ts
function createDescribeDocumentNode(id, _params = {}) {
	return Effect.gen(function* () {
		const documentService = yield* DocumentPlugin;
		const uploadServer = yield* UploadServer;
		return yield* createFlowNode({
			id,
			name: "Describe Document",
			description: "Extract metadata from PDF documents",
			type: NodeType.process,
			inputSchema: uploadFileSchema,
			outputSchema: uploadFileSchema,
			run: ({ data: file, flowId, jobId, clientId }) => {
				return Effect.gen(function* () {
					const flow = {
						flowId,
						nodeId: id,
						jobId
					};
					yield* Effect.logInfo(`Extracting metadata from PDF file ${file.id}`);
					const fileBytes = yield* uploadServer.read(file.id, clientId);
					const documentMetadata = yield* documentService.getMetadata(fileBytes).pipe(Effect.catchAll((error) => Effect.gen(function* () {
						yield* Effect.logError("Failed to extract metadata", error);
						return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: error instanceof Error ? error.message : "Failed to extract metadata" }).toEffect();
					})));
					const { metadata } = resolveUploadMetadata(file.metadata);
					const newMetadata = {
						...file.metadata,
						...metadata,
						pageCount: documentMetadata.pageCount,
						format: documentMetadata.format,
						...documentMetadata.author && { author: documentMetadata.author },
						...documentMetadata.title && { title: documentMetadata.title },
						...documentMetadata.subject && { subject: documentMetadata.subject },
						...documentMetadata.creator && { creator: documentMetadata.creator },
						...documentMetadata.creationDate && { creationDate: documentMetadata.creationDate },
						...documentMetadata.modifiedDate && { modifiedDate: documentMetadata.modifiedDate },
						fileSize: documentMetadata.fileSize
					};
					yield* Effect.logInfo(`Successfully extracted metadata from file ${file.id}: ${documentMetadata.pageCount} pages`);
					return completeNodeExecution({
						...file,
						metadata: newMetadata,
						flow
					});
				});
			}
		});
	});
}

//#endregion
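// Sketch of the conditional-spread idiom in newMetadata above (standalone
// illustration): spreading a falsy value ("", undefined, null, false) into an
// object literal contributes no properties, so optional PDF fields are only
// added when actually present.
//
//   const author = "";
//   const title = "Quarterly Report";
//   const meta = { pageCount: 3, ...(author && { author }), ...(title && { title }) };
//   // -> { pageCount: 3, title: "Quarterly Report" }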
//#region src/extract-text-node.ts
function createExtractTextNode(id, _params = {}) {
	return Effect.gen(function* () {
		const documentService = yield* DocumentPlugin;
		const uploadServer = yield* UploadServer;
		return yield* createFlowNode({
			id,
			name: "Extract Text",
			description: "Extract text from searchable PDF documents",
			type: NodeType.process,
			inputSchema: uploadFileSchema,
			outputSchema: uploadFileSchema,
			run: ({ data: file, flowId, jobId, clientId }) => {
				return Effect.gen(function* () {
					const flow = {
						flowId,
						nodeId: id,
						jobId
					};
					yield* Effect.logInfo(`Extracting text from PDF file ${file.id}`);
					const fileBytes = yield* uploadServer.read(file.id, clientId);
					const extractedText = yield* documentService.extractText(fileBytes).pipe(Effect.catchAll((error) => Effect.gen(function* () {
						yield* Effect.logError("Failed to extract text", error);
						return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: error instanceof Error ? error.message : "Failed to extract text" }).toEffect();
					})));
					const { metadata } = resolveUploadMetadata(file.metadata);
					const newMetadata = {
						...file.metadata,
						...metadata,
						extractedText
					};
					if (!extractedText || extractedText.trim().length === 0) yield* Effect.logWarning(`No text extracted from file ${file.id}. This might be a scanned document. Consider using the OCR node instead.`);
					else yield* Effect.logInfo(`Successfully extracted ${extractedText.length} characters from file ${file.id}`);
					return completeNodeExecution({
						...file,
						metadata: newMetadata,
						flow
					});
				});
			}
		});
	});
}

//#endregion
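// Sketch of consuming one of these factories (hypothetical wiring: the
// DocumentPlugin and UploadServer services must be provided by the host
// application's layers; only the factory signature is taken from this file):
//
//   import { Effect } from "effect";
//   import { createExtractTextNode } from "@uploadista/flow-documents-nodes";
//
//   const makeNode = Effect.gen(function* () {
//     const node = yield* createExtractTextNode("extract-text-1");
//     return node; // register the node with a flow definition here
//   });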
//#region src/merge-pdf-node.ts
const multipleFilesSchema = z.array(uploadFileSchema);
function createMergePdfNode(id, _params = {}) {
	return Effect.gen(function* () {
		const documentService = yield* DocumentPlugin;
		const uploadServer = yield* UploadServer;
		return yield* createFlowNode({
			id,
			name: "Merge PDFs",
			description: "Merge multiple PDF documents into one",
			type: NodeType.process,
			inputSchema: multipleFilesSchema,
			outputSchema: uploadFileSchema,
			run: ({ data: files, flowId, jobId, clientId }) => {
				return Effect.gen(function* () {
					const flow = {
						flowId,
						nodeId: id,
						jobId
					};
					if (!Array.isArray(files)) return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: "Merge PDF node requires an array of files from a Merge utility node" }).toEffect();
					if (files.length === 0) return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: "At least one PDF file is required for merging" }).toEffect();
					const pdfBuffers = [];
					let totalPages = 0;
					for (const file of files) {
						const fileBytes = yield* uploadServer.read(file.id, clientId);
						pdfBuffers.push(fileBytes);
						const fileMetadata = resolveUploadMetadata(file.metadata).metadata;
						if (fileMetadata?.pageCount && typeof fileMetadata.pageCount === "number") totalPages += fileMetadata.pageCount;
					}
					yield* Effect.logInfo(`Merging ${files.length} PDF files`);
					const mergedPdf = yield* documentService.mergePdfs({ pdfs: pdfBuffers }).pipe(Effect.catchAll((error) => Effect.gen(function* () {
						yield* Effect.logError("Failed to merge PDFs", error);
						return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: error instanceof Error ? error.message : "Failed to merge PDFs" }).toEffect();
					})));
					const firstFile = files[0];
					const { metadata } = resolveUploadMetadata(firstFile.metadata);
					const stream = new ReadableStream({ start(controller) {
						controller.enqueue(mergedPdf);
						controller.close();
					} });
					const result = yield* uploadServer.upload({
						storageId: firstFile.storage.id,
						size: mergedPdf.byteLength,
						type: "application/pdf",
						fileName: `merged-${files.length}-documents.pdf`,
						lastModified: 0,
						metadata: JSON.stringify({
							...metadata,
							pageCount: totalPages,
							mergedFrom: files.length
						}),
						flow
					}, clientId, stream);
					const newMetadata = {
						...metadata,
						pageCount: totalPages,
						mergedFrom: files.length,
						fileName: `merged-${files.length}-documents.pdf`
					};
					yield* Effect.logInfo(`Successfully merged ${files.length} PDFs into one document with ${totalPages} pages`);
					return completeNodeExecution({
						...result,
						metadata: newMetadata
					});
				});
			}
		});
	});
}

//#endregion
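// The one-chunk ReadableStream wrapper above recurs in every node that
// re-uploads bytes; factored out, the equivalent helper is (assumes a runtime
// with the web-standard ReadableStream global, e.g. Node 18+; standalone
// sketch, not part of the package source):
//
//   function bytesToStream(bytes) {
//     return new ReadableStream({
//       start(controller) {
//         controller.enqueue(bytes); // emit the whole buffer as one chunk
//         controller.close();        // then signal end-of-stream
//       },
//     });
//   }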
//#region src/ocr-node.ts
function createOcrNode(id, params) {
	return Effect.gen(function* () {
		const documentAiService = yield* DocumentAiPlugin;
		return yield* createFlowNode({
			id,
			name: "OCR",
			description: "Extract text from scanned documents using AI",
			type: NodeType.process,
			inputSchema: uploadFileSchema,
			outputSchema: uploadFileSchema,
			run: ({ data: file, flowId, jobId, clientId }) => {
				return Effect.gen(function* () {
					const flow = {
						flowId,
						nodeId: id,
						jobId
					};
					const fileUrl = file.url;
					if (!fileUrl) return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: "URL is required for OCR operation" }).toEffect();
					yield* Effect.logInfo(`Starting OCR for file ${file.id} with task type: ${params.taskType}`);
					const context = {
						clientId,
						credentialId: params.credentialId
					};
					const ocrResult = yield* documentAiService.performOCR(fileUrl, {
						taskType: params.taskType,
						resolution: params.resolution,
						referenceText: params.referenceText
					}, context).pipe(Effect.catchAll((error) => Effect.gen(function* () {
						yield* Effect.logError("Failed to perform OCR", error);
						return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: error instanceof Error ? error.message : "Failed to perform OCR" }).toEffect();
					})));
					const { metadata } = resolveUploadMetadata(file.metadata);
					const newMetadata = {
						...file.metadata,
						...metadata,
						ocrText: ocrResult.extractedText,
						ocrFormat: ocrResult.format,
						ocrTaskType: params.taskType
					};
					yield* Effect.logInfo(`Successfully completed OCR for file ${file.id}, extracted ${ocrResult.extractedText.length} characters`);
					return completeNodeExecution({
						...file,
						metadata: newMetadata,
						flow
					});
				});
			}
		});
	});
}

//#endregion
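// Unlike the other factories, createOcrNode has no default params; taskType is
// required. Sketch of an invocation (hypothetical id; the taskType/resolution
// values are the ones createConvertToMarkdownNode passes above, and the full
// OcrTaskType/OcrResolution unions live in @uploadista/core/flow):
//
//   const ocrNode = createOcrNode("ocr-1", {
//     taskType: "convertToMarkdown",
//     resolution: "gundam",
//   });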
//#region src/split-pdf-node.ts
function createSplitPdfNode(id, params) {
	return Effect.gen(function* () {
		const documentService = yield* DocumentPlugin;
		const uploadServer = yield* UploadServer;
		return yield* createFlowNode({
			id,
			name: "Split PDF",
			description: "Split PDF into pages or page ranges",
			type: NodeType.process,
			inputSchema: uploadFileSchema,
			outputSchema: uploadFileSchema,
			run: ({ data: file, flowId, jobId, clientId }) => {
				return Effect.gen(function* () {
					const flow = {
						flowId,
						nodeId: id,
						jobId
					};
					yield* Effect.logInfo(`Splitting PDF file ${file.id} in ${params.mode} mode`);
					const fileBytes = yield* uploadServer.read(file.id, clientId);
					const result = yield* documentService.splitPdf(fileBytes, params).pipe(Effect.catchAll((error) => Effect.gen(function* () {
						yield* Effect.logError("Failed to split PDF", error);
						return yield* UploadistaError.fromCode("FLOW_NODE_ERROR", { cause: error instanceof Error ? error.message : "Failed to split PDF" }).toEffect();
					})));
					const { metadata } = resolveUploadMetadata(file.metadata);
					if (result.mode === "individual") {
						yield* Effect.logInfo(`Successfully split PDF into ${result.pdfs.length} individual pages`);
						yield* Effect.logWarning("Individual page mode returns multiple files - flow engine support required");
						const pdfBytes$1 = result.pdfs[0];
						const stream$1 = new ReadableStream({ start(controller) {
							controller.enqueue(pdfBytes$1);
							controller.close();
						} });
						const uploadResult$1 = yield* uploadServer.upload({
							storageId: file.storage.id,
							size: pdfBytes$1.byteLength,
							type: "application/pdf",
							fileName: `${metadata?.fileName || "document"}-page-1.pdf`,
							lastModified: 0,
							metadata: JSON.stringify({
								...metadata,
								pageCount: 1,
								splitMode: "individual"
							}),
							flow
						}, clientId, stream$1);
						const newMetadata$1 = {
							...metadata,
							pageCount: 1,
							splitMode: "individual"
						};
						return completeNodeExecution({
							...uploadResult$1,
							metadata: newMetadata$1
						});
					}
					const pageCount = params.endPage && params.startPage ? params.endPage - params.startPage + 1 : 1;
					const pdfBytes = result.pdf;
					const stream = new ReadableStream({ start(controller) {
						controller.enqueue(pdfBytes);
						controller.close();
					} });
					const uploadResult = yield* uploadServer.upload({
						storageId: file.storage.id,
						size: pdfBytes.byteLength,
						type: "application/pdf",
						fileName: `${metadata?.fileName || "document"}-pages-${params.startPage}-${params.endPage}.pdf`,
						lastModified: 0,
						metadata: JSON.stringify({
							...metadata,
							pageCount,
							splitMode: "range",
							splitRange: `${params.startPage}-${params.endPage}`
						}),
						flow
					}, clientId, stream);
					const newMetadata = {
						...metadata,
						pageCount,
						splitMode: "range",
						splitRange: `${params.startPage}-${params.endPage}`
					};
					yield* Effect.logInfo(`Successfully split PDF to pages ${params.startPage}-${params.endPage}`);
					return completeNodeExecution({
						...uploadResult,
						metadata: newMetadata
					});
				});
			}
		});
	});
}

//#endregion
export { createConvertToMarkdownNode, createDescribeDocumentNode, createExtractTextNode, createMergePdfNode, createOcrNode, createSplitPdfNode };
//# sourceMappingURL=index.mjs.map
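A note on the range arithmetic in createSplitPdfNode: the page count is inclusive of both bounds (endPage - startPage + 1) and falls back to 1 when either bound is missing. A worked example, for instance splitting pages 3 through 7:

  const startPage = 3;
  const endPage = 7;
  const pageCount = endPage - startPage + 1; // 7 - 3 + 1 = 5 pages, inclusive
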
package/dist/index.mjs.map
ADDED

@@ -0,0 +1 @@
{"version":3,"file":"index.mjs","names":["markdown: string","markdownSource: \"text\" | \"ocr\"","pdfBuffers: Uint8Array[]","pdfBytes","stream","uploadResult","newMetadata"],"sources":["../src/convert-to-markdown-node.ts","../src/describe-document-node.ts","../src/extract-text-node.ts","../src/merge-pdf-node.ts","../src/ocr-node.ts","../src/split-pdf-node.ts"],"sourcesContent":[(original TypeScript sources omitted; they mirror the compiled output above, with type annotations and comments)],"mappings":"(machine-generated VLQ mappings omitted)"}

package/package.json
ADDED
@@ -0,0 +1,32 @@
{
  "name": "@uploadista/flow-documents-nodes",
  "type": "module",
  "version": "0.0.16-beta.2",
  "description": "Document processing nodes for Uploadista Flow",
  "license": "MIT",
  "author": "Uploadista",
  "exports": {
    ".": {
      "types": "./dist/index.d.mts",
      "import": "./dist/index.mjs",
      "require": "./dist/index.cjs",
      "default": "./dist/index.mjs"
    }
  },
  "dependencies": {
    "effect": "3.19.4",
    "zod": "4.1.12",
    "@uploadista/core": "0.0.16-beta.2"
  },
  "devDependencies": {
    "@types/node": "24.10.1",
    "tsdown": "0.16.5",
    "@uploadista/typescript-config": "0.0.16-beta.2"
  },
  "scripts": {
    "build": "tsdown",
    "format": "biome format --write ./src",
    "lint": "biome lint --write ./src",
    "check": "biome check --write ./src"
  }
}