@heripo/pdf-parser 0.1.4 → 0.1.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/chunk-WWNI354M.js +121 -0
- package/dist/chunk-WWNI354M.js.map +1 -0
- package/dist/index.cjs +315 -48
- package/dist/index.cjs.map +1 -1
- package/dist/index.d.cts +19 -5
- package/dist/index.d.ts +19 -5
- package/dist/index.js +195 -41
- package/dist/index.js.map +1 -1
- package/dist/vlm-models.cjs +147 -0
- package/dist/vlm-models.cjs.map +1 -0
- package/dist/vlm-models.d.cts +34 -0
- package/dist/vlm-models.d.ts +34 -0
- package/dist/vlm-models.js +12 -0
- package/dist/vlm-models.js.map +1 -0
- package/package.json +15 -9
|
@@ -0,0 +1,147 @@
|
|
|
1
|
+
"use strict";
// esbuild-generated CommonJS interop helpers. Do not hand-edit the logic:
// Node's named-export detection and ESM interop depend on these exact shapes.
var __defProp = Object.defineProperty;
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
var __getOwnPropNames = Object.getOwnPropertyNames;
var __hasOwnProp = Object.prototype.hasOwnProperty;
// Install every entry of `all` on `target` as an enumerable lazy getter,
// so exports stay "live" (re-reads pick up the current binding).
var __export = (target, all) => {
  for (var name in all)
    __defProp(target, name, { get: all[name], enumerable: true });
};
// Copy own properties of `from` onto `to` as getters, skipping keys `to`
// already has and the single `except` key; preserves each property's
// enumerability via the (reused) `desc` parameter. Returns `to`.
var __copyProps = (to, from, except, desc) => {
  if (from && typeof from === "object" || typeof from === "function") {
    for (let key of __getOwnPropNames(from))
      if (!__hasOwnProp.call(to, key) && key !== except)
        __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
  }
  return to;
};
// Wrap a module-exports object: tag it `__esModule` (for bundler/Babel
// interop) and copy the exports onto the tagged object.
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/config/vlm-models.ts
// Export wiring for this module: declare the export container, register the
// three public names as live getters, then publish via module.exports.
var vlm_models_exports = {};
__export(vlm_models_exports, {
  DEFAULT_VLM_MODEL: () => DEFAULT_VLM_MODEL,
  VLM_MODELS: () => VLM_MODELS,
  resolveVlmModel: () => resolveVlmModel
});
module.exports = __toCommonJS(vlm_models_exports);
|
|
28
|
+
// Available VLM model presets. Each entry carries the Hugging Face repo id,
// the inference framework ("mlx" for Apple Silicon, "transformers" for
// cross-platform), the output format ("doctags" or "markdown"), the
// transformers loader type, and a human-readable description.
// NOTE: key order is user-visible — it drives the "Available presets" list
// in resolveVlmModel's error message.
var VLM_MODELS = {
  // ── DocTags models (specialized document structure output) ──────────
  "granite-docling-258M-mlx": {
    repo_id: "ibm-granite/granite-docling-258M-mlx",
    inference_framework: "mlx",
    response_format: "doctags",
    transformers_model_type: "automodel-vision2seq",
    description: "Granite Docling 258M (MLX, Apple Silicon optimized, ~6s/page)"
  },
  "granite-docling-258M": {
    repo_id: "ibm-granite/granite-docling-258M",
    inference_framework: "transformers",
    response_format: "doctags",
    transformers_model_type: "automodel-vision2seq",
    description: "Granite Docling 258M (Transformers, cross-platform)"
  },
  "smoldocling-256M-mlx": {
    repo_id: "docling-project/SmolDocling-256M-preview-mlx-bf16",
    inference_framework: "mlx",
    response_format: "doctags",
    transformers_model_type: "automodel-vision2seq",
    description: "SmolDocling 256M (MLX, fastest option)"
  },
  "smoldocling-256M": {
    repo_id: "docling-project/SmolDocling-256M-preview",
    inference_framework: "transformers",
    response_format: "doctags",
    transformers_model_type: "automodel-vision2seq",
    description: "SmolDocling 256M (Transformers)"
  },
  // ── Markdown models (general-purpose vision LLMs) ──────────────────
  "granite-vision-2B": {
    repo_id: "ibm-granite/granite-vision-3.2-2b",
    inference_framework: "transformers",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Granite Vision 3.2 2B (IBM, higher accuracy)"
  },
  "qwen25-vl-3B-mlx": {
    repo_id: "mlx-community/Qwen2.5-VL-3B-Instruct-bf16",
    inference_framework: "mlx",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Qwen 2.5 VL 3B (MLX, multilingual, good KCJ support)"
  },
  phi4: {
    repo_id: "microsoft/Phi-4-multimodal-instruct",
    inference_framework: "transformers",
    response_format: "markdown",
    // Only preset using the plain "automodel" (CausalLM) loader.
    transformers_model_type: "automodel",
    description: "Phi-4 Multimodal (Microsoft, CausalLM)"
  },
  "pixtral-12B-mlx": {
    repo_id: "mlx-community/pixtral-12b-bf16",
    inference_framework: "mlx",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Pixtral 12B (MLX, Mistral, high accuracy)"
  },
  "pixtral-12B": {
    repo_id: "mistral-community/pixtral-12b",
    inference_framework: "transformers",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Pixtral 12B (Transformers, Mistral)"
  },
  got2: {
    repo_id: "stepfun-ai/GOT-OCR-2.0-hf",
    inference_framework: "transformers",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "GOT-OCR 2.0 (StepFun, OCR-specialized)"
  },
  "gemma3-12B-mlx": {
    repo_id: "mlx-community/gemma-3-12b-it-bf16",
    inference_framework: "mlx",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Gemma 3 12B (MLX, Google)"
  },
  "gemma3-27B-mlx": {
    repo_id: "mlx-community/gemma-3-27b-it-bf16",
    inference_framework: "mlx",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Gemma 3 27B (MLX, Google, highest accuracy)"
  },
  dolphin: {
    repo_id: "ByteDance/Dolphin",
    inference_framework: "transformers",
    response_format: "markdown",
    transformers_model_type: "automodel-vision2seq",
    description: "Dolphin (ByteDance, document-oriented)"
  }
};
// Default preset key used when the caller does not pick a model.
var DEFAULT_VLM_MODEL = "granite-docling-258M-mlx";
|
|
124
|
+
/**
 * Resolve a VLM model from a preset key or a custom model object.
 *
 * @param {string|object} model - Preset key into VLM_MODELS, or a fully
 *   specified model object that is returned unchanged.
 * @returns {object} Model config with repo_id, inference_framework,
 *   response_format and transformers_model_type. Optional fields are left
 *   unset so downstream defaults apply.
 * @throws {Error} If a string key does not match any preset.
 */
function resolveVlmModel(model) {
  // Custom model objects pass through untouched.
  if (typeof model !== "string") {
    return model;
  }
  const preset = VLM_MODELS[model];
  if (preset === undefined) {
    const available = Object.keys(VLM_MODELS).join(", ");
    throw new Error(
      `Unknown VLM model preset: "${model}". Available presets: ${available}`
    );
  }
  // Copy only the required fields; `description` is preset-only metadata.
  const { repo_id, inference_framework, response_format, transformers_model_type } = preset;
  return { repo_id, inference_framework, response_format, transformers_model_type };
}
|
|
141
|
+
// Annotate the CommonJS export names for ESM import in node:
// Dead code by design: `0 && (...)` never executes, but Node's
// cjs-module-lexer pattern-matches it statically to surface these
// named exports to ESM importers. Keep the shape exactly as emitted.
0 && (module.exports = {
  DEFAULT_VLM_MODEL,
  VLM_MODELS,
  resolveVlmModel
});
//# sourceMappingURL=vlm-models.cjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/config/vlm-models.ts"],"sourcesContent":["import type { VlmModelLocal } from 'docling-sdk';\n\n/**\n * VLM model preset with description\n */\nexport interface VlmModelPreset {\n repo_id: string;\n inference_framework: 'mlx' | 'transformers';\n response_format: 'doctags' | 'markdown';\n transformers_model_type: 'automodel-vision2seq' | 'automodel';\n description: string;\n}\n\n/**\n * Available VLM model presets\n *\n * Based on Docling's official VLM model specs:\n * https://docling-project.github.io/docling/usage/vision_models/#available-local-models\n *\n * Users can select a preset key or provide a custom VlmModelLocal object.\n */\nexport const VLM_MODELS: Record<string, VlmModelPreset> = {\n // ── DocTags models (specialized document structure output) ──────────\n\n 'granite-docling-258M-mlx': {\n repo_id: 'ibm-granite/granite-docling-258M-mlx',\n inference_framework: 'mlx',\n response_format: 'doctags',\n transformers_model_type: 'automodel-vision2seq',\n description:\n 'Granite Docling 258M (MLX, Apple Silicon optimized, ~6s/page)',\n },\n 'granite-docling-258M': {\n repo_id: 'ibm-granite/granite-docling-258M',\n inference_framework: 'transformers',\n response_format: 'doctags',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Granite Docling 258M (Transformers, cross-platform)',\n },\n 'smoldocling-256M-mlx': {\n repo_id: 'docling-project/SmolDocling-256M-preview-mlx-bf16',\n inference_framework: 'mlx',\n response_format: 'doctags',\n transformers_model_type: 'automodel-vision2seq',\n description: 'SmolDocling 256M (MLX, fastest option)',\n },\n 'smoldocling-256M': {\n repo_id: 'docling-project/SmolDocling-256M-preview',\n inference_framework: 'transformers',\n response_format: 'doctags',\n transformers_model_type: 'automodel-vision2seq',\n description: 'SmolDocling 256M (Transformers)',\n },\n\n // ── Markdown models (general-purpose vision LLMs) ──────────────────\n\n 'granite-vision-2B': {\n repo_id: 
'ibm-granite/granite-vision-3.2-2b',\n inference_framework: 'transformers',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Granite Vision 3.2 2B (IBM, higher accuracy)',\n },\n 'qwen25-vl-3B-mlx': {\n repo_id: 'mlx-community/Qwen2.5-VL-3B-Instruct-bf16',\n inference_framework: 'mlx',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Qwen 2.5 VL 3B (MLX, multilingual, good KCJ support)',\n },\n phi4: {\n repo_id: 'microsoft/Phi-4-multimodal-instruct',\n inference_framework: 'transformers',\n response_format: 'markdown',\n transformers_model_type: 'automodel',\n description: 'Phi-4 Multimodal (Microsoft, CausalLM)',\n },\n 'pixtral-12B-mlx': {\n repo_id: 'mlx-community/pixtral-12b-bf16',\n inference_framework: 'mlx',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Pixtral 12B (MLX, Mistral, high accuracy)',\n },\n 'pixtral-12B': {\n repo_id: 'mistral-community/pixtral-12b',\n inference_framework: 'transformers',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Pixtral 12B (Transformers, Mistral)',\n },\n got2: {\n repo_id: 'stepfun-ai/GOT-OCR-2.0-hf',\n inference_framework: 'transformers',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'GOT-OCR 2.0 (StepFun, OCR-specialized)',\n },\n 'gemma3-12B-mlx': {\n repo_id: 'mlx-community/gemma-3-12b-it-bf16',\n inference_framework: 'mlx',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Gemma 3 12B (MLX, Google)',\n },\n 'gemma3-27B-mlx': {\n repo_id: 'mlx-community/gemma-3-27b-it-bf16',\n inference_framework: 'mlx',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Gemma 3 27B (MLX, Google, highest accuracy)',\n },\n dolphin: {\n repo_id: 'ByteDance/Dolphin',\n inference_framework: 
'transformers',\n response_format: 'markdown',\n transformers_model_type: 'automodel-vision2seq',\n description: 'Dolphin (ByteDance, document-oriented)',\n },\n} as const;\n\n/**\n * Default VLM model preset key\n */\nexport const DEFAULT_VLM_MODEL = 'granite-docling-258M-mlx';\n\n/**\n * Resolve a VLM model from a preset key or custom VlmModelLocal object.\n *\n * When using a preset key, only required fields are populated.\n * Optional fields (prompt, scale, extra_generation_config) use Docling defaults.\n */\nexport function resolveVlmModel(model: string | VlmModelLocal): VlmModelLocal {\n if (typeof model === 'string') {\n const preset = VLM_MODELS[model];\n if (!preset) {\n throw new Error(\n `Unknown VLM model preset: \"${model}\". Available presets: ${Object.keys(VLM_MODELS).join(', ')}`,\n );\n }\n return {\n repo_id: preset.repo_id,\n inference_framework: preset.inference_framework,\n response_format: preset.response_format,\n transformers_model_type: preset.transformers_model_type,\n } as VlmModelLocal;\n }\n return 
model;\n}\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAqBO,IAAM,aAA6C;AAAA;AAAA,EAGxD,4BAA4B;AAAA,IAC1B,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aACE;AAAA,EACJ;AAAA,EACA,wBAAwB;AAAA,IACtB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,wBAAwB;AAAA,IACtB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,oBAAoB;AAAA,IAClB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA;AAAA,EAIA,qBAAqB;AAAA,IACnB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,oBAAoB;AAAA,IAClB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,MAAM;AAAA,IACJ,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,mBAAmB;AAAA,IACjB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,eAAe;AAAA,IACb,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,MAAM;AAAA,IACJ,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,kBAAkB;AAAA,IAChB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,kBAAkB;AAAA,IAChB,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AAAA,EACA,SAAS;AAAA,IACP,SAAS;AAAA,IACT,qBAAqB;AAAA,IACrB,iBAAiB;AAAA,IACjB,yBAAyB;AAAA,IACzB,aAAa;AAAA,EACf;AACF;AAKO,IAAM,oBAAoB;AAQ1B,SAAS,gBAAgB,OAA8C;AAC5E,MAAI,OAAO,UAAU,UAAU;AAC7B,UAAM,SAAS,WAAW,KAAK;AAC/B,QAAI,CAAC,QAAQ;AACX,YAAM,IAAI;AAAA,QACR,8BAA8B,KAAK,yBAAyB,OAAO,KAAK,UAAU,EAAE,KAAK,IAAI,CAAC;AAAA,MAChG;AAAA,IACF;AACA,WAAO;AAAA,MACL,SAAS,OAAO;AAAA,MAChB,qBAAqB,OAAO;AAAA,MAC5B,iBAAiB,OAAO;AAAA,MACxB,yBAAyB,OAAO;AAAA,IAClC;AAAA,EACF;AACA,SAAO;AACT;","names":[]}
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import { VlmModelLocal } from 'docling-sdk';

/**
 * VLM model preset with description
 */
interface VlmModelPreset {
    /** Hugging Face repository id (e.g. "ibm-granite/granite-docling-258M-mlx"). */
    repo_id: string;
    /** Runtime backend: "mlx" (Apple Silicon) or "transformers" (cross-platform). */
    inference_framework: 'mlx' | 'transformers';
    /** Model output format: structured DocTags or plain Markdown. */
    response_format: 'doctags' | 'markdown';
    /** Transformers loader class to use for this model. */
    transformers_model_type: 'automodel-vision2seq' | 'automodel';
    /** Human-readable summary shown to users when listing presets. */
    description: string;
}
/**
 * Available VLM model presets
 *
 * Based on Docling's official VLM model specs:
 * https://docling-project.github.io/docling/usage/vision_models/#available-local-models
 *
 * Users can select a preset key or provide a custom VlmModelLocal object.
 */
declare const VLM_MODELS: Record<string, VlmModelPreset>;
/**
 * Default VLM model preset key
 */
declare const DEFAULT_VLM_MODEL = "granite-docling-258M-mlx";
/**
 * Resolve a VLM model from a preset key or custom VlmModelLocal object.
 *
 * When using a preset key, only required fields are populated.
 * Optional fields (prompt, scale, extra_generation_config) use Docling defaults.
 */
declare function resolveVlmModel(model: string | VlmModelLocal): VlmModelLocal;

export { DEFAULT_VLM_MODEL, VLM_MODELS, type VlmModelPreset, resolveVlmModel };
|
|
@@ -0,0 +1,34 @@
|
|
|
1
|
+
import { VlmModelLocal } from 'docling-sdk';

/**
 * VLM model preset with description
 */
interface VlmModelPreset {
    /** Hugging Face repository id (e.g. "ibm-granite/granite-docling-258M-mlx"). */
    repo_id: string;
    /** Runtime backend: "mlx" (Apple Silicon) or "transformers" (cross-platform). */
    inference_framework: 'mlx' | 'transformers';
    /** Model output format: structured DocTags or plain Markdown. */
    response_format: 'doctags' | 'markdown';
    /** Transformers loader class to use for this model. */
    transformers_model_type: 'automodel-vision2seq' | 'automodel';
    /** Human-readable summary shown to users when listing presets. */
    description: string;
}
/**
 * Available VLM model presets
 *
 * Based on Docling's official VLM model specs:
 * https://docling-project.github.io/docling/usage/vision_models/#available-local-models
 *
 * Users can select a preset key or provide a custom VlmModelLocal object.
 */
declare const VLM_MODELS: Record<string, VlmModelPreset>;
/**
 * Default VLM model preset key
 */
declare const DEFAULT_VLM_MODEL = "granite-docling-258M-mlx";
/**
 * Resolve a VLM model from a preset key or custom VlmModelLocal object.
 *
 * When using a preset key, only required fields are populated.
 * Optional fields (prompt, scale, extra_generation_config) use Docling defaults.
 */
declare function resolveVlmModel(model: string | VlmModelLocal): VlmModelLocal;

export { DEFAULT_VLM_MODEL, VLM_MODELS, type VlmModelPreset, resolveVlmModel };
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":[],"sourcesContent":[],"mappings":"","names":[]}
|
package/package.json
CHANGED
|
@@ -2,7 +2,7 @@
|
|
|
2
2
|
"name": "@heripo/pdf-parser",
|
|
3
3
|
"private": false,
|
|
4
4
|
"type": "module",
|
|
5
|
-
"version": "0.1.
|
|
5
|
+
"version": "0.1.6",
|
|
6
6
|
"description": "PDF parsing library using Docling SDK with OCR support for macOS",
|
|
7
7
|
"main": "dist/index.cjs",
|
|
8
8
|
"module": "dist/index.js",
|
|
@@ -13,6 +13,12 @@
|
|
|
13
13
|
"import": "./dist/index.js",
|
|
14
14
|
"require": "./dist/index.cjs",
|
|
15
15
|
"default": "./dist/index.js"
|
|
16
|
+
},
|
|
17
|
+
"./vlm-models": {
|
|
18
|
+
"types": "./dist/vlm-models.d.ts",
|
|
19
|
+
"import": "./dist/vlm-models.js",
|
|
20
|
+
"require": "./dist/vlm-models.cjs",
|
|
21
|
+
"default": "./dist/vlm-models.js"
|
|
16
22
|
}
|
|
17
23
|
},
|
|
18
24
|
"files": [
|
|
@@ -47,7 +53,7 @@
|
|
|
47
53
|
"macos"
|
|
48
54
|
],
|
|
49
55
|
"engines": {
|
|
50
|
-
"node": ">=
|
|
56
|
+
"node": ">=24"
|
|
51
57
|
},
|
|
52
58
|
"os": [
|
|
53
59
|
"darwin"
|
|
@@ -59,19 +65,19 @@
|
|
|
59
65
|
"docling-sdk": "^1.3.6",
|
|
60
66
|
"es-toolkit": "^1.44.0",
|
|
61
67
|
"yauzl": "^3.2.0",
|
|
62
|
-
"@heripo/model": "0.1.
|
|
68
|
+
"@heripo/model": "0.1.6"
|
|
63
69
|
},
|
|
64
70
|
"devDependencies": {
|
|
65
71
|
"@types/yauzl": "^2.10.3",
|
|
66
|
-
"@vitest/coverage-v8": "^
|
|
67
|
-
"@vitest/expect": "^
|
|
72
|
+
"@vitest/coverage-v8": "^4.0.18",
|
|
73
|
+
"@vitest/expect": "^4.0.18",
|
|
68
74
|
"tsup": "^8.5.1",
|
|
69
|
-
"vitest": "^
|
|
75
|
+
"vitest": "^4.0.18",
|
|
70
76
|
"@heripo/logger": "0.0.0",
|
|
71
|
-
"@heripo/shared": "0.0.0",
|
|
72
77
|
"@heripo/tsconfig": "0.0.0",
|
|
73
|
-
"@heripo/
|
|
74
|
-
"@heripo/tsup-config": "0.0.0"
|
|
78
|
+
"@heripo/shared": "0.0.0",
|
|
79
|
+
"@heripo/tsup-config": "0.0.0",
|
|
80
|
+
"@heripo/vitest-config": "0.0.0"
|
|
75
81
|
},
|
|
76
82
|
"scripts": {
|
|
77
83
|
"clean": "rimraf dist",
|