sparrow-parse 0.4.6__tar.gz → 0.4.8__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/PKG-INFO +1 -1
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/setup.py +1 -1
- sparrow-parse-0.4.8/sparrow_parse/__init__.py +1 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/extractors/vllm_extractor.py +31 -31
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/inference_factory.py +0 -6
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/mlx_inference.py +43 -70
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/PKG-INFO +1 -1
- sparrow-parse-0.4.6/sparrow_parse/__init__.py +0 -1
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/README.md +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/setup.cfg +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/__main__.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/extractors/__init__.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/helpers/__init__.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/helpers/pdf_optimizer.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/processors/__init__.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/processors/table_structure_processor.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/__init__.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/huggingface_inference.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/inference_base.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/local_gpu_inference.py +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/SOURCES.txt +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/dependency_links.txt +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/entry_points.txt +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/requires.txt +0 -0
- {sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/top_level.txt +0 -0
{sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sparrow-parse
-Version: 0.4.6
+Version: 0.4.8
 Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
 Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
 Author: Andrej Baranovskij
{sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/setup.py

@@ -8,7 +8,7 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:

 setup(
     name="sparrow-parse",
-    version="0.4.6",
+    version="0.4.8",
     author="Andrej Baranovskij",
     author_email="andrejus.baranovskis@gmail.com",
     description="Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.",
sparrow-parse-0.4.8/sparrow_parse/__init__.py (new file)

@@ -0,0 +1 @@
+__version__ = '0.4.8'
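With __version__ now defined at the package root, the installed version can be read at runtime. A minimal check, assuming sparrow-parse 0.4.8 is installed:

import sparrow_parse

# Read the version string that 0.4.8 now exports from the package root
print(sparrow_parse.__version__)  # '0.4.8'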
{sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/extractors/vllm_extractor.py

@@ -152,34 +152,34 @@ if __name__ == "__main__":

     extractor = VLLMExtractor()

-    # export HF_TOKEN="hf_"
-    config = {
-        "method": "mlx", # Could be 'huggingface', 'mlx' or 'local_gpu'
-        "model_name": "mlx-community/Qwen2-VL-7B-Instruct-8bit",
-        # "hf_space": "katanaml/sparrow-qwen2-vl-7b",
-        # "hf_token": os.getenv('HF_TOKEN'),
-        # Additional fields for local GPU inference
-        # "device": "cuda", "model_path": "model.pth"
-    }
-
-    # Use the factory to get the correct instance
-    factory = InferenceFactory(config)
-    model_inference_instance = factory.get_inference_instance()
-
-    input_data = [
-        {
-            "file_path": "/Users/andrejb/Work/katana-git/sparrow/sparrow-ml/llm/data/invoice_1.jpg",
-            "text_input": "retrieve document data. return response in JSON format"
-        }
-    ]
-
-    # Now you can run inference without knowing which implementation is used
-    results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, tables_only=True,
-                                                       generic_query=False,
-                                                       debug_dir="/Users/andrejb/Work/katana-git/sparrow/sparrow-ml/llm/data/",
-                                                       debug=True,
-                                                       mode=None)
-
-    for i, result in enumerate(results_array):
-        print(f"Result for page {i + 1}:", result)
-    print(f"Number of pages: {num_pages}")
+    # # export HF_TOKEN="hf_"
+    # config = {
+    #     "method": "mlx", # Could be 'huggingface', 'mlx' or 'local_gpu'
+    #     "model_name": "mlx-community/Qwen2-VL-7B-Instruct-8bit",
+    #     # "hf_space": "katanaml/sparrow-qwen2-vl-7b",
+    #     # "hf_token": os.getenv('HF_TOKEN'),
+    #     # Additional fields for local GPU inference
+    #     # "device": "cuda", "model_path": "model.pth"
+    # }
+    #
+    # # Use the factory to get the correct instance
+    # factory = InferenceFactory(config)
+    # model_inference_instance = factory.get_inference_instance()
+    #
+    # input_data = [
+    #     {
+    #         "file_path": "/Users/andrejb/Work/katana-git/sparrow/sparrow-ml/llm/data/invoice_1.jpg",
+    #         "text_input": "retrieve document data. return response in JSON format"
+    #     }
+    # ]
+    #
+    # # Now you can run inference without knowing which implementation is used
+    # results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, tables_only=True,
+    #                                                    generic_query=False,
+    #                                                    debug_dir="/Users/andrejb/Work/katana-git/sparrow/sparrow-ml/llm/data/",
+    #                                                    debug=True,
+    #                                                    mode=None)
+    #
+    # for i, result in enumerate(results_array):
+    #     print(f"Result for page {i + 1}:", result)
+    # print(f"Number of pages: {num_pages}")
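For reference, a condensed, runnable form of the example that 0.4.8 comments out above. This is a sketch rather than package code: it assumes an MLX-capable machine, uses only names that appear in this diff, and the input file path is a placeholder.

from sparrow_parse.extractors.vllm_extractor import VLLMExtractor
from sparrow_parse.vllm.inference_factory import InferenceFactory

# Select the MLX backend, as in the commented-out example
config = {
    "method": "mlx",  # could be 'huggingface', 'mlx' or 'local_gpu'
    "model_name": "mlx-community/Qwen2-VL-7B-Instruct-8bit",
}
factory = InferenceFactory(config)
model_inference_instance = factory.get_inference_instance()

extractor = VLLMExtractor()
input_data = [
    {
        "file_path": "invoice_1.jpg",  # placeholder path
        "text_input": "retrieve document data. return response in JSON format",
    }
]
results_array, num_pages = extractor.run_inference(
    model_inference_instance, input_data,
    tables_only=True, generic_query=False, debug=False, mode=None,
)
for i, result in enumerate(results_array):
    print(f"Result for page {i + 1}:", result)
print(f"Number of pages: {num_pages}")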
{sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/inference_factory.py

@@ -23,9 +23,3 @@ class InferenceFactory:
             # model = torch.load('model.pth')
             # return model
         raise NotImplementedError("Model loading logic not implemented")
-
-
-    def unload_inference_instance(self, instance):
-        if instance and hasattr(instance, "unload_model"):
-            instance.unload_model()
-            print(f"Inference instance of type {type(instance).__name__} has been unloaded.")
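Note that 0.4.8 removes this unload hook together with MLXInference.unload_model (see the mlx_inference.py hunks below): the MLX model is now loaded inside inference() and released when the call's local references go out of scope. Callers that still want a guarded cleanup step could use a stand-in like the following hypothetical helper (not part of sparrow-parse):

def unload_if_supported(instance):
    # Hypothetical replacement for the removed
    # InferenceFactory.unload_inference_instance(); it calls unload_model()
    # only on backends that still define it.
    if instance is not None and hasattr(instance, "unload_model"):
        instance.unload_model()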
{sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse/vllm/mlx_inference.py

@@ -4,7 +4,7 @@ from mlx_vlm.utils import load_image
 from sparrow_parse.vllm.inference_base import ModelInference
 import os
 import json
-import gc
+from rich import print


 class MLXInference(ModelInference):
@@ -20,40 +20,19 @@ class MLXInference(ModelInference):
         :param model_name: Name of the model to load.
         """
         self.model_name = model_name
-        self.model = None
-        self.processor = None
-        print(f"MLXInference initialized with model: {model_name}")
+        print(f"MLXInference initialized for model: {model_name}")


-    def unload_model(self):
-        """
-        Unload the model and release resources.
-        """
-        if self.model:
-            print(f"Unloading model: {self.model_name}")
-            del self.model
-            self.model = None
-        if self.processor:
-            print(f"Unloading processor for model: {self.model_name}")
-            del self.processor
-            self.processor = None
-
-        # Force garbage collection to release memory
-        gc.collect()
-        print(f"Model {self.model_name} and its resources have been unloaded, memory cleared.")
-
-
-    def _load_model_and_processor(self, model_name):
+    @staticmethod
+    def _load_model_and_processor(model_name):
         """
         Load the model and processor for inference.
+
         :param model_name: Name of the model to load.
         :return: Tuple containing the loaded model and processor.
         """
-        print(f"Loading model and processor for: {model_name}...")
         model, processor = load(model_name)
-
-        self.processor = processor # Store processor instance
-        print(f"Model and processor for '{model_name}' loaded successfully.")
+        print(f"Loaded model: {model_name}")
         return model, processor
@@ -103,54 +82,48 @@ class MLXInference(ModelInference):
     def inference(self, input_data, mode=None):
         """
         Perform inference on input data using the specified model.
+
        :param input_data: A list of dictionaries containing image file paths and text inputs.
        :param mode: Optional mode for inference ("static" for simple JSON output).
        :return: List of processed model responses.
        """
-        ... (most of the removed 0.4.6 body is blank in this diff view; only the lines below survive)
-                print("Inference completed successfully for: ", file_path)
-
-            return results
-
-        finally:
-            # Always unload the model after inference
-            self.unload_model()
+        if mode == "static":
+            return [self.get_simple_json()]
+
+        # Load the model and processor
+        model, processor = self._load_model_and_processor(self.model_name)
+        config = model.config
+
+        # Prepare absolute file paths
+        file_paths = self._extract_file_paths(input_data)
+
+        results = []
+        for file_path in file_paths:
+            image, width, height = self.load_image_data(file_path)
+
+            # Prepare messages for the chat model
+            messages = [
+                {"role": "system", "content": "You are an expert at extracting structured text from image documents."},
+                {"role": "user", "content": input_data[0]["text_input"]},
+            ]
+
+            # Generate and process response
+            prompt = apply_chat_template(processor, config, messages) # Assuming defined
+            response = generate(
+                model,
+                processor,
+                image,
+                prompt,
+                resize_shape=(width, height),
+                max_tokens=4000,
+                temperature=0.0,
+                verbose=False
+            )
+            results.append(self.process_response(response))
+
+            print("Inference completed successfully for: ", file_path)

+        return results

     @staticmethod
     def _extract_file_paths(input_data):
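Taken together with the constructor and _load_model_and_processor changes above, MLXInference in 0.4.8 holds no model state between calls: each inference() call loads the model, runs generation, and lets the references drop. A minimal direct-use sketch, assuming an Apple-silicon machine with mlx-vlm installed; the file path and prompt are placeholders:

from sparrow_parse.vllm.mlx_inference import MLXInference

inference = MLXInference(model_name="mlx-community/Qwen2-VL-7B-Instruct-8bit")
results = inference.inference([
    {
        "file_path": "document.jpg",  # placeholder
        "text_input": "retrieve document data. return response in JSON format",
    }
])
print(results[0])  # the model was loaded for this call and released afterwards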
{sparrow-parse-0.4.6 → sparrow-parse-0.4.8}/sparrow_parse.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sparrow-parse
-Version: 0.4.6
+Version: 0.4.8
 Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
 Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
 Author: Andrej Baranovskij
sparrow-parse-0.4.6/sparrow_parse/__init__.py (deleted)

@@ -1 +0,0 @@
-__version__ = '0.4.6'