sparrow-parse 0.3.8__py3-none-any.whl → 0.3.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sparrow_parse/__init__.py +1 -1
- sparrow_parse/extractors/vllm_extractor.py +34 -29
- sparrow_parse/vllm/inference_factory.py +3 -0
- sparrow_parse/vllm/mlx_inference.py +135 -0
- {sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/METADATA +18 -12
- {sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/RECORD +9 -8
- {sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/WHEEL +0 -0
- {sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/entry_points.txt +0 -0
- {sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/top_level.txt +0 -0
sparrow_parse/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = '0.3.8'
+__version__ = '0.3.9'
sparrow_parse/extractors/vllm_extractor.py
CHANGED
@@ -9,41 +9,45 @@ class VLLMExtractor(object):
     def __init__(self):
         pass

-    def run_inference(self,
-
-
-                      generic_query=False,
-                      debug_dir=None,
-                      debug=False,
-                      mode=None):
+    def run_inference(self, model_inference_instance, input_data,
+                      generic_query=False, debug_dir=None, debug=False, mode=None):
+        # Modify input for generic queries
         if generic_query:
             input_data[0]["text_input"] = "retrieve document data. return response in JSON format"

         if debug:
             print("Input Data:", input_data)

-
+        # Check if the input file is a PDF
+        file_path = input_data[0]["file_path"]
+        if self.is_pdf(file_path):
+            return self._process_pdf(model_inference_instance, input_data, debug_dir, mode)

-
-
-
-
-                                                            True)
+        # Default processing for non-PDF files
+        input_data[0]["file_path"] = [file_path]
+        results_array = model_inference_instance.inference(input_data)
+        return results_array, 1

-            input_data[0]["file_path"] = output_files

-
-
+    def _process_pdf(self, model_inference_instance, input_data, debug_dir, mode):
+        """Handles processing and inference for PDF files."""
+        pdf_optimizer = PDFOptimizer()
+        num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages(input_data[0]["file_path"],
+                                                                             debug_dir,
+                                                                             True)
+        # Update file paths for PDF pages
+        input_data[0]["file_path"] = output_files

-
-
+        # Run inference on PDF pages
+        results_array = model_inference_instance.inference(input_data, mode)

-
-
-
-            return results_array, 1
+        # Clean up temporary directory
+        shutil.rmtree(temp_dir, ignore_errors=True)
+        return results_array, num_pages

-
+    @staticmethod
+    def is_pdf(file_path):
+        """Checks if a file is a PDF based on its extension."""
         return file_path.lower().endswith('.pdf')

 if __name__ == "__main__":
@@ -53,9 +57,10 @@ if __name__ == "__main__":

     # # export HF_TOKEN="hf_"
     # config = {
-    #     "method": "
-    #     "
-    #     "
+    #     "method": "mlx", # Could be 'huggingface', 'mlx' or 'local_gpu'
+    #     "model_name": "mlx-community/Qwen2-VL-72B-Instruct-4bit",
+    #     # "hf_space": "katanaml/sparrow-qwen2-vl-7b",
+    #     # "hf_token": os.getenv('HF_TOKEN'),
     #     # Additional fields for local GPU inference
     #     # "device": "cuda", "model_path": "model.pth"
     # }
@@ -66,14 +71,14 @@ if __name__ == "__main__":
    #
    # input_data = [
    #     {
-    #         "file_path": "/Users/andrejb/
-    #         "text_input": "retrieve
+    #         "file_path": "/Users/andrejb/Work/katana-git/sparrow/sparrow-ml/llm/data/bonds_table.jpg",
+    #         "text_input": "retrieve all data. return response in JSON format"
    #     }
    # ]
    #
    # # Now you can run inference without knowing which implementation is used
    # results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
-    #                                                     debug_dir=
+    #                                                     debug_dir=None,
    #                                                     debug=True,
    #                                                     mode=None)
    #
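For reference, a minimal usage sketch of the refactored `run_inference`: PDFs are split into pages and routed through the new `_process_pdf` helper, other files take the single-image branch, and the method returns the results array plus the number of pages processed. The config dict, model name and file path below are illustrative, and the `InferenceFactory(config)` constructor is assumed from the factory usage shown in the README diff further down.

```
from sparrow_parse.extractors.vllm_extractor import VLLMExtractor
from sparrow_parse.vllm.inference_factory import InferenceFactory

extractor = VLLMExtractor()

# Illustrative config; InferenceFactory(config) is assumed here
config = {"method": "mlx", "model_name": "mlx-community/Qwen2-VL-72B-Instruct-4bit"}
model_inference_instance = InferenceFactory(config).get_inference_instance()

input_data = [{
    "file_path": "data/invoice.pdf",  # hypothetical path; a .pdf is routed to _process_pdf
    "text_input": "retrieve all data. return response in JSON format"
}]

# Returns (results_array, num_pages); num_pages is 1 for non-PDF input
results_array, num_pages = extractor.run_inference(model_inference_instance, input_data,
                                                   generic_query=False, debug_dir=None,
                                                   debug=False, mode=None)
```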
sparrow_parse/vllm/inference_factory.py
CHANGED
@@ -1,5 +1,6 @@
 from sparrow_parse.vllm.huggingface_inference import HuggingFaceInference
 from sparrow_parse.vllm.local_gpu_inference import LocalGPUInference
+from sparrow_parse.vllm.mlx_inference import MLXInference


 class InferenceFactory:
@@ -12,6 +13,8 @@ class InferenceFactory:
         elif self.config["method"] == "local_gpu":
             model = self._load_local_model()  # Replace with actual model loading logic
             return LocalGPUInference(model=model, device=self.config.get("device", "cuda"))
+        elif self.config["method"] == "mlx":
+            return MLXInference(model_name=self.config["model_name"])
         else:
             raise ValueError(f"Unknown method: {self.config['method']}")

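For context, a sketch of the dispatch that the factory now performs on `config["method"]`; the values are placeholders lifted from the README examples, the factory is assumed to take the config dict in its constructor, and any other `method` value still raises `ValueError`.

```
import os
from sparrow_parse.vllm.inference_factory import InferenceFactory

# New MLX backend: only "method" and "model_name" are needed
config_mlx = {
    "method": "mlx",
    "model_name": "mlx-community/Qwen2-VL-72B-Instruct-4bit",
}

# Existing Hugging Face backend is configured as before
config_hf = {
    "method": "huggingface",
    "hf_space": "katanaml/sparrow-qwen2-vl-7b",
    "hf_token": os.getenv("HF_TOKEN"),
}

model_inference_instance = InferenceFactory(config_mlx).get_inference_instance()
```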
sparrow_parse/vllm/mlx_inference.py
ADDED
@@ -0,0 +1,135 @@
+from mlx_vlm import load, generate
+from mlx_vlm.prompt_utils import apply_chat_template
+from mlx_vlm.utils import load_image
+from sparrow_parse.vllm.inference_base import ModelInference
+import os
+import json
+
+
+class MLXInference(ModelInference):
+    """
+    A class for performing inference using the MLX model.
+    Handles image preprocessing, response formatting, and model interaction.
+    """
+
+    def __init__(self, model_name):
+        """
+        Initialize the inference class with the given model name and load the model once.
+
+        :param model_name: Name of the model to load.
+        """
+        self.model, self.processor = self._load_model_and_processor(model_name)
+        self.config = self.model.config
+
+        print(f"Loaded model: {model_name}")
+
+
+    @staticmethod
+    def _load_model_and_processor(model_name):
+        """
+        Load the model and processor for inference.
+
+        :param model_name: Name of the model to load.
+        :return: Tuple containing the loaded model and processor.
+        """
+        return load(model_name)
+
+
+    def process_response(self, output_text):
+        """
+        Process and clean the model's raw output to format as JSON.
+
+        :param output_text: Raw output text from the model.
+        :return: A formatted JSON string or the original text in case of errors.
+        """
+        try:
+            cleaned_text = (
+                output_text.strip("[]'")
+                .replace("```json\n", "")
+                .replace("\n```", "")
+                .replace("'", "")
+            )
+            formatted_json = json.loads(cleaned_text)
+            return json.dumps(formatted_json, indent=2)
+        except json.JSONDecodeError as e:
+            print(f"Failed to parse JSON: {e}")
+            return output_text
+
+
+    def load_image_data(self, image_filepath, max_width=1250, max_height=1750):
+        """
+        Load and resize image while maintaining its aspect ratio.
+
+        :param image_filepath: Path to the image file.
+        :param max_width: Maximum allowed width of the image.
+        :param max_height: Maximum allowed height of the image.
+        :return: Tuple containing the image object and its new dimensions.
+        """
+        image = load_image(image_filepath)  # Assuming load_image is defined elsewhere
+        width, height = image.size
+
+        # Calculate new dimensions while maintaining the aspect ratio
+        if width > max_width or height > max_height:
+            aspect_ratio = width / height
+            new_width = min(max_width, int(max_height * aspect_ratio))
+            new_height = min(max_height, int(max_width / aspect_ratio))
+            return image, new_width, new_height
+
+        return image, width, height
+
+
+    def inference(self, input_data, mode=None):
+        """
+        Perform inference on input data using the specified model.
+
+        :param input_data: A list of dictionaries containing image file paths and text inputs.
+        :param mode: Optional mode for inference ("static" for simple JSON output).
+        :return: List of processed model responses.
+        """
+        if mode == "static":
+            return [self.get_simple_json()]
+
+        # Prepare absolute file paths
+        file_paths = self._extract_file_paths(input_data)
+
+        results = []
+        for file_path in file_paths:
+            image, width, height = self.load_image_data(file_path)
+
+            # Prepare messages for the chat model
+            messages = [
+                {"role": "system", "content": "You are an expert at extracting structured text from image documents."},
+                {"role": "user", "content": input_data[0]["text_input"]},
+            ]
+
+            # Generate and process response
+            prompt = apply_chat_template(self.processor, self.config, messages)  # Assuming defined
+            response = generate(
+                self.model,
+                self.processor,
+                image,
+                prompt,
+                resize_shape=(width, height),
+                max_tokens=4000,
+                temperature=0.0,
+                verbose=False
+            )
+            results.append(self.process_response(response))
+
+            print("Inference completed successfully for: ", file_path)
+
+        return results
+
+    @staticmethod
+    def _extract_file_paths(input_data):
+        """
+        Extract and resolve absolute file paths from input data.
+
+        :param input_data: List of dictionaries containing image file paths.
+        :return: List of absolute file paths.
+        """
+        return [
+            os.path.abspath(file_path)
+            for data in input_data
+            for file_path in data.get("file_path", [])
+        ]
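A short direct-use sketch of the new class (the image path is illustrative). Note that `_extract_file_paths` iterates over `data.get("file_path", [])`, so `file_path` must be a list when `inference` is called directly; `VLLMExtractor.run_inference` takes care of wrapping single image paths. `get_simple_json()`, used by the `static` mode, is presumably inherited from the `ModelInference` base class.

```
from sparrow_parse.vllm.mlx_inference import MLXInference

# The model and processor are loaded once, in the constructor
mlx = MLXInference(model_name="mlx-community/Qwen2-VL-72B-Instruct-4bit")

input_data = [{
    "file_path": ["data/bonds_table.jpg"],  # a list of image paths (illustrative)
    "text_input": "retrieve all data. return response in JSON format"
}]

# mode="static" short-circuits to a canned JSON response; the default runs the model per image
results = mlx.inference(input_data)
print(results[0])
```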
{sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sparrow-parse
-Version: 0.3.8
+Version: 0.3.9
 Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
 Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
 Author: Andrej Baranovskij
@@ -24,7 +24,7 @@ Requires-Dist: pypdf==4.3.0
 Requires-Dist: easyocr==1.7.1
 Requires-Dist: gradio-client
 Requires-Dist: pdf2image
-Requires-Dist: mlx-vlm==0.1.
+Requires-Dist: mlx-vlm==0.1.3; sys_platform == "darwin" and platform_machine == "arm64"

 # Sparrow Parse

@@ -40,7 +40,7 @@ pip install sparrow-parse

 ## Parsing and extraction

-### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra
+### Sparrow Parse VL (vision-language model) extractor with local MLX or Hugging Face Cloud GPU infra

 ```
 # run locally: python -m sparrow_parse.extractors.vllm_extractor
@@ -50,13 +50,9 @@ from sparrow_parse.extractors.vllm_extractor import VLLMExtractor

 extractor = VLLMExtractor()

-# export HF_TOKEN="hf_"
 config = {
-    "method": "
-    "
-    "hf_token": os.getenv('HF_TOKEN'),
-    # Additional fields for local GPU inference
-    # "device": "cuda", "model_path": "model.pth"
+    "method": "mlx", # Could be 'huggingface', 'mlx' or 'local_gpu'
+    "model_name": "mlx-community/Qwen2-VL-72B-Instruct-4bit",
 }

 # Use the factory to get the correct instance
@@ -65,14 +61,14 @@ model_inference_instance = factory.get_inference_instance()

 input_data = [
     {
-        "file_path": "/data/
-        "text_input": "retrieve
+        "file_path": "/Users/andrejb/Work/katana-git/sparrow/sparrow-ml/llm/data/bonds_table.jpg",
+        "text_input": "retrieve all data. return response in JSON format"
     }
 ]

 # Now you can run inference without knowing which implementation is used
 results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
-                                                   debug_dir=
+                                                   debug_dir=None,
                                                    debug=True,
                                                    mode=None)

@@ -85,6 +81,16 @@ Use `mode="static"` if you want to simulate LLM call, without executing LLM backend

 Method `run_inference` will return results and number of pages processed.

+To run with Hugging Face backend use these config values:
+
+```
+config = {
+    "method": "huggingface", # Could be 'huggingface' or 'local_gpu'
+    "hf_space": "katanaml/sparrow-qwen2-vl-7b",
+    "hf_token": os.getenv('HF_TOKEN'),
+}
+```
+
 Note: GPU backend `katanaml/sparrow-qwen2-vl-7b` is private, to be able to run below command, you need to create your own backend on Hugging Face space using [code](https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse/sparrow_parse/vllm/infra/qwen2_vl_7b) from Sparrow Parse.

 ## PDF pre-processing
{sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
-sparrow_parse/__init__.py,sha256=
+sparrow_parse/__init__.py,sha256=OJRl30XMXR02jOFfAqFJB3-IohqKdn-uwu-tvsSqqhc,21
 sparrow_parse/__main__.py,sha256=Xs1bpJV0n08KWOoQE34FBYn6EBXZA9HIYJKrE4ZdG78,153
 sparrow_parse/extractors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sparrow_parse/extractors/vllm_extractor.py,sha256=
+sparrow_parse/extractors/vllm_extractor.py,sha256=FI1y5WGG6Z-msH0XHkvjyA8Oh2x2nBc1_kswNlsw27Y,3464
 sparrow_parse/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sparrow_parse/helpers/pdf_optimizer.py,sha256=GIqQYWtixFeZGCRFXL0lQfQByapCDuQzzRHAkzcPwLE,3302
 sparrow_parse/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -9,10 +9,11 @@ sparrow_parse/processors/table_structure_processor.py,sha256=bG_6jx66n_KNdY_O6hr
 sparrow_parse/vllm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 sparrow_parse/vllm/huggingface_inference.py,sha256=EJnG6PesGKMc_0qGPN8ufE6pSnhAgFu0XjCbaLCNVyM,1980
 sparrow_parse/vllm/inference_base.py,sha256=4mwGoAY63MB4cHZpV0czTkJWEzimmiTzqqzKmLNzgjw,820
-sparrow_parse/vllm/inference_factory.py,sha256=
+sparrow_parse/vllm/inference_factory.py,sha256=FTM65O-dW2WZchHOrNN7_Q3-FlVoAc65iSptuuUuClM,1166
 sparrow_parse/vllm/local_gpu_inference.py,sha256=aHoJTejb5xrXjWDIGu5RBQWEyRCOBCB04sMvO2Wyvg8,628
-sparrow_parse
-sparrow_parse-0.3.
-sparrow_parse-0.3.
-sparrow_parse-0.3.
-sparrow_parse-0.3.
+sparrow_parse/vllm/mlx_inference.py,sha256=3srKhlhVRoYqeVy_PziNZRHcHl6D3ksibvcOAug4irA,4723
+sparrow_parse-0.3.9.dist-info/METADATA,sha256=kbHkfNEfmoqd2yt3aM1IqwVwf6HYBWR-FTnc8XOIWSQ,6351
+sparrow_parse-0.3.9.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+sparrow_parse-0.3.9.dist-info/entry_points.txt,sha256=8CrvTVTTcz1YuZ8aRCYNOH15ZOAaYLlcbYX3t28HwJY,54
+sparrow_parse-0.3.9.dist-info/top_level.txt,sha256=n6b-WtT91zKLyCPZTP7wvne8v_yvIahcsz-4sX8I0rY,14
+sparrow_parse-0.3.9.dist-info/RECORD,,
{sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/WHEEL
File without changes
{sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/entry_points.txt
File without changes
{sparrow_parse-0.3.8.dist-info → sparrow_parse-0.3.9.dist-info}/top_level.txt
File without changes