sparrow-parse 0.3.5__py3-none-any.whl → 0.3.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- sparrow_parse/__init__.py +1 -1
- sparrow_parse/extractors/vllm_extractor.py +62 -26
- sparrow_parse/helpers/pdf_optimizer.py +11 -6
- sparrow_parse/vllm/huggingface_inference.py +30 -6
- sparrow_parse/vllm/inference_base.py +24 -1
- sparrow_parse/vllm/local_gpu_inference.py +1 -1
- {sparrow_parse-0.3.5.dist-info → sparrow_parse-0.3.7.dist-info}/METADATA +19 -5
- sparrow_parse-0.3.7.dist-info/RECORD +18 -0
- sparrow_parse-0.3.5.dist-info/RECORD +0 -18
- {sparrow_parse-0.3.5.dist-info → sparrow_parse-0.3.7.dist-info}/WHEEL +0 -0
- {sparrow_parse-0.3.5.dist-info → sparrow_parse-0.3.7.dist-info}/entry_points.txt +0 -0
- {sparrow_parse-0.3.5.dist-info → sparrow_parse-0.3.7.dist-info}/top_level.txt +0 -0
sparrow_parse/__init__.py
CHANGED
@@ -1 +1 @@
-__version__ = '0.3.5'
+__version__ = '0.3.7'
sparrow_parse/extractors/vllm_extractor.py
CHANGED
@@ -1,46 +1,82 @@
 from sparrow_parse.vllm.inference_factory import InferenceFactory
+from sparrow_parse.helpers.pdf_optimizer import PDFOptimizer
 from rich import print
 import os
+import shutil


 class VLLMExtractor(object):
     def __init__(self):
         pass

-    def run_inference(self,
+    def run_inference(self,
+                      model_inference_instance,
+                      input_data,
+                      generic_query=False,
+                      debug_dir=None,
+                      debug=False,
+                      mode=None):
         if generic_query:
             input_data[0]["text_input"] = "retrieve document data. return response in JSON format"

         if debug:
             print("Input Data:", input_data)

-
+        results_array = []

-
+        if self.is_pdf(input_data[0]["file_path"]):
+            pdf_optimizer = PDFOptimizer()
+            num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages(input_data[0]["file_path"],
+                                                                                 debug_dir,
+                                                                                 True)
+
+            input_data[0]["file_path"] = output_files
+
+            # Run inference on the page
+            results_array = model_inference_instance.inference(input_data, mode)
+
+            shutil.rmtree(temp_dir, ignore_errors=True)
+            return results_array, num_pages
+
+        input_data[0]["file_path"] = [input_data[0]["file_path"]]
+        results_array = model_inference_instance.inference(input_data)
+
+        return results_array, 1
+
+    def is_pdf(self, file_path):
+        return file_path.lower().endswith('.pdf')

 if __name__ == "__main__":
+    # run locally: python -m sparrow_parse.extractors.vllm_extractor
+
     extractor = VLLMExtractor()

-    # export HF_TOKEN="hf_"
-    config = {
-
-
-
-
-
-    }
-
-    # Use the factory to get the correct instance
-    factory = InferenceFactory(config)
-    model_inference_instance = factory.get_inference_instance()
-
-    input_data = [
-
-
-
-
-    ]
-
-    # Now you can run inference without knowing which implementation is used
-
-
+    # # export HF_TOKEN="hf_"
+    # config = {
+    #     "method": "huggingface", # Could be 'huggingface' or 'local_gpu'
+    #     "hf_space": "katanaml/sparrow-qwen2-vl-7b",
+    #     "hf_token": os.getenv('HF_TOKEN'),
+    #     # Additional fields for local GPU inference
+    #     # "device": "cuda", "model_path": "model.pth"
+    # }
+    #
+    # # Use the factory to get the correct instance
+    # factory = InferenceFactory(config)
+    # model_inference_instance = factory.get_inference_instance()
+    #
+    # input_data = [
+    #     {
+    #         "file_path": "/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/oracle_10k_2014_q1_small.pdf",
+    #         "text_input": "retrieve {\"table\": [{\"description\": \"str\", \"latest_amount\": 0, \"previous_amount\": 0}]}. return response in JSON format"
+    #     }
+    # ]
+    #
+    # # Now you can run inference without knowing which implementation is used
+    # results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+    #                                                    debug_dir="/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/",
+    #                                                    debug=True,
+    #                                                    mode=None)
+    #
+    # for i, result in enumerate(results_array):
+    #     print(f"Result for page {i + 1}:", result)
+    # print(f"Number of pages: {num_pages}")
sparrow_parse/helpers/pdf_optimizer.py
CHANGED
@@ -40,17 +40,18 @@ class PDFOptimizer(object):
             return number_of_pages, output_files, temp_dir
         else:
             # Convert the PDF to images
-            images = convert_from_path(file_path, dpi=
+            images = convert_from_path(file_path, dpi=300)
+            base_name = os.path.splitext(os.path.basename(file_path))[0]

             # Save the images to the temporary directory
             for i, image in enumerate(images):
-                output_filename = os.path.join(temp_dir, f'
+                output_filename = os.path.join(temp_dir, f'{base_name}_page_{i + 1}.jpg')
                 image.save(output_filename, 'JPEG')
                 output_files.append(output_filename)

             if output_dir:
                 # Save each image to the debug folder
-                debug_output_filename = os.path.join(output_dir, f'
+                debug_output_filename = os.path.join(output_dir, f'{base_name}_page_{i + 1}.jpg')
                 image.save(debug_output_filename, 'JPEG')

             # Return the number of pages, the list of file paths, and the temporary directory
@@ -60,13 +61,17 @@ class PDFOptimizer(object):
 if __name__ == "__main__":
     pdf_optimizer = PDFOptimizer()

-    # output_directory = "/Users/andrejb/
+    # output_directory = "/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/"
     # # Ensure the output directory exists
     # os.makedirs(output_directory, exist_ok=True)
     #
     # # Split the optimized PDF into separate pages
-    # num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/Users/andrejb/
+    # num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/oracle_10k_2014_q1_small.pdf",
    #                                                                      output_directory,
-    #
+    #                                                                      True)
+    #
+    # print(f"Number of pages: {num_pages}")
+    # print(f"Output files: {output_files}")
+    # print(f"Temporary directory: {temp_dir}")
     #
     # shutil.rmtree(temp_dir, ignore_errors=True)
sparrow_parse/vllm/huggingface_inference.py
CHANGED
@@ -1,6 +1,8 @@
 from gradio_client import Client, handle_file
 from sparrow_parse.vllm.inference_base import ModelInference
 import json
+import os
+import ast


 class HuggingFaceInference(ModelInference):
@@ -24,13 +26,35 @@ class HuggingFaceInference(ModelInference):
         return output_text


-    def inference(self, input_data):
+    def inference(self, input_data, mode=None):
+        if mode == "static":
+            simple_json = self.get_simple_json()
+            return [simple_json]
+
         client = Client(self.hf_space, hf_token=self.hf_token)

-
-
-
-
+        # Extract and prepare the absolute paths for all file paths in input_data
+        file_paths = [
+            os.path.abspath(file_path)
+            for data in input_data
+            for file_path in data["file_path"]
+        ]
+
+        # Validate file existence and prepare files for the Gradio client
+        image_files = [handle_file(path) for path in file_paths if os.path.exists(path)]
+
+        results = client.predict(
+            input_imgs=image_files,
+            text_input=input_data[0]["text_input"], # Single shared text input for all images
+            api_name="/run_inference" # Specify the Gradio API endpoint
         )

-
+        # Convert the string into a Python list
+        parsed_results = ast.literal_eval(results)
+
+        results_array = []
+        for page_output in parsed_results:
+            page_result = self.process_response(page_output)
+            results_array.append(page_result)
+
+        return results_array
sparrow_parse/vllm/inference_base.py
CHANGED
@@ -1,7 +1,30 @@
 from abc import ABC, abstractmethod
+import json
+


 class ModelInference(ABC):
     @abstractmethod
-    def inference(self, input_data):
+    def inference(self, input_data, mode=None):
         """This method should be implemented by subclasses."""
         pass
+
+    def get_simple_json(self):
+        # Define a simple data structure
+        data = {
+            "table": [
+                {
+                    "description": "Revenues",
+                    "latest_amount": 12453,
+                    "previous_amount": 11445
+                },
+                {
+                    "description": "Operating expenses",
+                    "latest_amount": 9157,
+                    "previous_amount": 8822
+                }
+            ]
+        }
+
+        # Convert the dictionary to a JSON string
+        json_data = json.dumps(data, indent=4)
+        return json_data
sparrow_parse/vllm/local_gpu_inference.py
CHANGED
@@ -8,7 +8,7 @@ class LocalGPUInference(ModelInference):
         self.device = device
         self.model.to(self.device)

-    def inference(self, input_data):
+    def inference(self, input_data, mode=None):
         self.model.eval()  # Set the model to evaluation mode
         with torch.no_grad():  # No need to calculate gradients
             input_tensor = torch.tensor(input_data).to(self.device)
{sparrow_parse-0.3.5.dist-info → sparrow_parse-0.3.7.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sparrow-parse
-Version: 0.3.5
+Version: 0.3.7
 Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
 Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
 Author: Andrej Baranovskij
@@ -42,6 +42,8 @@ pip install sparrow-parse
 ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra

 ```
+# run locally: python -m sparrow_parse.extractors.vllm_extractor
+
 from sparrow_parse.vllm.inference_factory import InferenceFactory
 from sparrow_parse.extractors.vllm_extractor import VLLMExtractor

@@ -62,16 +64,28 @@ model_inference_instance = factory.get_inference_instance()

 input_data = [
     {
-        "
-        "text_input": "retrieve 
+        "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+        "text_input": "retrieve {"table": [{"description": "str", "latest_amount": 0, "previous_amount": 0}]}. return response in JSON format"
     }
 ]

 # Now you can run inference without knowing which implementation is used
-
-
+results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                   debug_dir="/data/",
+                                                   debug=True,
+                                                   mode=None)
+
+for i, result in enumerate(results_array):
+    print(f"Result for page {i + 1}:", result)
+print(f"Number of pages: {num_pages}")
 ```

+Use `mode="static"` if you want to simulate LLM call, without executing LLM backend.
+
+Method `run_inference` will return results and number of pages processed.
+
+Note: GPU backend `katanaml/sparrow-qwen2-vl-7b` is private, to be able to run below command, you need to create your own backend on Hugging Face space using [code](https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse/sparrow_parse/vllm/infra/qwen2_vl_7b) from Sparrow Parse.
+
 ## PDF pre-processing

 ```
|
@@ -0,0 +1,18 @@
|
|
1
|
+
sparrow_parse/__init__.py,sha256=V3RDzgFfGW_qKkRklGT6eISHLybQsgfScnd9neXG7Cs,21
|
2
|
+
sparrow_parse/__main__.py,sha256=Xs1bpJV0n08KWOoQE34FBYn6EBXZA9HIYJKrE4ZdG78,153
|
3
|
+
sparrow_parse/extractors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
4
|
+
sparrow_parse/extractors/vllm_extractor.py,sha256=mBPgeyMuHUa6jN_OZLVE-426tD4zYnFT61oxebk7XJc,3191
|
5
|
+
sparrow_parse/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
6
|
+
sparrow_parse/helpers/pdf_optimizer.py,sha256=GIqQYWtixFeZGCRFXL0lQfQByapCDuQzzRHAkzcPwLE,3302
|
7
|
+
sparrow_parse/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
8
|
+
sparrow_parse/processors/table_structure_processor.py,sha256=bG_6jx66n_KNdY_O6hrZD1D4DHX5Qy__RYcKHmrSGnc,23894
|
9
|
+
sparrow_parse/vllm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
|
10
|
+
sparrow_parse/vllm/huggingface_inference.py,sha256=EJnG6PesGKMc_0qGPN8ufE6pSnhAgFu0XjCbaLCNVyM,1980
|
11
|
+
sparrow_parse/vllm/inference_base.py,sha256=4mwGoAY63MB4cHZpV0czTkJWEzimmiTzqqzKmLNzgjw,820
|
12
|
+
sparrow_parse/vllm/inference_factory.py,sha256=r04e95uPWG5l8Q23yeDqKmvFxLyF991aA2m0hfBTNn8,993
|
13
|
+
sparrow_parse/vllm/local_gpu_inference.py,sha256=aHoJTejb5xrXjWDIGu5RBQWEyRCOBCB04sMvO2Wyvg8,628
|
14
|
+
sparrow_parse-0.3.7.dist-info/METADATA,sha256=ErE4fDTkcyOrVbgpc6x9AO9cU3Gf8HbEGsbKmK-F0RA,6187
|
15
|
+
sparrow_parse-0.3.7.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
|
16
|
+
sparrow_parse-0.3.7.dist-info/entry_points.txt,sha256=8CrvTVTTcz1YuZ8aRCYNOH15ZOAaYLlcbYX3t28HwJY,54
|
17
|
+
sparrow_parse-0.3.7.dist-info/top_level.txt,sha256=n6b-WtT91zKLyCPZTP7wvne8v_yvIahcsz-4sX8I0rY,14
|
18
|
+
sparrow_parse-0.3.7.dist-info/RECORD,,
|
sparrow_parse-0.3.5.dist-info/RECORD
REMOVED
@@ -1,18 +0,0 @@
-sparrow_parse/__init__.py,sha256=e9arv8KorBrIZFQXAlN4DOQTh91btae1iR36M_3Wafk,21
-sparrow_parse/__main__.py,sha256=Xs1bpJV0n08KWOoQE34FBYn6EBXZA9HIYJKrE4ZdG78,153
-sparrow_parse/extractors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sparrow_parse/extractors/vllm_extractor.py,sha256=Qwmf-SW4z_UstiiynX5TkyovlkokVhLuzcbUVZ16TXM,1540
-sparrow_parse/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sparrow_parse/helpers/pdf_optimizer.py,sha256=KI_EweGt9Y_rDH1uCpYD5wKCW3rdjSFFhoVtiPBxX8k,3013
-sparrow_parse/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sparrow_parse/processors/table_structure_processor.py,sha256=bG_6jx66n_KNdY_O6hrZD1D4DHX5Qy__RYcKHmrSGnc,23894
-sparrow_parse/vllm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-sparrow_parse/vllm/huggingface_inference.py,sha256=Q2Ju65LDzbO-8RWW7cXzrR-pbZ1zKuPVODlKOTWKg_E,1114
-sparrow_parse/vllm/inference_base.py,sha256=W0N2khehGdF1XHzZACG3I1UZaydHMk6BZgWNvaJD4Ck,197
-sparrow_parse/vllm/inference_factory.py,sha256=r04e95uPWG5l8Q23yeDqKmvFxLyF991aA2m0hfBTNn8,993
-sparrow_parse/vllm/local_gpu_inference.py,sha256=I_uWYiFAQhRrykOKbVz69NzftDxuemDKtAye4kWhtnU,617
-sparrow_parse-0.3.5.dist-info/METADATA,sha256=4i_-BJalUQFFUZoo919pfr51ZqvU1Jfq-mEFkHf0gWU,5342
-sparrow_parse-0.3.5.dist-info/WHEEL,sha256=eOLhNAGa2EW3wWl_TU484h7q1UNgy0JXjjoqKoxAAQc,92
-sparrow_parse-0.3.5.dist-info/entry_points.txt,sha256=8CrvTVTTcz1YuZ8aRCYNOH15ZOAaYLlcbYX3t28HwJY,54
-sparrow_parse-0.3.5.dist-info/top_level.txt,sha256=n6b-WtT91zKLyCPZTP7wvne8v_yvIahcsz-4sX8I0rY,14
-sparrow_parse-0.3.5.dist-info/RECORD,,
File without changes
|
File without changes
|
File without changes
|