sparrow-parse 0.3.5.tar.gz → 0.3.7.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/PKG-INFO +19 -5
  2. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/README.md +18 -4
  3. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/setup.py +1 -1
  4. sparrow-parse-0.3.7/sparrow_parse/__init__.py +1 -0
  5. sparrow-parse-0.3.7/sparrow_parse/extractors/vllm_extractor.py +82 -0
  6. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/helpers/pdf_optimizer.py +11 -6
  7. sparrow-parse-0.3.7/sparrow_parse/vllm/huggingface_inference.py +60 -0
  8. sparrow-parse-0.3.7/sparrow_parse/vllm/inference_base.py +30 -0
  9. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/vllm/local_gpu_inference.py +1 -1
  10. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/PKG-INFO +19 -5
  11. sparrow-parse-0.3.5/sparrow_parse/__init__.py +0 -1
  12. sparrow-parse-0.3.5/sparrow_parse/extractors/vllm_extractor.py +0 -46
  13. sparrow-parse-0.3.5/sparrow_parse/vllm/huggingface_inference.py +0 -36
  14. sparrow-parse-0.3.5/sparrow_parse/vllm/inference_base.py +0 -7
  15. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/setup.cfg +0 -0
  16. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/__main__.py +0 -0
  17. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/extractors/__init__.py +0 -0
  18. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/helpers/__init__.py +0 -0
  19. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/processors/__init__.py +0 -0
  20. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/processors/table_structure_processor.py +0 -0
  21. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/vllm/__init__.py +0 -0
  22. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/vllm/inference_factory.py +0 -0
  23. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/SOURCES.txt +0 -0
  24. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/dependency_links.txt +0 -0
  25. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/entry_points.txt +0 -0
  26. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/requires.txt +0 -0
  27. {sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/top_level.txt +0 -0
{sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sparrow-parse
- Version: 0.3.5
+ Version: 0.3.7
  Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
  Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
  Author: Andrej Baranovskij
@@ -34,6 +34,8 @@ pip install sparrow-parse
  ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra

  ```
+ # run locally: python -m sparrow_parse.extractors.vllm_extractor
+
  from sparrow_parse.vllm.inference_factory import InferenceFactory
  from sparrow_parse.extractors.vllm_extractor import VLLMExtractor

@@ -54,16 +56,28 @@ model_inference_instance = factory.get_inference_instance()

  input_data = [
      {
-         "image": "/data/bonds_table.png",
-         "text_input": "retrieve all data. return response in JSON format"
+         "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+         "text_input": "retrieve {"table": [{"description": "str", "latest_amount": 0, "previous_amount": 0}]}. return response in JSON format"
      }
  ]

  # Now you can run inference without knowing which implementation is used
- result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
- print("Inference Result:", result)
+ results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                    debug_dir="/data/",
+                                                    debug=True,
+                                                    mode=None)
+
+ for i, result in enumerate(results_array):
+     print(f"Result for page {i + 1}:", result)
+ print(f"Number of pages: {num_pages}")
  ```

+ Use `mode="static"` if you want to simulate LLM call, without executing LLM backend.
+
+ Method `run_inference` will return results and number of pages processed.
+
+ Note: GPU backend `katanaml/sparrow-qwen2-vl-7b` is private, to be able to run below command, you need to create your own backend on Hugging Face space using [code](https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse/sparrow_parse/vllm/infra/qwen2_vl_7b) from Sparrow Parse.
+
  ## PDF pre-processing

  ```
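The README text added above mentions `mode="static"` for simulating the LLM call. A minimal sketch of that path, reusing `extractor`, `model_inference_instance` and `input_data` from the README snippet (no Hugging Face Space is invoked; the backend returns the canned JSON defined in `ModelInference.get_simple_json()`, added later in this diff):

```
results_array, num_pages = extractor.run_inference(
    model_inference_instance,
    input_data,          # same PDF input as in the README snippet
    generic_query=False,
    debug_dir=None,
    debug=False,
    mode="static",       # simulate the LLM call; no GPU backend is executed
)
# results_array holds one canned JSON table; num_pages still reflects the real PDF page count
```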
{sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/README.md

@@ -15,6 +15,8 @@ pip install sparrow-parse
  ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra

  ```
+ # run locally: python -m sparrow_parse.extractors.vllm_extractor
+
  from sparrow_parse.vllm.inference_factory import InferenceFactory
  from sparrow_parse.extractors.vllm_extractor import VLLMExtractor

@@ -35,16 +37,28 @@ model_inference_instance = factory.get_inference_instance()

  input_data = [
      {
-         "image": "/data/bonds_table.png",
-         "text_input": "retrieve all data. return response in JSON format"
+         "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+         "text_input": "retrieve {"table": [{"description": "str", "latest_amount": 0, "previous_amount": 0}]}. return response in JSON format"
      }
  ]

  # Now you can run inference without knowing which implementation is used
- result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
- print("Inference Result:", result)
+ results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                    debug_dir="/data/",
+                                                    debug=True,
+                                                    mode=None)
+
+ for i, result in enumerate(results_array):
+     print(f"Result for page {i + 1}:", result)
+ print(f"Number of pages: {num_pages}")
  ```

+ Use `mode="static"` if you want to simulate LLM call, without executing LLM backend.
+
+ Method `run_inference` will return results and number of pages processed.
+
+ Note: GPU backend `katanaml/sparrow-qwen2-vl-7b` is private, to be able to run below command, you need to create your own backend on Hugging Face space using [code](https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse/sparrow_parse/vllm/infra/qwen2_vl_7b) from Sparrow Parse.
+
  ## PDF pre-processing

  ```
{sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/setup.py

@@ -8,7 +8,7 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:

  setup(
      name="sparrow-parse",
-     version="0.3.5",
+     version="0.3.7",
      author="Andrej Baranovskij",
      author_email="andrejus.baranovskis@gmail.com",
      description="Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.",
sparrow-parse-0.3.7/sparrow_parse/__init__.py

@@ -0,0 +1 @@
+ __version__ = '0.3.7'
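The new `__init__.py` exposes the release number at import time; a quick check (assuming the 0.3.7 sdist is installed):

```
import sparrow_parse

print(sparrow_parse.__version__)  # expected: 0.3.7
```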
sparrow-parse-0.3.7/sparrow_parse/extractors/vllm_extractor.py

@@ -0,0 +1,82 @@
+ from sparrow_parse.vllm.inference_factory import InferenceFactory
+ from sparrow_parse.helpers.pdf_optimizer import PDFOptimizer
+ from rich import print
+ import os
+ import shutil
+
+
+ class VLLMExtractor(object):
+     def __init__(self):
+         pass
+
+     def run_inference(self,
+                       model_inference_instance,
+                       input_data,
+                       generic_query=False,
+                       debug_dir=None,
+                       debug=False,
+                       mode=None):
+         if generic_query:
+             input_data[0]["text_input"] = "retrieve document data. return response in JSON format"
+
+         if debug:
+             print("Input Data:", input_data)
+
+         results_array = []
+
+         if self.is_pdf(input_data[0]["file_path"]):
+             pdf_optimizer = PDFOptimizer()
+             num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages(input_data[0]["file_path"],
+                                                                                  debug_dir,
+                                                                                  True)
+
+             input_data[0]["file_path"] = output_files
+
+             # Run inference on the page
+             results_array = model_inference_instance.inference(input_data, mode)
+
+             shutil.rmtree(temp_dir, ignore_errors=True)
+             return results_array, num_pages
+
+         input_data[0]["file_path"] = [input_data[0]["file_path"]]
+         results_array = model_inference_instance.inference(input_data)
+
+         return results_array, 1
+
+     def is_pdf(self, file_path):
+         return file_path.lower().endswith('.pdf')
+
+ if __name__ == "__main__":
+     # run locally: python -m sparrow_parse.extractors.vllm_extractor
+
+     extractor = VLLMExtractor()
+
+     # # export HF_TOKEN="hf_"
+     # config = {
+     #     "method": "huggingface",  # Could be 'huggingface' or 'local_gpu'
+     #     "hf_space": "katanaml/sparrow-qwen2-vl-7b",
+     #     "hf_token": os.getenv('HF_TOKEN'),
+     #     # Additional fields for local GPU inference
+     #     # "device": "cuda", "model_path": "model.pth"
+     # }
+     #
+     # # Use the factory to get the correct instance
+     # factory = InferenceFactory(config)
+     # model_inference_instance = factory.get_inference_instance()
+     #
+     # input_data = [
+     #     {
+     #         "file_path": "/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/oracle_10k_2014_q1_small.pdf",
+     #         "text_input": "retrieve {\"table\": [{\"description\": \"str\", \"latest_amount\": 0, \"previous_amount\": 0}]}. return response in JSON format"
+     #     }
+     # ]
+     #
+     # # Now you can run inference without knowing which implementation is used
+     # results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+     #                                                    debug_dir="/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/",
+     #                                                    debug=True,
+     #                                                    mode=None)
+     #
+     # for i, result in enumerate(results_array):
+     #     print(f"Result for page {i + 1}:", result)
+     # print(f"Number of pages: {num_pages}")
{sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/helpers/pdf_optimizer.py

@@ -40,17 +40,18 @@ class PDFOptimizer(object):
              return number_of_pages, output_files, temp_dir
          else:
              # Convert the PDF to images
-             images = convert_from_path(file_path, dpi=400)
+             images = convert_from_path(file_path, dpi=300)
+             base_name = os.path.splitext(os.path.basename(file_path))[0]

              # Save the images to the temporary directory
              for i, image in enumerate(images):
-                 output_filename = os.path.join(temp_dir, f'page_{i + 1}.jpg')
+                 output_filename = os.path.join(temp_dir, f'{base_name}_page_{i + 1}.jpg')
                  image.save(output_filename, 'JPEG')
                  output_files.append(output_filename)

                  if output_dir:
                      # Save each image to the debug folder
-                     debug_output_filename = os.path.join(output_dir, f'page_{i + 1}.jpg')
+                     debug_output_filename = os.path.join(output_dir, f'{base_name}_page_{i + 1}.jpg')
                      image.save(debug_output_filename, 'JPEG')

              # Return the number of pages, the list of file paths, and the temporary directory
@@ -60,13 +61,17 @@ class PDFOptimizer(object):

  if __name__ == "__main__":
      pdf_optimizer = PDFOptimizer()

-     # output_directory = "/Users/andrejb/Documents/work/bankstatement/output_pages"
+     # output_directory = "/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/"
      # # Ensure the output directory exists
      # os.makedirs(output_directory, exist_ok=True)
      #
      # # Split the optimized PDF into separate pages
-     # num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/Users/andrejb/Documents/work/bankstatement/statement.pdf",
+     # num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/oracle_10k_2014_q1_small.pdf",
      #                                                                      output_directory,
-     #                                                                      False)
+     #                                                                      True)
+     #
+     # print(f"Number of pages: {num_pages}")
+     # print(f"Output files: {output_files}")
+     # print(f"Temporary directory: {temp_dir}")
      #
      # shutil.rmtree(temp_dir, ignore_errors=True)
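The page images now carry the source file's base name, so several PDFs can share one debug directory without overwriting each other's pages. A small sketch of the naming scheme (the path is illustrative):

```
import os

file_path = "/data/oracle_10k_2014_q1_small.pdf"  # example input
base_name = os.path.splitext(os.path.basename(file_path))[0]

# Mirrors the f-string used in split_pdf_to_pages
print(f"{base_name}_page_1.jpg")  # -> oracle_10k_2014_q1_small_page_1.jpg
```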
sparrow-parse-0.3.7/sparrow_parse/vllm/huggingface_inference.py

@@ -0,0 +1,60 @@
+ from gradio_client import Client, handle_file
+ from sparrow_parse.vllm.inference_base import ModelInference
+ import json
+ import os
+ import ast
+
+
+ class HuggingFaceInference(ModelInference):
+     def __init__(self, hf_space, hf_token):
+         self.hf_space = hf_space
+         self.hf_token = hf_token
+
+
+     def process_response(self, output_text):
+         json_string = output_text
+
+         json_string = json_string.strip("[]'")
+         json_string = json_string.replace("```json\n", "").replace("\n```", "")
+         json_string = json_string.replace("'", "")
+
+         try:
+             formatted_json = json.loads(json_string)
+             return json.dumps(formatted_json, indent=2)
+         except json.JSONDecodeError as e:
+             print("Failed to parse JSON:", e)
+             return output_text
+
+
+     def inference(self, input_data, mode=None):
+         if mode == "static":
+             simple_json = self.get_simple_json()
+             return [simple_json]
+
+         client = Client(self.hf_space, hf_token=self.hf_token)
+
+         # Extract and prepare the absolute paths for all file paths in input_data
+         file_paths = [
+             os.path.abspath(file_path)
+             for data in input_data
+             for file_path in data["file_path"]
+         ]
+
+         # Validate file existence and prepare files for the Gradio client
+         image_files = [handle_file(path) for path in file_paths if os.path.exists(path)]
+
+         results = client.predict(
+             input_imgs=image_files,
+             text_input=input_data[0]["text_input"],  # Single shared text input for all images
+             api_name="/run_inference"  # Specify the Gradio API endpoint
+         )
+
+         # Convert the string into a Python list
+         parsed_results = ast.literal_eval(results)
+
+         results_array = []
+         for page_output in parsed_results:
+             page_result = self.process_response(page_output)
+             results_array.append(page_result)
+
+         return results_array
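`process_response` is a best-effort cleanup of each page's raw Space output before `json.loads`. A quick illustration with an invented model reply (the string below is made up for the example; no Space is needed just to exercise the cleanup):

```
from sparrow_parse.vllm.huggingface_inference import HuggingFaceInference

inference = HuggingFaceInference(hf_space=None, hf_token=None)

# Invented page output, fenced the way vision-language models often wrap JSON
raw_page_output = '```json\n{"table": [{"description": "Revenues", "latest_amount": 12453, "previous_amount": 11445}]}\n```'

# Fence is stripped, JSON parses, and the result is re-serialized with indent=2;
# on a parse failure the raw text would be returned unchanged
print(inference.process_response(raw_page_output))
```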
sparrow-parse-0.3.7/sparrow_parse/vllm/inference_base.py

@@ -0,0 +1,30 @@
+ from abc import ABC, abstractmethod
+ import json
+
+
+ class ModelInference(ABC):
+     @abstractmethod
+     def inference(self, input_data, mode=None):
+         """This method should be implemented by subclasses."""
+         pass
+
+     def get_simple_json(self):
+         # Define a simple data structure
+         data = {
+             "table": [
+                 {
+                     "description": "Revenues",
+                     "latest_amount": 12453,
+                     "previous_amount": 11445
+                 },
+                 {
+                     "description": "Operating expenses",
+                     "latest_amount": 9157,
+                     "previous_amount": 8822
+                 }
+             ]
+         }
+
+         # Convert the dictionary to a JSON string
+         json_data = json.dumps(data, indent=4)
+         return json_data
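`get_simple_json` gives every backend the same canned payload for `mode="static"`. A minimal subclass sketch (the class name and echo behavior are invented for illustration, not part of the package):

```
from sparrow_parse.vllm.inference_base import ModelInference


class EchoInference(ModelInference):
    """Toy backend: canned JSON in static mode, otherwise echoes the prompt."""

    def inference(self, input_data, mode=None):
        if mode == "static":
            # Same simulation path the Hugging Face backend uses
            return [self.get_simple_json()]
        return [data["text_input"] for data in input_data]
```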
{sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse/vllm/local_gpu_inference.py

@@ -8,7 +8,7 @@ class LocalGPUInference(ModelInference):
          self.device = device
          self.model.to(self.device)

-     def inference(self, input_data):
+     def inference(self, input_data, mode=None):
          self.model.eval()  # Set the model to evaluation mode
          with torch.no_grad():  # No need to calculate gradients
              input_tensor = torch.tensor(input_data).to(self.device)
{sparrow-parse-0.3.5 → sparrow-parse-0.3.7}/sparrow_parse.egg-info/PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sparrow-parse
- Version: 0.3.5
+ Version: 0.3.7
  Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
  Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
  Author: Andrej Baranovskij
@@ -34,6 +34,8 @@ pip install sparrow-parse
  ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra

  ```
+ # run locally: python -m sparrow_parse.extractors.vllm_extractor
+
  from sparrow_parse.vllm.inference_factory import InferenceFactory
  from sparrow_parse.extractors.vllm_extractor import VLLMExtractor

@@ -54,16 +56,28 @@ model_inference_instance = factory.get_inference_instance()

  input_data = [
      {
-         "image": "/data/bonds_table.png",
-         "text_input": "retrieve all data. return response in JSON format"
+         "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+         "text_input": "retrieve {"table": [{"description": "str", "latest_amount": 0, "previous_amount": 0}]}. return response in JSON format"
      }
  ]

  # Now you can run inference without knowing which implementation is used
- result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
- print("Inference Result:", result)
+ results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                    debug_dir="/data/",
+                                                    debug=True,
+                                                    mode=None)
+
+ for i, result in enumerate(results_array):
+     print(f"Result for page {i + 1}:", result)
+ print(f"Number of pages: {num_pages}")
  ```

+ Use `mode="static"` if you want to simulate LLM call, without executing LLM backend.
+
+ Method `run_inference` will return results and number of pages processed.
+
+ Note: GPU backend `katanaml/sparrow-qwen2-vl-7b` is private, to be able to run below command, you need to create your own backend on Hugging Face space using [code](https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse/sparrow_parse/vllm/infra/qwen2_vl_7b) from Sparrow Parse.
+
  ## PDF pre-processing

  ```
sparrow-parse-0.3.5/sparrow_parse/__init__.py

@@ -1 +0,0 @@
- __version__ = '0.3.5'
sparrow-parse-0.3.5/sparrow_parse/extractors/vllm_extractor.py

@@ -1,46 +0,0 @@
- from sparrow_parse.vllm.inference_factory import InferenceFactory
- from rich import print
- import os
-
-
- class VLLMExtractor(object):
-     def __init__(self):
-         pass
-
-     def run_inference(self, model_inference_instance, input_data, generic_query=False, debug=False):
-         if generic_query:
-             input_data[0]["text_input"] = "retrieve document data. return response in JSON format"
-
-         if debug:
-             print("Input Data:", input_data)
-
-         result = model_inference_instance.inference(input_data)
-
-         return result
-
- if __name__ == "__main__":
-     extractor = VLLMExtractor()
-
-     # export HF_TOKEN="hf_"
-     config = {
-         "method": "huggingface",  # Could be 'huggingface' or 'local_gpu'
-         "hf_space": "katanaml/sparrow-qwen2-vl-7b",
-         "hf_token": os.getenv('HF_TOKEN'),
-         # Additional fields for local GPU inference
-         # "device": "cuda", "model_path": "model.pth"
-     }
-
-     # Use the factory to get the correct instance
-     factory = InferenceFactory(config)
-     model_inference_instance = factory.get_inference_instance()
-
-     input_data = [
-         {
-             "image": "/Users/andrejb/Documents/work/epik/bankstatement/bonds_table.png",
-             "text_input": "retrieve financial instruments data. return response in JSON format"
-         }
-     ]
-
-     # Now you can run inference without knowing which implementation is used
-     result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
-     print("Inference Result:", result)
sparrow-parse-0.3.5/sparrow_parse/vllm/huggingface_inference.py

@@ -1,36 +0,0 @@
- from gradio_client import Client, handle_file
- from sparrow_parse.vllm.inference_base import ModelInference
- import json
-
-
- class HuggingFaceInference(ModelInference):
-     def __init__(self, hf_space, hf_token):
-         self.hf_space = hf_space
-         self.hf_token = hf_token
-
-
-     def process_response(self, output_text):
-         json_string = output_text
-
-         json_string = json_string.strip("[]'")
-         json_string = json_string.replace("```json\n", "").replace("\n```", "")
-         json_string = json_string.replace("'", "")
-
-         try:
-             formatted_json = json.loads(json_string)
-             return json.dumps(formatted_json, indent=2)
-         except json.JSONDecodeError as e:
-             print("Failed to parse JSON:", e)
-             return output_text
-
-
-     def inference(self, input_data):
-         client = Client(self.hf_space, hf_token=self.hf_token)
-
-         result = client.predict(
-             image=handle_file(input_data[0]["image"]),
-             text_input=input_data[0]["text_input"],
-             api_name="/run_inference"
-         )
-
-         return self.process_response(result)
sparrow-parse-0.3.5/sparrow_parse/vllm/inference_base.py

@@ -1,7 +0,0 @@
- from abc import ABC, abstractmethod
-
- class ModelInference(ABC):
-     @abstractmethod
-     def inference(self, input_data):
-         """This method should be implemented by subclasses."""
-         pass
The remaining files in the list above (items 15-27) are unchanged between 0.3.5 and 0.3.7.