sparrow-parse 0.3.5.tar.gz → 0.3.6.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (26)
  1. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/PKG-INFO +15 -5
  2. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/README.md +14 -4
  3. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/setup.py +1 -1
  4. sparrow-parse-0.3.6/sparrow_parse/__init__.py +1 -0
  5. sparrow-parse-0.3.6/sparrow_parse/extractors/vllm_extractor.py +87 -0
  6. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/helpers/pdf_optimizer.py +11 -6
  7. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/vllm/huggingface_inference.py +6 -2
  8. sparrow-parse-0.3.6/sparrow_parse/vllm/inference_base.py +30 -0
  9. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/vllm/local_gpu_inference.py +1 -1
  10. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse.egg-info/PKG-INFO +15 -5
  11. sparrow-parse-0.3.5/sparrow_parse/__init__.py +0 -1
  12. sparrow-parse-0.3.5/sparrow_parse/extractors/vllm_extractor.py +0 -46
  13. sparrow-parse-0.3.5/sparrow_parse/vllm/inference_base.py +0 -7
  14. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/setup.cfg +0 -0
  15. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/__main__.py +0 -0
  16. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/extractors/__init__.py +0 -0
  17. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/helpers/__init__.py +0 -0
  18. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/processors/__init__.py +0 -0
  19. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/processors/table_structure_processor.py +0 -0
  20. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/vllm/__init__.py +0 -0
  21. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse/vllm/inference_factory.py +0 -0
  22. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse.egg-info/SOURCES.txt +0 -0
  23. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse.egg-info/dependency_links.txt +0 -0
  24. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse.egg-info/entry_points.txt +0 -0
  25. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse.egg-info/requires.txt +0 -0
  26. {sparrow-parse-0.3.5 → sparrow-parse-0.3.6}/sparrow_parse.egg-info/top_level.txt +0 -0
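
The headline change in 0.3.6 is the extractor API: the input key `image` becomes `file_path` (PDF inputs are now supported), `run_inference()` grows `debug_dir` and `mode` parameters, and it returns a `(results_array, num_pages)` tuple instead of a single result. A hedged before/after sketch of calling code (paths are illustrative, not from the package):

```
# 0.3.5: one image in, one result out
result = extractor.run_inference(
    model_inference_instance,
    [{"image": "/data/bonds_table.png",
      "text_input": "retrieve all data. return response in JSON format"}],
    generic_query=False, debug=True)

# 0.3.6: a PDF is split into pages, one result per page plus a page count
results_array, num_pages = extractor.run_inference(
    model_inference_instance,
    [{"file_path": "/data/report.pdf",  # illustrative path
      "text_input": "retrieve all data. return response in JSON format"}],
    generic_query=False, debug_dir=None, debug=True, mode="static")
```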
--- sparrow-parse-0.3.5/PKG-INFO
+++ sparrow-parse-0.3.6/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sparrow-parse
-Version: 0.3.5
+Version: 0.3.6
 Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
 Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
 Author: Andrej Baranovskij
@@ -34,6 +34,8 @@ pip install sparrow-parse
 ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra
 
 ```
+# run locally: python -m sparrow_parse.extractors.vllm_extractor
+
 from sparrow_parse.vllm.inference_factory import InferenceFactory
 from sparrow_parse.extractors.vllm_extractor import VLLMExtractor
 
@@ -54,16 +56,24 @@ model_inference_instance = factory.get_inference_instance()
 
 input_data = [
     {
-        "image": "/data/bonds_table.png",
-        "text_input": "retrieve all data. return response in JSON format"
+        "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+        "text_input": "retrieve {\"table\": [{\"description\": \"str\", \"latest_amount\": 0, \"previous_amount\": 0}]}. return response in JSON format"
     }
 ]
 
 # Now you can run inference without knowing which implementation is used
-result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
-print("Inference Result:", result)
+results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                   debug_dir="/data/",
+                                                   debug=True,
+                                                   mode="static")
+
+for i, result in enumerate(results_array):
+    print(f"Result for page {i + 1}:", result)
+print(f"Number of pages: {num_pages}")
 ```
 
+Use `mode="static"` to simulate an LLM call without executing the LLM backend.
+
 ## PDF pre-processing
 
 ```
--- sparrow-parse-0.3.5/README.md
+++ sparrow-parse-0.3.6/README.md
@@ -15,6 +15,8 @@ pip install sparrow-parse
 ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra
 
 ```
+# run locally: python -m sparrow_parse.extractors.vllm_extractor
+
 from sparrow_parse.vllm.inference_factory import InferenceFactory
 from sparrow_parse.extractors.vllm_extractor import VLLMExtractor
 
@@ -35,16 +37,24 @@ model_inference_instance = factory.get_inference_instance()
 
 input_data = [
     {
-        "image": "/data/bonds_table.png",
-        "text_input": "retrieve all data. return response in JSON format"
+        "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+        "text_input": "retrieve {\"table\": [{\"description\": \"str\", \"latest_amount\": 0, \"previous_amount\": 0}]}. return response in JSON format"
     }
 ]
 
 # Now you can run inference without knowing which implementation is used
-result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
-print("Inference Result:", result)
+results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                   debug_dir="/data/",
+                                                   debug=True,
+                                                   mode="static")
+
+for i, result in enumerate(results_array):
+    print(f"Result for page {i + 1}:", result)
+print(f"Number of pages: {num_pages}")
 ```
 
+Use `mode="static"` to simulate an LLM call without executing the LLM backend.
+
 ## PDF pre-processing
 
 ```
--- sparrow-parse-0.3.5/setup.py
+++ sparrow-parse-0.3.6/setup.py
@@ -8,7 +8,7 @@ with open("requirements.txt", "r", encoding="utf-8") as fh:
 
 setup(
     name="sparrow-parse",
-    version="0.3.5",
+    version="0.3.6",
     author="Andrej Baranovskij",
     author_email="andrejus.baranovskis@gmail.com",
     description="Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.",
--- /dev/null
+++ sparrow-parse-0.3.6/sparrow_parse/__init__.py
@@ -0,0 +1 @@
+__version__ = '0.3.6'
--- /dev/null
+++ sparrow-parse-0.3.6/sparrow_parse/extractors/vllm_extractor.py
@@ -0,0 +1,87 @@
+from sparrow_parse.vllm.inference_factory import InferenceFactory
+from sparrow_parse.helpers.pdf_optimizer import PDFOptimizer
+from rich import print
+import os
+import shutil
+
+
+class VLLMExtractor(object):
+    def __init__(self):
+        pass
+
+    def run_inference(self,
+                      model_inference_instance,
+                      input_data,
+                      generic_query=False,
+                      debug_dir=None,
+                      debug=False,
+                      mode=None):
+        if generic_query:
+            input_data[0]["text_input"] = "retrieve document data. return response in JSON format"
+
+        if debug:
+            print("Input Data:", input_data)
+
+        results_array = []
+
+        if self.is_pdf(input_data[0]["file_path"]):
+            pdf_optimizer = PDFOptimizer()
+            num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages(input_data[0]["file_path"],
+                                                                                 debug_dir,
+                                                                                 True)
+
+            # Run inference on each page
+            for page_num, output_file in enumerate(output_files):
+                input_data[0]["file_path"] = output_file
+                if debug:
+                    print(f"Running inference on page {page_num + 1}...")
+
+                # Run inference on the page
+                result = model_inference_instance.inference(input_data, mode)
+                results_array.append(result)
+
+            shutil.rmtree(temp_dir, ignore_errors=True)
+            return results_array, num_pages
+
+        result = model_inference_instance.inference(input_data)
+        results_array.append(result)
+
+        return results_array, 1
+
+    def is_pdf(self, file_path):
+        return file_path.lower().endswith('.pdf')
+
+if __name__ == "__main__":
+    # run locally: python -m sparrow_parse.extractors.vllm_extractor
+
+    extractor = VLLMExtractor()
+
+    # # export HF_TOKEN="hf_"
+    # config = {
+    #     "method": "huggingface",  # Could be 'huggingface' or 'local_gpu'
+    #     "hf_space": "katanaml/sparrow-qwen2-vl-7b",
+    #     "hf_token": os.getenv('HF_TOKEN'),
+    #     # Additional fields for local GPU inference
+    #     # "device": "cuda", "model_path": "model.pth"
+    # }
+    #
+    # # Use the factory to get the correct instance
+    # factory = InferenceFactory(config)
+    # model_inference_instance = factory.get_inference_instance()
+    #
+    # input_data = [
+    #     {
+    #         "file_path": "/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/oracle_10k_2014_q1_small.pdf",
+    #         "text_input": "retrieve {\"table\": [{\"description\": \"str\", \"latest_amount\": 0, \"previous_amount\": 0}]}. return response in JSON format"
+    #     }
+    # ]
+    #
+    # # Now you can run inference without knowing which implementation is used
+    # results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+    #                                                    debug_dir="/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/",
+    #                                                    debug=True,
+    #                                                    mode="static")
+    #
+    # for i, result in enumerate(results_array):
+    #     print(f"Result for page {i + 1}:", result)
+    # print(f"Number of pages: {num_pages}")
--- sparrow-parse-0.3.5/sparrow_parse/helpers/pdf_optimizer.py
+++ sparrow-parse-0.3.6/sparrow_parse/helpers/pdf_optimizer.py
@@ -40,17 +40,18 @@ class PDFOptimizer(object):
             return number_of_pages, output_files, temp_dir
         else:
             # Convert the PDF to images
-            images = convert_from_path(file_path, dpi=400)
+            images = convert_from_path(file_path, dpi=300)
+            base_name = os.path.splitext(os.path.basename(file_path))[0]
 
             # Save the images to the temporary directory
             for i, image in enumerate(images):
-                output_filename = os.path.join(temp_dir, f'page_{i + 1}.jpg')
+                output_filename = os.path.join(temp_dir, f'{base_name}_page_{i + 1}.jpg')
                 image.save(output_filename, 'JPEG')
                 output_files.append(output_filename)
 
                 if output_dir:
                     # Save each image to the debug folder
-                    debug_output_filename = os.path.join(output_dir, f'page_{i + 1}.jpg')
+                    debug_output_filename = os.path.join(output_dir, f'{base_name}_page_{i + 1}.jpg')
                     image.save(debug_output_filename, 'JPEG')
 
             # Return the number of pages, the list of file paths, and the temporary directory
@@ -60,13 +61,17 @@ class PDFOptimizer(object):
 if __name__ == "__main__":
     pdf_optimizer = PDFOptimizer()
 
-    # output_directory = "/Users/andrejb/Documents/work/bankstatement/output_pages"
+    # output_directory = "/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/"
     # # Ensure the output directory exists
     # os.makedirs(output_directory, exist_ok=True)
     #
     # # Split the optimized PDF into separate pages
-    # num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/Users/andrejb/Documents/work/bankstatement/statement.pdf",
+    # num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/Users/andrejb/infra/shared/katana-git/sparrow/sparrow-ml/llm/data/oracle_10k_2014_q1_small.pdf",
     #                                                                      output_directory,
-    #                                                                      False)
+    #                                                                      True)
+    #
+    # print(f"Number of pages: {num_pages}")
+    # print(f"Output files: {output_files}")
+    # print(f"Temporary directory: {temp_dir}")
     #
     # shutil.rmtree(temp_dir, ignore_errors=True)
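
A short usage sketch based on the commented-out `__main__` above; the paths are placeholders, and parameter names beyond what the diff shows are not guaranteed. The third argument, passed as `True` here, selects JPEG conversion (the new `{base_name}_page_{n}.jpg` naming at 300 DPI) rather than one-page PDFs:

```
import shutil
from sparrow_parse.helpers.pdf_optimizer import PDFOptimizer

pdf_optimizer = PDFOptimizer()
num_pages, output_files, temp_dir = pdf_optimizer.split_pdf_to_pages("/data/report.pdf",  # placeholder
                                                                     "/data/debug/",      # optional debug copies
                                                                     True)
print(num_pages)        # page count
print(output_files[0])  # e.g. .../report_page_1.jpg under temp_dir
shutil.rmtree(temp_dir, ignore_errors=True)  # caller owns the temp dir cleanup
```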
--- sparrow-parse-0.3.5/sparrow_parse/vllm/huggingface_inference.py
+++ sparrow-parse-0.3.6/sparrow_parse/vllm/huggingface_inference.py
@@ -24,11 +24,15 @@ class HuggingFaceInference(ModelInference):
         return output_text
 
 
-    def inference(self, input_data):
+    def inference(self, input_data, mode=None):
+        if mode == "static":
+            simple_json = self.get_simple_json()
+            return simple_json
+
         client = Client(self.hf_space, hf_token=self.hf_token)
 
         result = client.predict(
-            image=handle_file(input_data[0]["image"]),
+            image=handle_file(input_data[0]["file_path"]),
             text_input=input_data[0]["text_input"],
             api_name="/run_inference"
         )
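
The practical effect of the new `mode` parameter: a static call returns the canned JSON string from the base class before the Gradio `Client` is ever constructed. A minimal sketch, assuming `model_inference_instance` was obtained from `InferenceFactory` as in the README:

```
import json

response = model_inference_instance.inference(
    [{"file_path": "/data/page_1.jpg",  # placeholder; not read in static mode
      "text_input": "retrieve all data. return response in JSON format"}],
    mode="static")

data = json.loads(response)  # get_simple_json() returns a JSON string, not a dict
print(data["table"][0]["description"])  # "Revenues"
```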
--- /dev/null
+++ sparrow-parse-0.3.6/sparrow_parse/vllm/inference_base.py
@@ -0,0 +1,30 @@
+from abc import ABC, abstractmethod
+import json
+
+
+class ModelInference(ABC):
+    @abstractmethod
+    def inference(self, input_data, mode=None):
+        """This method should be implemented by subclasses."""
+        pass
+
+    def get_simple_json(self):
+        # Define a simple data structure
+        data = {
+            "table": [
+                {
+                    "description": "Revenues",
+                    "latest_amount": 12453,
+                    "previous_amount": 11445
+                },
+                {
+                    "description": "Operating expenses",
+                    "latest_amount": 9157,
+                    "previous_amount": 8822
+                }
+            ]
+        }
+
+        # Convert the dictionary to a JSON string
+        json_data = json.dumps(data, indent=4)
+        return json_data
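
Any backend must now accept the extra `mode` argument. A hypothetical subclass, included only to illustrate the updated contract:

```
from sparrow_parse.vllm.inference_base import ModelInference


class EchoInference(ModelInference):
    """Hypothetical backend illustrating the 0.3.6 contract."""

    def inference(self, input_data, mode=None):
        if mode == "static":
            # Inherited helper returns the canned two-row "table" JSON string
            return self.get_simple_json()
        # A real backend would run a model here; this stub echoes the query
        return input_data[0]["text_input"]
```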
--- sparrow-parse-0.3.5/sparrow_parse/vllm/local_gpu_inference.py
+++ sparrow-parse-0.3.6/sparrow_parse/vllm/local_gpu_inference.py
@@ -8,7 +8,7 @@ class LocalGPUInference(ModelInference):
         self.device = device
         self.model.to(self.device)
 
-    def inference(self, input_data):
+    def inference(self, input_data, mode=None):
         self.model.eval()  # Set the model to evaluation mode
         with torch.no_grad():  # No need to calculate gradients
             input_tensor = torch.tensor(input_data).to(self.device)
--- sparrow-parse-0.3.5/sparrow_parse.egg-info/PKG-INFO
+++ sparrow-parse-0.3.6/sparrow_parse.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: sparrow-parse
-Version: 0.3.5
+Version: 0.3.6
 Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
 Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
 Author: Andrej Baranovskij
@@ -34,6 +34,8 @@ pip install sparrow-parse
 ### Sparrow Parse VL (vision-language model) extractor with Hugging Face GPU infra
 
 ```
+# run locally: python -m sparrow_parse.extractors.vllm_extractor
+
 from sparrow_parse.vllm.inference_factory import InferenceFactory
 from sparrow_parse.extractors.vllm_extractor import VLLMExtractor
 
@@ -54,16 +56,24 @@ model_inference_instance = factory.get_inference_instance()
 
 input_data = [
     {
-        "image": "/data/bonds_table.png",
-        "text_input": "retrieve all data. return response in JSON format"
+        "file_path": "/data/oracle_10k_2014_q1_small.pdf",
+        "text_input": "retrieve {\"table\": [{\"description\": \"str\", \"latest_amount\": 0, \"previous_amount\": 0}]}. return response in JSON format"
     }
 ]
 
 # Now you can run inference without knowing which implementation is used
-result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
-print("Inference Result:", result)
+results_array, num_pages = extractor.run_inference(model_inference_instance, input_data, generic_query=False,
+                                                   debug_dir="/data/",
+                                                   debug=True,
+                                                   mode="static")
+
+for i, result in enumerate(results_array):
+    print(f"Result for page {i + 1}:", result)
+print(f"Number of pages: {num_pages}")
 ```
 
+Use `mode="static"` to simulate an LLM call without executing the LLM backend.
+
 ## PDF pre-processing
 
 ```
--- sparrow-parse-0.3.5/sparrow_parse/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-__version__ = '0.3.5'
--- sparrow-parse-0.3.5/sparrow_parse/extractors/vllm_extractor.py
+++ /dev/null
@@ -1,46 +0,0 @@
-from sparrow_parse.vllm.inference_factory import InferenceFactory
-from rich import print
-import os
-
-
-class VLLMExtractor(object):
-    def __init__(self):
-        pass
-
-    def run_inference(self, model_inference_instance, input_data, generic_query=False, debug=False):
-        if generic_query:
-            input_data[0]["text_input"] = "retrieve document data. return response in JSON format"
-
-        if debug:
-            print("Input Data:", input_data)
-
-        result = model_inference_instance.inference(input_data)
-
-        return result
-
-if __name__ == "__main__":
-    extractor = VLLMExtractor()
-
-    # export HF_TOKEN="hf_"
-    config = {
-        "method": "huggingface",  # Could be 'huggingface' or 'local_gpu'
-        "hf_space": "katanaml/sparrow-qwen2-vl-7b",
-        "hf_token": os.getenv('HF_TOKEN'),
-        # Additional fields for local GPU inference
-        # "device": "cuda", "model_path": "model.pth"
-    }
-
-    # Use the factory to get the correct instance
-    factory = InferenceFactory(config)
-    model_inference_instance = factory.get_inference_instance()
-
-    input_data = [
-        {
-            "image": "/Users/andrejb/Documents/work/epik/bankstatement/bonds_table.png",
-            "text_input": "retrieve financial instruments data. return response in JSON format"
-        }
-    ]
-
-    # Now you can run inference without knowing which implementation is used
-    result = extractor.run_inference(model_inference_instance, input_data, generic_query=False, debug=True)
-    print("Inference Result:", result)
--- sparrow-parse-0.3.5/sparrow_parse/vllm/inference_base.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from abc import ABC, abstractmethod
-
-class ModelInference(ABC):
-    @abstractmethod
-    def inference(self, input_data):
-        """This method should be implemented by subclasses."""
-        pass