sparrow-parse 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl

This diff reflects the changes between publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
sparrow_parse/__init__.py CHANGED
@@ -1 +1 @@
- __version__ = '0.4.3'
+ __version__ = '0.4.4'
sparrow_parse/extractors/vllm_extractor.py CHANGED
@@ -7,7 +7,6 @@ from rich import print
  import os
  import tempfile
  import shutil
- from typing import Any, Dict, List, Union


  class VLLMExtractor(object):
@@ -156,7 +155,7 @@ if __name__ == "__main__":
  # # export HF_TOKEN="hf_"
  # config = {
  # "method": "mlx", # Could be 'huggingface', 'mlx' or 'local_gpu'
- # "model_name": "mlx-community/Qwen2-VL-72B-Instruct-4bit",
+ # "model_name": "mlx-community/Qwen2-VL-7B-Instruct-8bit",
  # # "hf_space": "katanaml/sparrow-qwen2-vl-7b",
  # # "hf_token": os.getenv('HF_TOKEN'),
  # # Additional fields for local GPU inference
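The commented-out example above switches the MLX model from the 72B 4-bit build to the lighter Qwen2-VL-7B-Instruct-8bit build. A minimal sketch of such a configuration follows; only the config keys come from the snippet above, while the InferenceFactory entry point is an assumption based on this package's module layout and is not shown in this diff.

    # Sketch only: config keys mirror the commented example above; the
    # InferenceFactory / get_inference_instance() call shape is assumed,
    # not taken from this diff.
    from sparrow_parse.vllm.inference_factory import InferenceFactory

    config = {
        "method": "mlx",  # could be 'huggingface', 'mlx' or 'local_gpu'
        "model_name": "mlx-community/Qwen2-VL-7B-Instruct-8bit",
    }

    factory = InferenceFactory(config)                            # assumed constructor
    model_inference_instance = factory.get_inference_instance()   # assumed accessor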
sparrow_parse/vllm/mlx_inference.py CHANGED
@@ -19,18 +19,43 @@ class MLXInference(ModelInference):
          :param model_name: Name of the model to load.
          """
          self.model_name = model_name
+         self.model = None
+         self.processor = None
          print(f"MLXInference initialized with model: {model_name}")


-     @staticmethod
-     def _load_model_and_processor(model_name):
+     def __del__(self):
          """
-         Load the model and processor for inference.
+         Destructor to clean up resources when the object is deleted.
+         """
+         self.unload_model()
+
+
+     def unload_model(self):
+         """
+         Unload the model and release resources.
+         """
+         if self.model:
+             del self.model
+             self.model = None
+         if self.processor:
+             del self.processor
+             self.processor = None
+         print(f"Model {self.model_name} and its resources have been unloaded.")

+
+     def _load_model_and_processor(self, model_name):
+         """
+         Load the model and processor for inference.
          :param model_name: Name of the model to load.
          :return: Tuple containing the loaded model and processor.
          """
-         return load(model_name)
+         print(f"Loading model and processor for: {model_name}...")
+         model, processor = load(model_name)
+         self.model = model  # Store model instance
+         self.processor = processor  # Store processor instance
+         print(f"Model and processor for '{model_name}' loaded successfully.")
+         return model, processor


      def process_response(self, output_text):
@@ -79,48 +104,54 @@ class MLXInference(ModelInference):
      def inference(self, input_data, mode=None):
          """
          Perform inference on input data using the specified model.
-
          :param input_data: A list of dictionaries containing image file paths and text inputs.
          :param mode: Optional mode for inference ("static" for simple JSON output).
          :return: List of processed model responses.
          """
-         if mode == "static":
-             return [self.get_simple_json()]
-
-         # Load the model and processor
-         model, processor = self._load_model_and_processor(self.model_name)
-         config = model.config
-
-         # Prepare absolute file paths
-         file_paths = self._extract_file_paths(input_data)
-
-         results = []
-         for file_path in file_paths:
-             image, width, height = self.load_image_data(file_path)
-
-             # Prepare messages for the chat model
-             messages = [
-                 {"role": "system", "content": "You are an expert at extracting structured text from image documents."},
-                 {"role": "user", "content": input_data[0]["text_input"]},
-             ]
-
-             # Generate and process response
-             prompt = apply_chat_template(processor, config, messages) # Assuming defined
-             response = generate(
-                 model,
-                 processor,
-                 image,
-                 prompt,
-                 resize_shape=(width, height),
-                 max_tokens=4000,
-                 temperature=0.0,
-                 verbose=False
-             )
-             results.append(self.process_response(response))
-
-             print("Inference completed successfully for: ", file_path)
+         try:
+             if mode == "static":
+                 return [self.get_simple_json()]
+
+             # Load the model and processor
+             model, processor = self._load_model_and_processor(self.model_name)
+             config = model.config
+
+             # Prepare absolute file paths
+             file_paths = self._extract_file_paths(input_data)
+
+             results = []
+             for file_path in file_paths:
+                 image, width, height = self.load_image_data(file_path)
+
+                 # Prepare messages for the chat model
+                 messages = [
+                     {"role": "system",
+                      "content": "You are an expert at extracting structured text from image documents."},
+                     {"role": "user", "content": input_data[0]["text_input"]},
+                 ]
+
+                 # Generate and process response
+                 prompt = apply_chat_template(processor, config, messages)
+                 response = generate(
+                     model,
+                     processor,
+                     image,
+                     prompt,
+                     resize_shape=(width, height),
+                     max_tokens=4000,
+                     temperature=0.0,
+                     verbose=False
+                 )
+                 results.append(self.process_response(response))
+
+                 print("Inference completed successfully for: ", file_path)
+
+             return results
+
+         finally:
+             # Always unload the model after inference
+             self.unload_model()

-         return results

      @staticmethod
      def _extract_file_paths(input_data):
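Taken together, these changes move model ownership into the MLXInference instance: _load_model_and_processor() now caches the model and processor on self, inference() always releases them in a finally block, and __del__() falls back to unload_model() when the object is garbage-collected. A minimal usage sketch of that lifecycle follows; the "file_path" input key, the sample prompt, and the file name are illustrative assumptions, not taken from this diff.

    # Lifecycle sketch for 0.4.4; input keys and file name are assumptions.
    from sparrow_parse.vllm.mlx_inference import MLXInference

    inference = MLXInference("mlx-community/Qwen2-VL-7B-Instruct-8bit")

    input_data = [{
        "file_path": "invoice.png",  # assumed key consumed by _extract_file_paths()
        "text_input": "retrieve invoice_number and total. return response in JSON format",
    }]

    # inference() loads the model, generates a response per file, and always
    # calls unload_model() in its finally block, so weights are released even
    # if generation raises.
    results = inference.inference(input_data)
    print(results)

    # Explicit cleanup is also available; __del__() calls it as a fallback.
    inference.unload_model()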
sparrow_parse-0.4.3.dist-info/METADATA → sparrow_parse-0.4.4.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: sparrow-parse
- Version: 0.4.3
+ Version: 0.4.4
  Summary: Sparrow Parse is a Python package (part of Sparrow) for parsing and extracting information from documents.
  Home-page: https://github.com/katanaml/sparrow/tree/main/sparrow-data/parse
  Author: Andrej Baranovskij
sparrow_parse-0.4.3.dist-info/RECORD → sparrow_parse-0.4.4.dist-info/RECORD CHANGED
@@ -1,7 +1,7 @@
- sparrow_parse/__init__.py,sha256=udnlByVnFcZDwWir50pEbTU0bIwgBrpNtAiVExFEzu0,21
+ sparrow_parse/__init__.py,sha256=jP9l7AhBCN2A-6tezbTIihxoMTDna4SLTYhvVxwbdNM,21
  sparrow_parse/__main__.py,sha256=Xs1bpJV0n08KWOoQE34FBYn6EBXZA9HIYJKrE4ZdG78,153
  sparrow_parse/extractors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- sparrow_parse/extractors/vllm_extractor.py,sha256=ybWpRpDH0YHoYpHkjIJtm7DQoHJBKNsirK2YIAlMvGo,7863
+ sparrow_parse/extractors/vllm_extractor.py,sha256=PDLgLlKiq3Bv-UOQTzX3AgxNOLcEU2EniGAXLjMC30U,7820
  sparrow_parse/helpers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  sparrow_parse/helpers/pdf_optimizer.py,sha256=GIqQYWtixFeZGCRFXL0lQfQByapCDuQzzRHAkzcPwLE,3302
  sparrow_parse/processors/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -11,9 +11,9 @@ sparrow_parse/vllm/huggingface_inference.py,sha256=EJnG6PesGKMc_0qGPN8ufE6pSnhAg
  sparrow_parse/vllm/inference_base.py,sha256=4mwGoAY63MB4cHZpV0czTkJWEzimmiTzqqzKmLNzgjw,820
  sparrow_parse/vllm/inference_factory.py,sha256=FTM65O-dW2WZchHOrNN7_Q3-FlVoAc65iSptuuUuClM,1166
  sparrow_parse/vllm/local_gpu_inference.py,sha256=aHoJTejb5xrXjWDIGu5RBQWEyRCOBCB04sMvO2Wyvg8,628
- sparrow_parse/vllm/mlx_inference.py,sha256=cx-PLXf1t8ro50YALddj70FiR7s0gk_Ddp-I9XlPQQU,4788
- sparrow_parse-0.4.3.dist-info/METADATA,sha256=W7zeOHa09rgn-58aIdTkNOSqBLgpziDF7sZ_059jaoo,6432
- sparrow_parse-0.4.3.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
- sparrow_parse-0.4.3.dist-info/entry_points.txt,sha256=8CrvTVTTcz1YuZ8aRCYNOH15ZOAaYLlcbYX3t28HwJY,54
- sparrow_parse-0.4.3.dist-info/top_level.txt,sha256=n6b-WtT91zKLyCPZTP7wvne8v_yvIahcsz-4sX8I0rY,14
- sparrow_parse-0.4.3.dist-info/RECORD,,
+ sparrow_parse/vllm/mlx_inference.py,sha256=c6-s493jLXE3DfYnwsybiqgk3GU9GEaWt3CrfqLSWKQ,5872
+ sparrow_parse-0.4.4.dist-info/METADATA,sha256=x_jaR76FUv5-kR9R5YR9So3OZbWj_rG8hjFZ07lZuto,6432
+ sparrow_parse-0.4.4.dist-info/WHEEL,sha256=tZoeGjtWxWRfdplE7E3d45VPlLNQnvbKiYnx7gwAy8A,92
+ sparrow_parse-0.4.4.dist-info/entry_points.txt,sha256=8CrvTVTTcz1YuZ8aRCYNOH15ZOAaYLlcbYX3t28HwJY,54
+ sparrow_parse-0.4.4.dist-info/top_level.txt,sha256=n6b-WtT91zKLyCPZTP7wvne8v_yvIahcsz-4sX8I0rY,14
+ sparrow_parse-0.4.4.dist-info/RECORD,,