parallex-0.3.4-py3-none-any.whl → parallex-0.5.0-py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
parallex/ai/open_ai_client.py CHANGED
@@ -10,14 +10,19 @@ from parallex.utils.logger import logger
 
 # Exceptions for missing keys, etc
 class OpenAIClient:
-    def __init__(self, model: str, remote_file_handler: RemoteFileHandler):
-        self.model = model
+    def __init__(
+        self,
+        remote_file_handler: RemoteFileHandler,
+        azure_endpoint_env_name: str,
+        azure_api_key_env_name: str,
+        azure_api_version_env_name: str,
+    ):
         self.file_handler = remote_file_handler
 
         self._client = AsyncAzureOpenAI(
-            azure_endpoint=os.getenv("AZURE_API_BASE"),
-            api_key=os.getenv("AZURE_API_KEY"),
-            api_version=os.getenv("AZURE_API_VERSION"),
+            azure_endpoint=os.getenv(azure_endpoint_env_name),
+            api_key=os.getenv(azure_api_key_env_name),
+            api_version=os.getenv(azure_api_version_env_name),
         )
 
     async def upload(self, file_path: str) -> FileObject:
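With this change the Azure connection settings are no longer hard-coded to AZURE_API_BASE, AZURE_API_KEY and AZURE_API_VERSION; the caller now passes the names of the environment variables the client should read. A minimal sketch of constructing the client under the new signature (the variable names and values below are placeholders, and RemoteFileHandler is assumed to take no constructor arguments):

import os

from parallex.ai.open_ai_client import OpenAIClient
from parallex.file_management.remote_file_handler import RemoteFileHandler

# Placeholder environment variables; any names work as long as the
# variables are set before the client is created.
os.environ["MY_AZURE_ENDPOINT"] = "https://example.openai.azure.com/"
os.environ["MY_AZURE_KEY"] = "<api-key>"
os.environ["MY_AZURE_VERSION"] = "<api-version>"

client = OpenAIClient(
    remote_file_handler=RemoteFileHandler(),
    azure_endpoint_env_name="MY_AZURE_ENDPOINT",
    azure_api_key_env_name="MY_AZURE_KEY",
    azure_api_version_env_name="MY_AZURE_VERSION",
)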
parallex/ai/output_processor.py CHANGED
@@ -1,5 +1,7 @@
 import json
-from typing import TypeVar, Callable
+from typing import TypeVar, Callable, Optional
+
+from pydantic import BaseModel
 
 from parallex.ai.open_ai_client import OpenAIClient
 from parallex.models.page_response import PageResponse
@@ -8,11 +10,12 @@ from parallex.utils.constants import CUSTOM_ID_DELINEATOR
 
 
 async def process_images_output(
-    client: OpenAIClient, output_file_id: str
+    client: OpenAIClient, output_file_id: str, model: Optional[type[BaseModel]] = None
 ) -> list[PageResponse]:
     return await _process_output(
         client,
         output_file_id,
+        model,
         lambda content, identifier: PageResponse(
             output_content=content, page_number=int(identifier)
         ),
@@ -20,12 +23,13 @@ async def process_images_output(
 
 
 async def process_prompts_output(
-    client: OpenAIClient, output_file_id: str
+    client: OpenAIClient, output_file_id: str, model: Optional[type[BaseModel]] = None
 ) -> list[PromptResponse]:
     """Gets content from completed Batch to create PromptResponse with LLM answers to given prompts"""
     return await _process_output(
         client,
         output_file_id,
+        model,
         lambda content, identifier: PromptResponse(
             output_content=content, prompt_index=int(identifier)
         ),
@@ -38,6 +42,7 @@ ResponseType = TypeVar("ResponseType")
 async def _process_output(
     client: OpenAIClient,
     output_file_id: str,
+    model: Optional[type[BaseModel]],
     response_builder: Callable[[str, str], ResponseType],
 ) -> list[ResponseType]:
     file_response = await client.retrieve_file(output_file_id)
@@ -48,9 +53,10 @@ async def _process_output(
         json_response = json.loads(raw_response)
         custom_id = json_response["custom_id"]
         identifier = custom_id.split(CUSTOM_ID_DELINEATOR)[1].split(".")[0]
-        output_content = json_response["response"]["body"]["choices"][0]["message"][
-            "content"
-        ]
+        output_content = json_response["response"]["body"]["choices"][0]["message"]["content"]
+        if model:
+            json_data = json.loads(output_content)
+            output_content = model(**json_data)
         response = response_builder(output_content, identifier)
         responses.append(response)
 
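The new optional model argument changes what _process_output returns: when a Pydantic model class is supplied, the message content is parsed from JSON and validated into that model instead of being passed through as a string. A standalone sketch of that parsing step (ExamplePage is a hypothetical response model):

import json

from pydantic import BaseModel


class ExamplePage(BaseModel):  # hypothetical response model for illustration
    title: str
    body: str


raw_content = '{"title": "Intro", "body": "First page text"}'

# Mirrors the branch added above: load the JSON string, then construct
# the model so output_content becomes a BaseModel instance.
output_content = ExamplePage(**json.loads(raw_content))
print(output_content.title)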
parallex/ai/uploader.py CHANGED
@@ -1,8 +1,12 @@
 import base64
 import json
 import os
+from typing import Optional
 from uuid import UUID
 
+from openai.lib._pydantic import to_strict_json_schema
+from pydantic import BaseModel
+
 from parallex.ai.open_ai_client import OpenAIClient
 from parallex.file_management.utils import file_in_temp_dir
 from parallex.models.batch_file import BatchFile
@@ -17,6 +21,8 @@ async def upload_images_for_processing(
     image_files: list[ImageFile],
     temp_directory: str,
     prompt_text: str,
+    azure_api_deployment_env_name: str,
+    model: Optional[type[BaseModel]] = None,
 ) -> list[BatchFile]:
     """Base64 encodes image, converts to expected jsonl format and uploads"""
     trace_id = image_files[0].trace_id
@@ -43,7 +49,13 @@ async def upload_images_for_processing(
         prompt_custom_id = (
             f"{image_file.trace_id}{CUSTOM_ID_DELINEATOR}{image_file.page_number}.jsonl"
         )
-        jsonl = _image_jsonl_format(prompt_custom_id, base64_encoded_image, prompt_text)
+        jsonl = _image_jsonl_format(
+            prompt_custom_id,
+            base64_encoded_image,
+            prompt_text,
+            azure_api_deployment_env_name,
+            model
+        )
         with open(upload_file_location, "a") as jsonl_file:
             jsonl_file.write(json.dumps(jsonl) + "\n")
     batch_file = await _create_batch_file(client, trace_id, upload_file_location)
@@ -52,7 +64,11 @@ async def upload_images_for_processing(
 
 
 async def upload_prompts_for_processing(
-    client: OpenAIClient, prompts: list[str], temp_directory: str, trace_id: UUID
+    client: OpenAIClient,
+    prompts: list[str], temp_directory: str,
+    trace_id: UUID,
+    azure_api_deployment_env_name: str,
+    model: Optional[type[BaseModel]] = None,
 ) -> list[BatchFile]:
     """Creates jsonl file and uploads for processing"""
     current_index = 0
@@ -73,7 +89,12 @@ async def upload_prompts_for_processing(
         )
 
         prompt_custom_id = f"{trace_id}{CUSTOM_ID_DELINEATOR}{index}.jsonl"
-        jsonl = _simple_jsonl_format(prompt_custom_id, prompt)
+        jsonl = _simple_jsonl_format(
+            prompt_custom_id,
+            prompt,
+            azure_api_deployment_env_name,
+            model
+        )
         with open(upload_file_location, "a") as jsonl_file:
             jsonl_file.write(json.dumps(jsonl) + "\n")
     batch_file = await _create_batch_file(client, trace_id, upload_file_location)
@@ -119,26 +140,52 @@ async def _create_batch_file(
     )
 
 
-def _simple_jsonl_format(prompt_custom_id: str, prompt_text: str) -> dict:
+def _response_format(model: type[BaseModel]) -> dict:
+    schema = to_strict_json_schema(model)
     return {
+        "type": "json_schema",
+        "json_schema": {
+            "name": model.__name__,
+            "strict": True,
+            "schema": schema
+        }
+    }
+
+
+def _simple_jsonl_format(
+    prompt_custom_id: str,
+    prompt_text: str,
+    azure_api_deployment_env_name: str,
+    model: Optional[type[BaseModel]]
+) -> dict:
+    payload = {
         "custom_id": prompt_custom_id,
         "method": "POST",
         "url": "/chat/completions",
         "body": {
-            "model": os.getenv("AZURE_API_DEPLOYMENT"),
+            "model": os.getenv(azure_api_deployment_env_name),
             "messages": [{"role": "user", "content": prompt_text}],
            "temperature": 0.0,  # TODO make configurable
        },
    }
+    if model is not None:
+        payload["body"]["response_format"] = _response_format(model)
+    return payload
 
 
-def _image_jsonl_format(prompt_custom_id: str, encoded_image: str, prompt_text: str):
-    return {
+def _image_jsonl_format(
+    prompt_custom_id: str,
+    encoded_image: str,
+    prompt_text: str,
+    azure_api_deployment_env_name: str,
+    model: Optional[type[BaseModel]] = None
+) -> dict:
+    payload = {
         "custom_id": prompt_custom_id,
         "method": "POST",
         "url": "/chat/completions",
         "body": {
-            "model": os.getenv("AZURE_API_DEPLOYMENT"),
+            "model": os.getenv(azure_api_deployment_env_name),
             "messages": [
                 {
                     "role": "user",
@@ -154,5 +201,9 @@ def _image_jsonl_format(prompt_custom_id: str, encoded_image: str, prompt_text:
                 }
             ],
             "max_tokens": 2000,
+            "response_format": {"type": "json_object"}
         },
     }
+    if model is not None:
+        payload["body"]["response_format"] = _response_format(model)
+    return payload
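The _response_format helper above converts a Pydantic model into the strict json_schema response_format that is attached to each batch request body. A rough sketch of the payload it builds for a hypothetical model (the exact schema contents depend on to_strict_json_schema):

from openai.lib._pydantic import to_strict_json_schema
from pydantic import BaseModel


class ExampleSummary(BaseModel):  # hypothetical response model for illustration
    summary: str


# Same shape as _response_format: a strict JSON-schema response_format
# keyed by the model's class name.
response_format = {
    "type": "json_schema",
    "json_schema": {
        "name": ExampleSummary.__name__,
        "strict": True,
        "schema": to_strict_json_schema(ExampleSummary),
    },
}
print(response_format["json_schema"]["name"])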
parallex/models/page_response.py CHANGED
@@ -2,5 +2,5 @@ from pydantic import BaseModel, Field
 
 
 class PageResponse(BaseModel):
-    output_content: str = Field(description="Markdown generated for the page")
+    output_content: str | BaseModel = Field(description="Markdown generated for the page")
     page_number: int = Field(description="Page number of the associated PDF")
parallex/models/prompt_response.py CHANGED
@@ -2,5 +2,5 @@ from pydantic import BaseModel, Field
 
 
 class PromptResponse(BaseModel):
-    output_content: str = Field(description="Response from the model")
+    output_content: str | BaseModel = Field(description="Response from the model")
     prompt_index: int = Field(description="Index corresponding to the given prompts")
parallex/parallex.py CHANGED
@@ -4,6 +4,8 @@ import uuid
 from typing import Callable, Optional
 from uuid import UUID
 
+from pydantic import BaseModel
+
 from parallex.ai.batch_processor import wait_for_batch_completion, create_batch
 from parallex.ai.open_ai_client import OpenAIClient
 from parallex.ai.output_processor import process_images_output, process_prompts_output
@@ -32,10 +34,20 @@ async def parallex(
     concurrency: Optional[int] = 20,
     prompt_text: Optional[str] = DEFAULT_PROMPT,
     log_level: Optional[str] = "ERROR",
+    response_model: Optional[type[BaseModel]] = None,
+    azure_endpoint_env_name: Optional[str] = "AZURE_API_BASE",
+    azure_api_key_env_name: Optional[str] = "AZURE_API_KEY",
+    azure_api_version_env_name: Optional[str] = "AZURE_API_VERSION",
+    azure_api_deployment_env_name: Optional[str] = "AZURE_API_DEPLOYMENT",
 ) -> ParallexCallableOutput:
     setup_logger(log_level)
     remote_file_handler = RemoteFileHandler()
-    open_ai_client = OpenAIClient(model=model, remote_file_handler=remote_file_handler)
+    open_ai_client = OpenAIClient(
+        remote_file_handler=remote_file_handler,
+        azure_endpoint_env_name=azure_endpoint_env_name,
+        azure_api_key_env_name=azure_api_key_env_name,
+        azure_api_version_env_name=azure_api_version_env_name,
+    )
     try:
         return await _execute(
             open_ai_client=open_ai_client,
@@ -43,6 +55,8 @@ async def parallex(
             post_process_callable=post_process_callable,
             concurrency=concurrency,
             prompt_text=prompt_text,
+            azure_api_deployment_env_name=azure_api_deployment_env_name,
+            model=response_model
         )
     except Exception as e:
         logger.error(f"Error occurred: {e}")
@@ -52,21 +66,32 @@ async def parallex(
 
 
 async def parallex_simple_prompts(
-    model: str,
     prompts: list[str],
     post_process_callable: Optional[Callable[..., None]] = None,
     log_level: Optional[str] = "ERROR",
     concurrency: Optional[int] = 20,
+    response_model: Optional[type[BaseModel]] = None,
+    azure_endpoint_env_name: Optional[str] = "AZURE_API_BASE",
+    azure_api_key_env_name: Optional[str] = "AZURE_API_KEY",
+    azure_api_version_env_name: Optional[str] = "AZURE_API_VERSION",
+    azure_api_deployment_env_name: Optional[str] = "AZURE_API_DEPLOYMENT",
 ) -> ParallexPromptsCallableOutput:
     setup_logger(log_level)
     remote_file_handler = RemoteFileHandler()
-    open_ai_client = OpenAIClient(model=model, remote_file_handler=remote_file_handler)
+    open_ai_client = OpenAIClient(
+        remote_file_handler=remote_file_handler,
+        azure_endpoint_env_name=azure_endpoint_env_name,
+        azure_api_key_env_name=azure_api_key_env_name,
+        azure_api_version_env_name=azure_api_version_env_name,
+    )
     try:
         return await _prompts_execute(
             open_ai_client=open_ai_client,
             prompts=prompts,
             post_process_callable=post_process_callable,
             concurrency=concurrency,
+            model=response_model,
+            azure_api_deployment_env_name=azure_api_deployment_env_name
         )
     except Exception as e:
         logger.error(f"Error occurred: {e}")
@@ -78,8 +103,10 @@ async def parallex_simple_prompts(
 async def _prompts_execute(
     open_ai_client: OpenAIClient,
     prompts: list[str],
+    azure_api_deployment_env_name: str,
     post_process_callable: Optional[Callable[..., None]] = None,
     concurrency: Optional[int] = 20,
+    model: Optional[type[BaseModel]] = None,
 ):
     with tempfile.TemporaryDirectory() as temp_directory:
         trace_id = uuid.uuid4()
@@ -88,6 +115,8 @@ async def _prompts_execute(
             prompts=prompts,
             temp_directory=temp_directory,
             trace_id=trace_id,
+            azure_api_deployment_env_name=azure_api_deployment_env_name,
+            model=model,
         )
         start_batch_semaphore = asyncio.Semaphore(concurrency)
         start_batch_tasks = []
@@ -110,7 +139,7 @@ async def _prompts_execute(
                 f"waiting for batch to complete - {batch.id} - {batch.trace_id}"
             )
             prompt_task = asyncio.create_task(
-                _wait_and_create_prompt_responses(batch=batch, client=open_ai_client, semaphore=process_semaphore)
+                _wait_and_create_prompt_responses(batch=batch, client=open_ai_client, semaphore=process_semaphore, model=model)
             )
             prompt_tasks.append(prompt_task)
         prompt_response_groups = await asyncio.gather(*prompt_tasks)
@@ -131,9 +160,11 @@ async def _prompts_execute(
 async def _execute(
     open_ai_client: OpenAIClient,
     pdf_source_url: str,
+    azure_api_deployment_env_name: str,
     post_process_callable: Optional[Callable[..., None]] = None,
     concurrency: Optional[int] = 20,
     prompt_text: Optional[str] = DEFAULT_PROMPT,
+    model: Optional[type[BaseModel]] = None,
 ) -> ParallexCallableOutput:
     with tempfile.TemporaryDirectory() as temp_directory:
         raw_file = await add_file_to_temp_directory(
@@ -149,6 +180,8 @@ async def _execute(
             image_files=image_files,
             temp_directory=temp_directory,
             prompt_text=prompt_text,
+            model=model,
+            azure_api_deployment_env_name=azure_api_deployment_env_name
         )
         start_batch_semaphore = asyncio.Semaphore(concurrency)
         start_batch_tasks = []
@@ -169,7 +202,7 @@ async def _execute(
         for batch in batch_jobs:
             page_task = asyncio.create_task(
                 _wait_and_create_pages(
-                    batch=batch, client=open_ai_client, semaphore=process_semaphore
+                    batch=batch, client=open_ai_client, semaphore=process_semaphore, model=model
                 )
             )
             pages_tasks.append(page_task)
@@ -192,27 +225,27 @@ async def _execute(
 
 
 async def _wait_and_create_pages(
-    batch: UploadBatch, client: OpenAIClient, semaphore: asyncio.Semaphore
+    batch: UploadBatch, client: OpenAIClient, semaphore: asyncio.Semaphore, model: Optional[type[BaseModel]] = None
 ):
     async with semaphore:
         logger.info(f"waiting for batch to complete - {batch.id} - {batch.trace_id}")
         output_file_id = await wait_for_batch_completion(client=client, batch=batch)
         logger.info(f"batch completed - {batch.id} - {batch.trace_id}")
         page_responses = await process_images_output(
-            client=client, output_file_id=output_file_id
+            client=client, output_file_id=output_file_id, model=model,
         )
         return page_responses
 
 
 async def _wait_and_create_prompt_responses(
-    batch: UploadBatch, client: OpenAIClient, semaphore: asyncio.Semaphore
+    batch: UploadBatch, client: OpenAIClient, semaphore: asyncio.Semaphore, model: Optional[type[BaseModel]] = None
 ):
     async with semaphore:
         logger.info(f"waiting for batch to complete - {batch.id} - {batch.trace_id}")
         output_file_id = await wait_for_batch_completion(client=client, batch=batch)
         logger.info(f"batch completed - {batch.id} - {batch.trace_id}")
         prompt_responses = await process_prompts_output(
-            client=client, output_file_id=output_file_id
+            client=client, output_file_id=output_file_id, model=model,
         )
         return prompt_responses
 
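Taken together, the public entry points now accept a response_model and the environment-variable names, so structured output can be requested without touching library internals. A hedged usage sketch of the new parallex_simple_prompts signature (the Receipt model and the prompt text are invented for illustration; the default AZURE_API_* variables are assumed to be set in the environment):

import asyncio

from pydantic import BaseModel

from parallex.parallex import parallex_simple_prompts


class Receipt(BaseModel):  # hypothetical structured-output model
    vendor: str
    total: float


async def main() -> None:
    # With response_model set, each PromptResponse.output_content should be
    # a Receipt instance rather than a raw string.
    output = await parallex_simple_prompts(
        prompts=["Extract the vendor and total from: ACME Corp, $12.50"],
        response_model=Receipt,
        # azure_*_env_name arguments default to AZURE_API_BASE, AZURE_API_KEY,
        # AZURE_API_VERSION and AZURE_API_DEPLOYMENT; override them if needed.
    )
    print(output)


if __name__ == "__main__":
    asyncio.run(main())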
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: parallex
-Version: 0.3.4
+Version: 0.5.0
 Summary: PDF to markdown using Azure OpenAI batch processing
 Home-page: https://github.com/Summed-AI/parallex
 Author: Jeff Hostetler
@@ -1,24 +1,24 @@
 parallex/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 parallex/ai/batch_processor.py,sha256=O5q_jaIU0VI93p7Riq4aZ_qUiN9Omxp5GOfn0IqEYgo,1361
-parallex/ai/open_ai_client.py,sha256=TRH78oYod_EWpp3hjEh097OT7hwsQmtv44_j3X9Frxo,2047
-parallex/ai/output_processor.py,sha256=Rwp8dkLo4xsqooeBh3Xv-uGVbJMG1JQkwyxdUoOs2tQ,1800
-parallex/ai/uploader.py,sha256=Il4dllaPn6NGoU1YWi56ZJkzaOQzKg9lUngfc3ANOKg,5500
+parallex/ai/open_ai_client.py,sha256=Nkl8F4EaHQbkNtgOXjLAtynWr10w9Q1Ym3TDisjtIiw,2168
+parallex/ai/output_processor.py,sha256=kd50DwB2txhzz4_MPYl97bPOtLMl0KV2UP_eFmUtq34,2087
+parallex/ai/uploader.py,sha256=t_R-3FMX3OVo90EQRCGL0VqBn3vKKn5iUe3qoWVIbMM,6772
 parallex/file_management/converter.py,sha256=Rj-93LXNl2gCY-XUOCZv7DdCNI2-GyRpS5FobnTqwzo,1111
 parallex/file_management/file_finder.py,sha256=BPvrkxZlwOYmRXzzS138wGTsVzuhDIKfQZn0CISUj3o,1598
 parallex/file_management/remote_file_handler.py,sha256=jsI9NhOrKQR8K3yo536lGplVBGis9XY0G4dRpumgWFM,213
 parallex/file_management/utils.py,sha256=WMdXd9UOFbJDHnL2IWfDXyyD2jhwnGtpCVI_npiSlIk,98
 parallex/models/batch_file.py,sha256=JwARFB48sMOTN-wf7J5YbsWIac2rxXnZ4fBABFESA0M,405
 parallex/models/image_file.py,sha256=LjQne2b6rIDWpQpdYT41KXNDWpg5kv9bkM1SCx6jnAI,402
-parallex/models/page_response.py,sha256=KADCAV3XnkqWm-q_FBCfbt5nqDbiHg9MroZvFXaBbt0,228
+parallex/models/page_response.py,sha256=uqVdHXoEWX3NVvr0Y2_izSA1cpw3EXFZRe1HmI4ypLk,240
 parallex/models/parallex_callable_output.py,sha256=CkJKA8mwsc5olNnG1K6nrWUu4xTkJvp8bp3SSPQEX5c,465
 parallex/models/parallex_prompts_callable_output.py,sha256=IlNX9627_E8aXWQ-vDBuv2-9jMFXqn4LFBbShPzxoc4,421
-parallex/models/prompt_response.py,sha256=LcctuyqwiTHWrZHSahwauMaSBsin5Ws6fQRAzGXTsAA,230
+parallex/models/prompt_response.py,sha256=2Zmnwlj8Ou2VgEHmi1VZrlnv5XRzw5VLMEkpQ1VelQQ,242
 parallex/models/raw_file.py,sha256=Nlv6u_jlDCXDgU2_Ff7DRbDCx27pB1NZugNhEoaBMQU,483
 parallex/models/upload_batch.py,sha256=jrnds9ryXg9drL4TF8TGimMVTCDfKaWsBzFv_ed0i88,2068
-parallex/parallex.py,sha256=0nOfEXeiuTKi0gQSnqdNyPxIYvuE7Wfp4HtmSbVsEs4,8864
+parallex/parallex.py,sha256=uP36YPJkWhaSgfrXcOLprea2W-9ZwQ-MXmU7liE-aKk,10591
 parallex/utils/constants.py,sha256=508ieZLZ5kse0T4_QyNJp57Aq0DMNFjjyFlsKa0xtek,366
 parallex/utils/logger.py,sha256=i3ZZ7YTUmhUStbvVME67F9ffnkLOv5ijm7wVUyJT8Ys,440
-parallex-0.3.4.dist-info/LICENSE,sha256=wPwCqGrisXnEcpaUxSO79C2mdOUTbtjhLjyy8mVW6p8,1046
-parallex-0.3.4.dist-info/METADATA,sha256=gIXiPBgPJVnqZbfa8xsxMN0cTDJjalZmplnOUHfI9-0,4461
-parallex-0.3.4.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
-parallex-0.3.4.dist-info/RECORD,,
+parallex-0.5.0.dist-info/LICENSE,sha256=wPwCqGrisXnEcpaUxSO79C2mdOUTbtjhLjyy8mVW6p8,1046
+parallex-0.5.0.dist-info/METADATA,sha256=0Mm0BYWvEGpYr5SNBkQw2qLjI2TGKWRVUDVw_e8XyMo,4461
+parallex-0.5.0.dist-info/WHEEL,sha256=Nq82e9rUAnEjt98J6MlVmMCZb-t9cYE2Ir1kpBmnWfs,88
+parallex-0.5.0.dist-info/RECORD,,