retab-0.0.35-py3-none-any.whl
This diff shows the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
- retab-0.0.35.dist-info/METADATA +417 -0
- retab-0.0.35.dist-info/RECORD +111 -0
- retab-0.0.35.dist-info/WHEEL +5 -0
- retab-0.0.35.dist-info/top_level.txt +1 -0
- uiform/__init__.py +4 -0
- uiform/_resource.py +28 -0
- uiform/_utils/__init__.py +0 -0
- uiform/_utils/ai_models.py +100 -0
- uiform/_utils/benchmarking copy.py +588 -0
- uiform/_utils/benchmarking.py +485 -0
- uiform/_utils/chat.py +332 -0
- uiform/_utils/display.py +443 -0
- uiform/_utils/json_schema.py +2161 -0
- uiform/_utils/mime.py +168 -0
- uiform/_utils/responses.py +163 -0
- uiform/_utils/stream_context_managers.py +52 -0
- uiform/_utils/usage/__init__.py +0 -0
- uiform/_utils/usage/usage.py +300 -0
- uiform/client.py +701 -0
- uiform/py.typed +0 -0
- uiform/resources/__init__.py +0 -0
- uiform/resources/consensus/__init__.py +3 -0
- uiform/resources/consensus/client.py +114 -0
- uiform/resources/consensus/completions.py +252 -0
- uiform/resources/consensus/completions_stream.py +278 -0
- uiform/resources/consensus/responses.py +325 -0
- uiform/resources/consensus/responses_stream.py +373 -0
- uiform/resources/deployments/__init__.py +9 -0
- uiform/resources/deployments/client.py +78 -0
- uiform/resources/deployments/endpoints.py +322 -0
- uiform/resources/deployments/links.py +452 -0
- uiform/resources/deployments/logs.py +211 -0
- uiform/resources/deployments/mailboxes.py +496 -0
- uiform/resources/deployments/outlook.py +531 -0
- uiform/resources/deployments/tests.py +158 -0
- uiform/resources/documents/__init__.py +3 -0
- uiform/resources/documents/client.py +255 -0
- uiform/resources/documents/extractions.py +441 -0
- uiform/resources/evals.py +812 -0
- uiform/resources/files.py +24 -0
- uiform/resources/finetuning.py +62 -0
- uiform/resources/jsonlUtils.py +1046 -0
- uiform/resources/models.py +45 -0
- uiform/resources/openai_example.py +22 -0
- uiform/resources/processors/__init__.py +3 -0
- uiform/resources/processors/automations/__init__.py +9 -0
- uiform/resources/processors/automations/client.py +78 -0
- uiform/resources/processors/automations/endpoints.py +317 -0
- uiform/resources/processors/automations/links.py +356 -0
- uiform/resources/processors/automations/logs.py +211 -0
- uiform/resources/processors/automations/mailboxes.py +435 -0
- uiform/resources/processors/automations/outlook.py +444 -0
- uiform/resources/processors/automations/tests.py +158 -0
- uiform/resources/processors/client.py +474 -0
- uiform/resources/prompt_optimization.py +76 -0
- uiform/resources/schemas.py +369 -0
- uiform/resources/secrets/__init__.py +9 -0
- uiform/resources/secrets/client.py +20 -0
- uiform/resources/secrets/external_api_keys.py +109 -0
- uiform/resources/secrets/webhook.py +62 -0
- uiform/resources/usage.py +271 -0
- uiform/types/__init__.py +0 -0
- uiform/types/ai_models.py +645 -0
- uiform/types/automations/__init__.py +0 -0
- uiform/types/automations/cron.py +58 -0
- uiform/types/automations/endpoints.py +21 -0
- uiform/types/automations/links.py +28 -0
- uiform/types/automations/mailboxes.py +60 -0
- uiform/types/automations/outlook.py +68 -0
- uiform/types/automations/webhooks.py +21 -0
- uiform/types/chat.py +8 -0
- uiform/types/completions.py +93 -0
- uiform/types/consensus.py +10 -0
- uiform/types/db/__init__.py +0 -0
- uiform/types/db/annotations.py +24 -0
- uiform/types/db/files.py +36 -0
- uiform/types/deployments/__init__.py +0 -0
- uiform/types/deployments/cron.py +59 -0
- uiform/types/deployments/endpoints.py +28 -0
- uiform/types/deployments/links.py +36 -0
- uiform/types/deployments/mailboxes.py +67 -0
- uiform/types/deployments/outlook.py +76 -0
- uiform/types/deployments/webhooks.py +21 -0
- uiform/types/documents/__init__.py +0 -0
- uiform/types/documents/correct_orientation.py +13 -0
- uiform/types/documents/create_messages.py +226 -0
- uiform/types/documents/extractions.py +297 -0
- uiform/types/evals.py +207 -0
- uiform/types/events.py +76 -0
- uiform/types/extractions.py +85 -0
- uiform/types/jobs/__init__.py +0 -0
- uiform/types/jobs/base.py +150 -0
- uiform/types/jobs/batch_annotation.py +22 -0
- uiform/types/jobs/evaluation.py +133 -0
- uiform/types/jobs/finetune.py +6 -0
- uiform/types/jobs/prompt_optimization.py +41 -0
- uiform/types/jobs/webcrawl.py +6 -0
- uiform/types/logs.py +231 -0
- uiform/types/mime.py +257 -0
- uiform/types/modalities.py +68 -0
- uiform/types/pagination.py +6 -0
- uiform/types/schemas/__init__.py +0 -0
- uiform/types/schemas/enhance.py +53 -0
- uiform/types/schemas/evaluate.py +55 -0
- uiform/types/schemas/generate.py +32 -0
- uiform/types/schemas/layout.py +58 -0
- uiform/types/schemas/object.py +631 -0
- uiform/types/schemas/templates.py +107 -0
- uiform/types/secrets/__init__.py +0 -0
- uiform/types/secrets/external_api_keys.py +22 -0
- uiform/types/standards.py +39 -0
The first hunk is uiform/resources/processors/client.py (new file, 474 lines). It opens with the imports, the ListProcessors response model, and the ProcessorsMixin request builders:
```python
import base64
import datetime
from io import IOBase
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional

import httpx
import PIL.Image
from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
from pydantic import HttpUrl

from ..._resource import AsyncAPIResource, SyncAPIResource
from ..._utils.ai_models import assert_valid_model_extraction
from ..._utils.mime import MIMEData, prepare_mime_document
from ...types.logs import ProcessorConfig, UpdateProcessorRequest
from ...types.pagination import ListMetadata
from ...types.documents.extractions import UiParsedChatCompletion, DocumentExtractRequest
from pydantic import BaseModel

# from ...types.documents.extractions import DocumentExtractResponse
from ...types.mime import BaseMIMEData
from ...types.modalities import Modality
from ...types.standards import PreparedRequest
from .automations import Automations, AsyncAutomations


class ListProcessors(BaseModel):
    data: List[ProcessorConfig]
    list_metadata: ListMetadata


class ProcessorsMixin:
    def prepare_create(
        self,
        name: str,
        json_schema: Dict[str, Any],
        modality: Modality = "native",
        model: str = "gpt-4o-mini",
        temperature: float = 0,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: Optional[int] = 96,
        browser_canvas: Optional[Literal['A3', 'A4', 'A5']] = 'A4',
        n_consensus: int = 1,
    ) -> PreparedRequest:
        assert_valid_model_extraction(model)

        data = {
            "name": name,
            "json_schema": json_schema,
            "modality": modality,
            "model": model,
            "temperature": temperature,
            "reasoning_effort": reasoning_effort,
            "image_resolution_dpi": image_resolution_dpi,
            "browser_canvas": browser_canvas,
            "n_consensus": n_consensus,
        }

        request = ProcessorConfig.model_validate(data)
        return PreparedRequest(method="POST", url="/v1/processors", data=request.model_dump(mode='json'))

    def prepare_list(
        self,
        before: Optional[str] = None,
        after: Optional[str] = None,
        limit: Optional[int] = 10,
        order: Optional[Literal["asc", "desc"]] = "desc",
        # Filtering parameters
        name: Optional[str] = None,
        modality: Optional[str] = None,
        model: Optional[str] = None,
        schema_id: Optional[str] = None,
        schema_data_id: Optional[str] = None,
    ) -> PreparedRequest:
        params = {
            "before": before,
            "after": after,
            "limit": limit,
            "order": order,
            "name": name,
            "modality": modality,
            "model": model,
            "schema_id": schema_id,
            "schema_data_id": schema_data_id,
        }
        # Remove None values
        params = {k: v for k, v in params.items() if v is not None}

        return PreparedRequest(method="GET", url="/v1/processors", params=params)

    def prepare_get(self, processor_id: str) -> PreparedRequest:
        """Get a specific processor configuration.

        Args:
            processor_id: ID of the processor

        Returns:
            ProcessorConfig: The processor configuration
        """
        return PreparedRequest(method="GET", url=f"/v1/processors/{processor_id}")

    def prepare_update(
        self,
        processor_id: str,
        name: Optional[str] = None,
        modality: Optional[Modality] = None,
        image_resolution_dpi: Optional[int] = None,
        browser_canvas: Optional[Literal['A3', 'A4', 'A5']] = None,
        model: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        temperature: Optional[float] = None,
        reasoning_effort: Optional[ChatCompletionReasoningEffort] = None,
        n_consensus: Optional[int] = None,
    ) -> PreparedRequest:
        data: dict[str, Any] = {}

        if name is not None:
            data["name"] = name
        if modality is not None:
            data["modality"] = modality
        if image_resolution_dpi is not None:
            data["image_resolution_dpi"] = image_resolution_dpi
        if browser_canvas is not None:
            data["browser_canvas"] = browser_canvas
        if model is not None:
            assert_valid_model_extraction(model)
            data["model"] = model
        if json_schema is not None:
            data["json_schema"] = json_schema
        if temperature is not None:
            data["temperature"] = temperature
        if reasoning_effort is not None:
            data["reasoning_effort"] = reasoning_effort
        if n_consensus is not None:
            data["n_consensus"] = n_consensus
        request = UpdateProcessorRequest.model_validate(data)
        return PreparedRequest(method="PUT", url=f"/v1/processors/{processor_id}", data=request.model_dump(mode='json'))

    def prepare_delete(self, processor_id: str) -> PreparedRequest:
        return PreparedRequest(method="DELETE", url=f"/v1/processors/{processor_id}")

    def prepare_submit(
        self,
        processor_id: str,
        document: Optional[Path | str | bytes | IOBase | MIMEData | PIL.Image.Image | HttpUrl] = None,
        documents: Optional[List[Path | str | bytes | IOBase | MIMEData | PIL.Image.Image | HttpUrl]] = None,
        temperature: Optional[float] = None,
        seed: Optional[int] = None,
        store: bool = True,
    ) -> PreparedRequest:
        """Prepare a request to submit documents to a processor.

        Args:
            processor_id: ID of the processor
            document: Single document to process (mutually exclusive with documents)
            documents: List of documents to process (mutually exclusive with document)
            temperature: Optional temperature override
            seed: Optional seed for reproducibility
            store: Whether to store the results

        Returns:
            PreparedRequest: The prepared request
        """
        # Validate that either document or documents is provided, but not both
        if not document and not documents:
            raise ValueError("Either 'document' or 'documents' must be provided")

        if document and documents:
            raise ValueError("Provide either 'document' (single) or 'documents' (multiple), not both")

        # Prepare form data parameters
        form_data = {
            "temperature": temperature,
            "seed": seed,
            "store": store,
        }
        # Remove None values
        form_data = {k: v for k, v in form_data.items() if v is not None}

        # Prepare files for upload
        files = {}
        if document:
            # Convert document to MIMEData if needed
            mime_document = prepare_mime_document(document)
            # Single document upload
            files["document"] = (
                mime_document.filename,
                base64.b64decode(mime_document.content),
                mime_document.mime_type
            )
        elif documents:
            # Multiple documents upload - httpx supports multiple files with same field name using a list
            files_list = []
            for doc in documents:
                # Convert each document to MIMEData if needed
                mime_doc = prepare_mime_document(doc)
                files_list.append((
                    "documents",  # field name
                    (
                        mime_doc.filename,
                        base64.b64decode(mime_doc.content),
                        mime_doc.mime_type
                    )
                ))
            files = files_list

        url = f"/v1/processors/{processor_id}/submit"
        # if stream:
        #     url = f"/v1/processors/{processor_id}/submit/stream"

        return PreparedRequest(method="POST", url=url, form_data=form_data, files=files)
```
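Every `prepare_*` helper above stops at a `PreparedRequest`; the actual transport is the client's `_prepared_request` method, defined in `uiform/client.py` and not part of this hunk. As a rough sketch of how such a request could map onto the `httpx` calls this module imports (a hypothetical illustration under those assumptions, not the package's real transport code):

```python
# Hypothetical transport sketch. The PreparedRequest field names
# (method, url, params, data, form_data, files) follow their usage in
# ProcessorsMixin above; the real logic lives in uiform/client.py.
import httpx

def send_prepared(http: httpx.Client, req) -> dict:
    response = http.request(
        req.method,
        req.url,
        params=getattr(req, "params", None),    # query filters (prepare_list)
        json=getattr(req, "data", None),        # JSON bodies (prepare_create/update)
        data=getattr(req, "form_data", None),   # multipart form fields (prepare_submit)
        files=getattr(req, "files", None),      # document uploads (prepare_submit)
    )
    response.raise_for_status()
    return response.json()
```

Keeping request construction in the mixin and transport on the client lets the sync and async wrappers share all of the building logic. The rest of the file wires the mixin into those two resources: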
```python
class Processors(SyncAPIResource, ProcessorsMixin):
    """Processors API wrapper for managing processor configurations"""

    def __init__(self, client: Any) -> None:
        super().__init__(client=client)
        self.automations = Automations(client=client)

    def create(
        self,
        name: str,
        json_schema: Dict[str, Any],
        modality: Modality = "native",
        model: str = "gpt-4o-mini",
        temperature: float = 0,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: Optional[int] = 96,
        browser_canvas: Optional[Literal['A3', 'A4', 'A5']] = 'A4',
        n_consensus: int = 1,
    ) -> ProcessorConfig:
        """Create a new processor configuration.

        Args:
            name: Name of the processor
            json_schema: JSON schema for the processor
            image_resolution_dpi: Optional image resolution DPI
            browser_canvas: Optional browser canvas size
            modality: Processing modality (currently only "native" supported)
            model: AI model to use for processing
            temperature: Model temperature setting
            reasoning_effort: The effort level for the model to reason about the input data.
            n_consensus: Number of consensus required to validate the data
        Returns:
            ProcessorConfig: The created processor configuration
        """
        request = self.prepare_create(name, json_schema, modality, model, temperature, reasoning_effort, image_resolution_dpi, browser_canvas, n_consensus)
        response = self._client._prepared_request(request)
        print(f"Processor ID: {response['id']}. Processor available at https://www.uiform.com/dashboard/processors/{response['id']}")
        return ProcessorConfig.model_validate(response)

    def list(
        self,
        before: Optional[str] = None,
        after: Optional[str] = None,
        limit: Optional[int] = 10,
        order: Optional[Literal["asc", "desc"]] = "desc",
        name: Optional[str] = None,
        modality: Optional[str] = None,
        model: Optional[str] = None,
        schema_id: Optional[str] = None,
        schema_data_id: Optional[str] = None,
    ) -> ListProcessors:
        """List processor configurations with pagination support.

        Args:
            before: Optional cursor for pagination before a specific processor ID
            after: Optional cursor for pagination after a specific processor ID
            limit: Optional limit on number of results (max 100)
            order: Optional sort order ("asc" or "desc")
            name: Optional filter by processor name
            modality: Optional filter by modality
            model: Optional filter by model
            schema_id: Optional filter by schema ID
            schema_data_id: Optional filter by schema data ID

        Returns:
            ListProcessors: Paginated list of processor configurations with metadata
        """
        request = self.prepare_list(before, after, limit, order, name, modality, model, schema_id, schema_data_id)
        response = self._client._prepared_request(request)
        return ListProcessors.model_validate(response)

    def get(self, processor_id: str) -> ProcessorConfig:
        """Get a specific processor configuration.

        Args:
            processor_id: ID of the processor

        Returns:
            ProcessorConfig: The processor configuration
        """
        request = self.prepare_get(processor_id)
        response = self._client._prepared_request(request)
        return ProcessorConfig.model_validate(response)

    def update(
        self,
        processor_id: str,
        name: Optional[str] = None,
        modality: Optional[Modality] = None,
        image_resolution_dpi: Optional[int] = None,
        browser_canvas: Optional[Literal['A3', 'A4', 'A5']] = None,
        model: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        temperature: Optional[float] = None,
        reasoning_effort: Optional[ChatCompletionReasoningEffort] = None,
        n_consensus: Optional[int] = None,
    ) -> ProcessorConfig:
        """Update a processor configuration.

        Args:
            processor_id: ID of the processor to update
            name: New name for the processor
            modality: New processing modality
            image_resolution_dpi: New image resolution DPI
            browser_canvas: New browser canvas size
            model: New AI model
            json_schema: New JSON schema for the processor
            temperature: New temperature setting
            reasoning_effort: The effort level for the model to reason about the input data.
            n_consensus: New number of consensus required
        Returns:
            ProcessorConfig: The updated processor configuration
        """
        request = self.prepare_update(processor_id, name, modality, image_resolution_dpi, browser_canvas, model, json_schema, temperature, reasoning_effort, n_consensus)
        response = self._client._prepared_request(request)
        return ProcessorConfig.model_validate(response)

    def delete(self, processor_id: str) -> None:
        """Delete a processor configuration.

        Args:
            processor_id: ID of the processor to delete
        """
        request = self.prepare_delete(processor_id)
        self._client._prepared_request(request)
        print(f"Processor Deleted. ID: {processor_id}")

    def submit(
        self,
        processor_id: str,
        document: Optional[Path | str | bytes | IOBase | MIMEData | PIL.Image.Image | HttpUrl] = None,
        documents: Optional[List[Path | str | bytes | IOBase | MIMEData | PIL.Image.Image | HttpUrl]] = None,
        temperature: Optional[float] = None,
        seed: Optional[int] = None,
        store: bool = True,
    ) -> UiParsedChatCompletion:
        """Submit documents to a processor for processing.

        Args:
            processor_id: ID of the processor
            document: Single document to process (mutually exclusive with documents)
            documents: List of documents to process (mutually exclusive with document)
            temperature: Optional temperature override
            seed: Optional seed for reproducibility
            store: Whether to store the results

        Returns:
            UiParsedChatCompletion: The processing result
        """
        request = self.prepare_submit(
            processor_id=processor_id,
            document=document,
            documents=documents,
            temperature=temperature,
            seed=seed,
            store=store
        )
        response = self._client._prepared_request(request)
        return UiParsedChatCompletion.model_validate(response)


class AsyncProcessors(AsyncAPIResource, ProcessorsMixin):
    """Async Processors API wrapper for managing processor configurations"""

    def __init__(self, client: Any) -> None:
        super().__init__(client=client)
        self.automations = AsyncAutomations(client=client)

    async def create(
        self,
        name: str,
        json_schema: Dict[str, Any],
        modality: Modality = "native",
        model: str = "gpt-4o-mini",
        temperature: float = 0,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: Optional[int] = 96,
        browser_canvas: Optional[Literal['A3', 'A4', 'A5']] = 'A4',
        n_consensus: int = 1,
    ) -> ProcessorConfig:
        request = self.prepare_create(name, json_schema, modality, model, temperature, reasoning_effort, image_resolution_dpi, browser_canvas, n_consensus)
        response = await self._client._prepared_request(request)
        print(f"Processor ID: {response['id']}. Processor available at https://www.uiform.com/dashboard/processors/{response['id']}")

        return ProcessorConfig.model_validate(response)

    async def list(
        self,
        before: Optional[str] = None,
        after: Optional[str] = None,
        limit: Optional[int] = 10,
        order: Optional[Literal["asc", "desc"]] = "desc",
        name: Optional[str] = None,
        modality: Optional[str] = None,
        model: Optional[str] = None,
        schema_id: Optional[str] = None,
        schema_data_id: Optional[str] = None,
    ) -> ListProcessors:
        request = self.prepare_list(before, after, limit, order, name, modality, model, schema_id, schema_data_id)
        response = await self._client._prepared_request(request)
        return ListProcessors.model_validate(response)

    async def get(self, processor_id: str) -> ProcessorConfig:
        request = self.prepare_get(processor_id)
        response = await self._client._prepared_request(request)
        return ProcessorConfig.model_validate(response)

    async def update(
        self,
        processor_id: str,
        name: Optional[str] = None,
        modality: Optional[Modality] = None,
        image_resolution_dpi: Optional[int] = None,
        browser_canvas: Optional[Literal['A3', 'A4', 'A5']] = None,
        model: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        temperature: Optional[float] = None,
        reasoning_effort: Optional[ChatCompletionReasoningEffort] = None,
        n_consensus: Optional[int] = None,
    ) -> ProcessorConfig:
        request = self.prepare_update(processor_id, name, modality, image_resolution_dpi, browser_canvas, model, json_schema, temperature, reasoning_effort, n_consensus)
        response = await self._client._prepared_request(request)
        return ProcessorConfig.model_validate(response)

    async def delete(self, processor_id: str) -> None:
        request = self.prepare_delete(processor_id)
        await self._client._prepared_request(request)
        print(f"Processor Deleted. ID: {processor_id}")

    async def submit(
        self,
        processor_id: str,
        document: Optional[Path | str | bytes | IOBase | MIMEData | PIL.Image.Image | HttpUrl] = None,
        documents: Optional[List[Path | str | bytes | IOBase | MIMEData | PIL.Image.Image | HttpUrl]] = None,
        temperature: Optional[float] = None,
        seed: Optional[int] = None,
        store: bool = True,
    ) -> UiParsedChatCompletion:
        """Submit documents to a processor for processing.

        Args:
            processor_id: ID of the processor
            document: Single document to process (mutually exclusive with documents)
            documents: List of documents to process (mutually exclusive with document)
            temperature: Optional temperature override
            seed: Optional seed for reproducibility
            store: Whether to store the results

        Returns:
            UiParsedChatCompletion: The processing result
        """
        request = self.prepare_submit(
            processor_id=processor_id,
            document=document,
            documents=documents,
            temperature=temperature,
            seed=seed,
            store=store
        )
        response = await self._client._prepared_request(request)
        return UiParsedChatCompletion.model_validate(response)
```
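For orientation, here is a minimal usage sketch of the sync wrapper. The entry point `UiForm` and the `client.processors` attribute are assumptions about the wiring in `uiform/client.py`, which is outside this hunk:

```python
# Minimal usage sketch; `UiForm` and `client.processors` are assumed
# wiring from uiform/client.py, which is not shown in this diff.
from uiform import UiForm

client = UiForm(api_key="sk-...")

# Create a processor; argument defaults mirror ProcessorsMixin.prepare_create.
processor = client.processors.create(
    name="invoice-parser",
    json_schema={"type": "object", "properties": {"total": {"type": "number"}}},
    model="gpt-4o-mini",
    reasoning_effort="medium",
)

# Submit a single document. `document` and `documents` are mutually
# exclusive; passing both raises ValueError in prepare_submit.
completion = client.processors.submit(
    processor_id=processor.id,  # ProcessorConfig is assumed to carry the server-assigned id
    document="invoice.pdf",
)
```

`AsyncProcessors` mirrors this surface one-for-one behind `await`.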
The second hunk is uiform/resources/prompt_optimization.py (new file, 76 lines). The module ships entirely commented out, sketching a jobs API for prompt optimization:
```python
# from typing import Any
# from pathlib import Path
# import json

# from .._resource import SyncAPIResource, AsyncAPIResource
# from .._utils.json_schema import load_json_schema
# from ..types.jobs import JobResponse
# from ..types.jobs.prompt_optimization import PromptOptimizationObject, PromptOptimizationProps, PromptOptimizationJobInputData, PromptOptimizationJob

# MAX_TRAINING_SAMPLES = 10

# class PromptOptimizationJobsMixin:
#     def prepare_create(self, json_schema: dict[str, Any] | Path | str,
#                        training_file: str,
#                        schema_optimization_props: dict[str, Any]) -> PromptOptimizationJob:

#         optimization_objects = []
#         with open(training_file, "r") as f:
#             for line in f:
#                 optimization_objects.append(PromptOptimizationObject(**json.loads(line)))


#         job = PromptOptimizationJob(
#             job_type="prompt-optimization",
#             input_data=PromptOptimizationJobInputData(
#                 json_schema=load_json_schema(json_schema),
#                 optimization_objects=optimization_objects[:MAX_TRAINING_SAMPLES],
#                 schema_optimization_props=PromptOptimizationProps.model_validate(schema_optimization_props)
#             )
#         )
#         return job

# class PromptOptimizationJobs(SyncAPIResource, PromptOptimizationJobsMixin):
#     def create(self, json_schema: dict[str, Any] | Path | str, training_file: str, schema_optimization_props: dict[str, Any]) -> JobResponse:
#         """Create a new prompt optimization job"""

#         request_data = self.prepare_create(json_schema, training_file, schema_optimization_props)
#         response = self._client._request("POST", "/v1/jobs", data=request_data.model_dump(mode="json"))
#         return JobResponse.model_validate(response)


#     def retrieve(self, job_id: str) -> Any:
#         """Retrieve status of a prompt optimization job"""
#         response = self._client._request("GET", f"/v1/jobs/{job_id}")
#         return JobResponse.model_validate(response)

# class AsyncPromptOptimizationJobs(AsyncAPIResource, PromptOptimizationJobsMixin):
#     async def create(self, json_schema: dict[str, Any] | Path | str, training_file: str, schema_optimization_props: dict[str, Any]) -> Any:
#         """Create a new prompt optimization job"""

#         request_data = self.prepare_create(json_schema, training_file, schema_optimization_props)
#         response = await self._client._request("POST", "/v1/jobs/", data=request_data.model_dump(mode="json"))
#         return JobResponse.model_validate(response)

#     async def retrieve(self, job_id: str) -> Any:
#         """Retrieve status of a prompt optimization job"""

#         response = await self._client._request("GET", f"/v1/jobs/{job_id}")
#         return JobResponse.model_validate(response)


# class PromptOptimization(SyncAPIResource):
#     """Prompt optimization jobs API wrapper"""
#     _jobs: PromptOptimizationJobs

#     def __init__(self, *args: Any, **kwargs: Any):
#         super().__init__(*args, **kwargs)
#         self._jobs = PromptOptimizationJobs(client=self._client)

# class AsyncPromptOptimization(AsyncAPIResource):
#     """Prompt optimization jobs Asyncronous API wrapper"""
#     _jobs: AsyncPromptOptimizationJobs

#     def __init__(self, *args: Any, **kwargs: Any):
#         super().__init__(*args, **kwargs)
#         self._jobs = AsyncPromptOptimizationJobs(client=self._client)
```