retab 0.0.42-py3-none-any.whl → 0.0.44-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- retab/__init__.py +2 -1
- retab/client.py +26 -51
- retab/generate_types.py +180 -0
- retab/resources/consensus/client.py +1 -1
- retab/resources/consensus/responses.py +1 -1
- retab/resources/deployments/__init__.py +3 -0
- retab/resources/deployments/automations/__init__.py +9 -0
- retab/resources/deployments/automations/client.py +244 -0
- retab/resources/deployments/automations/endpoints.py +290 -0
- retab/resources/deployments/automations/links.py +303 -0
- retab/resources/deployments/automations/logs.py +222 -0
- retab/resources/deployments/automations/mailboxes.py +423 -0
- retab/resources/deployments/automations/outlook.py +377 -0
- retab/resources/deployments/automations/tests.py +161 -0
- retab/resources/deployments/client.py +148 -0
- retab/resources/documents/client.py +94 -68
- retab/resources/documents/extractions.py +55 -46
- retab/resources/evaluations/__init__.py +2 -2
- retab/resources/evaluations/client.py +61 -77
- retab/resources/evaluations/documents.py +48 -37
- retab/resources/evaluations/iterations.py +58 -40
- retab/resources/jsonlUtils.py +3 -4
- retab/resources/processors/automations/endpoints.py +49 -39
- retab/resources/processors/automations/links.py +52 -43
- retab/resources/processors/automations/mailboxes.py +74 -59
- retab/resources/processors/automations/outlook.py +104 -82
- retab/resources/processors/client.py +35 -30
- retab/resources/projects/__init__.py +3 -0
- retab/resources/projects/client.py +285 -0
- retab/resources/projects/documents.py +244 -0
- retab/resources/projects/iterations.py +470 -0
- retab/resources/usage.py +2 -0
- retab/types/ai_models.py +2 -1
- retab/types/deprecated_evals.py +195 -0
- retab/types/evaluations/__init__.py +5 -2
- retab/types/evaluations/iterations.py +9 -43
- retab/types/evaluations/model.py +19 -24
- retab/types/extractions.py +1 -0
- retab/types/jobs/base.py +1 -1
- retab/types/jobs/evaluation.py +1 -1
- retab/types/logs.py +5 -6
- retab/types/mime.py +1 -10
- retab/types/projects/__init__.py +34 -0
- retab/types/projects/documents.py +30 -0
- retab/types/projects/iterations.py +78 -0
- retab/types/projects/model.py +68 -0
- retab/types/schemas/enhance.py +22 -5
- retab/types/schemas/evaluate.py +2 -2
- retab/types/schemas/object.py +27 -25
- retab/types/standards.py +2 -2
- retab/utils/__init__.py +3 -0
- retab/utils/ai_models.py +127 -12
- retab/utils/hashing.py +24 -0
- retab/utils/json_schema.py +1 -26
- retab/utils/mime.py +0 -17
- retab/utils/usage/usage.py +0 -1
- {retab-0.0.42.dist-info → retab-0.0.44.dist-info}/METADATA +4 -6
- {retab-0.0.42.dist-info → retab-0.0.44.dist-info}/RECORD +60 -55
- retab/_utils/__init__.py +0 -0
- retab/_utils/_model_cards/anthropic.yaml +0 -59
- retab/_utils/_model_cards/auto.yaml +0 -43
- retab/_utils/_model_cards/gemini.yaml +0 -117
- retab/_utils/_model_cards/openai.yaml +0 -301
- retab/_utils/_model_cards/xai.yaml +0 -28
- retab/_utils/ai_models.py +0 -138
- retab/_utils/benchmarking.py +0 -484
- retab/_utils/chat.py +0 -327
- retab/_utils/display.py +0 -440
- retab/_utils/json_schema.py +0 -2156
- retab/_utils/mime.py +0 -165
- retab/_utils/responses.py +0 -169
- retab/_utils/stream_context_managers.py +0 -52
- retab/_utils/usage/__init__.py +0 -0
- retab/_utils/usage/usage.py +0 -301
- {retab-0.0.42.dist-info → retab-0.0.44.dist-info}/WHEEL +0 -0
- {retab-0.0.42.dist-info → retab-0.0.44.dist-info}/top_level.txt +0 -0
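
Taken together, the file list shows the private retab/_utils package being removed in favor of a public retab/utils package, plus new deployments and projects resource trees. A minimal migration sketch follows, assuming the moved modules keep their public names (this diff only shows the paths, not the symbols inside them):

# Hypothetical import migration -- module paths come from the file list above;
# whether the symbols inside are unchanged is an assumption, not shown in this diff.
# retab <= 0.0.42 (old, private helpers):
#   from retab._utils import json_schema, mime
# retab >= 0.0.44 (new location):
from retab.utils import json_schema, mime  # illustrative import only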
retab/resources/projects/iterations.py
ADDED
@@ -0,0 +1,470 @@
from typing import Any, Dict, List, Optional

from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort

from ..._resource import AsyncAPIResource, SyncAPIResource
from ...types.browser_canvas import BrowserCanvas
from ...types.projects import CreateIterationRequest, Iteration, ProcessIterationRequest, IterationDocumentStatusResponse, PatchIterationRequest
from ...types.inference_settings import InferenceSettings
from ...types.metrics import DistancesResult
from ...types.modalities import Modality
from ...types.standards import DeleteResponse, PreparedRequest, FieldUnset
from ...types.documents.extractions import RetabParsedChatCompletion


class IterationsMixin:
    def prepare_get(self, evaluation_id: str, iteration_id: str) -> PreparedRequest:
        return PreparedRequest(method="GET", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}")

    def prepare_list(self, evaluation_id: str, model: Optional[str] = None) -> PreparedRequest:
        params = {}
        if model:
            params["model"] = model
        return PreparedRequest(method="GET", url=f"/v1/projects/{evaluation_id}/iterations", params=params)

    def prepare_create(
        self,
        evaluation_id: str,
        model: str = FieldUnset,
        json_schema: Optional[Dict[str, Any]] = None,
        temperature: float = FieldUnset,
        modality: Modality = FieldUnset,
        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
        image_resolution_dpi: int = FieldUnset,
        browser_canvas: BrowserCanvas = FieldUnset,
        n_consensus: int = FieldUnset,
    ) -> PreparedRequest:
        inference_dict = {}
        if model is not FieldUnset:
            inference_dict["model"] = model
        if temperature is not FieldUnset:
            inference_dict["temperature"] = temperature
        if modality is not FieldUnset:
            inference_dict["modality"] = modality
        if reasoning_effort is not FieldUnset:
            inference_dict["reasoning_effort"] = reasoning_effort
        if image_resolution_dpi is not FieldUnset:
            inference_dict["image_resolution_dpi"] = image_resolution_dpi
        if browser_canvas is not FieldUnset:
            inference_dict["browser_canvas"] = browser_canvas
        if n_consensus is not FieldUnset:
            inference_dict["n_consensus"] = n_consensus

        inference_settings = InferenceSettings(**inference_dict)

        request = CreateIterationRequest(inference_settings=inference_settings, json_schema=json_schema)

        return PreparedRequest(method="POST", url=f"/v1/projects/{evaluation_id}/iterations", data=request.model_dump(exclude_unset=True, exclude_defaults=True, mode="json"))

    def prepare_update(
        self,
        evaluation_id: str,
        iteration_id: str,
        json_schema: Dict[str, Any] = FieldUnset,
        model: str = FieldUnset,
        temperature: float = FieldUnset,
        modality: Modality = FieldUnset,
        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
        image_resolution_dpi: int = FieldUnset,
        browser_canvas: BrowserCanvas = FieldUnset,
        n_consensus: int = FieldUnset,
    ) -> PreparedRequest:
        inference_dict = {}
        if model is not FieldUnset:
            inference_dict["model"] = model
        if temperature is not FieldUnset:
            inference_dict["temperature"] = temperature
        if modality is not FieldUnset:
            inference_dict["modality"] = modality
        if reasoning_effort is not FieldUnset:
            inference_dict["reasoning_effort"] = reasoning_effort
        if image_resolution_dpi is not FieldUnset:
            inference_dict["image_resolution_dpi"] = image_resolution_dpi
        if browser_canvas is not FieldUnset:
            inference_dict["browser_canvas"] = browser_canvas
        if n_consensus is not FieldUnset:
            inference_dict["n_consensus"] = n_consensus

        iteration_dict = {}
        if json_schema is not FieldUnset:
            iteration_dict["json_schema"] = json_schema
        if inference_dict:  # Only add inference_settings if we have at least one field
            iteration_dict["inference_settings"] = InferenceSettings(**inference_dict)

        iteration_data = PatchIterationRequest(**iteration_dict)

        return PreparedRequest(
            method="PATCH", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}", data=iteration_data.model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
        )

    def prepare_delete(self, evaluation_id: str, iteration_id: str) -> PreparedRequest:
        return PreparedRequest(method="DELETE", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}")

    def prepare_compute_distances(self, evaluation_id: str, iteration_id: str, document_id: str) -> PreparedRequest:
        return PreparedRequest(method="GET", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}/documents/{document_id}/distances")

    def prepare_process(
        self,
        evaluation_id: str,
        iteration_id: str,
        document_ids: Optional[List[str]] = None,
        only_outdated: bool = True,
    ) -> PreparedRequest:
        request = ProcessIterationRequest(
            document_ids=document_ids,
            only_outdated=only_outdated,
        )
        return PreparedRequest(method="POST", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}/process", data=request.model_dump(exclude_none=True, mode="json"))

    def prepare_process_document(self, evaluation_id: str, iteration_id: str, document_id: str) -> PreparedRequest:
        return PreparedRequest(method="POST", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}/documents/{document_id}/process", data={"stream": False})

    def prepare_status(self, evaluation_id: str, iteration_id: str) -> PreparedRequest:
        return PreparedRequest(method="GET", url=f"/v1/projects/{evaluation_id}/iterations/{iteration_id}/status")


class Iterations(SyncAPIResource, IterationsMixin):
    """Iterations API wrapper for evaluations"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get(self, evaluation_id: str, iteration_id: str) -> Iteration:
        request = self.prepare_get(evaluation_id, iteration_id)
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def list(self, evaluation_id: str, model: Optional[str] = None) -> List[Iteration]:
        """
        List iterations for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: Optional model to filter by

        Returns:
            List[Iteration]: List of iterations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id, model)
        response = self._client._prepared_request(request)
        return [Iteration(**item) for item in response.get("data", [])]

    def create(
        self,
        evaluation_id: str,
        model: str = FieldUnset,
        temperature: float = FieldUnset,
        modality: Modality = FieldUnset,
        json_schema: Optional[Dict[str, Any]] = FieldUnset,
        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
        image_resolution_dpi: int = FieldUnset,
        browser_canvas: BrowserCanvas = FieldUnset,
        n_consensus: int = FieldUnset,
    ) -> Iteration:
        """
        Create a new iteration for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            json_schema: The JSON schema for the iteration (if not set, we use the one of the eval)
            model: The model to use for the iteration
            temperature: The temperature to use for the model
            modality: The modality to use (text, image, etc.)
            reasoning_effort: The reasoning effort setting for the model (auto, low, medium, high)
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus iterations to perform

        Returns:
            Iteration: The created iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_create(
            evaluation_id=evaluation_id,
            json_schema=json_schema,
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def delete(self, evaluation_id: str, iteration_id: str) -> DeleteResponse:
        """
        Delete an iteration.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id, iteration_id)
        return self._client._prepared_request(request)

    def compute_distances(self, evaluation_id: str, iteration_id: str, document_id: str) -> DistancesResult:
        """
        Get distances for a document in an iteration.

        Args:
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            DistancesResult: The distances
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_compute_distances(evaluation_id, iteration_id, document_id)
        response = self._client._prepared_request(request)
        return DistancesResult(**response)

    def process(
        self,
        evaluation_id: str,
        iteration_id: str,
        document_ids: Optional[List[str]] = None,
        only_outdated: bool = True,
    ) -> Iteration:
        """
        Process an iteration by running extractions on documents.

        Args:
            iteration_id: The ID of the iteration
            document_ids: Optional list of specific document IDs to process
            only_outdated: Whether to only process documents that need updates

        Returns:
            Iteration: The updated iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process(evaluation_id, iteration_id, document_ids, only_outdated)
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def process_document(self, evaluation_id: str, iteration_id: str, document_id: str) -> RetabParsedChatCompletion:
        """
        Process a single document within an iteration.
        This method updates the iteration document with the latest extraction.

        Args:
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            RetabParsedChatCompletion: The parsed chat completion
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process_document(evaluation_id, iteration_id, document_id)
        response = self._client._prepared_request(request)
        return RetabParsedChatCompletion(**response)

    def status(self, evaluation_id: str, iteration_id: str) -> IterationDocumentStatusResponse:
        """
        Get the status of documents in an iteration.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            IterationDocumentStatusResponse: The status of documents
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_status(evaluation_id, iteration_id)
        response = self._client._prepared_request(request)
        return IterationDocumentStatusResponse(**response)


class AsyncIterations(AsyncAPIResource, IterationsMixin):
    """Async Iterations API wrapper for evaluations"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    async def get(self, evaluation_id: str, iteration_id: str) -> Iteration:
        """
        Get an iteration by ID.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            Iteration: The iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id, iteration_id)
        response = await self._client._prepared_request(request)
        return Iteration(**response)

    async def list(self, evaluation_id: str, model: Optional[str] = None) -> List[Iteration]:
        """
        List iterations for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: Optional model to filter by

        Returns:
            List[Iteration]: List of iterations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id, model)
        response = await self._client._prepared_request(request)
        return [Iteration(**item) for item in response.get("data", [])]

    async def create(
        self,
        evaluation_id: str,
        model: str,
        temperature: float = 0.0,
        modality: Modality = "native",
        json_schema: Optional[Dict[str, Any]] = None,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: int = 96,
        browser_canvas: BrowserCanvas = "A4",
        n_consensus: int = 1,
    ) -> Iteration:
        """
        Create a new iteration for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            json_schema: The JSON schema for the iteration
            model: The model to use for the iteration
            temperature: The temperature to use for the model
            modality: The modality to use (text, image, etc.)
            reasoning_effort: The reasoning effort setting for the model (auto, low, medium, high)
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus iterations to perform

        Returns:
            Iteration: The created iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_create(
            evaluation_id=evaluation_id,
            json_schema=json_schema,
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = await self._client._prepared_request(request)
        return Iteration(**response)

    async def delete(self, evaluation_id: str, iteration_id: str) -> DeleteResponse:
        """
        Delete an iteration.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id, iteration_id)
        return await self._client._prepared_request(request)

    async def compute_distances(self, evaluation_id: str, iteration_id: str, document_id: str) -> DistancesResult:
        """
        Get distances for a document in an iteration.

        Args:
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            DistancesResult: The distances
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_compute_distances(evaluation_id, iteration_id, document_id)
        response = await self._client._prepared_request(request)
        return DistancesResult(**response)

    async def process(
        self,
        evaluation_id: str,
        iteration_id: str,
        document_ids: Optional[List[str]] = None,
        only_outdated: bool = True,
    ) -> Iteration:
        """
        Process an iteration by running extractions on documents.

        Args:
            iteration_id: The ID of the iteration
            document_ids: Optional list of specific document IDs to process
            only_outdated: Whether to only process documents that need updates

        Returns:
            Iteration: The updated iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process(evaluation_id, iteration_id, document_ids, only_outdated)
        response = await self._client._prepared_request(request)
        return Iteration(**response)

    async def process_document(self, evaluation_id: str, iteration_id: str, document_id: str) -> RetabParsedChatCompletion:
        """
        Process a single document within an iteration.
        This method updates the iteration document with the latest extraction.

        Args:
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            RetabParsedChatCompletion: The parsed chat completion
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process_document(evaluation_id, iteration_id, document_id)
        response = await self._client._prepared_request(request)
        return RetabParsedChatCompletion(**response)

    async def status(self, evaluation_id: str, iteration_id: str) -> IterationDocumentStatusResponse:
        """
        Get the status of documents in an iteration.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            IterationDocumentStatusResponse: The status of documents
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_status(evaluation_id, iteration_id)
        response = await self._client._prepared_request(request)
        return IterationDocumentStatusResponse(**response)
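
For orientation, a minimal usage sketch of this new projects iterations resource follows. It assumes the top-level client class is Retab and that the resource is mounted at client.projects.iterations; neither the client class nor the mounting point appears in this diff, so treat the attribute paths and IDs as illustrative.

# Hedged usage sketch for the new projects iterations resource (assumptions noted above).
from retab import Retab  # assumed entry point, not shown in this diff

client = Retab()  # typically reads the API key from the environment (assumption)

# Create an iteration for an existing evaluation/project, then run extractions on its documents.
iteration = client.projects.iterations.create(
    evaluation_id="eval_123",   # placeholder ID
    model="gpt-4.1-mini",       # placeholder model name
    temperature=0.0,
)
client.projects.iterations.process(
    evaluation_id="eval_123",
    iteration_id=iteration.id,  # Iteration is a pydantic model; an `id` field is assumed
    only_outdated=True,
)
status = client.projects.iterations.status("eval_123", iteration.id)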
retab/resources/usage.py
CHANGED
@@ -84,6 +84,7 @@ class Usage(SyncAPIResource, UsageMixin):
     def monthly_credits_usage(self) -> MonthlyUsageResponse:
         """
         Get monthly credits usage information.
+        Credits are calculated dynamically based on MIME type and consumption.
 
         Returns:
             dict: Monthly usage data including credits consumed and limits
@@ -198,6 +199,7 @@ class AsyncUsage(AsyncAPIResource, UsageMixin):
     async def monthly_credits_usage(self) -> MonthlyUsageResponse:
         """
         Get monthly credits usage information.
+        Credits are calculated dynamically based on MIME type and consumption.
 
         Returns:
             dict: Monthly usage data including credits consumed and limits
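
The addition above is docstring-only; a call sketch, assuming the resource is exposed as client.usage (the mounting point is not part of this diff):

from retab import Retab  # assumed entry point, as in the earlier sketch

client = Retab()  # typically reads the API key from the environment (assumption)
monthly = client.usage.monthly_credits_usage()
print(monthly.credits_count)  # a float as of 0.0.44; see the ai_models.py change below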
retab/types/ai_models.py
CHANGED
@@ -2,6 +2,7 @@ import datetime
 from typing import List, Literal, Optional
 from pydantic import BaseModel, Field, computed_field
 from retab.types.inference_settings import InferenceSettings
+from openai.types.model import Model
 
 
 AIProvider = Literal["OpenAI", "Anthropic", "Gemini", "xAI", "Retab"]
@@ -79,7 +80,7 @@ class FinetunedModel(BaseModel):
 
 # Monthly Usage
 class MonthlyUsageResponseContent(BaseModel):
-
+    credits_count: float  # Changed to float to support decimal credits
 
 
 MonthlyUsageResponse = MonthlyUsageResponseContent
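
The switch of credits_count to float is the only visible body change in MonthlyUsageResponseContent. A small sketch, assuming credits_count is the model's only required field (the hunk shows no others):

from retab.types.ai_models import MonthlyUsageResponse, MonthlyUsageResponseContent

# Fractional credits now validate cleanly; MonthlyUsageResponse is an alias of the content model.
usage = MonthlyUsageResponseContent(credits_count=12.5)
assert isinstance(usage, MonthlyUsageResponse)
assert usage.credits_count == 12.5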