retab-0.0.36-py3-none-any.whl → retab-0.0.38-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- retab/__init__.py +4 -0
- {uiform → retab}/_resource.py +5 -5
- {uiform → retab}/_utils/ai_models.py +2 -2
- {uiform → retab}/_utils/benchmarking.py +15 -16
- {uiform → retab}/_utils/chat.py +29 -34
- {uiform → retab}/_utils/display.py +0 -3
- {uiform → retab}/_utils/json_schema.py +9 -14
- {uiform → retab}/_utils/mime.py +11 -14
- {uiform → retab}/_utils/responses.py +16 -10
- {uiform → retab}/_utils/stream_context_managers.py +1 -1
- {uiform → retab}/_utils/usage/usage.py +31 -31
- {uiform → retab}/client.py +54 -53
- {uiform → retab}/resources/consensus/client.py +19 -38
- {uiform → retab}/resources/consensus/completions.py +36 -59
- {uiform → retab}/resources/consensus/completions_stream.py +35 -47
- {uiform → retab}/resources/consensus/responses.py +37 -86
- {uiform → retab}/resources/consensus/responses_stream.py +41 -89
- retab/resources/documents/client.py +455 -0
- {uiform → retab}/resources/documents/extractions.py +192 -101
- {uiform → retab}/resources/evals.py +56 -43
- retab/resources/evaluations/__init__.py +3 -0
- retab/resources/evaluations/client.py +301 -0
- retab/resources/evaluations/documents.py +233 -0
- retab/resources/evaluations/iterations.py +452 -0
- {uiform → retab}/resources/files.py +2 -2
- {uiform → retab}/resources/jsonlUtils.py +225 -221
- retab/resources/models.py +73 -0
- retab/resources/processors/automations/client.py +244 -0
- {uiform → retab}/resources/processors/automations/endpoints.py +79 -120
- retab/resources/processors/automations/links.py +294 -0
- {uiform → retab}/resources/processors/automations/logs.py +30 -19
- retab/resources/processors/automations/mailboxes.py +397 -0
- retab/resources/processors/automations/outlook.py +337 -0
- {uiform → retab}/resources/processors/automations/tests.py +22 -25
- {uiform → retab}/resources/processors/client.py +181 -166
- {uiform → retab}/resources/schemas.py +78 -66
- {uiform → retab}/resources/secrets/external_api_keys.py +1 -5
- retab/resources/secrets/webhook.py +64 -0
- {uiform → retab}/resources/usage.py +41 -4
- {uiform → retab}/types/ai_models.py +17 -17
- {uiform → retab}/types/automations/cron.py +19 -12
- {uiform → retab}/types/automations/endpoints.py +7 -4
- {uiform → retab}/types/automations/links.py +7 -3
- {uiform → retab}/types/automations/mailboxes.py +10 -10
- {uiform → retab}/types/automations/outlook.py +15 -11
- {uiform → retab}/types/automations/webhooks.py +1 -1
- retab/types/browser_canvas.py +3 -0
- retab/types/chat.py +8 -0
- {uiform → retab}/types/completions.py +12 -15
- retab/types/consensus.py +19 -0
- {uiform → retab}/types/db/annotations.py +3 -3
- {uiform → retab}/types/db/files.py +8 -6
- {uiform → retab}/types/documents/create_messages.py +20 -22
- {uiform → retab}/types/documents/extractions.py +71 -26
- {uiform → retab}/types/evals.py +5 -5
- retab/types/evaluations/__init__.py +31 -0
- retab/types/evaluations/documents.py +30 -0
- retab/types/evaluations/iterations.py +112 -0
- retab/types/evaluations/model.py +73 -0
- retab/types/events.py +79 -0
- {uiform → retab}/types/extractions.py +36 -13
- retab/types/inference_settings.py +15 -0
- retab/types/jobs/base.py +54 -0
- retab/types/jobs/batch_annotation.py +12 -0
- {uiform → retab}/types/jobs/evaluation.py +1 -2
- {uiform → retab}/types/logs.py +37 -34
- retab/types/metrics.py +32 -0
- {uiform → retab}/types/mime.py +22 -20
- {uiform → retab}/types/modalities.py +10 -10
- retab/types/predictions.py +19 -0
- {uiform → retab}/types/schemas/enhance.py +4 -2
- {uiform → retab}/types/schemas/evaluate.py +7 -4
- {uiform → retab}/types/schemas/generate.py +6 -3
- {uiform → retab}/types/schemas/layout.py +1 -1
- {uiform → retab}/types/schemas/object.py +16 -17
- {uiform → retab}/types/schemas/templates.py +1 -3
- {uiform → retab}/types/secrets/external_api_keys.py +0 -1
- {uiform → retab}/types/standards.py +18 -1
- {retab-0.0.36.dist-info → retab-0.0.38.dist-info}/METADATA +78 -77
- retab-0.0.38.dist-info/RECORD +107 -0
- retab-0.0.38.dist-info/top_level.txt +1 -0
- retab-0.0.36.dist-info/RECORD +0 -96
- retab-0.0.36.dist-info/top_level.txt +0 -1
- uiform/__init__.py +0 -4
- uiform/_utils/benchmarking copy.py +0 -588
- uiform/resources/documents/client.py +0 -255
- uiform/resources/models.py +0 -45
- uiform/resources/processors/automations/client.py +0 -78
- uiform/resources/processors/automations/links.py +0 -356
- uiform/resources/processors/automations/mailboxes.py +0 -435
- uiform/resources/processors/automations/outlook.py +0 -444
- uiform/resources/secrets/webhook.py +0 -62
- uiform/types/chat.py +0 -8
- uiform/types/consensus.py +0 -10
- uiform/types/events.py +0 -76
- uiform/types/jobs/base.py +0 -150
- uiform/types/jobs/batch_annotation.py +0 -22
- {uiform → retab}/_utils/__init__.py +0 -0
- {uiform → retab}/_utils/usage/__init__.py +0 -0
- {uiform → retab}/py.typed +0 -0
- {uiform → retab}/resources/__init__.py +0 -0
- {uiform → retab}/resources/consensus/__init__.py +0 -0
- {uiform → retab}/resources/documents/__init__.py +0 -0
- {uiform → retab}/resources/finetuning.py +0 -0
- {uiform → retab}/resources/openai_example.py +0 -0
- {uiform → retab}/resources/processors/__init__.py +0 -0
- {uiform → retab}/resources/processors/automations/__init__.py +0 -0
- {uiform → retab}/resources/prompt_optimization.py +0 -0
- {uiform → retab}/resources/secrets/__init__.py +0 -0
- {uiform → retab}/resources/secrets/client.py +0 -0
- {uiform → retab}/types/__init__.py +0 -0
- {uiform → retab}/types/automations/__init__.py +0 -0
- {uiform → retab}/types/db/__init__.py +0 -0
- {uiform → retab}/types/documents/__init__.py +0 -0
- {uiform → retab}/types/documents/correct_orientation.py +0 -0
- {uiform → retab}/types/jobs/__init__.py +0 -0
- {uiform → retab}/types/jobs/finetune.py +0 -0
- {uiform → retab}/types/jobs/prompt_optimization.py +0 -0
- {uiform → retab}/types/jobs/webcrawl.py +0 -0
- {uiform → retab}/types/pagination.py +0 -0
- {uiform → retab}/types/schemas/__init__.py +0 -0
- {uiform → retab}/types/secrets/__init__.py +0 -0
- {retab-0.0.36.dist-info → retab-0.0.38.dist-info}/WHEEL +0 -0
@@ -0,0 +1,233 @@
|
|
1
|
+
from io import IOBase
|
2
|
+
from pathlib import Path
|
3
|
+
from typing import Any, Dict, List, Union
|
4
|
+
|
5
|
+
import PIL.Image
|
6
|
+
from pydantic import HttpUrl
|
7
|
+
|
8
|
+
from ..._resource import AsyncAPIResource, SyncAPIResource
|
9
|
+
from ..._utils.mime import prepare_mime_document
|
10
|
+
from ...types.evaluations import DocumentItem, EvaluationDocument, PatchEvaluationDocumentRequest
|
11
|
+
from ...types.mime import MIMEData
|
12
|
+
from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
|
13
|
+
from ...types.documents.extractions import UiParsedChatCompletion
|
14
|
+
|
15
|
+
|
16
|
+
class DocumentsMixin:
    """Shared request builders for the sync/async evaluation-documents clients.

    Each ``prepare_*`` method only constructs a :class:`PreparedRequest`
    describing the HTTP call; the wrapper classes execute it via their client.
    """

    def prepare_get(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the GET request for one document of an evaluation."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}")

    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: dict[str, Any]) -> PreparedRequest:
        """Build the POST request attaching *document* (plus its annotation) to an evaluation."""
        # Bundle the MIME payload with its ground-truth annotation so it can be
        # serialized as a single JSON body.
        item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=None)
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/documents", data=item.model_dump(mode="json"))

    def prepare_list(self, evaluation_id: str) -> PreparedRequest:
        """Build the GET request listing every document of an evaluation."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents")

    def prepare_update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> PreparedRequest:
        """Build the PATCH request for a document; fields left unset are dropped from the payload."""
        patch = PatchEvaluationDocumentRequest(annotation=annotation)
        payload = patch.model_dump(mode="json", exclude_unset=True, exclude_defaults=True)
        return PreparedRequest(method="PATCH", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}", data=payload)

    def prepare_delete(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the DELETE request for one document of an evaluation."""
        return PreparedRequest(method="DELETE", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}")

    def prepare_llm_annotate(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the POST request triggering a non-streaming LLM annotation of a document."""
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}/llm-annotate", data={"stream": False})
|
41
|
+
class Documents(SyncAPIResource, DocumentsMixin):
    """Synchronous Documents API wrapper for evaluations."""

    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
        """Attach a document and its ground-truth annotation to an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            document: The document to process. Can be a file path (Path or str),
                a file-like object (IOBase), a MIMEData object, a PIL Image
                object, or a URL (HttpUrl).
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The created document
        Raises:
            HTTPException if the request fails
        """
        # Normalize whatever the caller handed us into MIME data first.
        mime_doc: MIMEData = prepare_mime_document(document)
        raw = self._client._prepared_request(self.prepare_create(evaluation_id, mime_doc, annotation))
        return EvaluationDocument(**raw)

    def list(self, evaluation_id: str) -> List[EvaluationDocument]:
        """Return every document attached to an evaluation.

        Args:
            evaluation_id: The ID of the evaluation

        Returns:
            List[EvaluationDocument]: List of documents
        Raises:
            HTTPException if the request fails
        """
        raw = self._client._prepared_request(self.prepare_list(evaluation_id))
        return [EvaluationDocument(**entry) for entry in raw.get("data", [])]

    def get(self, evaluation_id: str, document_id: str) -> EvaluationDocument:
        """Fetch a single document by ID.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            EvaluationDocument: The document
        Raises:
            HTTPException if the request fails
        """
        raw = self._client._prepared_request(self.prepare_get(evaluation_id, document_id))
        return EvaluationDocument(**raw)

    def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> EvaluationDocument:
        """Patch a document's annotation.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The updated document
        Raises:
            HTTPException if the request fails
        """
        raw = self._client._prepared_request(self.prepare_update(evaluation_id, document_id, annotation=annotation))
        return EvaluationDocument(**raw)

    def delete(self, evaluation_id: str, document_id: str) -> DeleteResponse:
        """Remove a document from an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        return self._client._prepared_request(self.prepare_delete(evaluation_id, document_id))

    def llm_annotate(self, evaluation_id: str, document_id: str) -> UiParsedChatCompletion:
        """Annotate a document with an LLM.

        Updates the document (within the evaluation) with the latest extraction.
        """
        raw = self._client._prepared_request(self.prepare_llm_annotate(evaluation_id, document_id))
        return UiParsedChatCompletion(**raw)
146
|
+
class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
    """Async Documents API wrapper for evaluations"""

    async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
        """
        Create a document for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            document: The document to process. Can be:
                - A file path (Path or str)
                - A file-like object (IOBase)
                - A MIMEData object
                - A PIL Image object
                - A URL (HttpUrl)
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The created document
        Raises:
            HTTPException if the request fails
        """
        # Convert document to MIME data format
        mime_document: MIMEData = prepare_mime_document(document)

        # Let prepare_create handle the serialization
        request = self.prepare_create(evaluation_id, mime_document, annotation)
        response = await self._client._prepared_request(request)
        return EvaluationDocument(**response)

    async def list(self, evaluation_id: str) -> List[EvaluationDocument]:
        """
        List documents for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation

        Returns:
            List[EvaluationDocument]: List of documents
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id)
        response = await self._client._prepared_request(request)
        return [EvaluationDocument(**item) for item in response.get("data", [])]

    async def get(self, evaluation_id: str, document_id: str) -> EvaluationDocument:
        """
        Get a document by ID.

        Added for parity with the synchronous ``Documents.get`` client.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            EvaluationDocument: The document
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id, document_id)
        response = await self._client._prepared_request(request)
        return EvaluationDocument(**response)

    async def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> EvaluationDocument:
        """
        Update a document.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The updated document
        Raises:
            HTTPException if the request fails
        """
        # Pass annotation by keyword, matching the sync client's call style.
        request = self.prepare_update(evaluation_id, document_id, annotation=annotation)
        response = await self._client._prepared_request(request)
        return EvaluationDocument(**response)

    async def delete(self, evaluation_id: str, document_id: str) -> DeleteResponse:
        """
        Delete a document.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id, document_id)
        return await self._client._prepared_request(request)

    async def llm_annotate(self, evaluation_id: str, document_id: str) -> UiParsedChatCompletion:
        """
        Annotate a document with an LLM.
        This method updates the document (within the evaluation) with the latest extraction.
        """
        request = self.prepare_llm_annotate(evaluation_id, document_id)
        response = await self._client._prepared_request(request)
        return UiParsedChatCompletion(**response)
@@ -0,0 +1,452 @@
|
|
1
|
+
from typing import Any, Dict, List, Optional
|
2
|
+
|
3
|
+
from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
|
4
|
+
|
5
|
+
from ..._resource import AsyncAPIResource, SyncAPIResource
|
6
|
+
from ...types.browser_canvas import BrowserCanvas
|
7
|
+
from ...types.evaluations import CreateIterationRequest, Iteration, ProcessIterationRequest, IterationDocumentStatusResponse, PatchIterationRequest
|
8
|
+
from ...types.inference_settings import InferenceSettings
|
9
|
+
from ...types.metrics import DistancesResult
|
10
|
+
from ...types.modalities import Modality
|
11
|
+
from ...types.standards import DeleteResponse, PreparedRequest, FieldUnset
|
12
|
+
from ...types.documents.extractions import UiParsedChatCompletion
|
13
|
+
|
14
|
+
|
15
|
+
class IterationsMixin:
    """Shared request builders for the sync/async evaluation-iterations clients.

    Each ``prepare_*`` method only constructs a :class:`PreparedRequest`;
    the wrapper classes execute it via their client.
    """

    def prepare_get(self, evaluation_id: str, iteration_id: str) -> PreparedRequest:
        """Build the GET request for a single iteration."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}")

    def prepare_list(self, evaluation_id: str, model: Optional[str] = None) -> PreparedRequest:
        """Build the GET request listing iterations, optionally filtered by model."""
        query = {"model": model} if model else {}
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/iterations", params=query)

    def prepare_create(
        self,
        evaluation_id: str,
        model: str = FieldUnset,
        json_schema: Optional[Dict[str, Any]] = None,
        temperature: float = FieldUnset,
        modality: Modality = FieldUnset,
        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
        image_resolution_dpi: int = FieldUnset,
        browser_canvas: BrowserCanvas = FieldUnset,
        n_consensus: int = FieldUnset,
    ) -> PreparedRequest:
        """Build the POST request creating a new iteration; unset settings are omitted."""
        settings = InferenceSettings(
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        body = CreateIterationRequest(inference_settings=settings, json_schema=json_schema)
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/iterations", data=body.model_dump(exclude_unset=True, exclude_defaults=True, mode="json"))

    def prepare_update(
        self,
        evaluation_id: str,
        iteration_id: str,
        json_schema: Dict[str, Any] = FieldUnset,
        model: str = FieldUnset,
        temperature: float = FieldUnset,
        modality: Modality = FieldUnset,
        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
        image_resolution_dpi: int = FieldUnset,
        browser_canvas: BrowserCanvas = FieldUnset,
        n_consensus: int = FieldUnset,
    ) -> PreparedRequest:
        """Build the PATCH request for an iteration; only explicitly-set fields are sent."""
        settings = InferenceSettings(
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        # When no setting was supplied, collapse back to FieldUnset so the
        # PATCH payload omits inference_settings entirely.
        if not settings.model_dump(exclude_unset=True, mode="json"):
            settings = FieldUnset

        patch = PatchIterationRequest(json_schema=json_schema, inference_settings=settings)
        return PreparedRequest(
            method="PATCH", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}", data=patch.model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
        )

    def prepare_delete(self, evaluation_id: str, iteration_id: str) -> PreparedRequest:
        """Build the DELETE request for a single iteration."""
        return PreparedRequest(method="DELETE", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}")

    def prepare_compute_distances(self, evaluation_id: str, iteration_id: str, document_id: str) -> PreparedRequest:
        """Build the GET request fetching per-document distance metrics."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}/documents/{document_id}/distances")

    def prepare_process(
        self,
        evaluation_id: str,
        iteration_id: str,
        document_ids: Optional[List[str]] = None,
        only_outdated: bool = True,
    ) -> PreparedRequest:
        """Build the POST request running extractions across an iteration's documents."""
        body = ProcessIterationRequest(
            document_ids=document_ids,
            only_outdated=only_outdated,
        )
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}/process", data=body.model_dump(exclude_none=True, mode="json"))

    def prepare_process_document(self, evaluation_id: str, iteration_id: str, document_id: str) -> PreparedRequest:
        """Build the POST request processing a single document (non-streaming)."""
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}/documents/{document_id}/process", data={"stream": False})

    def prepare_status(self, evaluation_id: str, iteration_id: str) -> PreparedRequest:
        """Build the GET request for the per-document status of an iteration."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}/status")
108
|
+
class Iterations(SyncAPIResource, IterationsMixin):
    """Iterations API wrapper for evaluations"""

    # NOTE: the previous no-op __init__ (pure super() pass-through) was removed;
    # Python falls back to the base class initializer automatically.

    def get(self, evaluation_id: str, iteration_id: str) -> Iteration:
        """
        Get an iteration by ID.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration

        Returns:
            Iteration: The iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id, iteration_id)
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def list(self, evaluation_id: str, model: Optional[str] = None) -> List[Iteration]:
        """
        List iterations for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: Optional model to filter by

        Returns:
            List[Iteration]: List of iterations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id, model)
        response = self._client._prepared_request(request)
        return [Iteration(**item) for item in response.get("data", [])]

    def create(
        self,
        evaluation_id: str,
        model: str,
        temperature: float = 0.0,
        modality: Modality = "native",
        json_schema: Optional[Dict[str, Any]] = None,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: int = 96,
        browser_canvas: BrowserCanvas = "A4",
        n_consensus: int = 1,
    ) -> Iteration:
        """
        Create a new iteration for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            json_schema: The JSON schema for the iteration (if not set, we use the one of the eval)
            model: The model to use for the iteration
            temperature: The temperature to use for the model
            modality: The modality to use (text, image, etc.)
            reasoning_effort: The reasoning effort setting for the model (auto, low, medium, high)
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus iterations to perform

        Returns:
            Iteration: The created iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_create(
            evaluation_id=evaluation_id,
            json_schema=json_schema,
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def delete(self, evaluation_id: str, iteration_id: str) -> DeleteResponse:
        """
        Delete an iteration.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id, iteration_id)
        return self._client._prepared_request(request)

    def compute_distances(self, evaluation_id: str, iteration_id: str, document_id: str) -> DistancesResult:
        """
        Get distances for a document in an iteration.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            DistancesResult: The distances
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_compute_distances(evaluation_id, iteration_id, document_id)
        response = self._client._prepared_request(request)
        return DistancesResult(**response)

    def process(
        self,
        evaluation_id: str,
        iteration_id: str,
        document_ids: Optional[List[str]] = None,
        only_outdated: bool = True,
    ) -> Iteration:
        """
        Process an iteration by running extractions on documents.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration
            document_ids: Optional list of specific document IDs to process
            only_outdated: Whether to only process documents that need updates

        Returns:
            Iteration: The updated iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process(evaluation_id, iteration_id, document_ids, only_outdated)
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def process_document(self, evaluation_id: str, iteration_id: str, document_id: str) -> UiParsedChatCompletion:
        """
        Process a single document within an iteration.
        This method updates the iteration document with the latest extraction.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            UiParsedChatCompletion: The parsed chat completion
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process_document(evaluation_id, iteration_id, document_id)
        response = self._client._prepared_request(request)
        return UiParsedChatCompletion(**response)

    def status(self, evaluation_id: str, iteration_id: str) -> IterationDocumentStatusResponse:
        """
        Get the status of documents in an iteration.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration

        Returns:
            IterationDocumentStatusResponse: The status of documents
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_status(evaluation_id, iteration_id)
        response = self._client._prepared_request(request)
        return IterationDocumentStatusResponse(**response)
276
|
+
class AsyncIterations(AsyncAPIResource, IterationsMixin):
    """Async Iterations API wrapper for evaluations"""

    # NOTE: the previous no-op __init__ (pure super() pass-through) was removed;
    # Python falls back to the base class initializer automatically.

    async def get(self, evaluation_id: str, iteration_id: str) -> Iteration:
        """
        Get an iteration by ID.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration

        Returns:
            Iteration: The iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id, iteration_id)
        response = await self._client._prepared_request(request)
        return Iteration(**response)

    async def list(self, evaluation_id: str, model: Optional[str] = None) -> List[Iteration]:
        """
        List iterations for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: Optional model to filter by

        Returns:
            List[Iteration]: List of iterations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id, model)
        response = await self._client._prepared_request(request)
        return [Iteration(**item) for item in response.get("data", [])]

    async def create(
        self,
        evaluation_id: str,
        model: str,
        temperature: float = 0.0,
        modality: Modality = "native",
        json_schema: Optional[Dict[str, Any]] = None,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: int = 96,
        browser_canvas: BrowserCanvas = "A4",
        n_consensus: int = 1,
    ) -> Iteration:
        """
        Create a new iteration for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            json_schema: The JSON schema for the iteration (if not set, we use the one of the eval)
            model: The model to use for the iteration
            temperature: The temperature to use for the model
            modality: The modality to use (text, image, etc.)
            reasoning_effort: The reasoning effort setting for the model (auto, low, medium, high)
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus iterations to perform

        Returns:
            Iteration: The created iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_create(
            evaluation_id=evaluation_id,
            json_schema=json_schema,
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = await self._client._prepared_request(request)
        return Iteration(**response)

    async def delete(self, evaluation_id: str, iteration_id: str) -> DeleteResponse:
        """
        Delete an iteration.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id, iteration_id)
        return await self._client._prepared_request(request)

    async def compute_distances(self, evaluation_id: str, iteration_id: str, document_id: str) -> DistancesResult:
        """
        Get distances for a document in an iteration.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            DistancesResult: The distances
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_compute_distances(evaluation_id, iteration_id, document_id)
        response = await self._client._prepared_request(request)
        return DistancesResult(**response)

    async def process(
        self,
        evaluation_id: str,
        iteration_id: str,
        document_ids: Optional[List[str]] = None,
        only_outdated: bool = True,
    ) -> Iteration:
        """
        Process an iteration by running extractions on documents.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration
            document_ids: Optional list of specific document IDs to process
            only_outdated: Whether to only process documents that need updates

        Returns:
            Iteration: The updated iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process(evaluation_id, iteration_id, document_ids, only_outdated)
        response = await self._client._prepared_request(request)
        return Iteration(**response)

    async def process_document(self, evaluation_id: str, iteration_id: str, document_id: str) -> UiParsedChatCompletion:
        """
        Process a single document within an iteration.
        This method updates the iteration document with the latest extraction.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            UiParsedChatCompletion: The parsed chat completion
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_process_document(evaluation_id, iteration_id, document_id)
        response = await self._client._prepared_request(request)
        return UiParsedChatCompletion(**response)

    async def status(self, evaluation_id: str, iteration_id: str) -> IterationDocumentStatusResponse:
        """
        Get the status of documents in an iteration.

        Args:
            evaluation_id: The ID of the evaluation
            iteration_id: The ID of the iteration

        Returns:
            IterationDocumentStatusResponse: The status of documents
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_status(evaluation_id, iteration_id)
        response = await self._client._prepared_request(request)
        return IterationDocumentStatusResponse(**response)