retab 0.0.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- retab-0.0.35.dist-info/METADATA +417 -0
- retab-0.0.35.dist-info/RECORD +111 -0
- retab-0.0.35.dist-info/WHEEL +5 -0
- retab-0.0.35.dist-info/top_level.txt +1 -0
- uiform/__init__.py +4 -0
- uiform/_resource.py +28 -0
- uiform/_utils/__init__.py +0 -0
- uiform/_utils/ai_models.py +100 -0
- uiform/_utils/benchmarking copy.py +588 -0
- uiform/_utils/benchmarking.py +485 -0
- uiform/_utils/chat.py +332 -0
- uiform/_utils/display.py +443 -0
- uiform/_utils/json_schema.py +2161 -0
- uiform/_utils/mime.py +168 -0
- uiform/_utils/responses.py +163 -0
- uiform/_utils/stream_context_managers.py +52 -0
- uiform/_utils/usage/__init__.py +0 -0
- uiform/_utils/usage/usage.py +300 -0
- uiform/client.py +701 -0
- uiform/py.typed +0 -0
- uiform/resources/__init__.py +0 -0
- uiform/resources/consensus/__init__.py +3 -0
- uiform/resources/consensus/client.py +114 -0
- uiform/resources/consensus/completions.py +252 -0
- uiform/resources/consensus/completions_stream.py +278 -0
- uiform/resources/consensus/responses.py +325 -0
- uiform/resources/consensus/responses_stream.py +373 -0
- uiform/resources/deployments/__init__.py +9 -0
- uiform/resources/deployments/client.py +78 -0
- uiform/resources/deployments/endpoints.py +322 -0
- uiform/resources/deployments/links.py +452 -0
- uiform/resources/deployments/logs.py +211 -0
- uiform/resources/deployments/mailboxes.py +496 -0
- uiform/resources/deployments/outlook.py +531 -0
- uiform/resources/deployments/tests.py +158 -0
- uiform/resources/documents/__init__.py +3 -0
- uiform/resources/documents/client.py +255 -0
- uiform/resources/documents/extractions.py +441 -0
- uiform/resources/evals.py +812 -0
- uiform/resources/files.py +24 -0
- uiform/resources/finetuning.py +62 -0
- uiform/resources/jsonlUtils.py +1046 -0
- uiform/resources/models.py +45 -0
- uiform/resources/openai_example.py +22 -0
- uiform/resources/processors/__init__.py +3 -0
- uiform/resources/processors/automations/__init__.py +9 -0
- uiform/resources/processors/automations/client.py +78 -0
- uiform/resources/processors/automations/endpoints.py +317 -0
- uiform/resources/processors/automations/links.py +356 -0
- uiform/resources/processors/automations/logs.py +211 -0
- uiform/resources/processors/automations/mailboxes.py +435 -0
- uiform/resources/processors/automations/outlook.py +444 -0
- uiform/resources/processors/automations/tests.py +158 -0
- uiform/resources/processors/client.py +474 -0
- uiform/resources/prompt_optimization.py +76 -0
- uiform/resources/schemas.py +369 -0
- uiform/resources/secrets/__init__.py +9 -0
- uiform/resources/secrets/client.py +20 -0
- uiform/resources/secrets/external_api_keys.py +109 -0
- uiform/resources/secrets/webhook.py +62 -0
- uiform/resources/usage.py +271 -0
- uiform/types/__init__.py +0 -0
- uiform/types/ai_models.py +645 -0
- uiform/types/automations/__init__.py +0 -0
- uiform/types/automations/cron.py +58 -0
- uiform/types/automations/endpoints.py +21 -0
- uiform/types/automations/links.py +28 -0
- uiform/types/automations/mailboxes.py +60 -0
- uiform/types/automations/outlook.py +68 -0
- uiform/types/automations/webhooks.py +21 -0
- uiform/types/chat.py +8 -0
- uiform/types/completions.py +93 -0
- uiform/types/consensus.py +10 -0
- uiform/types/db/__init__.py +0 -0
- uiform/types/db/annotations.py +24 -0
- uiform/types/db/files.py +36 -0
- uiform/types/deployments/__init__.py +0 -0
- uiform/types/deployments/cron.py +59 -0
- uiform/types/deployments/endpoints.py +28 -0
- uiform/types/deployments/links.py +36 -0
- uiform/types/deployments/mailboxes.py +67 -0
- uiform/types/deployments/outlook.py +76 -0
- uiform/types/deployments/webhooks.py +21 -0
- uiform/types/documents/__init__.py +0 -0
- uiform/types/documents/correct_orientation.py +13 -0
- uiform/types/documents/create_messages.py +226 -0
- uiform/types/documents/extractions.py +297 -0
- uiform/types/evals.py +207 -0
- uiform/types/events.py +76 -0
- uiform/types/extractions.py +85 -0
- uiform/types/jobs/__init__.py +0 -0
- uiform/types/jobs/base.py +150 -0
- uiform/types/jobs/batch_annotation.py +22 -0
- uiform/types/jobs/evaluation.py +133 -0
- uiform/types/jobs/finetune.py +6 -0
- uiform/types/jobs/prompt_optimization.py +41 -0
- uiform/types/jobs/webcrawl.py +6 -0
- uiform/types/logs.py +231 -0
- uiform/types/mime.py +257 -0
- uiform/types/modalities.py +68 -0
- uiform/types/pagination.py +6 -0
- uiform/types/schemas/__init__.py +0 -0
- uiform/types/schemas/enhance.py +53 -0
- uiform/types/schemas/evaluate.py +55 -0
- uiform/types/schemas/generate.py +32 -0
- uiform/types/schemas/layout.py +58 -0
- uiform/types/schemas/object.py +631 -0
- uiform/types/schemas/templates.py +107 -0
- uiform/types/secrets/__init__.py +0 -0
- uiform/types/secrets/external_api_keys.py +22 -0
- uiform/types/standards.py +39 -0
@@ -0,0 +1,812 @@
|
|
1
|
+
from typing import Any, Dict, List, Optional, TypedDict, Union, Literal
|
2
|
+
from io import IOBase
|
3
|
+
from pathlib import Path
|
4
|
+
|
5
|
+
import PIL.Image
|
6
|
+
from pydantic import HttpUrl
|
7
|
+
|
8
|
+
from .._resource import AsyncAPIResource, SyncAPIResource
|
9
|
+
from ..types.standards import PreparedRequest
|
10
|
+
from ..types.evals import (
|
11
|
+
Evaluation,
|
12
|
+
EvaluationDocument,
|
13
|
+
Iteration,
|
14
|
+
DistancesResult,
|
15
|
+
PredictionData,
|
16
|
+
AddIterationFromJsonlRequest,
|
17
|
+
DocumentItem,
|
18
|
+
UpdateEvaluationDocumentRequest,
|
19
|
+
PredictionMetadata,
|
20
|
+
CreateIterationRequest,
|
21
|
+
)
|
22
|
+
from ..types.jobs.base import InferenceSettings
|
23
|
+
|
24
|
+
from ..types.mime import MIMEData
|
25
|
+
from .._utils.mime import prepare_mime_document
|
26
|
+
from ..types.modalities import Modality
|
27
|
+
from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
|
28
|
+
|
29
|
+
from tqdm import tqdm
|
30
|
+
|
31
|
+
|
32
|
+
class DeleteResponse(TypedDict):
    """Response from a delete operation"""

    # True when the server acknowledged the deletion.
    success: bool
    # Identifier of the deleted resource.
    id: str
|
37
|
+
|
38
|
+
|
39
|
+
class ExportResponse(TypedDict):
    """Response from an export operation"""

    # True when the export completed.
    success: bool
    # Location of the exported artifact (path as returned by the server).
    path: str
|
44
|
+
|
45
|
+
|
46
|
+
class EvalsMixin:
    """Shared request builders for the ``/v1/evals`` endpoints.

    Each ``prepare_*`` method only constructs a :class:`PreparedRequest`;
    the sync (``Evals``) and async (``AsyncEvals``) wrappers execute it.
    """

    def prepare_create(
        self,
        name: str,
        json_schema: Dict[str, Any],
        project_id: str | None = None,
        documents: Optional[List[EvaluationDocument]] = None,
        iterations: Optional[List[Iteration]] = None,
        default_inference_settings: Optional[InferenceSettings] = None,
    ) -> PreparedRequest:
        """
        Prepare a request to create a new evaluation.

        Args:
            name: The name of the evaluation
            json_schema: The JSON schema for the evaluation
            project_id: The project ID; falls back to "default_spreadsheets" when not provided
            documents: Initial documents (defaults to an empty list)
            iterations: Initial iterations (defaults to an empty list)
            default_inference_settings: Default inference settings for new iterations

        Returns:
            PreparedRequest: The POST request for /v1/evals
        """
        # Fixed: the previous revision used mutable default arguments
        # (documents=[], iterations=[]) which are shared across calls;
        # None sentinels preserve the same effective defaults safely.
        eval_data = Evaluation(
            name=name,
            json_schema=json_schema,
            project_id=project_id if project_id else "default_spreadsheets",
            documents=documents if documents is not None else [],
            iterations=iterations if iterations is not None else [],
            default_inference_settings=default_inference_settings,
        )
        return PreparedRequest(method="POST", url="/v1/evals", data=eval_data.model_dump(exclude_none=True, mode="json"))

    def prepare_get(self, evaluation_id: str) -> PreparedRequest:
        """Prepare a request to fetch a single evaluation by ID."""
        return PreparedRequest(method="GET", url=f"/v1/evals/{evaluation_id}")

    def prepare_update(
        self,
        evaluation_id: str,
        name: Optional[str] = None,
        project_id: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        documents: Optional[List[EvaluationDocument]] = None,
        iterations: Optional[List[Iteration]] = None,
        default_inference_settings: Optional[InferenceSettings] = None,
    ) -> PreparedRequest:
        """
        Prepare a request to update an evaluation with partial updates.

        Only the provided fields will be updated. Fields set to None will be excluded from the update.
        """
        # Build a dictionary with only the provided fields
        update_data: Dict[str, Any] = {}
        if name is not None:
            update_data["name"] = name
        if project_id is not None:
            update_data["project_id"] = project_id
        if json_schema is not None:
            update_data["json_schema"] = json_schema
        if documents is not None:
            update_data["documents"] = [doc.model_dump(exclude_none=True, mode="json") for doc in documents]
        if iterations is not None:
            # Renamed loop variable: "iter" shadowed the builtin of the same name.
            update_data["iterations"] = [iteration.model_dump(exclude_none=True, mode="json") for iteration in iterations]
        if default_inference_settings is not None:
            update_data["default_inference_settings"] = default_inference_settings.model_dump(exclude_none=True, mode="json")

        return PreparedRequest(method="PATCH", url=f"/v1/evals/{evaluation_id}", data=update_data)

    def prepare_list(self, project_id: Optional[str] = None) -> PreparedRequest:
        """Prepare a request to list evaluations, optionally filtered by project."""
        params = {}
        if project_id:
            params["project_id"] = project_id
        return PreparedRequest(method="GET", url="/v1/evals", params=params)

    def prepare_delete(self, id: str) -> PreparedRequest:
        """Prepare a request to delete an evaluation by ID.

        NOTE: the parameter name shadows the builtin ``id`` but is kept
        for backward compatibility with keyword callers.
        """
        return PreparedRequest(method="DELETE", url=f"/v1/evals/{id}")
|
109
|
+
|
110
|
+
|
111
|
+
class DocumentsMixin:
    """Request builders for the evaluation-document endpoints."""

    def prepare_get(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the GET request for a single evaluation document."""
        url = f"/v1/evals/{evaluation_id}/documents/{document_id}"
        return PreparedRequest(method="GET", url=url)

    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: Dict[str, Any]) -> PreparedRequest:
        """Build the POST request that adds a document (with its ground truth) to an evaluation."""
        # Wrap the MIME payload and annotation into the API's DocumentItem shape.
        item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=None)
        payload = item.model_dump(mode="json")
        return PreparedRequest(method="POST", url=f"/v1/evals/{evaluation_id}/documents", data=payload)

    def prepare_list(self, evaluation_id: str, filename: Optional[str] = None) -> PreparedRequest:
        """Build the GET request listing an evaluation's documents, optionally filtered by filename."""
        query = {"filename": filename} if filename else {}
        return PreparedRequest(method="GET", url=f"/v1/evals/{evaluation_id}/documents", params=query)

    def prepare_update(self, evaluation_id: str, document_id: str, annotation: Dict[str, Any]) -> PreparedRequest:
        """Build the PUT request that replaces a document's ground-truth annotation."""
        body = UpdateEvaluationDocumentRequest(annotation=annotation, annotation_metadata=None)
        return PreparedRequest(
            method="PUT",
            url=f"/v1/evals/{evaluation_id}/documents/{document_id}",
            data=body.model_dump(mode="json", exclude_none=True),
        )

    def prepare_delete(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the DELETE request for a single evaluation document."""
        return PreparedRequest(method="DELETE", url=f"/v1/evals/{evaluation_id}/documents/{document_id}")
|
135
|
+
|
136
|
+
|
137
|
+
class IterationsMixin:
    """Request builders for the evaluation-iteration endpoints."""

    def prepare_get(self, iteration_id: str) -> PreparedRequest:
        """Build the GET request for a single iteration."""
        return PreparedRequest(method="GET", url=f"/v1/evals/iterations/{iteration_id}")

    def prepare_list(self, evaluation_id: str, model: Optional[str] = None) -> PreparedRequest:
        """Build the GET request listing an evaluation's iterations, optionally filtered by model."""
        query = {"model": model} if model else {}
        return PreparedRequest(method="GET", url=f"/v1/evals/{evaluation_id}/iterations", params=query)

    def prepare_create(
        self,
        evaluation_id: str,
        model: str,
        json_schema: Optional[Dict[str, Any]] = None,
        temperature: float = 0.0,
        modality: Modality = "native",
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: int = 96,
        browser_canvas: Literal['A3', 'A4', 'A5'] = 'A4',
        n_consensus: int = 1,
    ) -> PreparedRequest:
        """Build the POST request that creates a new iteration for an evaluation.

        When ``json_schema`` is None the server falls back to the evaluation's schema.
        """
        settings = InferenceSettings(
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        body = CreateIterationRequest(inference_settings=settings, json_schema=json_schema)
        return PreparedRequest(
            method="POST",
            url=f"/v1/evals/{evaluation_id}/iterations/create",
            data=body.model_dump(exclude_none=True, mode="json"),
        )

    def prepare_update(
        self, iteration_id: str, json_schema: Dict[str, Any], model: str, temperature: float = 0.0, image_resolution_dpi: int = 96, browser_canvas: Literal['A3', 'A4', 'A5'] = 'A4'
    ) -> PreparedRequest:
        """Build the PUT request that replaces an iteration's schema and inference settings.

        Predictions are reset to an empty list by this update.
        """
        settings = InferenceSettings(
            model=model,
            temperature=temperature,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
        )
        payload = Iteration(id=iteration_id, json_schema=json_schema, inference_settings=settings, predictions=[])
        return PreparedRequest(
            method="PUT",
            url=f"/v1/evals/iterations/{iteration_id}",
            data=payload.model_dump(exclude_none=True, mode="json"),
        )

    def prepare_delete(self, iteration_id: str) -> PreparedRequest:
        """Build the DELETE request for a single iteration."""
        return PreparedRequest(method="DELETE", url=f"/v1/evals/iterations/{iteration_id}")

    def prepare_compute_distances(self, iteration_id: str, document_id: str) -> PreparedRequest:
        """Build the GET request that computes distances for one document within an iteration."""
        return PreparedRequest(method="GET", url=f"/v1/evals/iterations/{iteration_id}/compute_distances/{document_id}")
|
192
|
+
|
193
|
+
|
194
|
+
class Evals(SyncAPIResource, EvalsMixin):
    """Evals API wrapper"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sub-resources for an evaluation's documents and iterations.
        self.documents = Documents(self._client)
        self.iterations = Iterations(self._client)

    def create(self, name: str, json_schema: Dict[str, Any], project_id: str | None = None) -> Evaluation:
        """
        Create a new evaluation.

        Args:
            name: The name of the evaluation
            json_schema: The JSON schema for the evaluation
            project_id: The project ID to associate with the evaluation

        Returns:
            Evaluation: The created evaluation
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_create(name, json_schema, project_id)
        response = self._client._prepared_request(request)
        return Evaluation(**response)

    def get(self, evaluation_id: str) -> Evaluation:
        """
        Get an evaluation by ID.

        Args:
            evaluation_id: The ID of the evaluation to retrieve

        Returns:
            Evaluation: The evaluation
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id)
        response = self._client._prepared_request(request)
        return Evaluation(**response)

    def update(
        self,
        evaluation_id: str,
        name: Optional[str] = None,
        project_id: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        documents: Optional[List[EvaluationDocument]] = None,
        iterations: Optional[List[Iteration]] = None,
        default_inference_settings: Optional[InferenceSettings] = None,
    ) -> Evaluation:
        """
        Update an evaluation with partial updates.

        Args:
            evaluation_id: The ID of the evaluation to update
            name: Optional new name for the evaluation
            project_id: Optional new project ID
            json_schema: Optional new JSON schema
            documents: Optional list of documents to update
            iterations: Optional list of iterations to update
            default_inference_settings: Optional annotation properties

        Returns:
            Evaluation: The updated evaluation
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_update(
            evaluation_id=evaluation_id, name=name, project_id=project_id, json_schema=json_schema, documents=documents, iterations=iterations, default_inference_settings=default_inference_settings
        )
        response = self._client._prepared_request(request)
        return Evaluation(**response)

    def list(self, project_id: Optional[str] = None) -> List[Evaluation]:
        """
        List evaluations for a project.

        Args:
            project_id: Optional project ID to filter evaluations by.
                Made optional for consistency with prepare_list and
                AsyncEvals.list (backward compatible: existing positional
                and keyword callers are unaffected).

        Returns:
            List[Evaluation]: List of evaluations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(project_id)
        response = self._client._prepared_request(request)
        return [Evaluation(**item) for item in response.get("data", [])]

    def delete(self, evaluation_id: str) -> DeleteResponse:
        """
        Delete an evaluation.

        Args:
            evaluation_id: The ID of the evaluation to delete

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id)
        return self._client._prepared_request(request)
|
299
|
+
|
300
|
+
|
301
|
+
class Documents(SyncAPIResource, DocumentsMixin):
    """Documents API wrapper for evaluations"""

    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
        """Create a document for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            document: The document to process — a file path (Path or str), a
                file-like object (IOBase), a MIMEData object, a PIL Image, or
                a URL (HttpUrl)
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The created document
        Raises:
            HTTPException if the request fails
        """
        # Normalize every accepted input shape into MIMEData before sending.
        mime: MIMEData = prepare_mime_document(document)
        req = self.prepare_create(evaluation_id, mime, annotation)
        raw = self._client._prepared_request(req)
        return EvaluationDocument(**raw)

    def list(self, evaluation_id: str, filename: Optional[str] = None) -> List[EvaluationDocument]:
        """List documents for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            filename: Optional filename to filter by

        Returns:
            List[EvaluationDocument]: List of documents
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_list(evaluation_id, filename)
        raw = self._client._prepared_request(req)
        return [EvaluationDocument(**entry) for entry in raw.get("data", [])]

    def get(self, evaluation_id: str, document_id: str) -> EvaluationDocument:
        """Get a document by ID.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            EvaluationDocument: The document
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_get(evaluation_id, document_id)
        raw = self._client._prepared_request(req)
        return EvaluationDocument(**raw)

    def update(self, evaluation_id: str, document_id: str, annotation: Dict[str, Any]) -> EvaluationDocument:
        """Update a document's ground-truth annotation.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The updated document
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_update(evaluation_id, document_id, annotation)
        raw = self._client._prepared_request(req)
        return EvaluationDocument(**raw)

    def delete(self, evaluation_id: str, document_id: str) -> DeleteResponse:
        """Delete a document.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_delete(evaluation_id, document_id)
        return self._client._prepared_request(req)
|
398
|
+
|
399
|
+
|
400
|
+
class Iterations(SyncAPIResource, IterationsMixin):
    """Iterations API wrapper for evaluations"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def get(self, iteration_id: str) -> Iteration:
        """
        Get an iteration by ID.

        Added for parity with AsyncIterations.get — the mixin already
        provides prepare_get but the sync wrapper never exposed it.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            Iteration: The iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(iteration_id)
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def list(self, evaluation_id: str, model: Optional[str] = None) -> List[Iteration]:
        """
        List iterations for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: Optional model to filter by

        Returns:
            List[Iteration]: List of iterations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id, model)
        response = self._client._prepared_request(request)
        return [Iteration(**item) for item in response.get("data", [])]

    def create(
        self,
        evaluation_id: str,
        model: str,
        temperature: float = 0.0,
        modality: Modality = "native",
        json_schema: Optional[Dict[str, Any]] = None,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: int = 96,
        browser_canvas: Literal['A3', 'A4', 'A5'] = 'A4',
        n_consensus: int = 1,
    ) -> Iteration:
        """
        Create a new iteration for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            json_schema: The JSON schema for the iteration (if not set, we use the one of the eval)
            model: The model to use for the iteration
            temperature: The temperature to use for the model
            modality: The modality to use (text, image, etc.)
            reasoning_effort: The reasoning effort setting for the model (auto, low, medium, high)
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus iterations to perform

        Returns:
            Iteration: The created iteration
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_create(
            evaluation_id=evaluation_id,
            json_schema=json_schema,
            model=model,
            temperature=temperature,
            modality=modality,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = self._client._prepared_request(request)
        return Iteration(**response)

    def delete(self, iteration_id: str) -> DeleteResponse:
        """
        Delete an iteration.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(iteration_id)
        return self._client._prepared_request(request)

    def compute_distances(self, iteration_id: str, document_id: str) -> DistancesResult:
        """
        Get distances for a document in an iteration.

        Args:
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            DistancesResult: The distances
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_compute_distances(iteration_id, document_id)
        response = self._client._prepared_request(request)
        return DistancesResult(**response)
|
503
|
+
|
504
|
+
|
505
|
+
class AsyncEvals(AsyncAPIResource, EvalsMixin):
    """Async Evals API wrapper"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Async sub-resources mirroring the sync Evals wrapper.
        self.documents = AsyncDocuments(self._client)
        self.iterations = AsyncIterations(self._client)

    async def create(self, name: str, json_schema: Dict[str, Any], project_id: str | None = None) -> Evaluation:
        """Create a new evaluation.

        Args:
            name: The name of the evaluation
            json_schema: The JSON schema for the evaluation
            project_id: The project ID to associate with the evaluation

        Returns:
            Evaluation: The created evaluation
        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_create(name, json_schema, project_id)
        raw = await self._client._prepared_request(prepared)
        return Evaluation(**raw)

    async def get(self, evaluation_id: str) -> Evaluation:
        """Get an evaluation by ID.

        Args:
            evaluation_id: The ID of the evaluation to retrieve

        Returns:
            Evaluation: The evaluation
        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_get(evaluation_id)
        raw = await self._client._prepared_request(prepared)
        return Evaluation(**raw)

    async def update(
        self,
        evaluation_id: str,
        name: Optional[str] = None,
        project_id: Optional[str] = None,
        json_schema: Optional[Dict[str, Any]] = None,
        documents: Optional[List[EvaluationDocument]] = None,
        iterations: Optional[List[Iteration]] = None,
        default_inference_settings: Optional[InferenceSettings] = None,
    ) -> Evaluation:
        """Update an evaluation with partial updates.

        Args:
            evaluation_id: The ID of the evaluation to update
            name: Optional new name for the evaluation
            project_id: Optional new project ID
            json_schema: Optional new JSON schema
            documents: Optional list of documents to update
            iterations: Optional list of iterations to update
            default_inference_settings: Optional annotation properties

        Returns:
            Evaluation: The updated evaluation
        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_update(
            evaluation_id=evaluation_id,
            name=name,
            project_id=project_id,
            json_schema=json_schema,
            documents=documents,
            iterations=iterations,
            default_inference_settings=default_inference_settings,
        )
        raw = await self._client._prepared_request(prepared)
        return Evaluation(**raw)

    async def list(self, project_id: Optional[str] = None) -> List[Evaluation]:
        """List evaluations for a project.

        Args:
            project_id: The project ID to list evaluations for

        Returns:
            List[Evaluation]: List of evaluations
        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_list(project_id)
        raw = await self._client._prepared_request(prepared)
        return [Evaluation(**entry) for entry in raw.get("data", [])]

    async def delete(self, evaluation_id: str) -> DeleteResponse:
        """Delete an evaluation.

        Args:
            evaluation_id: The ID of the evaluation to delete

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_delete(evaluation_id)
        return await self._client._prepared_request(prepared)
|
610
|
+
|
611
|
+
|
612
|
+
class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
|
613
|
+
"""Async Documents API wrapper for evaluations"""
|
614
|
+
|
615
|
+
async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
|
616
|
+
"""
|
617
|
+
Create a document for an evaluation.
|
618
|
+
|
619
|
+
Args:
|
620
|
+
evaluation_id: The ID of the evaluation
|
621
|
+
document: The document to process. Can be:
|
622
|
+
- A file path (Path or str)
|
623
|
+
- A file-like object (IOBase)
|
624
|
+
- A MIMEData object
|
625
|
+
- A PIL Image object
|
626
|
+
- A URL (HttpUrl)
|
627
|
+
annotation: The ground truth for the document
|
628
|
+
|
629
|
+
Returns:
|
630
|
+
EvaluationDocument: The created document
|
631
|
+
Raises:
|
632
|
+
HTTPException if the request fails
|
633
|
+
"""
|
634
|
+
# Convert document to MIME data format
|
635
|
+
mime_document: MIMEData = prepare_mime_document(document)
|
636
|
+
|
637
|
+
# Let prepare_create handle the serialization
|
638
|
+
request = self.prepare_create(evaluation_id, mime_document, annotation)
|
639
|
+
response = await self._client._prepared_request(request)
|
640
|
+
return EvaluationDocument(**response)
|
641
|
+
|
642
|
+
async def list(self, evaluation_id: str, filename: Optional[str] = None) -> List[EvaluationDocument]:
|
643
|
+
"""
|
644
|
+
List documents for an evaluation.
|
645
|
+
|
646
|
+
Args:
|
647
|
+
evaluation_id: The ID of the evaluation
|
648
|
+
filename: Optional filename to filter by
|
649
|
+
|
650
|
+
Returns:
|
651
|
+
List[EvaluationDocument]: List of documents
|
652
|
+
Raises:
|
653
|
+
HTTPException if the request fails
|
654
|
+
"""
|
655
|
+
request = self.prepare_list(evaluation_id, filename)
|
656
|
+
response = await self._client._prepared_request(request)
|
657
|
+
return [EvaluationDocument(**item) for item in response.get("data", [])]
|
658
|
+
|
659
|
+
async def update(self, evaluation_id: str, document_id: str, annotation: Dict[str, Any]) -> EvaluationDocument:
|
660
|
+
"""
|
661
|
+
Update a document.
|
662
|
+
|
663
|
+
Args:
|
664
|
+
evaluation_id: The ID of the evaluation
|
665
|
+
document_id: The ID of the document
|
666
|
+
annotation: The ground truth for the document
|
667
|
+
|
668
|
+
Returns:
|
669
|
+
EvaluationDocument: The updated document
|
670
|
+
Raises:
|
671
|
+
HTTPException if the request fails
|
672
|
+
"""
|
673
|
+
request = self.prepare_update(evaluation_id, document_id, annotation)
|
674
|
+
response = await self._client._prepared_request(request)
|
675
|
+
return EvaluationDocument(**response)
|
676
|
+
|
677
|
+
async def delete(self, evaluation_id: str, document_id: str) -> DeleteResponse:
    """
    Delete a document from an evaluation.

    Args:
        evaluation_id: The ID of the evaluation
        document_id: The ID of the document

    Returns:
        DeleteResponse: The response containing success status and ID

    Raises:
        HTTPException if the request fails
    """
    prepared = self.prepare_delete(evaluation_id, document_id)
    # NOTE(review): unlike the sibling methods, the response is returned as-is
    # instead of being wrapped (e.g. DeleteResponse(**response)) — presumably
    # DeleteResponse is dict-compatible; confirm against its definition.
    return await self._client._prepared_request(prepared)
class AsyncIterations(AsyncAPIResource, IterationsMixin):
    """Async Iterations API wrapper for evaluations."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    async def get(self, iteration_id: str) -> Iteration:
        """
        Fetch a single iteration by its ID.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            Iteration: The iteration

        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_get(iteration_id)
        payload = await self._client._prepared_request(prepared)
        return Iteration(**payload)

    async def list(self, evaluation_id: str, model: Optional[str] = None) -> List[Iteration]:
        """
        List the iterations belonging to an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: Optional model to filter by

        Returns:
            List[Iteration]: List of iterations

        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_list(evaluation_id, model)
        payload = await self._client._prepared_request(prepared)
        # Wrap each raw entry of the response envelope in its model type.
        iterations: List[Iteration] = []
        for entry in payload.get("data", []):
            iterations.append(Iteration(**entry))
        return iterations

    async def create(
        self,
        evaluation_id: str,
        model: str,
        temperature: float = 0.0,
        modality: Modality = "native",
        json_schema: Optional[Dict[str, Any]] = None,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        image_resolution_dpi: int = 96,
        browser_canvas: Literal['A3', 'A4', 'A5'] = 'A4',
        n_consensus: int = 1,
    ) -> Iteration:
        """
        Create a new iteration for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            model: The model to use for the iteration
            temperature: The temperature to use for the model
            modality: The modality to use (text, image, etc.)
            json_schema: The JSON schema for the iteration
            reasoning_effort: The reasoning effort setting for the model (auto, low, medium, high)
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus iterations to perform

        Returns:
            Iteration: The created iteration

        Raises:
            HTTPException if the request fails
        """
        # prepare_create handles the serialization of every setting.
        prepared = self.prepare_create(
            evaluation_id=evaluation_id,
            model=model,
            temperature=temperature,
            modality=modality,
            json_schema=json_schema,
            reasoning_effort=reasoning_effort,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        payload = await self._client._prepared_request(prepared)
        return Iteration(**payload)

    async def delete(self, iteration_id: str) -> DeleteResponse:
        """
        Delete an iteration.

        Args:
            iteration_id: The ID of the iteration

        Returns:
            DeleteResponse: The response containing success status and ID

        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_delete(iteration_id)
        # NOTE(review): returned as-is rather than wrapped in DeleteResponse(**...);
        # presumably DeleteResponse is dict-compatible — confirm against its definition.
        return await self._client._prepared_request(prepared)

    async def compute_distances(self, iteration_id: str, document_id: str) -> DistancesResult:
        """
        Get distances for a document in an iteration.

        Args:
            iteration_id: The ID of the iteration
            document_id: The ID of the document

        Returns:
            DistancesResult: The distances

        Raises:
            HTTPException if the request fails
        """
        prepared = self.prepare_compute_distances(iteration_id, document_id)
        payload = await self._client._prepared_request(prepared)
        return DistancesResult(**payload)