retab 0.0.36__py3-none-any.whl → 0.0.37__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (119):
  1. {uiform → retab}/_utils/ai_models.py +2 -2
  2. {uiform → retab}/_utils/benchmarking.py +15 -16
  3. {uiform → retab}/_utils/chat.py +9 -14
  4. {uiform → retab}/_utils/display.py +0 -3
  5. {uiform → retab}/_utils/json_schema.py +9 -14
  6. {uiform → retab}/_utils/mime.py +11 -14
  7. {uiform → retab}/_utils/responses.py +9 -3
  8. {uiform → retab}/_utils/stream_context_managers.py +1 -1
  9. {uiform → retab}/_utils/usage/usage.py +28 -28
  10. {uiform → retab}/client.py +32 -31
  11. {uiform → retab}/resources/consensus/client.py +17 -36
  12. {uiform → retab}/resources/consensus/completions.py +24 -47
  13. {uiform → retab}/resources/consensus/completions_stream.py +26 -38
  14. {uiform → retab}/resources/consensus/responses.py +31 -80
  15. {uiform → retab}/resources/consensus/responses_stream.py +31 -79
  16. {uiform → retab}/resources/documents/client.py +59 -45
  17. {uiform → retab}/resources/documents/extractions.py +181 -90
  18. {uiform → retab}/resources/evals.py +56 -43
  19. retab/resources/evaluations/__init__.py +3 -0
  20. retab/resources/evaluations/client.py +301 -0
  21. retab/resources/evaluations/documents.py +233 -0
  22. retab/resources/evaluations/iterations.py +452 -0
  23. {uiform → retab}/resources/files.py +2 -2
  24. {uiform → retab}/resources/jsonlUtils.py +220 -216
  25. retab/resources/models.py +73 -0
  26. retab/resources/processors/automations/client.py +244 -0
  27. {uiform → retab}/resources/processors/automations/endpoints.py +77 -118
  28. retab/resources/processors/automations/links.py +294 -0
  29. {uiform → retab}/resources/processors/automations/logs.py +30 -19
  30. {uiform → retab}/resources/processors/automations/mailboxes.py +136 -174
  31. retab/resources/processors/automations/outlook.py +337 -0
  32. {uiform → retab}/resources/processors/automations/tests.py +22 -25
  33. {uiform → retab}/resources/processors/client.py +179 -164
  34. {uiform → retab}/resources/schemas.py +78 -66
  35. {uiform → retab}/resources/secrets/external_api_keys.py +1 -5
  36. retab/resources/secrets/webhook.py +64 -0
  37. {uiform → retab}/resources/usage.py +39 -2
  38. {uiform → retab}/types/ai_models.py +13 -13
  39. {uiform → retab}/types/automations/cron.py +19 -12
  40. {uiform → retab}/types/automations/endpoints.py +7 -4
  41. {uiform → retab}/types/automations/links.py +7 -3
  42. {uiform → retab}/types/automations/mailboxes.py +9 -9
  43. {uiform → retab}/types/automations/outlook.py +15 -11
  44. retab/types/browser_canvas.py +3 -0
  45. {uiform → retab}/types/chat.py +2 -2
  46. {uiform → retab}/types/completions.py +9 -12
  47. retab/types/consensus.py +19 -0
  48. {uiform → retab}/types/db/annotations.py +3 -3
  49. {uiform → retab}/types/db/files.py +8 -6
  50. {uiform → retab}/types/documents/create_messages.py +18 -20
  51. {uiform → retab}/types/documents/extractions.py +69 -24
  52. {uiform → retab}/types/evals.py +5 -5
  53. retab/types/evaluations/__init__.py +31 -0
  54. retab/types/evaluations/documents.py +30 -0
  55. retab/types/evaluations/iterations.py +112 -0
  56. retab/types/evaluations/model.py +73 -0
  57. retab/types/events.py +79 -0
  58. {uiform → retab}/types/extractions.py +33 -10
  59. retab/types/inference_settings.py +15 -0
  60. retab/types/jobs/base.py +54 -0
  61. retab/types/jobs/batch_annotation.py +12 -0
  62. {uiform → retab}/types/jobs/evaluation.py +1 -2
  63. {uiform → retab}/types/logs.py +37 -34
  64. retab/types/metrics.py +32 -0
  65. {uiform → retab}/types/mime.py +22 -20
  66. {uiform → retab}/types/modalities.py +10 -10
  67. retab/types/predictions.py +19 -0
  68. {uiform → retab}/types/schemas/enhance.py +4 -2
  69. {uiform → retab}/types/schemas/evaluate.py +7 -4
  70. {uiform → retab}/types/schemas/generate.py +6 -3
  71. {uiform → retab}/types/schemas/layout.py +1 -1
  72. {uiform → retab}/types/schemas/object.py +13 -14
  73. {uiform → retab}/types/schemas/templates.py +1 -3
  74. {uiform → retab}/types/secrets/external_api_keys.py +0 -1
  75. {uiform → retab}/types/standards.py +18 -1
  76. {retab-0.0.36.dist-info → retab-0.0.37.dist-info}/METADATA +7 -6
  77. retab-0.0.37.dist-info/RECORD +107 -0
  78. retab-0.0.37.dist-info/top_level.txt +1 -0
  79. retab-0.0.36.dist-info/RECORD +0 -96
  80. retab-0.0.36.dist-info/top_level.txt +0 -1
  81. uiform/_utils/benchmarking copy.py +0 -588
  82. uiform/resources/models.py +0 -45
  83. uiform/resources/processors/automations/client.py +0 -78
  84. uiform/resources/processors/automations/links.py +0 -356
  85. uiform/resources/processors/automations/outlook.py +0 -444
  86. uiform/resources/secrets/webhook.py +0 -62
  87. uiform/types/consensus.py +0 -10
  88. uiform/types/events.py +0 -76
  89. uiform/types/jobs/base.py +0 -150
  90. uiform/types/jobs/batch_annotation.py +0 -22
  91. {uiform → retab}/__init__.py +0 -0
  92. {uiform → retab}/_resource.py +0 -0
  93. {uiform → retab}/_utils/__init__.py +0 -0
  94. {uiform → retab}/_utils/usage/__init__.py +0 -0
  95. {uiform → retab}/py.typed +0 -0
  96. {uiform → retab}/resources/__init__.py +0 -0
  97. {uiform → retab}/resources/consensus/__init__.py +0 -0
  98. {uiform → retab}/resources/documents/__init__.py +0 -0
  99. {uiform → retab}/resources/finetuning.py +0 -0
  100. {uiform → retab}/resources/openai_example.py +0 -0
  101. {uiform → retab}/resources/processors/__init__.py +0 -0
  102. {uiform → retab}/resources/processors/automations/__init__.py +0 -0
  103. {uiform → retab}/resources/prompt_optimization.py +0 -0
  104. {uiform → retab}/resources/secrets/__init__.py +0 -0
  105. {uiform → retab}/resources/secrets/client.py +0 -0
  106. {uiform → retab}/types/__init__.py +0 -0
  107. {uiform → retab}/types/automations/__init__.py +0 -0
  108. {uiform → retab}/types/automations/webhooks.py +0 -0
  109. {uiform → retab}/types/db/__init__.py +0 -0
  110. {uiform → retab}/types/documents/__init__.py +0 -0
  111. {uiform → retab}/types/documents/correct_orientation.py +0 -0
  112. {uiform → retab}/types/jobs/__init__.py +0 -0
  113. {uiform → retab}/types/jobs/finetune.py +0 -0
  114. {uiform → retab}/types/jobs/prompt_optimization.py +0 -0
  115. {uiform → retab}/types/jobs/webcrawl.py +0 -0
  116. {uiform → retab}/types/pagination.py +0 -0
  117. {uiform → retab}/types/schemas/__init__.py +0 -0
  118. {uiform → retab}/types/secrets/__init__.py +0 -0
  119. {retab-0.0.36.dist-info → retab-0.0.37.dist-info}/WHEEL +0 -0
@@ -0,0 +1,3 @@
1
+ from .client import AsyncEvaluations, Evaluations
2
+
3
+ __all__ = ["Evaluations", "AsyncEvaluations"]
@@ -0,0 +1,301 @@
1
+ from typing import Any, Dict, List
2
+
3
+ from ..._resource import AsyncAPIResource, SyncAPIResource
4
+ from ...types.evaluations import Evaluation, PatchEvaluationRequest, ListEvaluationParams, CreateEvaluation
5
+ from ...types.inference_settings import InferenceSettings
6
+ from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
7
+ from .documents import Documents, AsyncDocuments
8
+ from .iterations import Iterations, AsyncIterations
9
+
10
+
11
class EvaluationsMixin:
    """Request builders shared by the sync and async evaluation clients.

    Each ``prepare_*`` method only constructs a :class:`PreparedRequest`;
    no network I/O happens here.
    """

    def prepare_create(
        self,
        name: str,
        json_schema: dict[str, Any],
        project_id: str = FieldUnset,
        default_inference_settings: InferenceSettings = FieldUnset,
    ) -> PreparedRequest:
        """Build the POST request that creates a new evaluation."""
        payload = CreateEvaluation(
            name=name,
            json_schema=json_schema,
            project_id=project_id,
            default_inference_settings=default_inference_settings,
        )
        # NOTE(review): create uses exclude_none while update/list use
        # exclude_unset — presumably intentional, but worth confirming.
        return PreparedRequest(method="POST", url="/v1/evaluations", data=payload.model_dump(exclude_none=True, mode="json"))

    def prepare_get(self, evaluation_id: str) -> PreparedRequest:
        """Build the GET request for a single evaluation."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}")

    def prepare_update(
        self,
        evaluation_id: str,
        name: str = FieldUnset,
        project_id: str = FieldUnset,
        json_schema: dict[str, Any] = FieldUnset,
        default_inference_settings: InferenceSettings = FieldUnset,
    ) -> PreparedRequest:
        """Build a PATCH request carrying only the fields the caller supplied.

        Fields left at ``FieldUnset`` are excluded from the payload, so the
        server performs a partial update.
        """
        payload = PatchEvaluationRequest(
            name=name,
            project_id=project_id,
            json_schema=json_schema,
            default_inference_settings=default_inference_settings,
        ).model_dump(exclude_unset=True, mode="json")
        return PreparedRequest(method="PATCH", url=f"/v1/evaluations/{evaluation_id}", data=payload)

    def prepare_list(self, project_id: str = FieldUnset) -> PreparedRequest:
        """Build the GET request that lists evaluations.

        Usage:
            >>> client.evals.list(project_id="project_id")  # scoped to a project
            >>> client.evals.list()                          # all evaluations

        Passing ``project_id=None`` explicitly is not supported — leave the
        argument unset instead.

        Args:
            project_id: Optional project to scope the listing to.

        Returns:
            PreparedRequest: The prepared request.
        """
        query = ListEvaluationParams(project_id=project_id).model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
        return PreparedRequest(method="GET", url="/v1/evaluations", params=query)

    def prepare_delete(self, id: str) -> PreparedRequest:
        """Build the DELETE request for an evaluation."""
        return PreparedRequest(method="DELETE", url=f"/v1/evaluations/{id}")
76
+
77
+
78
class Evaluations(SyncAPIResource, EvaluationsMixin):
    """Evaluations API wrapper"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Sub-resources share this client's transport.
        self.documents = Documents(self._client)
        self.iterations = Iterations(self._client)

    def create(
        self,
        name: str,
        json_schema: dict[str, Any],
        project_id: str = FieldUnset,
        default_inference_settings: InferenceSettings = FieldUnset,
    ) -> Evaluation:
        """Create a new evaluation.

        Args:
            name: The name of the evaluation.
            json_schema: The JSON schema for the evaluation.
            project_id: Optional project ID to associate with the evaluation.
            default_inference_settings: Optional default inference settings.

        Returns:
            Evaluation: The created evaluation.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_create(name, json_schema, project_id, default_inference_settings=default_inference_settings)
        resp = self._client._prepared_request(req)
        return Evaluation(**resp)

    def get(self, evaluation_id: str) -> Evaluation:
        """Fetch a single evaluation by its ID.

        Args:
            evaluation_id: The ID of the evaluation to retrieve.

        Returns:
            Evaluation: The evaluation.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_get(evaluation_id)
        resp = self._client._prepared_request(req)
        return Evaluation(**resp)

    def update(
        self,
        evaluation_id: str,
        name: str = FieldUnset,
        project_id: str = FieldUnset,
        json_schema: dict[str, Any] = FieldUnset,
        default_inference_settings: InferenceSettings = FieldUnset,
    ) -> Evaluation:
        """Partially update an evaluation; only supplied fields are sent.

        Args:
            evaluation_id: The ID of the evaluation to update.
            name: Optional new name.
            project_id: Optional new project ID.
            json_schema: Optional new JSON schema.
            default_inference_settings: Optional new default inference settings.

        Returns:
            Evaluation: The updated evaluation.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_update(
            evaluation_id=evaluation_id,
            name=name,
            project_id=project_id,
            json_schema=json_schema,
            default_inference_settings=default_inference_settings,
        )
        resp = self._client._prepared_request(req)
        return Evaluation(**resp)

    def list(self, project_id: str = FieldUnset) -> List[Evaluation]:
        """List evaluations, optionally scoped to a project.

        Args:
            project_id: Optional project ID to filter by.

        Returns:
            List[Evaluation]: The matching evaluations.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_list(project_id)
        resp = self._client._prepared_request(req)
        return [Evaluation(**item) for item in resp.get("data", [])]

    def delete(self, evaluation_id: str) -> DeleteResponse:
        """Delete an evaluation.

        Args:
            evaluation_id: The ID of the evaluation to delete.

        Returns:
            DeleteResponse: The response containing success status and ID.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_delete(evaluation_id)
        return self._client._prepared_request(req)
193
+
194
+
195
class AsyncEvaluations(AsyncAPIResource, EvaluationsMixin):
    """Async Evaluations API wrapper"""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Async sub-resources share this client's transport.
        self.documents = AsyncDocuments(self._client)
        self.iterations = AsyncIterations(self._client)

    async def create(
        self,
        name: str,
        json_schema: dict[str, Any],
        project_id: str = FieldUnset,
        default_inference_settings: InferenceSettings = FieldUnset,
    ) -> Evaluation:
        """
        Create a new evaluation.

        Args:
            name: The name of the evaluation
            json_schema: The JSON schema for the evaluation
            project_id: The project ID to associate with the evaluation
            default_inference_settings: The default inference settings to associate with the evaluation

        Returns:
            Evaluation: The created evaluation
        Raises:
            HTTPException if the request fails
        """
        # Forward default_inference_settings so the async client matches the
        # sync Evaluations.create signature (previously it was missing here).
        request = self.prepare_create(name, json_schema, project_id, default_inference_settings=default_inference_settings)
        response = await self._client._prepared_request(request)
        return Evaluation(**response)

    async def get(self, evaluation_id: str) -> Evaluation:
        """
        Get an evaluation by ID.

        Args:
            evaluation_id: The ID of the evaluation to retrieve

        Returns:
            Evaluation: The evaluation
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id)
        response = await self._client._prepared_request(request)
        return Evaluation(**response)

    async def update(
        self,
        evaluation_id: str,
        name: str = FieldUnset,
        project_id: str = FieldUnset,
        json_schema: dict[str, Any] = FieldUnset,
        default_inference_settings: InferenceSettings = FieldUnset,
    ) -> Evaluation:
        """
        Update an evaluation with partial updates.

        Only the fields that are provided are sent; the rest are left unchanged.

        Args:
            evaluation_id: The ID of the evaluation to update
            name: Optional new name for the evaluation
            project_id: Optional new project ID
            json_schema: Optional new JSON schema
            default_inference_settings: Optional new default inference settings

        Returns:
            Evaluation: The updated evaluation
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_update(
            evaluation_id=evaluation_id,
            name=name,
            project_id=project_id,
            json_schema=json_schema,
            default_inference_settings=default_inference_settings,
        )
        response = await self._client._prepared_request(request)
        return Evaluation(**response)

    async def list(self, project_id: str = FieldUnset) -> List[Evaluation]:
        """
        List evaluations for a project.

        Args:
            project_id: The project ID to list evaluations for

        Returns:
            List[Evaluation]: List of evaluations
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(project_id)
        response = await self._client._prepared_request(request)
        return [Evaluation(**item) for item in response.get("data", [])]

    async def delete(self, evaluation_id: str) -> DeleteResponse:
        """
        Delete an evaluation.

        Args:
            evaluation_id: The ID of the evaluation to delete

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id)
        return await self._client._prepared_request(request)
@@ -0,0 +1,233 @@
1
+ from io import IOBase
2
+ from pathlib import Path
3
+ from typing import Any, Dict, List, Union
4
+
5
+ import PIL.Image
6
+ from pydantic import HttpUrl
7
+
8
+ from ..._resource import AsyncAPIResource, SyncAPIResource
9
+ from ..._utils.mime import prepare_mime_document
10
+ from ...types.evaluations import DocumentItem, EvaluationDocument, PatchEvaluationDocumentRequest
11
+ from ...types.mime import MIMEData
12
+ from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
13
+ from ...types.documents.extractions import UiParsedChatCompletion
14
+
15
+
16
class DocumentsMixin:
    """Request builders shared by the sync and async evaluation-document clients.

    Each ``prepare_*`` method only constructs a :class:`PreparedRequest`;
    no network I/O happens here.
    """

    def prepare_get(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the GET request for a single document of an evaluation."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}")

    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: dict[str, Any]) -> PreparedRequest:
        """Build the POST request that attaches a document (with its ground truth) to an evaluation."""
        item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=None)
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/documents", data=item.model_dump(mode="json"))

    def prepare_list(self, evaluation_id: str) -> PreparedRequest:
        """Build the GET request that lists an evaluation's documents."""
        return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents")

    def prepare_update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> PreparedRequest:
        """Build a PATCH request carrying only the supplied annotation fields."""
        patch = PatchEvaluationDocumentRequest(annotation=annotation)
        return PreparedRequest(
            method="PATCH", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}", data=patch.model_dump(mode="json", exclude_unset=True, exclude_defaults=True)
        )

    def prepare_delete(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the DELETE request for a document of an evaluation."""
        return PreparedRequest(method="DELETE", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}")

    def prepare_llm_annotate(self, evaluation_id: str, document_id: str) -> PreparedRequest:
        """Build the POST request that asks the server to LLM-annotate a document (non-streaming)."""
        return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}/llm-annotate", data={"stream": False})
39
+
40
+
41
class Documents(SyncAPIResource, DocumentsMixin):
    """Documents API wrapper for evaluations"""

    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
        """Attach a document and its ground truth to an evaluation.

        Args:
            evaluation_id: The ID of the evaluation.
            document: The document to process. Accepts a file path (Path or
                str), a file-like object (IOBase), a MIMEData object, a PIL
                Image, or a URL (HttpUrl).
            annotation: The ground truth for the document.

        Returns:
            EvaluationDocument: The created document.
        Raises:
            HTTPException if the request fails
        """
        # Normalize any accepted input form into MIMEData before serializing.
        mime_doc = prepare_mime_document(document)
        req = self.prepare_create(evaluation_id, mime_doc, annotation)
        resp = self._client._prepared_request(req)
        return EvaluationDocument(**resp)

    def list(self, evaluation_id: str) -> List[EvaluationDocument]:
        """List the documents attached to an evaluation.

        Args:
            evaluation_id: The ID of the evaluation.

        Returns:
            List[EvaluationDocument]: The evaluation's documents.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_list(evaluation_id)
        resp = self._client._prepared_request(req)
        return [EvaluationDocument(**item) for item in resp.get("data", [])]

    def get(self, evaluation_id: str, document_id: str) -> EvaluationDocument:
        """Fetch a single document of an evaluation.

        Args:
            evaluation_id: The ID of the evaluation.
            document_id: The ID of the document.

        Returns:
            EvaluationDocument: The document.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_get(evaluation_id, document_id)
        resp = self._client._prepared_request(req)
        return EvaluationDocument(**resp)

    def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> EvaluationDocument:
        """Update a document's ground truth.

        Args:
            evaluation_id: The ID of the evaluation.
            document_id: The ID of the document.
            annotation: The new ground truth for the document.

        Returns:
            EvaluationDocument: The updated document.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_update(evaluation_id, document_id, annotation=annotation)
        resp = self._client._prepared_request(req)
        return EvaluationDocument(**resp)

    def delete(self, evaluation_id: str, document_id: str) -> DeleteResponse:
        """Remove a document from an evaluation.

        Args:
            evaluation_id: The ID of the evaluation.
            document_id: The ID of the document.

        Returns:
            DeleteResponse: The response containing success status and ID.
        Raises:
            HTTPException if the request fails
        """
        req = self.prepare_delete(evaluation_id, document_id)
        return self._client._prepared_request(req)

    def llm_annotate(self, evaluation_id: str, document_id: str) -> UiParsedChatCompletion:
        """Annotate a document with an LLM.

        This updates the document (within the evaluation) with the latest
        extraction.
        """
        req = self.prepare_llm_annotate(evaluation_id, document_id)
        resp = self._client._prepared_request(req)
        return UiParsedChatCompletion(**resp)
144
+
145
+
146
class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
    """Async Documents API wrapper for evaluations"""

    async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
        """
        Create a document for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation
            document: The document to process. Can be:
                - A file path (Path or str)
                - A file-like object (IOBase)
                - A MIMEData object
                - A PIL Image object
                - A URL (HttpUrl)
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The created document
        Raises:
            HTTPException if the request fails
        """
        # Convert document to MIME data format; prepare_create handles serialization.
        mime_document: MIMEData = prepare_mime_document(document)
        request = self.prepare_create(evaluation_id, mime_document, annotation)
        response = await self._client._prepared_request(request)
        return EvaluationDocument(**response)

    async def list(self, evaluation_id: str) -> List[EvaluationDocument]:
        """
        List documents for an evaluation.

        Args:
            evaluation_id: The ID of the evaluation

        Returns:
            List[EvaluationDocument]: List of documents
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_list(evaluation_id)
        response = await self._client._prepared_request(request)
        return [EvaluationDocument(**item) for item in response.get("data", [])]

    async def get(self, evaluation_id: str, document_id: str) -> EvaluationDocument:
        """
        Get a document by ID.

        Added for parity with the sync Documents client, which exposes get();
        the underlying prepare_get() already exists on DocumentsMixin.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            EvaluationDocument: The document
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_get(evaluation_id, document_id)
        response = await self._client._prepared_request(request)
        return EvaluationDocument(**response)

    async def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> EvaluationDocument:
        """
        Update a document.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document
            annotation: The ground truth for the document

        Returns:
            EvaluationDocument: The updated document
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_update(evaluation_id, document_id, annotation=annotation)
        response = await self._client._prepared_request(request)
        return EvaluationDocument(**response)

    async def delete(self, evaluation_id: str, document_id: str) -> DeleteResponse:
        """
        Delete a document.

        Args:
            evaluation_id: The ID of the evaluation
            document_id: The ID of the document

        Returns:
            DeleteResponse: The response containing success status and ID
        Raises:
            HTTPException if the request fails
        """
        request = self.prepare_delete(evaluation_id, document_id)
        return await self._client._prepared_request(request)

    async def llm_annotate(self, evaluation_id: str, document_id: str) -> UiParsedChatCompletion:
        """
        Annotate a document with an LLM.

        This method updates the document (within the evaluation) with the
        latest extraction.
        """
        request = self.prepare_llm_annotate(evaluation_id, document_id)
        response = await self._client._prepared_request(request)
        return UiParsedChatCompletion(**response)