retab 0.0.41-py3-none-any.whl → 0.0.43-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. retab/__init__.py +2 -1
  2. retab/client.py +21 -50
  3. retab/resources/consensus/client.py +1 -1
  4. retab/resources/consensus/completions_stream.py +2 -2
  5. retab/resources/consensus/responses.py +1 -1
  6. retab/resources/documents/client.py +103 -76
  7. retab/resources/documents/extractions.py +55 -46
  8. retab/resources/evaluations/client.py +32 -19
  9. retab/resources/evaluations/documents.py +12 -11
  10. retab/resources/evaluations/iterations.py +48 -30
  11. retab/resources/jsonlUtils.py +3 -4
  12. retab/resources/processors/automations/endpoints.py +57 -43
  13. retab/resources/processors/automations/links.py +54 -45
  14. retab/resources/processors/automations/logs.py +2 -2
  15. retab/resources/processors/automations/mailboxes.py +116 -90
  16. retab/resources/processors/automations/outlook.py +126 -86
  17. retab/resources/processors/automations/tests.py +7 -1
  18. retab/resources/processors/client.py +37 -32
  19. retab/resources/usage.py +2 -0
  20. retab/types/ai_models.py +1 -1
  21. retab/types/automations/mailboxes.py +1 -1
  22. retab/types/deprecated_evals.py +195 -0
  23. retab/types/documents/extractions.py +2 -2
  24. retab/types/documents/parse.py +3 -1
  25. retab/types/evaluations/__init__.py +5 -2
  26. retab/types/evaluations/iterations.py +9 -43
  27. retab/types/evaluations/model.py +20 -22
  28. retab/types/extractions.py +35 -9
  29. retab/types/logs.py +5 -6
  30. retab/types/mime.py +1 -10
  31. retab/types/schemas/enhance.py +22 -5
  32. retab/types/schemas/evaluate.py +1 -1
  33. retab/types/schemas/object.py +26 -0
  34. retab/types/standards.py +2 -2
  35. retab/utils/__init__.py +3 -0
  36. retab/utils/ai_models.py +127 -12
  37. retab/utils/hashing.py +24 -0
  38. retab/utils/json_schema.py +1 -26
  39. retab/utils/mime.py +0 -17
  40. retab-0.0.43.dist-info/METADATA +117 -0
  41. {retab-0.0.41.dist-info → retab-0.0.43.dist-info}/RECORD +43 -57
  42. retab/_utils/__init__.py +0 -0
  43. retab/_utils/_model_cards/anthropic.yaml +0 -59
  44. retab/_utils/_model_cards/auto.yaml +0 -43
  45. retab/_utils/_model_cards/gemini.yaml +0 -117
  46. retab/_utils/_model_cards/openai.yaml +0 -301
  47. retab/_utils/_model_cards/xai.yaml +0 -28
  48. retab/_utils/ai_models.py +0 -138
  49. retab/_utils/benchmarking.py +0 -484
  50. retab/_utils/chat.py +0 -327
  51. retab/_utils/display.py +0 -440
  52. retab/_utils/json_schema.py +0 -2156
  53. retab/_utils/mime.py +0 -165
  54. retab/_utils/responses.py +0 -169
  55. retab/_utils/stream_context_managers.py +0 -52
  56. retab/_utils/usage/__init__.py +0 -0
  57. retab/_utils/usage/usage.py +0 -301
  58. retab-0.0.41.dist-info/METADATA +0 -418
  59. {retab-0.0.41.dist-info → retab-0.0.43.dist-info}/WHEEL +0 -0
  60. {retab-0.0.41.dist-info → retab-0.0.43.dist-info}/top_level.txt +0 -0

retab/resources/documents/extractions.py

@@ -10,7 +10,6 @@ from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionRea
 from openai.types.chat.parsed_chat_completion import ParsedChatCompletionMessage
 from openai.types.responses.response import Response
 from openai.types.responses.response_input_param import ResponseInputItemParam
-from pydantic_core import PydanticUndefined
 from pydantic import HttpUrl
 
 from ..._resource import AsyncAPIResource, SyncAPIResource
@@ -23,7 +22,7 @@ from ...types.documents.extractions import DocumentExtractRequest, LogExtraction
 from ...types.browser_canvas import BrowserCanvas
 from ...types.modalities import Modality
 from ...types.schemas.object import Schema
-from ...types.standards import PreparedRequest
+from ...types.standards import PreparedRequest, FieldUnset
 
 
 def maybe_parse_to_pydantic(schema: Schema, response: RetabParsedChatCompletion, allow_partial: bool = False) -> RetabParsedChatCompletion:
@@ -44,14 +43,14 @@ class BaseExtractionsMixin:
         json_schema: dict[str, Any] | Path | str,
         document: Path | str | IOBase | HttpUrl | None = None,
         documents: list[Path | str | IOBase | HttpUrl] | None = None,
-        image_resolution_dpi: int = PydanticUndefined,  # type: ignore[assignment]
-        browser_canvas: BrowserCanvas = PydanticUndefined,  # type: ignore[assignment]
-        model: str = PydanticUndefined,  # type: ignore[assignment]
-        temperature: float = PydanticUndefined,  # type: ignore[assignment]
-        modality: Modality = PydanticUndefined,  # type: ignore[assignment]
-        reasoning_effort: ChatCompletionReasoningEffort = PydanticUndefined,  # type: ignore[assignment]
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        model: str = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
         stream: bool = False,
-        n_consensus: int = PydanticUndefined,  # type: ignore[assignment]
+        n_consensus: int = FieldUnset,
         store: bool = False,
         idempotency_key: str | None = None,
     ) -> PreparedRequest:
@@ -71,20 +70,30 @@ class BaseExtractionsMixin:
         else:
             raise ValueError("Must provide either 'document' or 'documents' parameter.")
 
+        # Build request dictionary with only provided fields
+        request_dict = {
+            'json_schema': json_schema,
+            'documents': processed_documents,
+            'stream': stream,
+            'store': store,
+        }
+        if model is not FieldUnset:
+            request_dict['model'] = model
+        if temperature is not FieldUnset:
+            request_dict['temperature'] = temperature
+        if modality is not FieldUnset:
+            request_dict['modality'] = modality
+        if reasoning_effort is not FieldUnset:
+            request_dict['reasoning_effort'] = reasoning_effort
+        if n_consensus is not FieldUnset:
+            request_dict['n_consensus'] = n_consensus
+        if image_resolution_dpi is not FieldUnset:
+            request_dict['image_resolution_dpi'] = image_resolution_dpi
+        if browser_canvas is not FieldUnset:
+            request_dict['browser_canvas'] = browser_canvas
+
         # Validate DocumentAPIRequest data (raises exception if invalid)
-        request = DocumentExtractRequest(
-            json_schema=json_schema,
-            documents=processed_documents,
-            model=model,
-            temperature=temperature,
-            stream=stream,
-            modality=modality,
-            store=store,
-            reasoning_effort=reasoning_effort,
-            n_consensus=n_consensus,
-            image_resolution_dpi=image_resolution_dpi,
-            browser_canvas=browser_canvas,
-        )
+        request = DocumentExtractRequest(**request_dict)
 
         return PreparedRequest(
             method="POST", url="/v1/documents/extractions", data=request.model_dump(mode="json", exclude_unset=True, exclude_defaults=True), idempotency_key=idempotency_key
@@ -144,12 +153,12 @@ class Extractions(SyncAPIResource, BaseExtractionsMixin):
         model: str,
         document: Path | str | IOBase | HttpUrl | None = None,
         documents: list[Path | str | IOBase | HttpUrl] | None = None,
-        image_resolution_dpi: int = PydanticUndefined,  # type: ignore[assignment]
-        browser_canvas: BrowserCanvas = PydanticUndefined,  # type: ignore[assignment]
-        temperature: float = PydanticUndefined,  # type: ignore[assignment]
-        modality: Modality = PydanticUndefined,  # type: ignore[assignment]
-        reasoning_effort: ChatCompletionReasoningEffort = PydanticUndefined,  # type: ignore[assignment]
-        n_consensus: int = PydanticUndefined,  # type: ignore[assignment]
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
+        n_consensus: int = FieldUnset,
         idempotency_key: str | None = None,
         store: bool = False,
     ) -> RetabParsedChatCompletion:
@@ -204,12 +213,12 @@ class Extractions(SyncAPIResource, BaseExtractionsMixin):
         model: str,
         document: Path | str | IOBase | HttpUrl | None = None,
         documents: list[Path | str | IOBase | HttpUrl] | None = None,
-        image_resolution_dpi: int = PydanticUndefined,  # type: ignore[assignment]
-        browser_canvas: BrowserCanvas = PydanticUndefined,  # type: ignore[assignment]
-        temperature: float = PydanticUndefined,  # type: ignore[assignment]
-        modality: Modality = PydanticUndefined,  # type: ignore[assignment]
-        reasoning_effort: ChatCompletionReasoningEffort = PydanticUndefined,  # type: ignore[assignment]
-        n_consensus: int = PydanticUndefined,  # type: ignore[assignment]
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
+        n_consensus: int = FieldUnset,
         idempotency_key: str | None = None,
         store: bool = False,
     ) -> Generator[RetabParsedChatCompletion, None, None]:
@@ -345,12 +354,12 @@ class AsyncExtractions(AsyncAPIResource, BaseExtractionsMixin):
         model: str,
         document: Path | str | IOBase | HttpUrl | None = None,
         documents: list[Path | str | IOBase | HttpUrl] | None = None,
-        image_resolution_dpi: int = PydanticUndefined,  # type: ignore[assignment]
-        browser_canvas: BrowserCanvas = PydanticUndefined,  # type: ignore[assignment]
-        temperature: float = PydanticUndefined,  # type: ignore[assignment]
-        modality: Modality = PydanticUndefined,  # type: ignore[assignment]
-        reasoning_effort: ChatCompletionReasoningEffort = PydanticUndefined,  # type: ignore[assignment]
-        n_consensus: int = PydanticUndefined,  # type: ignore[assignment]
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
+        n_consensus: int = FieldUnset,
         idempotency_key: str | None = None,
         store: bool = False,
     ) -> RetabParsedChatCompletion:
@@ -401,12 +410,12 @@ class AsyncExtractions(AsyncAPIResource, BaseExtractionsMixin):
         model: str,
         document: Path | str | IOBase | HttpUrl | None = None,
         documents: list[Path | str | IOBase | HttpUrl] | None = None,
-        image_resolution_dpi: int = PydanticUndefined,  # type: ignore[assignment]
-        browser_canvas: BrowserCanvas = PydanticUndefined,  # type: ignore[assignment]
-        temperature: float = PydanticUndefined,  # type: ignore[assignment]
-        modality: Modality = PydanticUndefined,  # type: ignore[assignment]
-        reasoning_effort: ChatCompletionReasoningEffort = PydanticUndefined,  # type: ignore[assignment]
-        n_consensus: int = PydanticUndefined,  # type: ignore[assignment]
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
+        n_consensus: int = FieldUnset,
         idempotency_key: str | None = None,
         store: bool = False,
     ) -> AsyncGenerator[RetabParsedChatCompletion, None]:
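
The recurring change in this file replaces pydantic_core.PydanticUndefined with the SDK's own FieldUnset sentinel and forwards only the parameters the caller actually passed. A minimal, self-contained sketch of that pattern, assuming a stand-in sentinel and a hypothetical build_extraction_payload helper (neither is the package's real internals):

from typing import Any

FieldUnset: Any = object()  # stand-in for retab.types.standards.FieldUnset (assumed)

def build_extraction_payload(
    json_schema: dict[str, Any],
    documents: list[Any],
    model: str = FieldUnset,
    temperature: float = FieldUnset,
) -> dict[str, Any]:
    # Required fields always go in; optional ones only when explicitly provided,
    # so server-side defaults are never overridden by accident.
    payload: dict[str, Any] = {"json_schema": json_schema, "documents": documents}
    if model is not FieldUnset:
        payload["model"] = model
    if temperature is not FieldUnset:
        payload["temperature"] = temperature
    return payload

print(build_extraction_payload({"type": "object"}, []))
# {'json_schema': {'type': 'object'}, 'documents': []}
print(build_extraction_payload({"type": "object"}, [], temperature=0.0))
# temperature appears even though it equals a typical default, because it was passed explicitly
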

retab/resources/evaluations/client.py

@@ -1,7 +1,7 @@
 from typing import Any, Dict, List
 
 from ..._resource import AsyncAPIResource, SyncAPIResource
-from ...types.evaluations import Evaluation, PatchEvaluationRequest, ListEvaluationParams, CreateEvaluation
+from ...types.evaluations import Evaluation, PatchEvaluationRequest, ListEvaluationParams, BaseEvaluation
 from ...types.inference_settings import InferenceSettings
 from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
 from .documents import Documents, AsyncDocuments
@@ -16,14 +16,18 @@ class EvaluationsMixin:
         project_id: str = FieldUnset,
         default_inference_settings: InferenceSettings = FieldUnset,
     ) -> PreparedRequest:
-        # Use CreateEvaluation model
-        eval_data = CreateEvaluation(
-            name=name,
-            json_schema=json_schema,
-            project_id=project_id,
-            default_inference_settings=default_inference_settings,
-        )
-        return PreparedRequest(method="POST", url="/v1/evaluations", data=eval_data.model_dump(exclude_none=True, mode="json"))
+        # Use BaseEvaluation model
+        eval_dict = {
+            "name": name,
+            "json_schema": json_schema,
+        }
+        if project_id is not FieldUnset:
+            eval_dict["project_id"] = project_id
+        if default_inference_settings is not FieldUnset:
+            eval_dict["default_inference_settings"] = default_inference_settings
+
+        eval_data = BaseEvaluation(**eval_dict)
+        return PreparedRequest(method="POST", url="/v1/evaluations", data=eval_data.model_dump(exclude_unset=True, mode="json"))
 
     def prepare_get(self, evaluation_id: str) -> PreparedRequest:
         return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}")
@@ -42,12 +46,17 @@ class EvaluationsMixin:
         Only the provided fields will be updated. Fields set to None will be excluded from the update.
         """
         # Build a dictionary with only the provided fields
-        data = PatchEvaluationRequest(
-            name=name,
-            project_id=project_id,
-            json_schema=json_schema,
-            default_inference_settings=default_inference_settings,
-        ).model_dump(exclude_unset=True, mode="json")
+        update_dict = {}
+        if name is not FieldUnset:
+            update_dict["name"] = name
+        if project_id is not FieldUnset:
+            update_dict["project_id"] = project_id
+        if json_schema is not FieldUnset:
+            update_dict["json_schema"] = json_schema
+        if default_inference_settings is not FieldUnset:
+            update_dict["default_inference_settings"] = default_inference_settings
+
+        data = PatchEvaluationRequest(**update_dict).model_dump(exclude_unset=True, mode="json")
 
         return PreparedRequest(method="PATCH", url=f"/v1/evaluations/{evaluation_id}", data=data)
 
@@ -56,11 +65,11 @@ class EvaluationsMixin:
         Prepare a request to list evaluations.
 
         Usage:
-        >>> client.evals.list(project_id="project_id")  # List all evaluations for a project
-        >>> client.evals.list()  # List all evaluations (no project_id)
+        >>> client.evaluations.list(project_id="project_id")  # List all evaluations for a project
+        >>> client.evaluations.list()  # List all evaluations (no project_id)
 
         This does not work:
-        >>> client.evals.list(project_id=None)
+        >>> client.evaluations.list(project_id=None)
 
         Args:
             project_id: The project ID to list evaluations for
@@ -68,7 +77,11 @@ class EvaluationsMixin:
         Returns:
             PreparedRequest: The prepared request
         """
-        params = ListEvaluationParams(project_id=project_id).model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
+        params_dict = {}
+        if project_id is not FieldUnset:
+            params_dict["project_id"] = project_id
+
+        params = ListEvaluationParams(**params_dict).model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
         return PreparedRequest(method="GET", url="/v1/evaluations", params=params)
 
     def prepare_delete(self, id: str) -> PreparedRequest:
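
The docstring change above also reflects the resource rename from client.evals to client.evaluations. A hedged usage sketch: the Retab import and client construction are assumptions for illustration, while the .evaluations.list(...) calls mirror the updated docstring.

from retab import Retab  # assumed top-level client export

client = Retab()  # assumed to read API credentials from the environment

project_evals = client.evaluations.list(project_id="project_id")  # evaluations for one project
all_evals = client.evaluations.list()                             # all evaluations
# Per the docstring, passing project_id=None is not supported; omit the argument instead.
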

retab/resources/evaluations/documents.py

@@ -8,6 +8,7 @@ from pydantic import HttpUrl
 from ..._resource import AsyncAPIResource, SyncAPIResource
 from ...utils.mime import prepare_mime_document
 from ...types.evaluations import DocumentItem, EvaluationDocument, PatchEvaluationDocumentRequest
+from ...types.predictions import PredictionMetadata
 from ...types.mime import MIMEData
 from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
 from ...types.documents.extractions import RetabParsedChatCompletion
@@ -17,18 +18,18 @@ class DocumentsMixin:
     def prepare_get(self, evaluation_id: str, document_id: str) -> PreparedRequest:
         return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}")
 
-    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: dict[str, Any]) -> PreparedRequest:
+    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: dict[str, Any], annotation_metadata: dict[str, Any] | None = None) -> PreparedRequest:
         # Serialize the MIMEData
-        document_item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=None)
+        document_item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=PredictionMetadata(**annotation_metadata) if annotation_metadata else None)
         return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/documents", data=document_item.model_dump(mode="json"))
 
     def prepare_list(self, evaluation_id: str) -> PreparedRequest:
         return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents")
 
-    def prepare_update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> PreparedRequest:
+    def prepare_update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]) -> PreparedRequest:
         update_request = PatchEvaluationDocumentRequest(annotation=annotation)
         return PreparedRequest(
-            method="PATCH", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}", data=update_request.model_dump(mode="json", exclude_unset=True, exclude_defaults=True)
+            method="PATCH", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}", data=update_request.model_dump(mode="json", exclude_unset=True)
         )
 
     def prepare_delete(self, evaluation_id: str, document_id: str) -> PreparedRequest:
@@ -41,7 +42,7 @@ class DocumentsMixin:
 class Documents(SyncAPIResource, DocumentsMixin):
     """Documents API wrapper for evaluations"""
 
-    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
+    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any], annotation_metadata: Dict[str, Any] | None = None) -> EvaluationDocument:
         """
         Create a document for an evaluation.
 
@@ -64,7 +65,7 @@ class Documents(SyncAPIResource, DocumentsMixin):
         mime_document: MIMEData = prepare_mime_document(document)
 
         # Let prepare_create handle the serialization
-        request = self.prepare_create(evaluation_id, mime_document, annotation)
+        request = self.prepare_create(evaluation_id, mime_document, annotation, annotation_metadata)
         response = self._client._prepared_request(request)
         return EvaluationDocument(**response)
 
@@ -101,7 +102,7 @@ class Documents(SyncAPIResource, DocumentsMixin):
         response = self._client._prepared_request(request)
         return EvaluationDocument(**response)
 
-    def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> EvaluationDocument:
+    def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]) -> EvaluationDocument:
         """
         Update a document.
 
@@ -146,7 +147,7 @@ class Documents(SyncAPIResource, DocumentsMixin):
 class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
     """Async Documents API wrapper for evaluations"""
 
-    async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
+    async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any], annotation_metadata: Dict[str, Any] | None = None) -> EvaluationDocument:
         """
         Create a document for an evaluation.
 
@@ -159,7 +160,7 @@ class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
                 - A PIL Image object
                 - A URL (HttpUrl)
             annotation: The ground truth for the document
-
+            annotation_metadata: The metadata of the annotation
         Returns:
             EvaluationDocument: The created document
         Raises:
@@ -169,7 +170,7 @@ class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
         mime_document: MIMEData = prepare_mime_document(document)
 
         # Let prepare_create handle the serialization
-        request = self.prepare_create(evaluation_id, mime_document, annotation)
+        request = self.prepare_create(evaluation_id, mime_document, annotation, annotation_metadata)
         response = await self._client._prepared_request(request)
         return EvaluationDocument(**response)
 
@@ -189,7 +190,7 @@ class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
         response = await self._client._prepared_request(request)
         return [EvaluationDocument(**item) for item in response.get("data", [])]
 
-    async def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any] = FieldUnset) -> EvaluationDocument:
+    async def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]) -> EvaluationDocument:
         """
         Update a document.
 
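
create() on the evaluation documents resource now accepts an optional annotation_metadata dict, which prepare_create wraps into PredictionMetadata before the request is sent. An illustrative call: the accessor path, IDs, and metadata keys are assumptions; only the annotation_metadata parameter itself comes from the diff.

doc = client.evaluations.documents.create(              # accessor path assumed
    evaluation_id="eval_123",                            # hypothetical ID
    document="invoice.pdf",
    annotation={"total": 123.45},                        # ground truth for the document
    annotation_metadata={"likelihoods": {"total": 0.93}},  # keys assumed, not from the diff
)
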

retab/resources/evaluations/iterations.py

@@ -34,15 +34,23 @@ class IterationsMixin:
         browser_canvas: BrowserCanvas = FieldUnset,
         n_consensus: int = FieldUnset,
     ) -> PreparedRequest:
-        inference_settings = InferenceSettings(
-            model=model,
-            temperature=temperature,
-            modality=modality,
-            reasoning_effort=reasoning_effort,
-            image_resolution_dpi=image_resolution_dpi,
-            browser_canvas=browser_canvas,
-            n_consensus=n_consensus,
-        )
+        inference_dict = {}
+        if model is not FieldUnset:
+            inference_dict["model"] = model
+        if temperature is not FieldUnset:
+            inference_dict["temperature"] = temperature
+        if modality is not FieldUnset:
+            inference_dict["modality"] = modality
+        if reasoning_effort is not FieldUnset:
+            inference_dict["reasoning_effort"] = reasoning_effort
+        if image_resolution_dpi is not FieldUnset:
+            inference_dict["image_resolution_dpi"] = image_resolution_dpi
+        if browser_canvas is not FieldUnset:
+            inference_dict["browser_canvas"] = browser_canvas
+        if n_consensus is not FieldUnset:
+            inference_dict["n_consensus"] = n_consensus
+
+        inference_settings = InferenceSettings(**inference_dict)
 
         request = CreateIterationRequest(inference_settings=inference_settings, json_schema=json_schema)
 
@@ -61,19 +69,29 @@ class IterationsMixin:
         browser_canvas: BrowserCanvas = FieldUnset,
         n_consensus: int = FieldUnset,
     ) -> PreparedRequest:
-        inference_settings = InferenceSettings(
-            model=model,
-            temperature=temperature,
-            modality=modality,
-            reasoning_effort=reasoning_effort,
-            image_resolution_dpi=image_resolution_dpi,
-            browser_canvas=browser_canvas,
-            n_consensus=n_consensus,
-        )
-        if not inference_settings.model_dump(exclude_unset=True, mode="json"):
-            inference_settings = FieldUnset
-
-        iteration_data = PatchIterationRequest(json_schema=json_schema, inference_settings=inference_settings)
+        inference_dict = {}
+        if model is not FieldUnset:
+            inference_dict["model"] = model
+        if temperature is not FieldUnset:
+            inference_dict["temperature"] = temperature
+        if modality is not FieldUnset:
+            inference_dict["modality"] = modality
+        if reasoning_effort is not FieldUnset:
+            inference_dict["reasoning_effort"] = reasoning_effort
+        if image_resolution_dpi is not FieldUnset:
+            inference_dict["image_resolution_dpi"] = image_resolution_dpi
+        if browser_canvas is not FieldUnset:
+            inference_dict["browser_canvas"] = browser_canvas
+        if n_consensus is not FieldUnset:
+            inference_dict["n_consensus"] = n_consensus
+
+        iteration_dict = {}
+        if json_schema is not FieldUnset:
+            iteration_dict["json_schema"] = json_schema
+        if inference_dict:  # Only add inference_settings if we have at least one field
+            iteration_dict["inference_settings"] = InferenceSettings(**inference_dict)
+
+        iteration_data = PatchIterationRequest(**iteration_dict)
 
         return PreparedRequest(
             method="PATCH", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}", data=iteration_data.model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
@@ -136,14 +154,14 @@ class Iterations(SyncAPIResource, IterationsMixin):
     def create(
         self,
         evaluation_id: str,
-        model: str,
-        temperature: float = 0.0,
-        modality: Modality = "native",
-        json_schema: Optional[Dict[str, Any]] = None,
-        reasoning_effort: ChatCompletionReasoningEffort = "medium",
-        image_resolution_dpi: int = 96,
-        browser_canvas: BrowserCanvas = "A4",
-        n_consensus: int = 1,
+        model: str = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        json_schema: Optional[Dict[str, Any]] = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        n_consensus: int = FieldUnset,
     ) -> Iteration:
         """
         Create a new iteration for an evaluation.
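
Iteration creation previously hard-coded defaults (temperature=0.0, modality="native", reasoning_effort="medium", image_resolution_dpi=96, browser_canvas="A4", n_consensus=1); every parameter now defaults to FieldUnset, so only explicitly passed values reach InferenceSettings. A hedged sketch of the practical difference, with the accessor path and IDs assumed:

# 0.0.41: a bare create() call still serialized temperature=0.0, modality="native", dpi=96, ...
# 0.0.43: only what you pass is serialized; omitted fields are left for server-side defaults
#         (interpretation of the diff, not a documented guarantee).
iteration = client.evaluations.iterations.create(   # accessor path assumed
    evaluation_id="eval_123",                        # hypothetical ID
    model="gpt-4o-mini",                             # the only field that reaches InferenceSettings
)
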

retab/resources/jsonlUtils.py

@@ -14,9 +14,8 @@ from anthropic import Anthropic
 from openai import OpenAI
 from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from pydantic import BaseModel
-from pydantic_core import PydanticUndefined
 from tqdm import tqdm
-
+from ..types.standards import FieldUnset
 from .._resource import AsyncAPIResource, SyncAPIResource
 from ..utils.ai_models import assert_valid_model_extraction, get_provider_for_model
 from ..utils.chat import convert_to_anthropic_format, convert_to_openai_format, separate_messages
@@ -139,8 +138,8 @@ class Datasets(SyncAPIResource, BaseDatasetsMixin):
         json_schema: dict[str, Any] | Path | str,
         document_annotation_pairs_paths: list[dict[str, Path | str]],
         dataset_path: Path | str,
-        image_resolution_dpi: int = PydanticUndefined,  # type: ignore[assignment]
-        browser_canvas: BrowserCanvas = PydanticUndefined,  # type: ignore[assignment]
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
         modality: Modality = "native",
     ) -> None:
         """Save document-annotation pairs to a JSONL training set.