retab-0.0.42-py3-none-any.whl → retab-0.0.43-py3-none-any.whl
- retab/__init__.py +2 -1
- retab/client.py +16 -45
- retab/resources/consensus/client.py +1 -1
- retab/resources/consensus/responses.py +1 -1
- retab/resources/documents/client.py +94 -68
- retab/resources/documents/extractions.py +55 -46
- retab/resources/evaluations/client.py +32 -19
- retab/resources/evaluations/documents.py +12 -11
- retab/resources/evaluations/iterations.py +48 -30
- retab/resources/jsonlUtils.py +3 -4
- retab/resources/processors/automations/endpoints.py +49 -39
- retab/resources/processors/automations/links.py +52 -43
- retab/resources/processors/automations/mailboxes.py +74 -59
- retab/resources/processors/automations/outlook.py +104 -82
- retab/resources/processors/client.py +35 -30
- retab/resources/usage.py +2 -0
- retab/types/ai_models.py +1 -1
- retab/types/deprecated_evals.py +195 -0
- retab/types/evaluations/__init__.py +5 -2
- retab/types/evaluations/iterations.py +9 -43
- retab/types/evaluations/model.py +20 -22
- retab/types/extractions.py +1 -0
- retab/types/logs.py +5 -6
- retab/types/mime.py +1 -10
- retab/types/schemas/enhance.py +22 -5
- retab/types/schemas/evaluate.py +1 -1
- retab/types/schemas/object.py +26 -0
- retab/types/standards.py +2 -2
- retab/utils/__init__.py +3 -0
- retab/utils/ai_models.py +127 -12
- retab/utils/hashing.py +24 -0
- retab/utils/json_schema.py +1 -26
- retab/utils/mime.py +0 -17
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/METADATA +3 -5
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/RECORD +37 -51
- retab/_utils/__init__.py +0 -0
- retab/_utils/_model_cards/anthropic.yaml +0 -59
- retab/_utils/_model_cards/auto.yaml +0 -43
- retab/_utils/_model_cards/gemini.yaml +0 -117
- retab/_utils/_model_cards/openai.yaml +0 -301
- retab/_utils/_model_cards/xai.yaml +0 -28
- retab/_utils/ai_models.py +0 -138
- retab/_utils/benchmarking.py +0 -484
- retab/_utils/chat.py +0 -327
- retab/_utils/display.py +0 -440
- retab/_utils/json_schema.py +0 -2156
- retab/_utils/mime.py +0 -165
- retab/_utils/responses.py +0 -169
- retab/_utils/stream_context_managers.py +0 -52
- retab/_utils/usage/__init__.py +0 -0
- retab/_utils/usage/usage.py +0 -301
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/WHEEL +0 -0
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/top_level.txt +0 -0
retab/resources/evaluations/client.py
CHANGED
@@ -1,7 +1,7 @@
 from typing import Any, Dict, List

 from ..._resource import AsyncAPIResource, SyncAPIResource
-from ...types.evaluations import Evaluation, PatchEvaluationRequest, ListEvaluationParams,
+from ...types.evaluations import Evaluation, PatchEvaluationRequest, ListEvaluationParams, BaseEvaluation
 from ...types.inference_settings import InferenceSettings
 from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
 from .documents import Documents, AsyncDocuments
@@ -16,14 +16,18 @@ class EvaluationsMixin:
         project_id: str = FieldUnset,
         default_inference_settings: InferenceSettings = FieldUnset,
     ) -> PreparedRequest:
-        # Use
-
-        name
-        json_schema
-
-
-
-
+        # Use BaseEvaluation model
+        eval_dict = {
+            "name": name,
+            "json_schema": json_schema,
+        }
+        if project_id is not FieldUnset:
+            eval_dict["project_id"] = project_id
+        if default_inference_settings is not FieldUnset:
+            eval_dict["default_inference_settings"] = default_inference_settings
+
+        eval_data = BaseEvaluation(**eval_dict)
+        return PreparedRequest(method="POST", url="/v1/evaluations", data=eval_data.model_dump(exclude_unset=True, mode="json"))

     def prepare_get(self, evaluation_id: str) -> PreparedRequest:
         return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}")
@@ -42,12 +46,17 @@ class EvaluationsMixin:
         Only the provided fields will be updated. Fields set to None will be excluded from the update.
         """
         # Build a dictionary with only the provided fields
-
-
-
-
-
-
+        update_dict = {}
+        if name is not FieldUnset:
+            update_dict["name"] = name
+        if project_id is not FieldUnset:
+            update_dict["project_id"] = project_id
+        if json_schema is not FieldUnset:
+            update_dict["json_schema"] = json_schema
+        if default_inference_settings is not FieldUnset:
+            update_dict["default_inference_settings"] = default_inference_settings
+
+        data = PatchEvaluationRequest(**update_dict).model_dump(exclude_unset=True, mode="json")

         return PreparedRequest(method="PATCH", url=f"/v1/evaluations/{evaluation_id}", data=data)

@@ -56,11 +65,11 @@ class EvaluationsMixin:
         Prepare a request to list evaluations.

         Usage:
-        >>> client.
-        >>> client.
+        >>> client.evaluations.list(project_id="project_id") # List all evaluations for a project
+        >>> client.evaluations.list() # List all evaluations (no project_id)

         This does not work:
-        >>> client.
+        >>> client.evaluations.list(project_id=None)

         Args:
             project_id: The project ID to list evaluations for
@@ -68,7 +77,11 @@ class EvaluationsMixin:
         Returns:
             PreparedRequest: The prepared request
         """
-
+        params_dict = {}
+        if project_id is not FieldUnset:
+            params_dict["project_id"] = project_id
+
+        params = ListEvaluationParams(**params_dict).model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
         return PreparedRequest(method="GET", url="/v1/evaluations", params=params)

     def prepare_delete(self, id: str) -> PreparedRequest:
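The evaluations client now builds its payloads only from fields the caller actually sets; anything left at the FieldUnset sentinel is dropped before the request body is serialized. A minimal usage sketch, assuming the top-level client is exposed as Retab and that create/update/list mirror the prepared requests above (names, IDs, and the returned model's id attribute are assumptions, not shown in this diff):

from retab import Retab  # assumption: the package exports a Retab client class

client = Retab()  # assumption: API key picked up from the environment

# POST /v1/evaluations: only name/json_schema plus explicitly passed optional fields are sent.
evaluation = client.evaluations.create(
    name="invoice-eval",                # placeholder name
    json_schema={"type": "object"},     # placeholder schema
    project_id="proj_123",              # optional; omitted from the payload if not given
)

# PATCH /v1/evaluations/{id}: the body contains only the fields being changed.
client.evaluations.update(evaluation.id, name="invoice-eval-v2")  # assumes the model exposes an id field

# GET /v1/evaluations, optionally filtered by project (as in the docstring above).
client.evaluations.list(project_id="proj_123")
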
retab/resources/evaluations/documents.py
CHANGED
@@ -8,6 +8,7 @@ from pydantic import HttpUrl
 from ..._resource import AsyncAPIResource, SyncAPIResource
 from ...utils.mime import prepare_mime_document
 from ...types.evaluations import DocumentItem, EvaluationDocument, PatchEvaluationDocumentRequest
+from ...types.predictions import PredictionMetadata
 from ...types.mime import MIMEData
 from ...types.standards import PreparedRequest, DeleteResponse, FieldUnset
 from ...types.documents.extractions import RetabParsedChatCompletion
@@ -17,18 +18,18 @@ class DocumentsMixin:
     def prepare_get(self, evaluation_id: str, document_id: str) -> PreparedRequest:
         return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}")

-    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: dict[str, Any]) -> PreparedRequest:
+    def prepare_create(self, evaluation_id: str, document: MIMEData, annotation: dict[str, Any], annotation_metadata: dict[str, Any] | None = None) -> PreparedRequest:
         # Serialize the MIMEData
-        document_item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=None)
+        document_item = DocumentItem(mime_data=document, annotation=annotation, annotation_metadata=PredictionMetadata(**annotation_metadata) if annotation_metadata else None)
         return PreparedRequest(method="POST", url=f"/v1/evaluations/{evaluation_id}/documents", data=document_item.model_dump(mode="json"))

     def prepare_list(self, evaluation_id: str) -> PreparedRequest:
         return PreparedRequest(method="GET", url=f"/v1/evaluations/{evaluation_id}/documents")

-    def prepare_update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]
+    def prepare_update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]) -> PreparedRequest:
         update_request = PatchEvaluationDocumentRequest(annotation=annotation)
         return PreparedRequest(
-            method="PATCH", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}", data=update_request.model_dump(mode="json", exclude_unset=True
+            method="PATCH", url=f"/v1/evaluations/{evaluation_id}/documents/{document_id}", data=update_request.model_dump(mode="json", exclude_unset=True)
         )

     def prepare_delete(self, evaluation_id: str, document_id: str) -> PreparedRequest:
@@ -41,7 +42,7 @@ class DocumentsMixin:
 class Documents(SyncAPIResource, DocumentsMixin):
     """Documents API wrapper for evaluations"""

-    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
+    def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any], annotation_metadata: Dict[str, Any] | None = None) -> EvaluationDocument:
         """
         Create a document for an evaluation.

@@ -64,7 +65,7 @@ class Documents(SyncAPIResource, DocumentsMixin):
         mime_document: MIMEData = prepare_mime_document(document)

         # Let prepare_create handle the serialization
-        request = self.prepare_create(evaluation_id, mime_document, annotation)
+        request = self.prepare_create(evaluation_id, mime_document, annotation, annotation_metadata)
         response = self._client._prepared_request(request)
         return EvaluationDocument(**response)

@@ -101,7 +102,7 @@ class Documents(SyncAPIResource, DocumentsMixin):
         response = self._client._prepared_request(request)
         return EvaluationDocument(**response)

-    def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]
+    def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]) -> EvaluationDocument:
         """
         Update a document.

@@ -146,7 +147,7 @@ class Documents(SyncAPIResource, DocumentsMixin):
 class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
     """Async Documents API wrapper for evaluations"""

-    async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any]) -> EvaluationDocument:
+    async def create(self, evaluation_id: str, document: Union[Path, str, IOBase, MIMEData, PIL.Image.Image, HttpUrl], annotation: Dict[str, Any], annotation_metadata: Dict[str, Any] | None = None) -> EvaluationDocument:
         """
         Create a document for an evaluation.

@@ -159,7 +160,7 @@ class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
                 - A PIL Image object
                 - A URL (HttpUrl)
             annotation: The ground truth for the document
-
+            annotation_metadata: The metadata of the annotation
         Returns:
             EvaluationDocument: The created document
         Raises:
@@ -169,7 +170,7 @@ class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
         mime_document: MIMEData = prepare_mime_document(document)

         # Let prepare_create handle the serialization
-        request = self.prepare_create(evaluation_id, mime_document, annotation)
+        request = self.prepare_create(evaluation_id, mime_document, annotation, annotation_metadata)
         response = await self._client._prepared_request(request)
         return EvaluationDocument(**response)

@@ -189,7 +190,7 @@ class AsyncDocuments(AsyncAPIResource, DocumentsMixin):
         response = await self._client._prepared_request(request)
         return [EvaluationDocument(**item) for item in response.get("data", [])]

-    async def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]
+    async def update(self, evaluation_id: str, document_id: str, annotation: dict[str, Any]) -> EvaluationDocument:
         """
         Update a document.

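Documents.create (sync and async) now accepts an optional annotation_metadata dict, which prepare_create wraps in a PredictionMetadata model before serializing the DocumentItem. A hedged sketch, continuing with the same assumed client object; the attribute path client.evaluations.documents and the metadata keys are assumptions, and the keys would have to match the fields of retab.types.predictions.PredictionMetadata, which are not shown in this diff:

doc = client.evaluations.documents.create(   # assumption: documents exposed under client.evaluations.documents
    evaluation_id="eval_123",                # placeholder ID
    document="invoices/acme_2024_01.pdf",    # path, URL, PIL image, IO stream, or MIMEData
    annotation={"total": 1250.0},            # ground truth for the document
    annotation_metadata=None,                # or a dict matching PredictionMetadata's fields
)
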
retab/resources/evaluations/iterations.py
CHANGED
@@ -34,15 +34,23 @@ class IterationsMixin:
         browser_canvas: BrowserCanvas = FieldUnset,
         n_consensus: int = FieldUnset,
     ) -> PreparedRequest:
-
-
-
-
-
-
-
-
-
+        inference_dict = {}
+        if model is not FieldUnset:
+            inference_dict["model"] = model
+        if temperature is not FieldUnset:
+            inference_dict["temperature"] = temperature
+        if modality is not FieldUnset:
+            inference_dict["modality"] = modality
+        if reasoning_effort is not FieldUnset:
+            inference_dict["reasoning_effort"] = reasoning_effort
+        if image_resolution_dpi is not FieldUnset:
+            inference_dict["image_resolution_dpi"] = image_resolution_dpi
+        if browser_canvas is not FieldUnset:
+            inference_dict["browser_canvas"] = browser_canvas
+        if n_consensus is not FieldUnset:
+            inference_dict["n_consensus"] = n_consensus
+
+        inference_settings = InferenceSettings(**inference_dict)

         request = CreateIterationRequest(inference_settings=inference_settings, json_schema=json_schema)

@@ -61,19 +69,29 @@ class IterationsMixin:
         browser_canvas: BrowserCanvas = FieldUnset,
         n_consensus: int = FieldUnset,
     ) -> PreparedRequest:
-
-
-
-
-
-
-
-
-
-        if not
-
-
+        inference_dict = {}
+        if model is not FieldUnset:
+            inference_dict["model"] = model
+        if temperature is not FieldUnset:
+            inference_dict["temperature"] = temperature
+        if modality is not FieldUnset:
+            inference_dict["modality"] = modality
+        if reasoning_effort is not FieldUnset:
+            inference_dict["reasoning_effort"] = reasoning_effort
+        if image_resolution_dpi is not FieldUnset:
+            inference_dict["image_resolution_dpi"] = image_resolution_dpi
+        if browser_canvas is not FieldUnset:
+            inference_dict["browser_canvas"] = browser_canvas
+        if n_consensus is not FieldUnset:
+            inference_dict["n_consensus"] = n_consensus
+
+        iteration_dict = {}
+        if json_schema is not FieldUnset:
+            iteration_dict["json_schema"] = json_schema
+        if inference_dict: # Only add inference_settings if we have at least one field
+            iteration_dict["inference_settings"] = InferenceSettings(**inference_dict)
+
+        iteration_data = PatchIterationRequest(**iteration_dict)

         return PreparedRequest(
             method="PATCH", url=f"/v1/evaluations/{evaluation_id}/iterations/{iteration_id}", data=iteration_data.model_dump(exclude_unset=True, exclude_defaults=True, mode="json")
@@ -136,14 +154,14 @@ class Iterations(SyncAPIResource, IterationsMixin):
     def create(
         self,
         evaluation_id: str,
-        model: str,
-        temperature: float =
-        modality: Modality =
-        json_schema: Optional[Dict[str, Any]] =
-        reasoning_effort: ChatCompletionReasoningEffort =
-        image_resolution_dpi: int =
-        browser_canvas: BrowserCanvas =
-        n_consensus: int =
+        model: str = FieldUnset,
+        temperature: float = FieldUnset,
+        modality: Modality = FieldUnset,
+        json_schema: Optional[Dict[str, Any]] = FieldUnset,
+        reasoning_effort: ChatCompletionReasoningEffort = FieldUnset,
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
+        n_consensus: int = FieldUnset,
     ) -> Iteration:
         """
         Create a new iteration for an evaluation.
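Iterations.create now defaults every tuning parameter to FieldUnset, so only the settings you pass end up in the InferenceSettings object and the resulting CreateIterationRequest; prepare_update applies the same filtering and only attaches inference_settings when at least one field was provided. A sketch under the same assumptions as above (the attribute path client.evaluations.iterations, the model name, and the IDs are placeholders):

iteration = client.evaluations.iterations.create(   # assumption: iterations exposed under client.evaluations.iterations
    evaluation_id="eval_123",                        # placeholder ID
    model="gpt-4o-mini",                             # placeholder model name
    n_consensus=3,                                   # only model and n_consensus are serialized; the rest stay unset
)
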
retab/resources/jsonlUtils.py
CHANGED
@@ -14,9 +14,8 @@ from anthropic import Anthropic
 from openai import OpenAI
 from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from pydantic import BaseModel
-from pydantic_core import PydanticUndefined
 from tqdm import tqdm
-
+from ..types.standards import FieldUnset
 from .._resource import AsyncAPIResource, SyncAPIResource
 from ..utils.ai_models import assert_valid_model_extraction, get_provider_for_model
 from ..utils.chat import convert_to_anthropic_format, convert_to_openai_format, separate_messages
@@ -139,8 +138,8 @@ class Datasets(SyncAPIResource, BaseDatasetsMixin):
         json_schema: dict[str, Any] | Path | str,
         document_annotation_pairs_paths: list[dict[str, Path | str]],
         dataset_path: Path | str,
-        image_resolution_dpi: int =
-        browser_canvas: BrowserCanvas =
+        image_resolution_dpi: int = FieldUnset,
+        browser_canvas: BrowserCanvas = FieldUnset,
         modality: Modality = "native",
     ) -> None:
         """Save document-annotation pairs to a JSONL training set.
retab/resources/processors/automations/endpoints.py
CHANGED
@@ -1,6 +1,6 @@
-from typing import Literal, Optional
+from typing import Any, Literal, Optional

-from
+from ....types.standards import FieldUnset

 from ...._resource import AsyncAPIResource, SyncAPIResource
 from ....utils.ai_models import assert_valid_model_extraction
@@ -15,19 +15,23 @@ class EndpointsMixin:
         name: str,
         webhook_url: str,
         model: str = "gpt-4o-mini",
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> PreparedRequest:
         assert_valid_model_extraction(model)

-
-        processor_id
-        name
-        webhook_url
-
-
-
-
+        endpoint_dict: dict[str, Any] = {
+            'processor_id': processor_id,
+            'name': name,
+            'webhook_url': webhook_url,
+        }
+        if webhook_headers is not FieldUnset:
+            endpoint_dict['webhook_headers'] = webhook_headers
+        if need_validation is not FieldUnset:
+            endpoint_dict['need_validation'] = need_validation
+
+        request = Endpoint(**endpoint_dict)
+        return PreparedRequest(method="POST", url="/v1/processors/automations/endpoints", data=request.model_dump(mode="json", exclude_unset=True))

     def prepare_list(
         self,
@@ -68,20 +72,26 @@ class EndpointsMixin:
     def prepare_update(
         self,
         endpoint_id: str,
-        name: str =
-        default_language: str =
-        webhook_url: str =
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
+        name: str = FieldUnset,
+        default_language: str = FieldUnset,
+        webhook_url: str = FieldUnset,
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> PreparedRequest:
-
-
-
-
-
-
-
-
+        update_dict: dict[str, Any] = {}
+        if name is not FieldUnset:
+            update_dict['name'] = name
+        if default_language is not FieldUnset:
+            update_dict['default_language'] = default_language
+        if webhook_url is not FieldUnset:
+            update_dict['webhook_url'] = webhook_url
+        if webhook_headers is not FieldUnset:
+            update_dict['webhook_headers'] = webhook_headers
+        if need_validation is not FieldUnset:
+            update_dict['need_validation'] = need_validation
+
+        request = UpdateEndpointRequest(**update_dict)
+        return PreparedRequest(method="PUT", url=f"/v1/processors/automations/endpoints/{endpoint_id}", data=request.model_dump(mode="json", exclude_unset=True))

     def prepare_delete(self, endpoint_id: str) -> PreparedRequest:
         return PreparedRequest(method="DELETE", url=f"/v1/processors/automations/endpoints/{endpoint_id}")
@@ -95,8 +105,8 @@ class Endpoints(SyncAPIResource, EndpointsMixin):
         processor_id: str,
         name: str,
         webhook_url: str,
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> Endpoint:
         """Create a new endpoint configuration.

@@ -168,11 +178,11 @@ class Endpoints(SyncAPIResource, EndpointsMixin):
     def update(
         self,
         endpoint_id: str,
-        name: str =
-        default_language: str =
-        webhook_url: str =
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
+        name: str = FieldUnset,
+        default_language: str = FieldUnset,
+        webhook_url: str = FieldUnset,
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> Endpoint:
         """Update an endpoint configuration.

@@ -221,8 +231,8 @@ class AsyncEndpoints(AsyncAPIResource, EndpointsMixin):
         processor_id: str,
         name: str,
         webhook_url: str,
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> Endpoint:
         request = self.prepare_create(
             processor_id=processor_id,
@@ -258,11 +268,11 @@ class AsyncEndpoints(AsyncAPIResource, EndpointsMixin):
     async def update(
         self,
         endpoint_id: str,
-        name: str =
-        default_language: str =
-        webhook_url: str =
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
+        name: str = FieldUnset,
+        default_language: str = FieldUnset,
+        webhook_url: str = FieldUnset,
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> Endpoint:
         request = self.prepare_update(
             endpoint_id=endpoint_id,
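The endpoint automation client follows the same convention: prepare_create always sends processor_id, name, and webhook_url and adds webhook_headers/need_validation only when set, while prepare_update PUTs just the fields being changed. A sketch, assuming the resource is reachable at client.processors.automations.endpoints (IDs and URLs are placeholders, and the returned model's id attribute is an assumption):

endpoint = client.processors.automations.endpoints.create(
    processor_id="proc_123",                        # placeholder processor ID
    name="invoice-endpoint",                        # placeholder name
    webhook_url="https://example.com/webhook",      # placeholder webhook URL
)

# PUT /v1/processors/automations/endpoints/{id} with only the changed field.
client.processors.automations.endpoints.update(endpoint.id, webhook_url="https://example.com/webhook-v2")
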
retab/resources/processors/automations/links.py
CHANGED
@@ -1,10 +1,8 @@
 from typing import Any, Literal, Optional

-from pydantic_core import PydanticUndefined
-
 from ...._resource import AsyncAPIResource, SyncAPIResource
 from ....types.automations.links import Link, ListLinks, UpdateLinkRequest
-from ....types.standards import PreparedRequest
+from ....types.standards import PreparedRequest, FieldUnset


 class LinksMixin:
@@ -15,19 +13,24 @@ class LinksMixin:
         processor_id: str,
         name: str,
         webhook_url: str,
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
-        password: str | None =
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
+        password: str | None = FieldUnset,
     ) -> PreparedRequest:
-
-        processor_id
-        name
-        webhook_url
-
-
-
-
-
+        link_dict: dict[str, Any] = {
+            'processor_id': processor_id,
+            'name': name,
+            'webhook_url': webhook_url,
+        }
+        if webhook_headers is not FieldUnset:
+            link_dict['webhook_headers'] = webhook_headers
+        if need_validation is not FieldUnset:
+            link_dict['need_validation'] = need_validation
+        if password is not FieldUnset:
+            link_dict['password'] = password
+
+        request = Link(**link_dict)
+        return PreparedRequest(method="POST", url=self.links_base_url, data=request.model_dump(mode="json", exclude_unset=True))

     def prepare_list(
         self,
@@ -66,19 +69,25 @@ class LinksMixin:
     def prepare_update(
        self,
         link_id: str,
-        name: str =
-        webhook_url: str =
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
-        password: str | None =
+        name: str = FieldUnset,
+        webhook_url: str = FieldUnset,
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
+        password: str | None = FieldUnset,
     ) -> PreparedRequest:
-
-
-
-
-
-
-
+        update_dict: dict[str, Any] = {}
+        if name is not FieldUnset:
+            update_dict['name'] = name
+        if webhook_url is not FieldUnset:
+            update_dict['webhook_url'] = webhook_url
+        if webhook_headers is not FieldUnset:
+            update_dict['webhook_headers'] = webhook_headers
+        if need_validation is not FieldUnset:
+            update_dict['need_validation'] = need_validation
+        if password is not FieldUnset:
+            update_dict['password'] = password
+
+        request = UpdateLinkRequest(**update_dict)
         return PreparedRequest(method="PUT", url=f"{self.links_base_url}/{link_id}", data=request.model_dump(mode="json", exclude_unset=True, exclude_defaults=True))

     def prepare_delete(self, link_id: str) -> PreparedRequest:
@@ -96,9 +105,9 @@ class Links(SyncAPIResource, LinksMixin):
         processor_id: str,
         name: str,
         webhook_url: str,
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
-        password: str | None =
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
+        password: str | None = FieldUnset,
     ) -> Link:
         """Create a new extraction link configuration.

@@ -174,11 +183,11 @@ class Links(SyncAPIResource, LinksMixin):
     def update(
         self,
         link_id: str,
-        name: str =
-        webhook_url: str =
-        webhook_headers: dict[str, str] =
-        password: str | None =
-        need_validation: bool =
+        name: str = FieldUnset,
+        webhook_url: str = FieldUnset,
+        webhook_headers: dict[str, str] = FieldUnset,
+        password: str | None = FieldUnset,
+        need_validation: bool = FieldUnset,
     ) -> Link:
         """Update an extraction link configuration.

@@ -235,9 +244,9 @@ class AsyncLinks(AsyncAPIResource, LinksMixin):
         processor_id: str,
         name: str,
         webhook_url: str,
-        webhook_headers: dict[str, str] =
-        need_validation: bool =
-        password: str | None =
+        webhook_headers: dict[str, str] = FieldUnset,
+        need_validation: bool = FieldUnset,
+        password: str | None = FieldUnset,
     ) -> Link:
         request = self.prepare_create(
             processor_id=processor_id,
@@ -272,11 +281,11 @@ class AsyncLinks(AsyncAPIResource, LinksMixin):
     async def update(
         self,
         link_id: str,
-        name: str =
-        webhook_url: str =
-        webhook_headers: dict[str, str] =
-        password: str | None =
-        need_validation: bool =
+        name: str = FieldUnset,
+        webhook_url: str = FieldUnset,
+        webhook_headers: dict[str, str] = FieldUnset,
+        password: str | None = FieldUnset,
+        need_validation: bool = FieldUnset,
    ) -> Link:
         request = self.prepare_update(
             link_id=link_id,