retab 0.0.35__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- retab-0.0.35.dist-info/METADATA +417 -0
- retab-0.0.35.dist-info/RECORD +111 -0
- retab-0.0.35.dist-info/WHEEL +5 -0
- retab-0.0.35.dist-info/top_level.txt +1 -0
- uiform/__init__.py +4 -0
- uiform/_resource.py +28 -0
- uiform/_utils/__init__.py +0 -0
- uiform/_utils/ai_models.py +100 -0
- uiform/_utils/benchmarking copy.py +588 -0
- uiform/_utils/benchmarking.py +485 -0
- uiform/_utils/chat.py +332 -0
- uiform/_utils/display.py +443 -0
- uiform/_utils/json_schema.py +2161 -0
- uiform/_utils/mime.py +168 -0
- uiform/_utils/responses.py +163 -0
- uiform/_utils/stream_context_managers.py +52 -0
- uiform/_utils/usage/__init__.py +0 -0
- uiform/_utils/usage/usage.py +300 -0
- uiform/client.py +701 -0
- uiform/py.typed +0 -0
- uiform/resources/__init__.py +0 -0
- uiform/resources/consensus/__init__.py +3 -0
- uiform/resources/consensus/client.py +114 -0
- uiform/resources/consensus/completions.py +252 -0
- uiform/resources/consensus/completions_stream.py +278 -0
- uiform/resources/consensus/responses.py +325 -0
- uiform/resources/consensus/responses_stream.py +373 -0
- uiform/resources/deployments/__init__.py +9 -0
- uiform/resources/deployments/client.py +78 -0
- uiform/resources/deployments/endpoints.py +322 -0
- uiform/resources/deployments/links.py +452 -0
- uiform/resources/deployments/logs.py +211 -0
- uiform/resources/deployments/mailboxes.py +496 -0
- uiform/resources/deployments/outlook.py +531 -0
- uiform/resources/deployments/tests.py +158 -0
- uiform/resources/documents/__init__.py +3 -0
- uiform/resources/documents/client.py +255 -0
- uiform/resources/documents/extractions.py +441 -0
- uiform/resources/evals.py +812 -0
- uiform/resources/files.py +24 -0
- uiform/resources/finetuning.py +62 -0
- uiform/resources/jsonlUtils.py +1046 -0
- uiform/resources/models.py +45 -0
- uiform/resources/openai_example.py +22 -0
- uiform/resources/processors/__init__.py +3 -0
- uiform/resources/processors/automations/__init__.py +9 -0
- uiform/resources/processors/automations/client.py +78 -0
- uiform/resources/processors/automations/endpoints.py +317 -0
- uiform/resources/processors/automations/links.py +356 -0
- uiform/resources/processors/automations/logs.py +211 -0
- uiform/resources/processors/automations/mailboxes.py +435 -0
- uiform/resources/processors/automations/outlook.py +444 -0
- uiform/resources/processors/automations/tests.py +158 -0
- uiform/resources/processors/client.py +474 -0
- uiform/resources/prompt_optimization.py +76 -0
- uiform/resources/schemas.py +369 -0
- uiform/resources/secrets/__init__.py +9 -0
- uiform/resources/secrets/client.py +20 -0
- uiform/resources/secrets/external_api_keys.py +109 -0
- uiform/resources/secrets/webhook.py +62 -0
- uiform/resources/usage.py +271 -0
- uiform/types/__init__.py +0 -0
- uiform/types/ai_models.py +645 -0
- uiform/types/automations/__init__.py +0 -0
- uiform/types/automations/cron.py +58 -0
- uiform/types/automations/endpoints.py +21 -0
- uiform/types/automations/links.py +28 -0
- uiform/types/automations/mailboxes.py +60 -0
- uiform/types/automations/outlook.py +68 -0
- uiform/types/automations/webhooks.py +21 -0
- uiform/types/chat.py +8 -0
- uiform/types/completions.py +93 -0
- uiform/types/consensus.py +10 -0
- uiform/types/db/__init__.py +0 -0
- uiform/types/db/annotations.py +24 -0
- uiform/types/db/files.py +36 -0
- uiform/types/deployments/__init__.py +0 -0
- uiform/types/deployments/cron.py +59 -0
- uiform/types/deployments/endpoints.py +28 -0
- uiform/types/deployments/links.py +36 -0
- uiform/types/deployments/mailboxes.py +67 -0
- uiform/types/deployments/outlook.py +76 -0
- uiform/types/deployments/webhooks.py +21 -0
- uiform/types/documents/__init__.py +0 -0
- uiform/types/documents/correct_orientation.py +13 -0
- uiform/types/documents/create_messages.py +226 -0
- uiform/types/documents/extractions.py +297 -0
- uiform/types/evals.py +207 -0
- uiform/types/events.py +76 -0
- uiform/types/extractions.py +85 -0
- uiform/types/jobs/__init__.py +0 -0
- uiform/types/jobs/base.py +150 -0
- uiform/types/jobs/batch_annotation.py +22 -0
- uiform/types/jobs/evaluation.py +133 -0
- uiform/types/jobs/finetune.py +6 -0
- uiform/types/jobs/prompt_optimization.py +41 -0
- uiform/types/jobs/webcrawl.py +6 -0
- uiform/types/logs.py +231 -0
- uiform/types/mime.py +257 -0
- uiform/types/modalities.py +68 -0
- uiform/types/pagination.py +6 -0
- uiform/types/schemas/__init__.py +0 -0
- uiform/types/schemas/enhance.py +53 -0
- uiform/types/schemas/evaluate.py +55 -0
- uiform/types/schemas/generate.py +32 -0
- uiform/types/schemas/layout.py +58 -0
- uiform/types/schemas/object.py +631 -0
- uiform/types/schemas/templates.py +107 -0
- uiform/types/secrets/__init__.py +0 -0
- uiform/types/secrets/external_api_keys.py +22 -0
- uiform/types/standards.py +39 -0
uiform/resources/schemas.py
@@ -0,0 +1,369 @@
from io import IOBase
from pathlib import Path
from typing import Any, List, Optional, Sequence

import PIL.Image
from pydantic import BaseModel

from .._resource import AsyncAPIResource, SyncAPIResource
from .._utils.ai_models import assert_valid_model_schema_generation
from .._utils.json_schema import load_json_schema
from .._utils.mime import prepare_mime_document_list
from ..types.modalities import Modality
from ..types.mime import MIMEData
from ..types.schemas.generate import GenerateSchemaRequest
from ..types.schemas.enhance import EnhanceSchemaConfig, EnhanceSchemaConfigDict, EnhanceSchemaRequest
from ..types.schemas.evaluate import EvaluateSchemaRequest, EvaluateSchemaResponse
from ..types.schemas.object import Schema
from ..types.standards import PreparedRequest
from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort


class SchemasMixin:
    def prepare_generate(
        self,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        instructions: str | None = None,
        model: str = "gpt-4o-2024-11-20",
        temperature: float = 0,
        modality: Modality = "native",
    ) -> PreparedRequest:
        assert_valid_model_schema_generation(model)
        mime_documents = prepare_mime_document_list(documents)
        data = {
            "documents": [doc.model_dump() for doc in mime_documents],
            "instructions": instructions if instructions else None,
            "model": model,
            "temperature": temperature,
            "modality": modality,
        }
        GenerateSchemaRequest.model_validate(data)
        return PreparedRequest(method="POST", url="/v1/schemas/generate", data=data)

    def prepare_evaluate(
        self,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        json_schema: dict[str, Any],
        ground_truths: list[dict[str, Any]] | None = None,
        model: str = "gpt-4o-mini",
        temperature: float = 0.0,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        modality: Modality = "native",
        image_resolution_dpi: int = 96,
        browser_canvas: str = "A4",
        n_consensus: int = 1,
    ) -> PreparedRequest:
        # Assert that if ground_truths is not None, it has the same length as documents
        if ground_truths is not None and len(documents) != len(ground_truths):
            raise ValueError("Number of documents must match number of ground truths")

        mime_documents = prepare_mime_document_list(documents)
        data = {
            "documents": [doc.model_dump() for doc in mime_documents],
            "ground_truths": ground_truths,
            "model": model,
            "temperature": temperature,
            "reasoning_effort": reasoning_effort,
            "modality": modality,
            "image_resolution_dpi": image_resolution_dpi,
            "browser_canvas": browser_canvas,
            "n_consensus": n_consensus,
            "json_schema": json_schema,
        }
        EvaluateSchemaRequest.model_validate(data)
        return PreparedRequest(method="POST", url="/v1/schemas/evaluate", data=data)

    def prepare_enhance(
        self,
        json_schema: dict[str, Any] | Path | str,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        ground_truths: list[dict[str, Any]] | None,
        instructions: str | None,
        model: str,
        temperature: float,
        modality: Modality,
        flat_likelihoods: list[dict[str, float]] | dict[str, float] | None,
        tools_config: EnhanceSchemaConfig,
    ) -> PreparedRequest:
        assert_valid_model_schema_generation(model)
        mime_documents = prepare_mime_document_list(documents)
        loaded_json_schema = load_json_schema(json_schema)
        data = {
            "json_schema": loaded_json_schema,
            "documents": [doc.model_dump() for doc in mime_documents],
            "ground_truths": ground_truths,
            "instructions": instructions if instructions else None,
            "model": model,
            "temperature": temperature,
            "modality": modality,
            "flat_likelihoods": flat_likelihoods,
            "tools_config": tools_config.model_dump(),
        }
        EnhanceSchemaRequest.model_validate(data)
        return PreparedRequest(method="POST", url="/v1/schemas/enhance", data=data)

    def prepare_get(self, schema_id: str) -> PreparedRequest:
        return PreparedRequest(method="GET", url=f"/v1/schemas/{schema_id}")


class Schemas(SyncAPIResource, SchemasMixin):
    def load(self, json_schema: dict[str, Any] | Path | str | None = None, pydantic_model: type[BaseModel] | None = None) -> Schema:
        """Load a schema from a JSON schema.

        Args:
            json_schema: The JSON schema to load
            pydantic_model: The Pydantic model to load
        """
        if json_schema:
            return Schema(json_schema=load_json_schema(json_schema))
        elif pydantic_model:
            return Schema(pydantic_model=pydantic_model)
        else:
            raise ValueError("Either json_schema or pydantic_model must be provided")

    def generate(
        self,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        instructions: str | None = None,
        model: str = "gpt-4o-2024-11-20",
        temperature: float = 0,
        modality: Modality = "native",
    ) -> Schema:
        """
        Generate a complete JSON schema by analyzing the provided documents.

        The generated schema includes X-Prompts for enhanced LLM interactions:
        - X-SystemPrompt: Defines high-level instructions and context for consistent LLM behavior
        - X-ReasoningPrompt: Creates auxiliary reasoning fields for complex data processing

        Args:
            documents: List of documents (as MIMEData) to analyze

        Returns:
            Schema: Generated JSON schema with X-Prompts based on document analysis

        Raises:
            HTTPException if the request fails
        """

        prepared_request = self.prepare_generate(documents, instructions, model, temperature, modality)
        response = self._client._prepared_request(prepared_request)
        return Schema.model_validate(response)

    def evaluate(
        self,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        json_schema: dict[str, Any],
        ground_truths: list[dict[str, Any]] | None = None,
        model: str = "gpt-4o-mini",
        temperature: float = 0.0,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        modality: Modality = "native",
        image_resolution_dpi: int = 96,
        browser_canvas: str = "A4",
        n_consensus: int = 1,
    ) -> EvaluateSchemaResponse:
        """
        Evaluate a schema by performing extractions on provided documents.
        If ground truths are provided, compares extractions against them.
        Otherwise, uses consensus likelihoods as metrics.

        Args:
            documents: List of documents to evaluate against
            json_schema: The JSON schema to evaluate
            ground_truths: Optional list of ground truth dictionaries to compare against
            model: The model to use for extraction
            temperature: The temperature to use for extraction
            reasoning_effort: The reasoning effort to use for extraction
            modality: The modality to use for extraction
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus rounds to perform

        Returns:
            EvaluateSchemaResponse: Dictionary containing evaluation metrics for each document.
            Each metric includes:
                - similarity: Average similarity/likelihood score
                - similarities: Per-field similarity/likelihood scores
                - flat_similarities: Flattened per-field similarity/likelihood scores
                - aligned_* versions of the above metrics

        Raises:
            ValueError: If ground_truths is provided and its length doesn't match documents
            HTTPException: If the request fails or if there are too many documents
        """
        prepared_request = self.prepare_evaluate(
            documents=documents,
            json_schema=json_schema,
            ground_truths=ground_truths,
            model=model,
            temperature=temperature,
            reasoning_effort=reasoning_effort,
            modality=modality,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = self._client._prepared_request(prepared_request)
        return EvaluateSchemaResponse.model_validate(response)

    def enhance(
        self,
        json_schema: dict[str, Any] | Path | str,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        ground_truths: list[dict[str, Any]] | None = None,
        instructions: str | None = None,
        model: str = "gpt-4o-2024-11-20",
        temperature: float = 0,
        modality: Modality = "native",
        flat_likelihoods: list[dict[str, float]] | dict[str, float] | None = None,
        tools_config: EnhanceSchemaConfigDict | None = None,
    ) -> Schema:
        prepared_request = self.prepare_enhance(
            json_schema, documents, ground_truths, instructions, model, temperature, modality, flat_likelihoods, EnhanceSchemaConfig.model_validate(tools_config or {})
        )
        response = self._client._prepared_request(prepared_request)
        return Schema(json_schema=response["json_schema"])


class AsyncSchemas(AsyncAPIResource, SchemasMixin):
    async def load(self, json_schema: dict[str, Any] | Path | str | None = None, pydantic_model: type[BaseModel] | None = None) -> Schema:
        """Load a schema from a JSON schema.

        Args:
            json_schema: The JSON schema to load
            pydantic_model: The Pydantic model to load
        """
        if json_schema:
            return Schema(json_schema=load_json_schema(json_schema))
        elif pydantic_model:
            return Schema(pydantic_model=pydantic_model)
        else:
            raise ValueError("Either json_schema or pydantic_model must be provided")

    """Schemas Asynchronous API wrapper"""

    async def get(self, schema_id: str) -> Schema:
        """Retrieve a schema by ID.

        Args:
            schema_id: The ID of the schema to retrieve

        Returns:
            Schema: The retrieved schema object
        """

        prepared_request = self.prepare_get(schema_id)
        response = await self._client._prepared_request(prepared_request)
        return Schema.model_validate(response)

    async def generate(
        self,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        instructions: str | None = None,
        model: str = "gpt-4o-2024-11-20",
        temperature: float = 0.0,
        modality: Modality = "native",
    ) -> Schema:
        """
        Generate a complete JSON schema by analyzing the provided documents.

        The generated schema includes X-Prompts for enhanced LLM interactions:
        - X-SystemPrompt: Defines high-level instructions and context for consistent LLM behavior
        - X-FieldPrompt: Enhances standard description fields with specific extraction guidance
        - X-ReasoningPrompt: Creates auxiliary reasoning fields for complex data processing

        Args:
            documents: List of documents (as MIMEData) to analyze

        Returns:
            Schema: Generated JSON schema with X-Prompts based on document analysis

        Raises:
            HTTPException if the request fails
        """
        prepared_request = self.prepare_generate(documents, instructions, model, temperature, modality)
        response = await self._client._prepared_request(prepared_request)
        return Schema.model_validate(response)

    async def evaluate(
        self,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        json_schema: dict[str, Any],
        ground_truths: list[dict[str, Any]] | None = None,
        model: str = "gpt-4o-mini",
        temperature: float = 0.0,
        reasoning_effort: ChatCompletionReasoningEffort = "medium",
        modality: Modality = "native",
        image_resolution_dpi: int = 96,
        browser_canvas: str = "A4",
        n_consensus: int = 1,
    ) -> EvaluateSchemaResponse:
        """
        Evaluate a schema by performing extractions on provided documents.
        If ground truths are provided, compares extractions against them.
        Otherwise, uses consensus likelihoods as metrics.

        Args:
            documents: List of documents to evaluate against
            json_schema: The JSON schema to evaluate
            ground_truths: Optional list of ground truth dictionaries to compare against
            model: The model to use for extraction
            temperature: The temperature to use for extraction
            reasoning_effort: The reasoning effort to use for extraction
            modality: The modality to use for extraction
            image_resolution_dpi: The DPI of the image. Defaults to 96.
            browser_canvas: The canvas size of the browser. Must be one of:
                - "A3" (11.7in x 16.54in)
                - "A4" (8.27in x 11.7in)
                - "A5" (5.83in x 8.27in)
                Defaults to "A4".
            n_consensus: Number of consensus rounds to perform

        Returns:
            EvaluateSchemaResponse: Dictionary containing evaluation metrics for each document.
            Each metric includes:
                - similarity: Average similarity/likelihood score
                - similarities: Per-field similarity/likelihood scores
                - flat_similarities: Flattened per-field similarity/likelihood scores
                - aligned_* versions of the above metrics

        Raises:
            ValueError: If ground_truths is provided and its length doesn't match documents
            HTTPException: If the request fails or if there are too many documents
        """
        prepared_request = self.prepare_evaluate(
            documents=documents,
            json_schema=json_schema,
            ground_truths=ground_truths,
            model=model,
            temperature=temperature,
            reasoning_effort=reasoning_effort,
            modality=modality,
            image_resolution_dpi=image_resolution_dpi,
            browser_canvas=browser_canvas,
            n_consensus=n_consensus,
        )
        response = await self._client._prepared_request(prepared_request)
        return EvaluateSchemaResponse.model_validate(response)

    async def enhance(
        self,
        json_schema: dict[str, Any] | Path | str,
        documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
        ground_truths: list[dict[str, Any]] | None = None,
        instructions: str | None = None,
        model: str = "gpt-4o-2024-11-20",
        temperature: float = 0,
        modality: Modality = "native",
        flat_likelihoods: list[dict[str, float]] | dict[str, float] | None = None,
        tools_config: EnhanceSchemaConfigDict | None = None,
    ) -> Schema:
        prepared_request = self.prepare_enhance(
            json_schema, documents, ground_truths, instructions, model, temperature, modality, flat_likelihoods, EnhanceSchemaConfig.model_validate(tools_config or {})
        )
        response = await self._client._prepared_request(prepared_request)
        return Schema(json_schema=response["json_schema"])
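
A minimal usage sketch for the schemas resource above. The client class name (`UiForm`), its constructor, and the `schemas` attribute are assumptions for illustration and are not shown in this diff; only the method signatures come from the code above.

# Hypothetical usage sketch; the client class name, constructor, and the
# "schemas" attribute are assumptions, not taken from this diff.
from uiform import UiForm  # assumed export from uiform/__init__.py

client = UiForm(api_key="sk-...")  # assumed constructor

# Generate a schema from sample documents, then score it against ground truths.
schema = client.schemas.generate(
    documents=["invoice_1.pdf", "invoice_2.pdf"],
    instructions="Extract the invoice number, date, and total amount.",
)
metrics = client.schemas.evaluate(
    documents=["invoice_1.pdf", "invoice_2.pdf"],
    json_schema=schema.json_schema,  # Schema is assumed to expose the raw dict as .json_schema
    ground_truths=[{"invoice_number": "INV-001"}, {"invoice_number": "INV-002"}],
    n_consensus=2,
)
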
uiform/resources/secrets/client.py
@@ -0,0 +1,20 @@
from typing import Any

from ..._resource import AsyncAPIResource, SyncAPIResource
from .external_api_keys import AsyncExternalAPIKeys, ExternalAPIKeys


class Secrets(SyncAPIResource):
    """Secrets API wrapper"""

    def __init__(self, client: Any) -> None:
        super().__init__(client=client)
        self.external_api_keys = ExternalAPIKeys(client=client)


class AsyncSecrets(AsyncAPIResource):
    """Secrets API wrapper (asynchronous)"""

    def __init__(self, client: Any) -> None:
        super().__init__(client=client)
        self.external_api_keys = AsyncExternalAPIKeys(client=client)
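
The `Secrets`/`AsyncSecrets` wrappers simply attach the external-API-key sub-resource to a shared client. A sketch of that composition pattern follows; it assumes `SyncAPIResource` stores the client and that the client exposes `_prepared_request`, and it is not the actual `uiform/client.py` implementation (which is not shown here).

# Sketch of the resource-composition pattern used above; MiniClient is a stand-in.
from typing import Any

from uiform.resources.secrets.client import Secrets  # path taken from the file list above


class MiniClient:
    def __init__(self) -> None:
        # Sub-resources keep a reference to the client and delegate HTTP calls to it.
        self.secrets = Secrets(client=self)  # the attribute name "secrets" is an assumption

    def _prepared_request(self, request: Any) -> dict:
        # A real client would dispatch request.method / request.url / request.data over HTTP.
        raise NotImplementedError
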
uiform/resources/secrets/external_api_keys.py
@@ -0,0 +1,109 @@
import os
from typing import List

from ..._resource import AsyncAPIResource, SyncAPIResource
from ...types.ai_models import AIProvider
from ...types.secrets.external_api_keys import ExternalAPIKey, ExternalAPIKeyRequest
from ...types.standards import PreparedRequest


class ExternalAPIKeysMixin:
    def prepare_create(self, provider: AIProvider, api_key: str) -> PreparedRequest:
        data = {"provider": provider, "api_key": api_key}
        request = ExternalAPIKeyRequest.model_validate(data)
        return PreparedRequest(method="POST", url="/v1/secrets/external_api_keys", data=request.model_dump(mode="json"))

    def prepare_get(self, provider: AIProvider) -> PreparedRequest:
        return PreparedRequest(method="GET", url=f"/v1/secrets/external_api_keys/{provider}")

    def prepare_list(self) -> PreparedRequest:
        return PreparedRequest(method="GET", url="/v1/secrets/external_api_keys")

    def prepare_delete(self, provider: AIProvider) -> PreparedRequest:
        return PreparedRequest(method="DELETE", url=f"/v1/secrets/external_api_keys/{provider}")


class ExternalAPIKeys(SyncAPIResource, ExternalAPIKeysMixin):
    """External API Keys management wrapper"""

    def create(self, provider: AIProvider, api_key: str) -> dict:
        """Add or update an external API key.

        Args:
            provider: The API provider (openai, gemini, anthropic, xai)
            api_key: The API key to store

        Returns:
            dict: Response indicating success
        """
        request = self.prepare_create(provider, api_key)
        response = self._client._prepared_request(request)
        return response

    def get(
        self,
        provider: AIProvider,
    ) -> ExternalAPIKey:
        """Get an external API key configuration.

        Args:
            provider: The API provider to get the key for

        Returns:
            ExternalAPIKey: The API key configuration
        """
        request = self.prepare_get(provider)
        response = self._client._prepared_request(request)
        return ExternalAPIKey.model_validate(response)

    def list(self) -> List[ExternalAPIKey]:
        """List all configured external API keys.

        Returns:
            List[ExternalAPIKey]: List of API key configurations
        """
        request = self.prepare_list()
        response = self._client._prepared_request(request)
        return [ExternalAPIKey.model_validate(key) for key in response]

    def delete(self, provider: AIProvider) -> dict:
        """Delete an external API key configuration.

        Args:
            provider: The API provider to delete the key for

        Returns:
            dict: Response indicating success
        """
        request = self.prepare_delete(provider)
        response = self._client._prepared_request(request)
        return response


class AsyncExternalAPIKeys(AsyncAPIResource, ExternalAPIKeysMixin):
    """External API Keys management wrapper"""

    async def create(self, provider: AIProvider, api_key: str) -> dict:
        request = self.prepare_create(provider, api_key)
        response = await self._client._prepared_request(request)
        return response

    async def get(self, provider: AIProvider) -> ExternalAPIKey:
        request = self.prepare_get(provider)
        response = await self._client._prepared_request(request)
        return ExternalAPIKey.model_validate(response)

    async def list(self) -> List[ExternalAPIKey]:
        request = self.prepare_list()
        response = await self._client._prepared_request(request)
        return [ExternalAPIKey.model_validate(key) for key in response]

    async def delete(self, provider: AIProvider) -> dict:
        request = self.prepare_delete(provider)
        response = await self._client._prepared_request(request)
        return response
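
A hedged usage sketch for the external API key resource. The `external_api_keys` mounting point comes from the Secrets wrapper above; the top-level `client.secrets` attribute and the client construction are assumptions not shown in this diff.

# Usage sketch; "client" is an assumed, already-constructed SDK client instance.
client.secrets.external_api_keys.create(provider="openai", api_key="sk-...")
openai_key = client.secrets.external_api_keys.get("openai")    # -> ExternalAPIKey
all_keys = client.secrets.external_api_keys.list()             # -> list[ExternalAPIKey]
client.secrets.external_api_keys.delete("openai")
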
uiform/resources/secrets/webhook.py
@@ -0,0 +1,62 @@
from ..._resource import AsyncAPIResource, SyncAPIResource
from ...types.standards import PreparedRequest


class WebhookMixin:
    def prepare_create(self) -> PreparedRequest:
        return PreparedRequest(method="POST", url="/v1/secrets/webhook")

    def prepare_delete(self) -> PreparedRequest:
        return PreparedRequest(method="DELETE", url="/v1/secrets/webhook")


class Webhook(SyncAPIResource, WebhookMixin):
    """Webhook secret management wrapper"""

    def create(
        self,
    ) -> dict:
        """Create a webhook secret.

        Returns:
            dict: Response indicating success
        """
        request = self.prepare_create()
        response = self._client._prepared_request(request)

        return response

    def delete(self) -> dict:
        """Delete a webhook secret.

        Returns:
            dict: Response indicating success
        """
        request = self.prepare_delete()
        response = self._client._prepared_request(request)

        return response


class AsyncWebhook(AsyncAPIResource, WebhookMixin):
    """Webhook secret management wrapper"""

    async def create(self) -> dict:
        """Create a webhook secret.

        Returns:
            dict: Response indicating success
        """
        request = self.prepare_create()
        response = await self._client._prepared_request(request)
        return response

    async def delete(self) -> dict:
        """Delete a webhook secret.

        Returns:
            dict: Response indicating success
        """
        request = self.prepare_delete()
        response = await self._client._prepared_request(request)
        return response
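
A hedged usage sketch for the webhook secret resource. The diff does not show where `Webhook` is mounted on the client, so it is instantiated directly here; `client` is the assumed SDK client instance.

# Usage sketch; Webhook is constructed directly because its mounting point
# on the client is not visible in this diff.
from uiform.resources.secrets.webhook import Webhook  # path taken from the file list above

webhook = Webhook(client=client)
webhook.create()   # POST /v1/secrets/webhook: provisions a webhook secret
webhook.delete()   # DELETE /v1/secrets/webhook: removes it
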