retab-0.0.35-py3-none-any.whl → retab-0.0.37-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (134)
  1. {uiform → retab}/_utils/ai_models.py +2 -2
  2. {uiform → retab}/_utils/benchmarking.py +15 -16
  3. {uiform → retab}/_utils/chat.py +9 -14
  4. {uiform → retab}/_utils/display.py +0 -3
  5. {uiform → retab}/_utils/json_schema.py +9 -14
  6. {uiform → retab}/_utils/mime.py +11 -14
  7. {uiform → retab}/_utils/responses.py +9 -3
  8. {uiform → retab}/_utils/stream_context_managers.py +1 -1
  9. {uiform → retab}/_utils/usage/usage.py +28 -28
  10. {uiform → retab}/client.py +32 -31
  11. {uiform → retab}/resources/consensus/client.py +17 -36
  12. {uiform → retab}/resources/consensus/completions.py +24 -47
  13. {uiform → retab}/resources/consensus/completions_stream.py +26 -38
  14. {uiform → retab}/resources/consensus/responses.py +31 -80
  15. {uiform → retab}/resources/consensus/responses_stream.py +31 -79
  16. {uiform → retab}/resources/documents/client.py +59 -45
  17. {uiform → retab}/resources/documents/extractions.py +181 -90
  18. {uiform → retab}/resources/evals.py +56 -43
  19. retab/resources/evaluations/__init__.py +3 -0
  20. retab/resources/evaluations/client.py +301 -0
  21. retab/resources/evaluations/documents.py +233 -0
  22. retab/resources/evaluations/iterations.py +452 -0
  23. {uiform → retab}/resources/files.py +2 -2
  24. {uiform → retab}/resources/jsonlUtils.py +220 -216
  25. retab/resources/models.py +73 -0
  26. retab/resources/processors/automations/client.py +244 -0
  27. {uiform → retab}/resources/processors/automations/endpoints.py +77 -118
  28. retab/resources/processors/automations/links.py +294 -0
  29. {uiform → retab}/resources/processors/automations/logs.py +30 -19
  30. {uiform → retab}/resources/processors/automations/mailboxes.py +136 -174
  31. retab/resources/processors/automations/outlook.py +337 -0
  32. {uiform → retab}/resources/processors/automations/tests.py +22 -25
  33. {uiform → retab}/resources/processors/client.py +179 -164
  34. {uiform → retab}/resources/schemas.py +78 -66
  35. {uiform → retab}/resources/secrets/external_api_keys.py +1 -5
  36. retab/resources/secrets/webhook.py +64 -0
  37. {uiform → retab}/resources/usage.py +39 -2
  38. {uiform → retab}/types/ai_models.py +13 -13
  39. {uiform → retab}/types/automations/cron.py +19 -12
  40. {uiform → retab}/types/automations/endpoints.py +7 -4
  41. {uiform → retab}/types/automations/links.py +7 -3
  42. {uiform → retab}/types/automations/mailboxes.py +9 -9
  43. {uiform → retab}/types/automations/outlook.py +15 -11
  44. retab/types/browser_canvas.py +3 -0
  45. {uiform → retab}/types/chat.py +2 -2
  46. {uiform → retab}/types/completions.py +9 -12
  47. retab/types/consensus.py +19 -0
  48. {uiform → retab}/types/db/annotations.py +3 -3
  49. {uiform → retab}/types/db/files.py +8 -6
  50. {uiform → retab}/types/documents/create_messages.py +18 -20
  51. {uiform → retab}/types/documents/extractions.py +69 -24
  52. {uiform → retab}/types/evals.py +5 -5
  53. retab/types/evaluations/__init__.py +31 -0
  54. retab/types/evaluations/documents.py +30 -0
  55. retab/types/evaluations/iterations.py +112 -0
  56. retab/types/evaluations/model.py +73 -0
  57. retab/types/events.py +79 -0
  58. {uiform → retab}/types/extractions.py +33 -10
  59. retab/types/inference_settings.py +15 -0
  60. retab/types/jobs/base.py +54 -0
  61. retab/types/jobs/batch_annotation.py +12 -0
  62. {uiform → retab}/types/jobs/evaluation.py +1 -2
  63. {uiform → retab}/types/logs.py +37 -34
  64. retab/types/metrics.py +32 -0
  65. {uiform → retab}/types/mime.py +22 -20
  66. {uiform → retab}/types/modalities.py +10 -10
  67. retab/types/predictions.py +19 -0
  68. {uiform → retab}/types/schemas/enhance.py +4 -2
  69. {uiform → retab}/types/schemas/evaluate.py +7 -4
  70. {uiform → retab}/types/schemas/generate.py +6 -3
  71. {uiform → retab}/types/schemas/layout.py +1 -1
  72. {uiform → retab}/types/schemas/object.py +13 -14
  73. {uiform → retab}/types/schemas/templates.py +1 -3
  74. {uiform → retab}/types/secrets/external_api_keys.py +0 -1
  75. {uiform → retab}/types/standards.py +18 -1
  76. {retab-0.0.35.dist-info → retab-0.0.37.dist-info}/METADATA +7 -6
  77. retab-0.0.37.dist-info/RECORD +107 -0
  78. retab-0.0.37.dist-info/top_level.txt +1 -0
  79. retab-0.0.35.dist-info/RECORD +0 -111
  80. retab-0.0.35.dist-info/top_level.txt +0 -1
  81. uiform/_utils/benchmarking copy.py +0 -588
  82. uiform/resources/deployments/__init__.py +0 -9
  83. uiform/resources/deployments/client.py +0 -78
  84. uiform/resources/deployments/endpoints.py +0 -322
  85. uiform/resources/deployments/links.py +0 -452
  86. uiform/resources/deployments/logs.py +0 -211
  87. uiform/resources/deployments/mailboxes.py +0 -496
  88. uiform/resources/deployments/outlook.py +0 -531
  89. uiform/resources/deployments/tests.py +0 -158
  90. uiform/resources/models.py +0 -45
  91. uiform/resources/processors/automations/client.py +0 -78
  92. uiform/resources/processors/automations/links.py +0 -356
  93. uiform/resources/processors/automations/outlook.py +0 -444
  94. uiform/resources/secrets/webhook.py +0 -62
  95. uiform/types/consensus.py +0 -10
  96. uiform/types/deployments/cron.py +0 -59
  97. uiform/types/deployments/endpoints.py +0 -28
  98. uiform/types/deployments/links.py +0 -36
  99. uiform/types/deployments/mailboxes.py +0 -67
  100. uiform/types/deployments/outlook.py +0 -76
  101. uiform/types/deployments/webhooks.py +0 -21
  102. uiform/types/events.py +0 -76
  103. uiform/types/jobs/base.py +0 -150
  104. uiform/types/jobs/batch_annotation.py +0 -22
  105. uiform/types/secrets/__init__.py +0 -0
  106. {uiform → retab}/__init__.py +0 -0
  107. {uiform → retab}/_resource.py +0 -0
  108. {uiform → retab}/_utils/__init__.py +0 -0
  109. {uiform → retab}/_utils/usage/__init__.py +0 -0
  110. {uiform → retab}/py.typed +0 -0
  111. {uiform → retab}/resources/__init__.py +0 -0
  112. {uiform → retab}/resources/consensus/__init__.py +0 -0
  113. {uiform → retab}/resources/documents/__init__.py +0 -0
  114. {uiform → retab}/resources/finetuning.py +0 -0
  115. {uiform → retab}/resources/openai_example.py +0 -0
  116. {uiform → retab}/resources/processors/__init__.py +0 -0
  117. {uiform → retab}/resources/processors/automations/__init__.py +0 -0
  118. {uiform → retab}/resources/prompt_optimization.py +0 -0
  119. {uiform → retab}/resources/secrets/__init__.py +0 -0
  120. {uiform → retab}/resources/secrets/client.py +0 -0
  121. {uiform → retab}/types/__init__.py +0 -0
  122. {uiform → retab}/types/automations/__init__.py +0 -0
  123. {uiform → retab}/types/automations/webhooks.py +0 -0
  124. {uiform → retab}/types/db/__init__.py +0 -0
  125. {uiform/types/deployments → retab/types/documents}/__init__.py +0 -0
  126. {uiform → retab}/types/documents/correct_orientation.py +0 -0
  127. {uiform/types/documents → retab/types/jobs}/__init__.py +0 -0
  128. {uiform → retab}/types/jobs/finetune.py +0 -0
  129. {uiform → retab}/types/jobs/prompt_optimization.py +0 -0
  130. {uiform → retab}/types/jobs/webcrawl.py +0 -0
  131. {uiform → retab}/types/pagination.py +0 -0
  132. {uiform/types/jobs → retab/types/schemas}/__init__.py +0 -0
  133. {uiform/types/schemas → retab/types/secrets}/__init__.py +0 -0
  134. {retab-0.0.35.dist-info → retab-0.0.37.dist-info}/WHEEL +0 -0
@@ -1,22 +1,23 @@
1
1
  from io import IOBase
2
2
  from pathlib import Path
3
- from typing import Any, List, Optional, Sequence
3
+ from typing import Any, Sequence
4
4
 
5
5
  import PIL.Image
6
+ from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
6
7
  from pydantic import BaseModel
7
8
 
8
9
  from .._resource import AsyncAPIResource, SyncAPIResource
9
10
  from .._utils.ai_models import assert_valid_model_schema_generation
10
11
  from .._utils.json_schema import load_json_schema
11
12
  from .._utils.mime import prepare_mime_document_list
12
- from ..types.modalities import Modality
13
13
  from ..types.mime import MIMEData
14
- from ..types.schemas.generate import GenerateSchemaRequest
14
+ from ..types.modalities import Modality
15
15
  from ..types.schemas.enhance import EnhanceSchemaConfig, EnhanceSchemaConfigDict, EnhanceSchemaRequest
16
16
  from ..types.schemas.evaluate import EvaluateSchemaRequest, EvaluateSchemaResponse
17
+ from ..types.schemas.generate import GenerateSchemaRequest
17
18
  from ..types.schemas.object import Schema
18
19
  from ..types.standards import PreparedRequest
19
- from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
20
+ from ..types.browser_canvas import BrowserCanvas
20
21
 
21
22
 
22
23
  class SchemasMixin:
@@ -27,18 +28,19 @@ class SchemasMixin:
27
28
  model: str = "gpt-4o-2024-11-20",
28
29
  temperature: float = 0,
29
30
  modality: Modality = "native",
31
+ reasoning_effort: ChatCompletionReasoningEffort = "medium",
30
32
  ) -> PreparedRequest:
31
33
  assert_valid_model_schema_generation(model)
32
34
  mime_documents = prepare_mime_document_list(documents)
33
- data = {
34
- "documents": [doc.model_dump() for doc in mime_documents],
35
- "instructions": instructions if instructions else None,
36
- "model": model,
37
- "temperature": temperature,
38
- "modality": modality,
39
- }
40
- GenerateSchemaRequest.model_validate(data)
41
- return PreparedRequest(method="POST", url="/v1/schemas/generate", data=data)
35
+ request = GenerateSchemaRequest(
36
+ documents=mime_documents,
37
+ instructions=instructions if instructions else None,
38
+ model=model,
39
+ temperature=temperature,
40
+ modality=modality,
41
+ reasoning_effort=reasoning_effort,
42
+ )
43
+ return PreparedRequest(method="POST", url="/v1/schemas/generate", data=request.model_dump())
42
44
 
43
45
  def prepare_evaluate(
44
46
  self,
@@ -46,61 +48,53 @@ class SchemasMixin:
46
48
  json_schema: dict[str, Any],
47
49
  ground_truths: list[dict[str, Any]] | None = None,
48
50
  model: str = "gpt-4o-mini",
49
- temperature: float = 0.0,
50
51
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
51
52
  modality: Modality = "native",
52
53
  image_resolution_dpi: int = 96,
53
- browser_canvas: str = "A4",
54
+ browser_canvas: BrowserCanvas = "A4",
54
55
  n_consensus: int = 1,
55
56
  ) -> PreparedRequest:
56
- # Assert that if ground_truths is not None, it has the same length as documents
57
- if ground_truths is not None and len(documents) != len(ground_truths):
58
- raise ValueError("Number of documents must match number of ground truths")
59
-
60
57
  mime_documents = prepare_mime_document_list(documents)
61
- data = {
62
- "documents": [doc.model_dump() for doc in mime_documents],
63
- "ground_truths": ground_truths,
64
- "model": model,
65
- "temperature": temperature,
66
- "reasoning_effort": reasoning_effort,
67
- "modality": modality,
68
- "image_resolution_dpi": image_resolution_dpi,
69
- "browser_canvas": browser_canvas,
70
- "n_consensus": n_consensus,
71
- "json_schema": json_schema,
72
- }
73
- EvaluateSchemaRequest.model_validate(data)
74
- return PreparedRequest(method="POST", url="/v1/schemas/evaluate", data=data)
58
+ request = EvaluateSchemaRequest(
59
+ documents=mime_documents,
60
+ json_schema=json_schema,
61
+ ground_truths=ground_truths,
62
+ model=model,
63
+ reasoning_effort=reasoning_effort,
64
+ modality=modality,
65
+ image_resolution_dpi=image_resolution_dpi,
66
+ browser_canvas=browser_canvas,
67
+ n_consensus=n_consensus,
68
+ )
69
+ return PreparedRequest(method="POST", url="/v1/schemas/evaluate", data=request.model_dump())
75
70
 
76
71
  def prepare_enhance(
77
72
  self,
78
73
  json_schema: dict[str, Any] | Path | str,
79
74
  documents: Sequence[Path | str | bytes | MIMEData | IOBase | PIL.Image.Image],
80
- ground_truths: list[dict[str, Any]] | None,
81
- instructions: str | None,
82
- model: str,
83
- temperature: float,
84
- modality: Modality,
85
- flat_likelihoods: list[dict[str, float]] | dict[str, float] | None,
86
- tools_config: EnhanceSchemaConfig,
75
+ ground_truths: list[dict[str, Any]] | None = None,
76
+ instructions: str | None = None,
77
+ model: str = "gpt-4o-mini",
78
+ temperature: float = 0.0,
79
+ modality: Modality = "native",
80
+ flat_likelihoods: list[dict[str, float]] | dict[str, float] | None = None,
81
+ tools_config: EnhanceSchemaConfig = EnhanceSchemaConfig(),
87
82
  ) -> PreparedRequest:
88
83
  assert_valid_model_schema_generation(model)
89
84
  mime_documents = prepare_mime_document_list(documents)
90
85
  loaded_json_schema = load_json_schema(json_schema)
91
- data = {
92
- "json_schema": loaded_json_schema,
93
- "documents": [doc.model_dump() for doc in mime_documents],
94
- "ground_truths": ground_truths,
95
- "instructions": instructions if instructions else None,
96
- "model": model,
97
- "temperature": temperature,
98
- "modality": modality,
99
- "flat_likelihoods": flat_likelihoods,
100
- "tools_config": tools_config.model_dump(),
101
- }
102
- EnhanceSchemaRequest.model_validate(data)
103
- return PreparedRequest(method="POST", url="/v1/schemas/enhance", data=data)
86
+ request = EnhanceSchemaRequest(
87
+ json_schema=loaded_json_schema,
88
+ documents=mime_documents,
89
+ ground_truths=ground_truths,
90
+ instructions=instructions if instructions else None,
91
+ model=model,
92
+ temperature=temperature,
93
+ modality=modality,
94
+ flat_likelihoods=flat_likelihoods,
95
+ tools_config=tools_config,
96
+ )
97
+ return PreparedRequest(method="POST", url="/v1/schemas/enhance", data=request.model_dump())
104
98
 
105
99
  def prepare_get(self, schema_id: str) -> PreparedRequest:
106
100
  return PreparedRequest(method="GET", url=f"/v1/schemas/{schema_id}")
@@ -155,11 +149,10 @@ class Schemas(SyncAPIResource, SchemasMixin):
155
149
  json_schema: dict[str, Any],
156
150
  ground_truths: list[dict[str, Any]] | None = None,
157
151
  model: str = "gpt-4o-mini",
158
- temperature: float = 0.0,
159
152
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
160
153
  modality: Modality = "native",
161
154
  image_resolution_dpi: int = 96,
162
- browser_canvas: str = "A4",
155
+ browser_canvas: BrowserCanvas = "A4",
163
156
  n_consensus: int = 1,
164
157
  ) -> EvaluateSchemaResponse:
165
158
  """
@@ -172,13 +165,12 @@ class Schemas(SyncAPIResource, SchemasMixin):
172
165
  json_schema: The JSON schema to evaluate
173
166
  ground_truths: Optional list of ground truth dictionaries to compare against
174
167
  model: The model to use for extraction
175
- temperature: The temperature to use for extraction
176
168
  reasoning_effort: The reasoning effort to use for extraction
177
169
  modality: The modality to use for extraction
178
170
  image_resolution_dpi: The DPI of the image. Defaults to 96.
179
171
  browser_canvas: The canvas size of the browser. Must be one of:
180
172
  - "A3" (11.7in x 16.54in)
181
- - "A4" (8.27in x 11.7in)
173
+ - "A4" (8.27in x 11.7in)
182
174
  - "A5" (5.83in x 8.27in)
183
175
  Defaults to "A4".
184
176
  n_consensus: Number of consensus rounds to perform
@@ -200,7 +192,6 @@ class Schemas(SyncAPIResource, SchemasMixin):
200
192
  json_schema=json_schema,
201
193
  ground_truths=ground_truths,
202
194
  model=model,
203
- temperature=temperature,
204
195
  reasoning_effort=reasoning_effort,
205
196
  modality=modality,
206
197
  image_resolution_dpi=image_resolution_dpi,
@@ -222,8 +213,17 @@ class Schemas(SyncAPIResource, SchemasMixin):
222
213
  flat_likelihoods: list[dict[str, float]] | dict[str, float] | None = None,
223
214
  tools_config: EnhanceSchemaConfigDict | None = None,
224
215
  ) -> Schema:
216
+ _tools_config = EnhanceSchemaConfig(**(tools_config or {}))
225
217
  prepared_request = self.prepare_enhance(
226
- json_schema, documents, ground_truths, instructions, model, temperature, modality, flat_likelihoods, EnhanceSchemaConfig.model_validate(tools_config or {})
218
+ json_schema=json_schema,
219
+ documents=documents,
220
+ ground_truths=ground_truths,
221
+ instructions=instructions,
222
+ model=model,
223
+ temperature=temperature,
224
+ modality=modality,
225
+ flat_likelihoods=flat_likelihoods,
226
+ tools_config=_tools_config,
227
227
  )
228
228
  response = self._client._prepared_request(prepared_request)
229
229
  return Schema(json_schema=response["json_schema"])
@@ -285,7 +285,13 @@ class AsyncSchemas(AsyncAPIResource, SchemasMixin):
285
285
  Raises
286
286
  HTTPException if the request fails
287
287
  """
288
- prepared_request = self.prepare_generate(documents, instructions, model, temperature, modality)
288
+ prepared_request = self.prepare_generate(
289
+ documents=documents,
290
+ instructions=instructions,
291
+ model=model,
292
+ temperature=temperature,
293
+ modality=modality,
294
+ )
289
295
  response = await self._client._prepared_request(prepared_request)
290
296
  return Schema.model_validate(response)
291
297
 
@@ -295,11 +301,10 @@ class AsyncSchemas(AsyncAPIResource, SchemasMixin):
295
301
  json_schema: dict[str, Any],
296
302
  ground_truths: list[dict[str, Any]] | None = None,
297
303
  model: str = "gpt-4o-mini",
298
- temperature: float = 0.0,
299
304
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
300
305
  modality: Modality = "native",
301
306
  image_resolution_dpi: int = 96,
302
- browser_canvas: str = "A4",
307
+ browser_canvas: BrowserCanvas = "A4",
303
308
  n_consensus: int = 1,
304
309
  ) -> EvaluateSchemaResponse:
305
310
  """
@@ -312,13 +317,12 @@ class AsyncSchemas(AsyncAPIResource, SchemasMixin):
312
317
  json_schema: The JSON schema to evaluate
313
318
  ground_truths: Optional list of ground truth dictionaries to compare against
314
319
  model: The model to use for extraction
315
- temperature: The temperature to use for extraction
316
320
  reasoning_effort: The reasoning effort to use for extraction
317
321
  modality: The modality to use for extraction
318
322
  image_resolution_dpi: The DPI of the image. Defaults to 96.
319
323
  browser_canvas: The canvas size of the browser. Must be one of:
320
324
  - "A3" (11.7in x 16.54in)
321
- - "A4" (8.27in x 11.7in)
325
+ - "A4" (8.27in x 11.7in)
322
326
  - "A5" (5.83in x 8.27in)
323
327
  Defaults to "A4".
324
328
  n_consensus: Number of consensus rounds to perform
@@ -340,7 +344,6 @@ class AsyncSchemas(AsyncAPIResource, SchemasMixin):
340
344
  json_schema=json_schema,
341
345
  ground_truths=ground_truths,
342
346
  model=model,
343
- temperature=temperature,
344
347
  reasoning_effort=reasoning_effort,
345
348
  modality=modality,
346
349
  image_resolution_dpi=image_resolution_dpi,
@@ -362,8 +365,17 @@ class AsyncSchemas(AsyncAPIResource, SchemasMixin):
362
365
  flat_likelihoods: list[dict[str, float]] | dict[str, float] | None = None,
363
366
  tools_config: EnhanceSchemaConfigDict | None = None,
364
367
  ) -> Schema:
368
+ _tools_config = EnhanceSchemaConfig(**(tools_config or {}))
365
369
  prepared_request = self.prepare_enhance(
366
- json_schema, documents, ground_truths, instructions, model, temperature, modality, flat_likelihoods, EnhanceSchemaConfig.model_validate(tools_config or {})
370
+ json_schema=json_schema,
371
+ documents=documents,
372
+ ground_truths=ground_truths,
373
+ instructions=instructions,
374
+ model=model,
375
+ temperature=temperature,
376
+ modality=modality,
377
+ flat_likelihoods=flat_likelihoods,
378
+ tools_config=_tools_config,
367
379
  )
368
380
  response = await self._client._prepared_request(prepared_request)
369
381
  return Schema(json_schema=response["json_schema"])
@@ -1,4 +1,3 @@
1
- import os
2
1
  from typing import List
3
2
 
4
3
  from ..._resource import AsyncAPIResource, SyncAPIResource
@@ -9,8 +8,7 @@ from ...types.standards import PreparedRequest
9
8
 
10
9
  class ExternalAPIKeysMixin:
11
10
  def prepare_create(self, provider: AIProvider, api_key: str) -> PreparedRequest:
12
- data = {"provider": provider, "api_key": api_key}
13
- request = ExternalAPIKeyRequest.model_validate(data)
11
+ request = ExternalAPIKeyRequest(provider=provider, api_key=api_key)
14
12
  return PreparedRequest(method="POST", url="/v1/secrets/external_api_keys", data=request.model_dump(mode="json"))
15
13
 
16
14
  def prepare_get(self, provider: AIProvider) -> PreparedRequest:
@@ -55,8 +53,6 @@ class ExternalAPIKeys(SyncAPIResource, ExternalAPIKeysMixin):
55
53
  """
56
54
  request = self.prepare_get(provider)
57
55
  response = self._client._prepared_request(request)
58
- return response
59
-
60
56
  return ExternalAPIKey.model_validate(response)
61
57
 
62
58
  def list(self) -> List[ExternalAPIKey]:
@@ -0,0 +1,64 @@
1
+ # TODO: Implement webhook secret management
2
+
3
+ # from ..._resource import AsyncAPIResource, SyncAPIResource
4
+ # from ...types.standards import PreparedRequest
5
+
6
+
7
+ # class WebhookMixin:
8
+ # def prepare_create(self) -> PreparedRequest:
9
+ # return PreparedRequest(method="POST", url="/v1/secrets/webhook")
10
+
11
+ # def prepare_delete(self) -> PreparedRequest:
12
+ # return PreparedRequest(method="DELETE", url="/v1/secrets/webhook")
13
+
14
+
15
+ # class Webhook(SyncAPIResource, WebhookMixin):
16
+ # """Webhook secret management wrapper"""
17
+
18
+ # def create(
19
+ # self,
20
+ # ) -> dict:
21
+ # """Create a webhook secret.
22
+
23
+ # Returns:
24
+ # dict: Response indicating success
25
+ # """
26
+ # request = self.prepare_create()
27
+ # response = self._client._prepared_request(request)
28
+
29
+ # return response
30
+
31
+ # def delete(self) -> dict:
32
+ # """Delete a webhook secret.
33
+
34
+ # Returns:
35
+ # dict: Response indicating success
36
+ # """
37
+ # request = self.prepare_delete()
38
+ # response = self._client._prepared_request(request)
39
+
40
+ # return response
41
+
42
+
43
+ # class AsyncWebhook(AsyncAPIResource, WebhookMixin):
44
+ # """Webhook secret management wrapper"""
45
+
46
+ # async def create(self) -> dict:
47
+ # """Create a webhook secret.
48
+
49
+ # Returns:
50
+ # dict: Response indicating success
51
+ # """
52
+ # request = self.prepare_create()
53
+ # response = await self._client._prepared_request(request)
54
+ # return response
55
+
56
+ # async def delete(self) -> dict:
57
+ # """Delete a webhook secret.
58
+
59
+ # Returns:
60
+ # dict: Response indicating success
61
+ # """
62
+ # request = self.prepare_delete()
63
+ # response = await self._client._prepared_request(request)
64
+ # return response
@@ -6,7 +6,7 @@ from openai.types.chat.chat_completion import ChatCompletion
6
6
  from pydantic import BaseModel
7
7
 
8
8
  from .._resource import AsyncAPIResource, SyncAPIResource
9
- from ..types.ai_models import Amount
9
+ from ..types.ai_models import Amount, MonthlyUsageResponse
10
10
  from ..types.logs import AutomationLog, LogCompletionRequest
11
11
  from ..types.standards import PreparedRequest
12
12
 
@@ -14,8 +14,17 @@ total_cost = 0.0
14
14
 
15
15
 
16
16
  class UsageMixin:
17
+ def prepare_monthly_credits_usage(self) -> PreparedRequest:
18
+ return PreparedRequest(method="GET", url="/v1/usage/monthly_credits")
19
+
17
20
  def prepare_total(self, start_date: Optional[datetime.datetime] = None, end_date: Optional[datetime.datetime] = None) -> PreparedRequest:
18
- raise NotImplementedError("prepare_total is not implemented")
21
+ params = {}
22
+ if start_date:
23
+ params["start_date"] = start_date.isoformat()
24
+ if end_date:
25
+ params["end_date"] = end_date.isoformat()
26
+
27
+ return PreparedRequest(method="GET", url="/v1/usage/total", params=params)
19
28
 
20
29
  def prepare_mailbox(self, email: str, start_date: Optional[datetime.datetime] = None, end_date: Optional[datetime.datetime] = None) -> PreparedRequest:
21
30
  params = {}
@@ -72,6 +81,20 @@ class UsageMixin:
72
81
 
73
82
 
74
83
  class Usage(SyncAPIResource, UsageMixin):
84
+ def monthly_credits_usage(self) -> MonthlyUsageResponse:
85
+ """
86
+ Get monthly credits usage information.
87
+
88
+ Returns:
89
+ dict: Monthly usage data including credits consumed and limits
90
+
91
+ Raises:
92
+ UiformAPIError: If the API request fails
93
+ """
94
+ request = self.prepare_monthly_credits_usage()
95
+ response = self._client._request(request.method, request.url, request.data, request.params)
96
+ return MonthlyUsageResponse.model_validate(response)
97
+
75
98
  def total(self, start_date: Optional[datetime.datetime] = None, end_date: Optional[datetime.datetime] = None) -> Amount:
76
99
  """Get the total usage cost for a mailbox within an optional date range.
77
100
 
@@ -172,6 +195,20 @@ class Usage(SyncAPIResource, UsageMixin):
172
195
 
173
196
 
174
197
  class AsyncUsage(AsyncAPIResource, UsageMixin):
198
+ async def monthly_credits_usage(self) -> MonthlyUsageResponse:
199
+ """
200
+ Get monthly credits usage information.
201
+
202
+ Returns:
203
+ dict: Monthly usage data including credits consumed and limits
204
+
205
+ Raises:
206
+ UiformAPIError: If the API request fails
207
+ """
208
+ request = self.prepare_monthly_credits_usage()
209
+ response = await self._client._request(request.method, request.url, request.data, request.params)
210
+ return MonthlyUsageResponse.model_validate(response)
211
+
175
212
  async def total(self, start_date: Optional[datetime.datetime] = None, end_date: Optional[datetime.datetime] = None) -> Amount:
176
213
  """Get the total usage cost for a mailbox within an optional date range.
177
214
 
@@ -1,8 +1,14 @@
1
- from typing import Literal
1
+ import datetime
2
+ from typing import List, Literal, Optional
3
+
4
+ from pydantic import BaseModel, Field
5
+
6
+ from .inference_settings import InferenceSettings
2
7
 
3
8
  AIProvider = Literal["OpenAI", "Gemini", "xAI", "UiForm"] # , "Anthropic", "xAI"]
4
9
  OpenAICompatibleProvider = Literal["OpenAI", "xAI"] # , "xAI"]
5
10
  GeminiModel = Literal[
11
+ "gemini-2.5-pro-preview-06-05",
6
12
  "gemini-2.5-pro-preview-05-06",
7
13
  "gemini-2.5-flash-preview-05-20",
8
14
  "gemini-2.5-flash-preview-04-17",
@@ -49,12 +55,6 @@ xAI_Model = Literal["grok-3-beta", "grok-3-mini-beta"]
49
55
  UiFormModel = Literal["auto", "auto-small"]
50
56
  LLMModel = Literal[OpenAIModel, "human", AnthropicModel, xAI_Model, GeminiModel, UiFormModel]
51
57
 
52
- import datetime
53
-
54
- from pydantic import BaseModel, Field
55
-
56
- from uiform.types.jobs.base import InferenceSettings
57
-
58
58
 
59
59
  class FinetunedModel(BaseModel):
60
60
  object: Literal["finetuned_model"] = "finetuned_model"
@@ -67,11 +67,6 @@ class FinetunedModel(BaseModel):
67
67
  created_at: datetime.datetime = Field(default_factory=lambda: datetime.datetime.now(datetime.timezone.utc))
68
68
 
69
69
 
70
- from typing import Dict, List, Literal, Optional
71
-
72
- from pydantic import BaseModel
73
-
74
-
75
70
  # Monthly Usage
76
71
  class MonthlyUsageResponseContent(BaseModel):
77
72
  request_count: int
@@ -542,6 +537,11 @@ gemini_model_cards = [
542
537
  capabilities=ModelCapabilities(modalities=["text", "image"], endpoints=["chat_completions"], features=["streaming", "function_calling", "structured_outputs"]),
543
538
  temperature_support=True,
544
539
  ),
540
+ ModelCard(
541
+ model="gemini-2.5-pro-preview-06-05",
542
+ pricing=Pricing(text=TokenPrice(prompt=1.25, cached_discount=0.25, completion=10.00), audio=None),
543
+ capabilities=ModelCapabilities(modalities=["text", "image"], endpoints=["chat_completions"], features=["streaming", "function_calling", "structured_outputs"]),
544
+ ),
545
545
  ModelCard(
546
546
  model="gemini-2.5-pro-preview-05-06",
547
547
  pricing=Pricing(text=TokenPrice(prompt=1.25, cached_discount=0.25, completion=10.00), audio=None),
@@ -556,7 +556,7 @@ gemini_model_cards = [
556
556
  ModelCard(
557
557
  model="gemini-2.5-flash-preview-04-17",
558
558
  pricing=Pricing(text=TokenPrice(prompt=0.15, cached_discount=0.25, completion=0.60), audio=None),
559
- capabilities=ModelCapabilities(modalities=["text", "image"], endpoints=["chat_completions"], features=["streaming", "function_calling", "structured_outputs"]),
559
+ capabilities=ModelCapabilities(modalities=["text", "image"], endpoints=["chat_completions"], features=["streaming", "function_calling", "structured_outputs"]),
560
560
  temperature_support=True,
561
561
  ),
562
562
  ModelCard(
@@ -1,11 +1,13 @@
1
1
  import datetime
2
- from typing import Any, Literal, Optional
2
+ from typing import Any, Optional
3
3
 
4
4
  import nanoid # type: ignore
5
5
  from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
6
- from pydantic import BaseModel, Field, HttpUrl, field_serializer
6
+ from pydantic import BaseModel, Field, HttpUrl, computed_field, field_validator
7
7
 
8
+ from ..logs import AutomationConfig
8
9
  from ..modalities import Modality
10
+ from ..browser_canvas import BrowserCanvas
9
11
 
10
12
 
11
13
  def scrapping_action(link: HttpUrl) -> dict[str, Any]:
@@ -24,26 +26,29 @@ class CronSchedule(BaseModel):
24
26
  return f"{self.second or '*'} {self.minute} {self.hour} {self.day_of_month or '*'} {self.month or '*'} {self.day_of_week or '*'}"
25
27
 
26
28
 
27
- from ..logs import AutomationConfig
28
-
29
-
30
29
  class ScrappingConfig(AutomationConfig):
31
- object: Literal['automation.scrapping_cron'] = "automation.scrapping_cron"
30
+ @computed_field
31
+ @property
32
+ def object(self) -> str:
33
+ return "automation.scrapping_cron"
34
+
32
35
  id: str = Field(default_factory=lambda: "scrapping_" + nanoid.generate(), description="Unique identifier for the scrapping job")
33
36
 
34
37
  # Scrapping Specific Config
35
- link: HttpUrl = Field(..., description="Link to be scrapped")
38
+ link: str = Field(..., description="Link to be scrapped")
36
39
  schedule: CronSchedule
37
40
 
38
41
  updated_at: datetime.datetime = Field(default_factory=lambda: datetime.datetime.now(datetime.timezone.utc))
39
42
 
40
43
  # HTTP Config
41
- webhook_url: HttpUrl = Field(..., description="Url of the webhook to send the data to")
44
+ webhook_url: str = Field(..., description="Url of the webhook to send the data to")
42
45
  webhook_headers: dict[str, str] = Field(default_factory=dict, description="Headers to send with the request")
43
46
 
44
47
  modality: Modality
45
48
  image_resolution_dpi: int = Field(default=96, description="Resolution of the image sent to the LLM")
46
- browser_canvas: Literal['A3', 'A4', 'A5'] = Field(default='A4', description="Sets the size of the browser canvas for rendering documents in browser-based processing. Choose a size that matches the document type.")
49
+ browser_canvas: BrowserCanvas = Field(
50
+ default="A4", description="Sets the size of the browser canvas for rendering documents in browser-based processing. Choose a size that matches the document type."
51
+ )
47
52
 
48
53
  # New attributes
49
54
  model: str = Field(..., description="Model used for chat completion")
@@ -53,6 +58,8 @@ class ScrappingConfig(AutomationConfig):
53
58
  default="medium", description="The effort level for the model to reason about the input data. If not provided, the default reasoning effort for the model will be used."
54
59
  )
55
60
 
56
- @field_serializer('webhook_url', 'link')
57
- def url2str(self, val: HttpUrl) -> str:
58
- return str(val)
61
+ @field_validator("webhook_url", "link", mode="after")
62
+ def validate_httpurl(cls, val: Any) -> Any:
63
+ if isinstance(val, str):
64
+ HttpUrl(val)
65
+ return val
@@ -1,16 +1,19 @@
1
- from typing import Literal
2
-
3
1
  import nanoid # type: ignore
4
- from pydantic import BaseModel, Field
2
+ from pydantic import BaseModel, Field, computed_field
5
3
 
6
4
  from ..logs import AutomationConfig, UpdateAutomationRequest
7
5
  from ..pagination import ListMetadata
8
6
 
9
7
 
10
8
  class Endpoint(AutomationConfig):
11
- object: Literal['automation.endpoint'] = "automation.endpoint"
9
+ @computed_field
10
+ @property
11
+ def object(self) -> str:
12
+ return "automation.endpoint"
13
+
12
14
  id: str = Field(default_factory=lambda: "endp_" + nanoid.generate(), description="Unique identifier for the extraction endpoint")
13
15
 
16
+
14
17
  class ListEndpoints(BaseModel):
15
18
  data: list[Endpoint]
16
19
  list_metadata: ListMetadata
@@ -1,14 +1,18 @@
1
- from typing import Literal, Optional
1
+ from typing import Optional
2
2
 
3
3
  import nanoid # type: ignore
4
- from pydantic import BaseModel, Field
4
+ from pydantic import BaseModel, Field, computed_field
5
5
 
6
6
  from ..logs import AutomationConfig, UpdateAutomationRequest
7
7
  from ..pagination import ListMetadata
8
8
 
9
9
 
10
10
  class Link(AutomationConfig):
11
- object: Literal['automation.link'] = "automation.link"
11
+ @computed_field
12
+ @property
13
+ def object(self) -> str:
14
+ return "automation.link"
15
+
12
16
  id: str = Field(default_factory=lambda: "lnk_" + nanoid.generate(), description="Unique identifier for the extraction link")
13
17
 
14
18
  # Link Specific Config
@@ -1,22 +1,23 @@
1
1
  import os
2
2
  import re
3
- from typing import ClassVar, List, Literal, Optional
3
+ from typing import ClassVar, List, Optional
4
4
 
5
5
  import nanoid # type: ignore
6
- from pydantic import BaseModel, EmailStr, Field, field_validator
6
+ from pydantic import BaseModel, EmailStr, Field, computed_field, field_validator
7
7
 
8
+ from ..logs import AutomationConfig, UpdateAutomationRequest
8
9
  from ..pagination import ListMetadata
9
10
 
10
11
  domain_pattern = re.compile(r"^(?:[a-zA-Z0-9-]+\.)+[a-zA-Z]{2,}$")
11
12
 
12
- from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
13
-
14
- from ..logs import AutomationConfig, UpdateAutomationRequest
15
-
16
13
 
17
14
  class Mailbox(AutomationConfig):
15
+ @computed_field
16
+ @property
17
+ def object(self) -> str:
18
+ return "automation.mailbox"
19
+
18
20
  EMAIL_PATTERN: ClassVar[str] = f".*@{os.getenv('EMAIL_DOMAIN', 'mailbox.uiform.com')}$"
19
- object: Literal['automation.mailbox'] = "automation.mailbox"
20
21
  id: str = Field(default_factory=lambda: "mb_" + nanoid.generate(), description="Unique identifier for the mailbox")
21
22
 
22
23
  # Email Specific config
@@ -33,7 +34,7 @@ class Mailbox(AutomationConfig):
33
34
  def normalize_authorized_emails(cls, emails: List[str]) -> List[str]:
34
35
  return [email.strip().lower() for email in emails]
35
36
 
36
- @field_validator('authorized_domains', mode='before')
37
+ @field_validator("authorized_domains", mode="before")
37
38
  def validate_domain(cls, list_domains: list[str]) -> list[str]:
38
39
  for domain in list_domains:
39
40
  if not domain_pattern.match(domain):
@@ -48,7 +49,6 @@ class ListMailboxes(BaseModel):
48
49
 
49
50
  # Inherits from the methods of UpdateAutomationRequest
50
51
  class UpdateMailboxRequest(UpdateAutomationRequest):
51
-
52
52
  # ------------------------------
53
53
  # Email Specific config
54
54
  # ------------------------------