retab 0.0.37-py3-none-any.whl → 0.0.38-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34)
  1. retab/__init__.py +2 -2
  2. retab/_resource.py +5 -5
  3. retab/_utils/chat.py +20 -20
  4. retab/_utils/responses.py +7 -7
  5. retab/_utils/usage/usage.py +3 -3
  6. retab/client.py +22 -22
  7. retab/resources/consensus/client.py +2 -2
  8. retab/resources/consensus/completions.py +12 -12
  9. retab/resources/consensus/completions_stream.py +9 -9
  10. retab/resources/consensus/responses.py +6 -6
  11. retab/resources/consensus/responses_stream.py +10 -10
  12. retab/resources/documents/client.py +201 -15
  13. retab/resources/documents/extractions.py +17 -17
  14. retab/resources/jsonlUtils.py +5 -5
  15. retab/resources/processors/automations/endpoints.py +2 -2
  16. retab/resources/processors/automations/links.py +2 -2
  17. retab/resources/processors/automations/logs.py +2 -2
  18. retab/resources/processors/automations/mailboxes.py +2 -2
  19. retab/resources/processors/automations/outlook.py +2 -2
  20. retab/resources/processors/client.py +2 -2
  21. retab/resources/usage.py +4 -4
  22. retab/types/ai_models.py +4 -4
  23. retab/types/automations/mailboxes.py +1 -1
  24. retab/types/automations/webhooks.py +1 -1
  25. retab/types/chat.py +1 -1
  26. retab/types/completions.py +3 -3
  27. retab/types/documents/create_messages.py +2 -2
  28. retab/types/documents/extractions.py +2 -2
  29. retab/types/extractions.py +3 -3
  30. retab/types/schemas/object.py +3 -3
  31. {retab-0.0.37.dist-info → retab-0.0.38.dist-info}/METADATA +72 -72
  32. {retab-0.0.37.dist-info → retab-0.0.38.dist-info}/RECORD +34 -34
  33. {retab-0.0.37.dist-info → retab-0.0.38.dist-info}/WHEEL +0 -0
  34. {retab-0.0.37.dist-info → retab-0.0.38.dist-info}/top_level.txt +0 -0
retab/__init__.py CHANGED
@@ -1,4 +1,4 @@
- from .client import AsyncUiForm, UiForm
+ from .client import AsyncRetab, Retab
  from .types.schemas.object import Schema

- __all__ = ["UiForm", "AsyncUiForm", "Schema"]
+ __all__ = ["Retab", "AsyncRetab", "Schema"]
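The hunk above is the visible surface of the 0.0.37 → 0.0.38 UiForm → Retab rename. A minimal upgrade sketch, assuming only the re-exports shown here and the environment-variable and base-URL changes shown later in the retab/client.py hunks:

```python
# Before (0.0.37): the client was exported as UiForm / AsyncUiForm and
# configured via UIFORM_API_KEY / https://api.uiform.com.
#
# After (0.0.38): the same client is exported as Retab / AsyncRetab.
from retab import Retab, AsyncRetab

# If api_key is omitted, the client reads RETAB_API_KEY from the environment;
# base_url defaults to https://api.retab.dev (override with RETAB_API_BASE_URL).
client = Retab(api_key="your-key")
```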
retab/_resource.py CHANGED
@@ -5,13 +5,13 @@ import time
  from typing import TYPE_CHECKING

  if TYPE_CHECKING:
- from .client import AsyncUiForm, UiForm
+ from .client import AsyncRetab, Retab


  class SyncAPIResource:
- _client: UiForm
+ _client: Retab

- def __init__(self, client: UiForm) -> None:
+ def __init__(self, client: Retab) -> None:
  self._client = client

  def _sleep(self, seconds: float) -> None:
@@ -19,9 +19,9 @@ class SyncAPIResource:


  class AsyncAPIResource:
- _client: AsyncUiForm
+ _client: AsyncRetab

- def __init__(self, client: AsyncUiForm) -> None:
+ def __init__(self, client: AsyncRetab) -> None:
  self._client = client

  async def _sleep(self, seconds: float) -> None:
retab/_utils/chat.py CHANGED
@@ -13,14 +13,14 @@ from openai.types.chat.chat_completion_content_part_param import ChatCompletionC
  from openai.types.chat.chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
  from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam

- from ..types.chat import ChatCompletionUiformMessage
+ from ..types.chat import ChatCompletionRetabMessage

  MediaType = Literal["image/jpeg", "image/png", "image/gif", "image/webp"]


- def convert_to_google_genai_format(messages: List[ChatCompletionUiformMessage]) -> tuple[str, list[ContentUnionDict]]:
+ def convert_to_google_genai_format(messages: List[ChatCompletionRetabMessage]) -> tuple[str, list[ContentUnionDict]]:
  """
- Converts a list of ChatCompletionUiFormMessage to a format compatible with the google.genai SDK.
+ Converts a list of ChatCompletionRetabMessage to a format compatible with the google.genai SDK.


  Example:
@@ -40,7 +40,7 @@ def convert_to_google_genai_format(messages: List[ChatCompletionUiformMessage])
  ```

  Args:
- messages (List[ChatCompletionUiformMessage]): List of chat messages.
+ messages (List[ChatCompletionRetabMessage]): List of chat messages.

  Returns:
  List[Union[Dict[str, str], str]]: A list of formatted inputs for the google.genai SDK.
@@ -94,12 +94,12 @@ def convert_to_google_genai_format(messages: List[ChatCompletionUiformMessage])
  return system_message, formatted_content


- def convert_to_anthropic_format(messages: List[ChatCompletionUiformMessage]) -> tuple[str, List[MessageParam]]:
+ def convert_to_anthropic_format(messages: List[ChatCompletionRetabMessage]) -> tuple[str, List[MessageParam]]:
  """
- Converts a list of ChatCompletionUiformMessage to a format compatible with the Anthropic SDK.
+ Converts a list of ChatCompletionRetabMessage to a format compatible with the Anthropic SDK.

  Args:
- messages (List[ChatCompletionUiformMessage]): List of chat messages.
+ messages (List[ChatCompletionRetabMessage]): List of chat messages.

  Returns:
  (system_message, formatted_messages):
@@ -216,11 +216,11 @@ def convert_to_anthropic_format(messages: List[ChatCompletionUiformMessage]) ->
  return system_message, formatted_messages


- def convert_from_anthropic_format(messages: list[MessageParam], system_prompt: str) -> list[ChatCompletionUiformMessage]:
+ def convert_from_anthropic_format(messages: list[MessageParam], system_prompt: str) -> list[ChatCompletionRetabMessage]:
  """
- Converts a list of Anthropic MessageParam to a list of ChatCompletionUiformMessage.
+ Converts a list of Anthropic MessageParam to a list of ChatCompletionRetabMessage.
  """
- formatted_messages: list[ChatCompletionUiformMessage] = [ChatCompletionUiformMessage(role="developer", content=system_prompt)]
+ formatted_messages: list[ChatCompletionRetabMessage] = [ChatCompletionRetabMessage(role="developer", content=system_prompt)]

  for message in messages:
  role = message["role"]
@@ -229,7 +229,7 @@ def convert_from_anthropic_format(messages: list[MessageParam], system_prompt: s
  # Handle different content structures
  if isinstance(content_blocks, list) and len(content_blocks) == 1 and isinstance(content_blocks[0], dict) and content_blocks[0].get("type") == "text":
  # Simple text message
- formatted_messages.append(cast(ChatCompletionUiformMessage, {"role": role, "content": content_blocks[0].get("text", "")}))
+ formatted_messages.append(cast(ChatCompletionRetabMessage, {"role": role, "content": content_blocks[0].get("text", "")}))
  elif isinstance(content_blocks, list):
  # Message with multiple content parts or non-text content
  formatted_content: list[ChatCompletionContentPartParam] = []
@@ -248,22 +248,22 @@ def convert_from_anthropic_format(messages: list[MessageParam], system_prompt: s

  formatted_content.append(cast(ChatCompletionContentPartParam, {"type": "image_url", "image_url": {"url": image_url}}))

- formatted_messages.append(cast(ChatCompletionUiformMessage, {"role": role, "content": formatted_content}))
+ formatted_messages.append(cast(ChatCompletionRetabMessage, {"role": role, "content": formatted_content}))

  return formatted_messages


- def convert_to_openai_format(messages: List[ChatCompletionUiformMessage]) -> List[ChatCompletionMessageParam]:
+ def convert_to_openai_format(messages: List[ChatCompletionRetabMessage]) -> List[ChatCompletionMessageParam]:
  return cast(list[ChatCompletionMessageParam], messages)


- def convert_from_openai_format(messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionUiformMessage]:
- return cast(list[ChatCompletionUiformMessage], messages)
+ def convert_from_openai_format(messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionRetabMessage]:
+ return cast(list[ChatCompletionRetabMessage], messages)


  def separate_messages(
- messages: list[ChatCompletionUiformMessage],
- ) -> tuple[Optional[ChatCompletionUiformMessage], list[ChatCompletionUiformMessage], list[ChatCompletionUiformMessage]]:
+ messages: list[ChatCompletionRetabMessage],
+ ) -> tuple[Optional[ChatCompletionRetabMessage], list[ChatCompletionRetabMessage], list[ChatCompletionRetabMessage]]:
  """
  Separates messages into system, user and assistant messages.

@@ -291,12 +291,12 @@ def separate_messages(
  return system_message, user_messages, assistant_messages


- def str_messages(messages: list[ChatCompletionUiformMessage], max_length: int = 100) -> str:
+ def str_messages(messages: list[ChatCompletionRetabMessage], max_length: int = 100) -> str:
  """
  Converts a list of chat messages into a string representation with faithfully serialized structure.

  Args:
- messages (list[ChatCompletionUiformMessage]): The list of chat messages.
+ messages (list[ChatCompletionRetabMessage]): The list of chat messages.
  max_length (int): Maximum length for content before truncation.

  Returns:
@@ -307,7 +307,7 @@ def str_messages(messages: list[ChatCompletionUiformMessage], max_length: int =
  """Truncate text to max_len with ellipsis."""
  return text if len(text) <= max_len else f"{text[:max_len]}..."

- serialized: list[ChatCompletionUiformMessage] = []
+ serialized: list[ChatCompletionRetabMessage] = []
  for message in messages:
  role = message["role"]
  content = message["content"]
retab/_utils/responses.py CHANGED
@@ -16,13 +16,13 @@ from openai.types.responses.response_input_message_content_list_param import Res
  from openai.types.responses.response_input_param import ResponseInputItemParam
  from openai.types.responses.response_input_text_param import ResponseInputTextParam

- from ..types.chat import ChatCompletionUiformMessage
+ from ..types.chat import ChatCompletionRetabMessage
  from ..types.documents.extractions import UiParsedChatCompletion, UiParsedChoice


- def convert_to_openai_format(messages: list[ChatCompletionUiformMessage]) -> list[ResponseInputItemParam]:
+ def convert_to_openai_format(messages: list[ChatCompletionRetabMessage]) -> list[ResponseInputItemParam]:
  """
- Converts a list of ChatCompletionUiformMessage to the OpenAI ResponseInputParam format.
+ Converts a list of ChatCompletionRetabMessage to the OpenAI ResponseInputParam format.

  Args:
  messages: List of chat messages in UIForm format
@@ -64,9 +64,9 @@ def convert_to_openai_format(messages: list[ChatCompletionUiformMessage]) -> lis
  return formatted_messages


- def convert_from_openai_format(messages: list[ResponseInputItemParam]) -> list[ChatCompletionUiformMessage]:
+ def convert_from_openai_format(messages: list[ResponseInputItemParam]) -> list[ChatCompletionRetabMessage]:
  """
- Converts messages from OpenAI ResponseInputParam format to ChatCompletionUiformMessage format.
+ Converts messages from OpenAI ResponseInputParam format to ChatCompletionRetabMessage format.

  Args:
  messages: Messages in OpenAI ResponseInputParam format
@@ -74,7 +74,7 @@ def convert_from_openai_format(messages: list[ResponseInputItemParam]) -> list[C
  Returns:
  List of chat messages in UIForm format
  """
- formatted_messages: list[ChatCompletionUiformMessage] = []
+ formatted_messages: list[ChatCompletionRetabMessage] = []

  for message in messages:
  if "role" not in message or "content" not in message:
@@ -110,7 +110,7 @@ def convert_from_openai_format(messages: list[ResponseInputItemParam]) -> list[C
  print(f"Not supported content type: {part['type']}... Skipping...")

  # Create message in UIForm format
- formatted_message = ChatCompletionUiformMessage(role=role, content=formatted_content)
+ formatted_message = ChatCompletionRetabMessage(role=role, content=formatted_content)
  formatted_messages.append(formatted_message)

  return formatted_messages
retab/_utils/usage/usage.py CHANGED
@@ -78,7 +78,7 @@ def compute_api_call_cost(pricing: Pricing, usage: CompletionUsage, is_ft: bool


  def compute_cost_from_model(model: str, usage: CompletionUsage) -> Amount:
- # Extract base model name for fine-tuned models like "ft:gpt-4o:uiform:4389573"
+ # Extract base model name for fine-tuned models like "ft:gpt-4o:retab:4389573"
  is_ft = False
  if model.startswith("ft:"):
  # Split by colon and take the second part (index 1) which contains the base model
@@ -270,7 +270,7 @@ def compute_cost_from_model_with_breakdown(model: str, usage: CompletionUsage) -
  Computes a detailed cost breakdown for an API call using the specified model and usage.

  Args:
- model: The model name (can be a fine-tuned model like "ft:gpt-4o:uiform:4389573")
+ model: The model name (can be a fine-tuned model like "ft:gpt-4o:retab:4389573")
  usage: Token usage statistics for the API call

  Returns:
@@ -279,7 +279,7 @@ def compute_cost_from_model_with_breakdown(model: str, usage: CompletionUsage) -
  Raises:
  ValueError: If no pricing information is found for the model
  """
- # Extract base model name for fine-tuned models like "ft:gpt-4o:uiform:4389573"
+ # Extract base model name for fine-tuned models like "ft:gpt-4o:retab:4389573"
  original_model = model
  is_ft = False
retab/client.py CHANGED
@@ -26,15 +26,15 @@ def raise_max_tries_exceeded(details: backoff.types.Details) -> None:
  raise Exception(f"Max tries exceeded after {tries} tries.")


- class BaseUiForm:
- """Base class for UiForm clients that handles authentication and configuration.
+ class BaseRetab:
+ """Base class for Retab clients that handles authentication and configuration.

  This class provides core functionality for API authentication, configuration, and common HTTP operations
  used by both synchronous and asynchronous clients.

  Args:
- api_key (str, optional): UiForm API key. If not provided, will look for UIFORM_API_KEY env variable.
- base_url (str, optional): Base URL for API requests. Defaults to https://api.uiform.com
+ api_key (str, optional): Retab API key. If not provided, will look for RETAB_API_KEY env variable.
+ base_url (str, optional): Base URL for API requests. Defaults to https://api.retab.dev
  timeout (float): Request timeout in seconds. Defaults to 240.0
  max_retries (int): Maximum number of retries for failed requests. Defaults to 3
  openai_api_key (str, optional): OpenAI API key. Will look for OPENAI_API_KEY env variable if not provided
@@ -59,16 +59,16 @@ class BaseUiForm:
  xai_api_key: Optional[str] = PydanticUndefined, # type: ignore[assignment]
  ) -> None:
  if api_key is None:
- api_key = os.environ.get("UIFORM_API_KEY")
+ api_key = os.environ.get("RETAB_API_KEY")

  if api_key is None:
  raise ValueError(
- "No API key provided. You can create an API key at https://uiform.com\n"
- "Then either pass it to the client (api_key='your-key') or set the UIFORM_API_KEY environment variable"
+ "No API key provided. You can create an API key at https://retab.dev\n"
+ "Then either pass it to the client (api_key='your-key') or set the RETAB_API_KEY environment variable"
  )

  if base_url is None:
- base_url = os.environ.get("UIFORM_API_BASE_URL", "https://api.uiform.com")
+ base_url = os.environ.get("RETAB_API_BASE_URL", "https://api.retab.dev")

  truststore.inject_into_ssl()
  self.api_key = api_key
@@ -146,15 +146,15 @@ class BaseUiForm:
  return response.text


- class UiForm(BaseUiForm):
- """Synchronous client for interacting with the UiForm API.
+ class Retab(BaseRetab):
+ """Synchronous client for interacting with the Retab API.

- This client provides synchronous access to all UiForm API resources including files, fine-tuning,
+ This client provides synchronous access to all Retab API resources including files, fine-tuning,
  prompt optimization, documents, models, datasets, and schemas.

  Args:
- api_key (str, optional): UiForm API key. If not provided, will look for UIFORM_API_KEY env variable.
- base_url (str, optional): Base URL for API requests. Defaults to https://api.uiform.com
+ api_key (str, optional): Retab API key. If not provided, will look for RETAB_API_KEY env variable.
+ base_url (str, optional): Base URL for API requests. Defaults to https://api.retab.dev
  timeout (float): Request timeout in seconds. Defaults to 240.0
  max_retries (int): Maximum number of retries for failed requests. Defaults to 3
  openai_api_key (str, optional): OpenAI API key. Will look for OPENAI_API_KEY env variable if not provided
@@ -395,11 +395,11 @@ class UiForm(BaseUiForm):
  """Closes the HTTP client session."""
  self.client.close()

- def __enter__(self) -> "UiForm":
+ def __enter__(self) -> "Retab":
  """Context manager entry point.

  Returns:
- UiForm: The client instance
+ Retab: The client instance
  """
  return self

@@ -414,15 +414,15 @@ class UiForm(BaseUiForm):
  self.close()


- class AsyncUiForm(BaseUiForm):
- """Asynchronous client for interacting with the UiForm API.
+ class AsyncRetab(BaseRetab):
+ """Asynchronous client for interacting with the Retab API.

- This client provides asynchronous access to all UiForm API resources including files, fine-tuning,
+ This client provides asynchronous access to all Retab API resources including files, fine-tuning,
  prompt optimization, documents, models, datasets, and schemas.

  Args:
- api_key (str, optional): UiForm API key. If not provided, will look for UIFORM_API_KEY env variable.
- base_url (str, optional): Base URL for API requests. Defaults to https://api.uiform.com
+ api_key (str, optional): Retab API key. If not provided, will look for RETAB_API_KEY env variable.
+ base_url (str, optional): Base URL for API requests. Defaults to https://api.retab.dev
  timeout (float): Request timeout in seconds. Defaults to 240.0
  max_retries (int): Maximum number of retries for failed requests. Defaults to 3
  openai_api_key (str, optional): OpenAI API key. Will look for OPENAI_API_KEY env variable if not provided
@@ -683,11 +683,11 @@ class AsyncUiForm(BaseUiForm):
  """Closes the async HTTP client session."""
  await self.client.aclose()

- async def __aenter__(self) -> "AsyncUiForm":
+ async def __aenter__(self) -> "AsyncRetab":
  """Async context manager entry point.

  Returns:
- AsyncUiForm: The async client instance
+ AsyncRetab: The async client instance
  """
  return self
 
retab/resources/consensus/client.py CHANGED
@@ -52,7 +52,7 @@ class Consensus(SyncAPIResource, BaseConsensusMixin):
  Dict containing the consensus dictionary and consensus likelihoods

  Raises:
- UiformAPIError: If the API request fails
+ RetabAPIError: If the API request fails
  """
  request = self._prepare_reconcile(list_dicts, reference_schema, mode, idempotency_key)
  response = self._client._prepared_request(request)
@@ -87,7 +87,7 @@ class AsyncConsensus(AsyncAPIResource, BaseConsensusMixin):
  Dict containing the consensus dictionary and consensus likelihoods

  Raises:
- UiformAPIError: If the API request fails
+ RetabAPIError: If the API request fails
  """
  request = self._prepare_reconcile(list_dicts, reference_schema, mode, idempotency_key)
  response = await self._client._prepared_request(request)
retab/resources/consensus/completions.py CHANGED
@@ -6,7 +6,7 @@ from pydantic import BaseModel as ResponseFormatT

  from ..._resource import AsyncAPIResource, SyncAPIResource
  from ..._utils.ai_models import assert_valid_model_extraction
- from ...types.chat import ChatCompletionUiformMessage
+ from ...types.chat import ChatCompletionRetabMessage
  from ...types.completions import UiChatCompletionsRequest
  from ...types.documents.extractions import UiParsedChatCompletion
  from ...types.schemas.object import Schema
@@ -17,7 +17,7 @@ class BaseCompletionsMixin:
  def prepare_parse(
  self,
  response_format: type[ResponseFormatT],
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str,
  temperature: float,
  reasoning_effort: ChatCompletionReasoningEffort,
@@ -52,7 +52,7 @@ class BaseCompletionsMixin:
  def prepare_create(
  self,
  response_format: ResponseFormatJSONSchema,
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str,
  temperature: float,
  reasoning_effort: ChatCompletionReasoningEffort,
@@ -91,7 +91,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  def create(
  self,
  response_format: ResponseFormatJSONSchema,
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str = "gpt-4o-2024-08-06",
  temperature: float = 0,
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
@@ -100,7 +100,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  stream: bool = False,
  ) -> UiParsedChatCompletion:
  """
- Create a completion using the UiForm API.
+ Create a completion using the Retab API.
  """

  request = self.prepare_create(
@@ -121,7 +121,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  def parse(
  self,
  response_format: type[ResponseFormatT],
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str = "gpt-4o-2024-08-06",
  temperature: float = 0,
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
@@ -129,7 +129,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  idempotency_key: str | None = None,
  ) -> UiParsedChatCompletion:
  """
- Parse messages using the UiForm API to extract structured data according to the provided JSON schema.
+ Parse messages using the Retab API to extract structured data according to the provided JSON schema.

  Args:
  response_format: JSON schema defining the expected data structure
@@ -138,7 +138,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  temperature: Model temperature setting (0-1)
  reasoning_effort: The effort level for the model to reason about the input data
  idempotency_key: Idempotency key for request
- store: Whether to store the data in the UiForm database
+ store: Whether to store the data in the Retab database

  Returns:
  UiParsedChatCompletion: Parsed response from the API
@@ -164,7 +164,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
  async def create(
  self,
  response_format: ResponseFormatJSONSchema,
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str = "gpt-4o-2024-08-06",
  temperature: float = 0,
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
@@ -173,7 +173,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
  stream: bool = False,
  ) -> UiParsedChatCompletion:
  """
- Create a completion using the UiForm API.
+ Create a completion using the Retab API.
  """

  request = self.prepare_create(
@@ -193,7 +193,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
  async def parse(
  self,
  response_format: type[ResponseFormatT],
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str = "gpt-4o-2024-08-06",
  temperature: float = 0,
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
@@ -201,7 +201,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
  idempotency_key: str | None = None,
  ) -> UiParsedChatCompletion:
  """
- Parse messages using the UiForm API asynchronously.
+ Parse messages using the Retab API asynchronously.

  Args:
  json_schema: JSON schema defining the expected data structure
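For orientation, the parse() signature above pairs a Pydantic response_format with a list of ChatCompletionRetabMessage entries. A hedged sketch of a call follows; the `consensus.completions` attribute path and the `Invoice` model are illustrative assumptions inferred from the module layout, not taken from this diff:

```python
from pydantic import BaseModel
from retab import Retab


class Invoice(BaseModel):  # hypothetical response_format for illustration
    total: float
    currency: str


client = Retab()  # picks up RETAB_API_KEY from the environment

# Attribute path `consensus.completions` is assumed from
# retab/resources/consensus/completions.py; defaults mirror the signature above.
completion = client.consensus.completions.parse(
    response_format=Invoice,
    messages=[{"role": "user", "content": "Total due: 42.50 EUR"}],
    model="gpt-4o-2024-08-06",
    temperature=0,
    reasoning_effort="medium",
)
```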
retab/resources/consensus/completions_stream.py CHANGED
@@ -12,7 +12,7 @@ from ..._resource import AsyncAPIResource, SyncAPIResource
  from ..._utils.ai_models import assert_valid_model_extraction
  from ..._utils.json_schema import unflatten_dict
  from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
- from ...types.chat import ChatCompletionUiformMessage
+ from ...types.chat import ChatCompletionRetabMessage
  from ...types.completions import UiChatCompletionsRequest
  from ...types.documents.extractions import UiParsedChatCompletion, UiParsedChatCompletionChunk, UiParsedChoice
  from ...types.schemas.object import Schema
@@ -23,7 +23,7 @@ class BaseCompletionsMixin:
  def prepare_parse(
  self,
  response_format: type[ResponseFormatT],
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str,
  temperature: float,
  reasoning_effort: ChatCompletionReasoningEffort,
@@ -58,7 +58,7 @@ class BaseCompletionsMixin:
  def prepare_create(
  self,
  response_format: ResponseFormatJSONSchema,
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str,
  temperature: float,
  reasoning_effort: ChatCompletionReasoningEffort,
@@ -100,7 +100,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  def stream(
  self,
  response_format: type[ResponseFormatT],
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str = "gpt-4o-2024-08-06",
  temperature: float = 0,
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
@@ -108,7 +108,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
  idempotency_key: str | None = None,
  ) -> Generator[UiParsedChatCompletion, None, None]:
  """
- Process messages using the UiForm API with streaming enabled.
+ Process messages using the Retab API with streaming enabled.

  Args:
  response_format: JSON schema defining the expected data structure
@@ -123,7 +123,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):

  Usage:
  ```python
- with uiform.completions.stream(json_schema, messages, model, temperature, reasoning_effort) as stream:
+ with retab.devpletions.stream(json_schema, messages, model, temperature, reasoning_effort) as stream:
  for response in stream:
  print(response)
  ```
@@ -186,7 +186,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
  async def stream(
  self,
  response_format: type[ResponseFormatT],
- messages: list[ChatCompletionUiformMessage],
+ messages: list[ChatCompletionRetabMessage],
  model: str = "gpt-4o-2024-08-06",
  temperature: float = 0,
  reasoning_effort: ChatCompletionReasoningEffort = "medium",
@@ -194,7 +194,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
  idempotency_key: str | None = None,
  ) -> AsyncGenerator[UiParsedChatCompletion, None]:
  """
- Parse messages using the UiForm API asynchronously with streaming.
+ Parse messages using the Retab API asynchronously with streaming.

  Args:
  json_schema: JSON schema defining the expected data structure
@@ -210,7 +210,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):

  Usage:
  ```python
- async with uiform.completions.stream(json_schema, messages, model, temperature, reasoning_effort, n_consensus) as stream:
+ async with retab.devpletions.stream(json_schema, messages, model, temperature, reasoning_effort, n_consensus) as stream:
  async for response in stream:
  print(response)
  ```
retab/resources/consensus/responses.py CHANGED
@@ -100,7 +100,7 @@ class BaseResponsesMixin:


  class Responses(SyncAPIResource, BaseResponsesMixin):
- """UiForm Responses API compatible with OpenAI Responses API"""
+ """Retab Responses API compatible with OpenAI Responses API"""

  def create(
  self,
@@ -114,7 +114,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
  idempotency_key: Optional[str] = None,
  ) -> Response:
  """
- Create a completion using the UiForm API with OpenAI Responses API compatible interface.
+ Create a completion using the Retab API with OpenAI Responses API compatible interface.

  Args:
  model: The model to use
@@ -156,7 +156,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
  idempotency_key: Optional[str] = None,
  ) -> Response:
  """
- Parse content using the UiForm API with OpenAI Responses API compatible interface.
+ Parse content using the Retab API with OpenAI Responses API compatible interface.

  Args:
  model: The model to use
@@ -189,7 +189,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):


  class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
- """UiForm Responses API compatible with OpenAI Responses API for async usage"""
+ """Retab Responses API compatible with OpenAI Responses API for async usage"""

  async def create(
  self,
@@ -203,7 +203,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
  idempotency_key: Optional[str] = None,
  ) -> UiResponse:
  """
- Create a completion using the UiForm API asynchronously with OpenAI Responses API compatible interface.
+ Create a completion using the Retab API asynchronously with OpenAI Responses API compatible interface.

  Args:
  model: The model to use
@@ -245,7 +245,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
  idempotency_key: Optional[str] = None,
  ) -> UiResponse:
  """
- Parse content using the UiForm API asynchronously with OpenAI Responses API compatible interface.
+ Parse content using the Retab API asynchronously with OpenAI Responses API compatible interface.

  Args:
  model: The model to use