retab 0.0.37__py3-none-any.whl → 0.0.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- retab/__init__.py +2 -2
- retab/_resource.py +5 -5
- retab/_utils/_model_cards/anthropic.yaml +59 -0
- retab/_utils/_model_cards/auto.yaml +43 -0
- retab/_utils/_model_cards/gemini.yaml +117 -0
- retab/_utils/_model_cards/openai.yaml +301 -0
- retab/_utils/_model_cards/xai.yaml +28 -0
- retab/_utils/ai_models.py +109 -71
- retab/_utils/chat.py +20 -20
- retab/_utils/responses.py +14 -14
- retab/_utils/usage/usage.py +5 -4
- retab/client.py +22 -22
- retab/resources/consensus/client.py +2 -2
- retab/resources/consensus/completions.py +26 -26
- retab/resources/consensus/completions_stream.py +27 -27
- retab/resources/consensus/responses.py +11 -11
- retab/resources/consensus/responses_stream.py +15 -15
- retab/resources/documents/client.py +297 -16
- retab/resources/documents/extractions.py +39 -39
- retab/resources/evaluations/documents.py +5 -5
- retab/resources/evaluations/iterations.py +7 -7
- retab/resources/jsonlUtils.py +7 -7
- retab/resources/processors/automations/endpoints.py +2 -2
- retab/resources/processors/automations/links.py +2 -2
- retab/resources/processors/automations/logs.py +2 -2
- retab/resources/processors/automations/mailboxes.py +2 -2
- retab/resources/processors/automations/outlook.py +2 -2
- retab/resources/processors/client.py +9 -9
- retab/resources/usage.py +4 -4
- retab/types/ai_models.py +41 -513
- retab/types/automations/mailboxes.py +1 -1
- retab/types/automations/webhooks.py +3 -3
- retab/types/chat.py +1 -1
- retab/types/completions.py +10 -10
- retab/types/documents/__init__.py +3 -0
- retab/types/documents/create_messages.py +2 -2
- retab/types/documents/extractions.py +19 -19
- retab/types/documents/parse.py +32 -0
- retab/types/extractions.py +4 -4
- retab/types/logs.py +2 -2
- retab/types/schemas/object.py +3 -3
- {retab-0.0.37.dist-info → retab-0.0.39.dist-info}/METADATA +72 -72
- {retab-0.0.37.dist-info → retab-0.0.39.dist-info}/RECORD +45 -39
- {retab-0.0.37.dist-info → retab-0.0.39.dist-info}/WHEEL +0 -0
- {retab-0.0.37.dist-info → retab-0.0.39.dist-info}/top_level.txt +0 -0
@@ -6,9 +6,9 @@ from pydantic import BaseModel as ResponseFormatT
|
|
6
6
|
|
7
7
|
from ..._resource import AsyncAPIResource, SyncAPIResource
|
8
8
|
from ..._utils.ai_models import assert_valid_model_extraction
|
9
|
-
from ...types.chat import
|
10
|
-
from ...types.completions import
|
11
|
-
from ...types.documents.extractions import
|
9
|
+
from ...types.chat import ChatCompletionRetabMessage
|
10
|
+
from ...types.completions import RetabChatCompletionsRequest
|
11
|
+
from ...types.documents.extractions import RetabParsedChatCompletion
|
12
12
|
from ...types.schemas.object import Schema
|
13
13
|
from ...types.standards import PreparedRequest
|
14
14
|
|
@@ -17,7 +17,7 @@ class BaseCompletionsMixin:
|
|
17
17
|
def prepare_parse(
|
18
18
|
self,
|
19
19
|
response_format: type[ResponseFormatT],
|
20
|
-
messages: list[
|
20
|
+
messages: list[ChatCompletionRetabMessage],
|
21
21
|
model: str,
|
22
22
|
temperature: float,
|
23
23
|
reasoning_effort: ChatCompletionReasoningEffort,
|
@@ -31,7 +31,7 @@ class BaseCompletionsMixin:
|
|
31
31
|
|
32
32
|
schema_obj = Schema(json_schema=json_schema)
|
33
33
|
|
34
|
-
request =
|
34
|
+
request = RetabChatCompletionsRequest(
|
35
35
|
model=model,
|
36
36
|
messages=messages,
|
37
37
|
response_format={
|
@@ -52,7 +52,7 @@ class BaseCompletionsMixin:
|
|
52
52
|
def prepare_create(
|
53
53
|
self,
|
54
54
|
response_format: ResponseFormatJSONSchema,
|
55
|
-
messages: list[
|
55
|
+
messages: list[ChatCompletionRetabMessage],
|
56
56
|
model: str,
|
57
57
|
temperature: float,
|
58
58
|
reasoning_effort: ChatCompletionReasoningEffort,
|
@@ -66,7 +66,7 @@ class BaseCompletionsMixin:
|
|
66
66
|
|
67
67
|
schema_obj = Schema(json_schema=json_schema)
|
68
68
|
|
69
|
-
request =
|
69
|
+
request = RetabChatCompletionsRequest(
|
70
70
|
model=model,
|
71
71
|
messages=messages,
|
72
72
|
response_format={
|
@@ -91,16 +91,16 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
91
91
|
def create(
|
92
92
|
self,
|
93
93
|
response_format: ResponseFormatJSONSchema,
|
94
|
-
messages: list[
|
94
|
+
messages: list[ChatCompletionRetabMessage],
|
95
95
|
model: str = "gpt-4o-2024-08-06",
|
96
96
|
temperature: float = 0,
|
97
97
|
reasoning_effort: ChatCompletionReasoningEffort = "medium",
|
98
98
|
n_consensus: int = 1,
|
99
99
|
idempotency_key: str | None = None,
|
100
100
|
stream: bool = False,
|
101
|
-
) ->
|
101
|
+
) -> RetabParsedChatCompletion:
|
102
102
|
"""
|
103
|
-
Create a completion using the
|
103
|
+
Create a completion using the Retab API.
|
104
104
|
"""
|
105
105
|
|
106
106
|
request = self.prepare_create(
|
@@ -116,20 +116,20 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
116
116
|
|
117
117
|
response = self._client._prepared_request(request)
|
118
118
|
|
119
|
-
return
|
119
|
+
return RetabParsedChatCompletion.model_validate(response)
|
120
120
|
|
121
121
|
def parse(
|
122
122
|
self,
|
123
123
|
response_format: type[ResponseFormatT],
|
124
|
-
messages: list[
|
124
|
+
messages: list[ChatCompletionRetabMessage],
|
125
125
|
model: str = "gpt-4o-2024-08-06",
|
126
126
|
temperature: float = 0,
|
127
127
|
reasoning_effort: ChatCompletionReasoningEffort = "medium",
|
128
128
|
n_consensus: int = 1,
|
129
129
|
idempotency_key: str | None = None,
|
130
|
-
) ->
|
130
|
+
) -> RetabParsedChatCompletion:
|
131
131
|
"""
|
132
|
-
Parse messages using the
|
132
|
+
Parse messages using the Retab API to extract structured data according to the provided JSON schema.
|
133
133
|
|
134
134
|
Args:
|
135
135
|
response_format: JSON schema defining the expected data structure
|
@@ -138,10 +138,10 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
138
138
|
temperature: Model temperature setting (0-1)
|
139
139
|
reasoning_effort: The effort level for the model to reason about the input data
|
140
140
|
idempotency_key: Idempotency key for request
|
141
|
-
store: Whether to store the data in the
|
141
|
+
store: Whether to store the data in the Retab database
|
142
142
|
|
143
143
|
Returns:
|
144
|
-
|
144
|
+
RetabParsedChatCompletion: Parsed response from the API
|
145
145
|
"""
|
146
146
|
request = self.prepare_parse(
|
147
147
|
response_format=response_format,
|
@@ -155,7 +155,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
155
155
|
)
|
156
156
|
response = self._client._prepared_request(request)
|
157
157
|
|
158
|
-
return
|
158
|
+
return RetabParsedChatCompletion.model_validate(response)
|
159
159
|
|
160
160
|
|
161
161
|
class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
@@ -164,16 +164,16 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
164
164
|
async def create(
|
165
165
|
self,
|
166
166
|
response_format: ResponseFormatJSONSchema,
|
167
|
-
messages: list[
|
167
|
+
messages: list[ChatCompletionRetabMessage],
|
168
168
|
model: str = "gpt-4o-2024-08-06",
|
169
169
|
temperature: float = 0,
|
170
170
|
reasoning_effort: ChatCompletionReasoningEffort = "medium",
|
171
171
|
n_consensus: int = 1,
|
172
172
|
idempotency_key: str | None = None,
|
173
173
|
stream: bool = False,
|
174
|
-
) ->
|
174
|
+
) -> RetabParsedChatCompletion:
|
175
175
|
"""
|
176
|
-
Create a completion using the
|
176
|
+
Create a completion using the Retab API.
|
177
177
|
"""
|
178
178
|
|
179
179
|
request = self.prepare_create(
|
@@ -188,20 +188,20 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
188
188
|
)
|
189
189
|
|
190
190
|
response = await self._client._prepared_request(request)
|
191
|
-
return
|
191
|
+
return RetabParsedChatCompletion.model_validate(response)
|
192
192
|
|
193
193
|
async def parse(
|
194
194
|
self,
|
195
195
|
response_format: type[ResponseFormatT],
|
196
|
-
messages: list[
|
196
|
+
messages: list[ChatCompletionRetabMessage],
|
197
197
|
model: str = "gpt-4o-2024-08-06",
|
198
198
|
temperature: float = 0,
|
199
199
|
reasoning_effort: ChatCompletionReasoningEffort = "medium",
|
200
200
|
n_consensus: int = 1,
|
201
201
|
idempotency_key: str | None = None,
|
202
|
-
) ->
|
202
|
+
) -> RetabParsedChatCompletion:
|
203
203
|
"""
|
204
|
-
Parse messages using the
|
204
|
+
Parse messages using the Retab API asynchronously.
|
205
205
|
|
206
206
|
Args:
|
207
207
|
json_schema: JSON schema defining the expected data structure
|
@@ -213,7 +213,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
213
213
|
idempotency_key: Idempotency key for request
|
214
214
|
|
215
215
|
Returns:
|
216
|
-
|
216
|
+
RetabParsedChatCompletion: Parsed response from the API
|
217
217
|
"""
|
218
218
|
request = self.prepare_parse(
|
219
219
|
response_format=response_format,
|
@@ -226,4 +226,4 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
226
226
|
idempotency_key=idempotency_key,
|
227
227
|
)
|
228
228
|
response = await self._client._prepared_request(request)
|
229
|
-
return
|
229
|
+
return RetabParsedChatCompletion.model_validate(response)
|
@@ -12,9 +12,9 @@ from ..._resource import AsyncAPIResource, SyncAPIResource
|
|
12
12
|
from ..._utils.ai_models import assert_valid_model_extraction
|
13
13
|
from ..._utils.json_schema import unflatten_dict
|
14
14
|
from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
|
15
|
-
from ...types.chat import
|
16
|
-
from ...types.completions import
|
17
|
-
from ...types.documents.extractions import
|
15
|
+
from ...types.chat import ChatCompletionRetabMessage
|
16
|
+
from ...types.completions import RetabChatCompletionsRequest
|
17
|
+
from ...types.documents.extractions import RetabParsedChatCompletion, RetabParsedChatCompletionChunk, RetabParsedChoice
|
18
18
|
from ...types.schemas.object import Schema
|
19
19
|
from ...types.standards import PreparedRequest
|
20
20
|
|
@@ -23,7 +23,7 @@ class BaseCompletionsMixin:
|
|
23
23
|
def prepare_parse(
|
24
24
|
self,
|
25
25
|
response_format: type[ResponseFormatT],
|
26
|
-
messages: list[
|
26
|
+
messages: list[ChatCompletionRetabMessage],
|
27
27
|
model: str,
|
28
28
|
temperature: float,
|
29
29
|
reasoning_effort: ChatCompletionReasoningEffort,
|
@@ -36,7 +36,7 @@ class BaseCompletionsMixin:
|
|
36
36
|
json_schema = response_format.model_json_schema()
|
37
37
|
schema_obj = Schema(json_schema=json_schema)
|
38
38
|
|
39
|
-
request =
|
39
|
+
request = RetabChatCompletionsRequest(
|
40
40
|
messages=messages,
|
41
41
|
response_format={
|
42
42
|
"type": "json_schema",
|
@@ -58,7 +58,7 @@ class BaseCompletionsMixin:
|
|
58
58
|
def prepare_create(
|
59
59
|
self,
|
60
60
|
response_format: ResponseFormatJSONSchema,
|
61
|
-
messages: list[
|
61
|
+
messages: list[ChatCompletionRetabMessage],
|
62
62
|
model: str,
|
63
63
|
temperature: float,
|
64
64
|
reasoning_effort: ChatCompletionReasoningEffort,
|
@@ -73,7 +73,7 @@ class BaseCompletionsMixin:
|
|
73
73
|
schema_obj = Schema(json_schema=json_schema)
|
74
74
|
|
75
75
|
# Validate DocumentAPIRequest data (raises exception if invalid)
|
76
|
-
request =
|
76
|
+
request = RetabChatCompletionsRequest(
|
77
77
|
model=model,
|
78
78
|
messages=messages,
|
79
79
|
response_format={
|
@@ -100,15 +100,15 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
100
100
|
def stream(
|
101
101
|
self,
|
102
102
|
response_format: type[ResponseFormatT],
|
103
|
-
messages: list[
|
103
|
+
messages: list[ChatCompletionRetabMessage],
|
104
104
|
model: str = "gpt-4o-2024-08-06",
|
105
105
|
temperature: float = 0,
|
106
106
|
reasoning_effort: ChatCompletionReasoningEffort = "medium",
|
107
107
|
n_consensus: int = 1,
|
108
108
|
idempotency_key: str | None = None,
|
109
|
-
) -> Generator[
|
109
|
+
) -> Generator[RetabParsedChatCompletion, None, None]:
|
110
110
|
"""
|
111
|
-
Process messages using the
|
111
|
+
Process messages using the Retab API with streaming enabled.
|
112
112
|
|
113
113
|
Args:
|
114
114
|
response_format: JSON schema defining the expected data structure
|
@@ -119,11 +119,11 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
119
119
|
idempotency_key: Idempotency key for request
|
120
120
|
|
121
121
|
Returns:
|
122
|
-
Generator[
|
122
|
+
Generator[RetabParsedChatCompletion]: Stream of parsed responses
|
123
123
|
|
124
124
|
Usage:
|
125
125
|
```python
|
126
|
-
with
|
126
|
+
with retab.completions.stream(json_schema, messages, model, temperature, reasoning_effort) as stream:
|
127
127
|
for response in stream:
|
128
128
|
print(response)
|
129
129
|
```
|
@@ -140,16 +140,16 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
140
140
|
)
|
141
141
|
|
142
142
|
# Request the stream and return a context manager
|
143
|
-
ui_parsed_chat_completion_cum_chunk:
|
144
|
-
# Initialize the
|
145
|
-
ui_parsed_completion:
|
143
|
+
ui_parsed_chat_completion_cum_chunk: RetabParsedChatCompletionChunk | None = None
|
144
|
+
# Initialize the RetabParsedChatCompletion object
|
145
|
+
ui_parsed_completion: RetabParsedChatCompletion = RetabParsedChatCompletion(
|
146
146
|
id="",
|
147
147
|
created=0,
|
148
148
|
model="",
|
149
149
|
object="chat.completion",
|
150
150
|
likelihoods={},
|
151
151
|
choices=[
|
152
|
-
|
152
|
+
RetabParsedChoice(
|
153
153
|
index=0,
|
154
154
|
message=ParsedChatCompletionMessage(content="", role="assistant"),
|
155
155
|
finish_reason=None,
|
@@ -160,7 +160,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
|
|
160
160
|
for chunk_json in self._client._prepared_request_stream(request):
|
161
161
|
if not chunk_json:
|
162
162
|
continue
|
163
|
-
ui_parsed_chat_completion_cum_chunk =
|
163
|
+
ui_parsed_chat_completion_cum_chunk = RetabParsedChatCompletionChunk.model_validate(chunk_json).chunk_accumulator(ui_parsed_chat_completion_cum_chunk)
|
164
164
|
# Basic stuff
|
165
165
|
ui_parsed_completion.id = ui_parsed_chat_completion_cum_chunk.id
|
166
166
|
ui_parsed_completion.created = ui_parsed_chat_completion_cum_chunk.created
|
@@ -186,15 +186,15 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
186
186
|
async def stream(
|
187
187
|
self,
|
188
188
|
response_format: type[ResponseFormatT],
|
189
|
-
messages: list[
|
189
|
+
messages: list[ChatCompletionRetabMessage],
|
190
190
|
model: str = "gpt-4o-2024-08-06",
|
191
191
|
temperature: float = 0,
|
192
192
|
reasoning_effort: ChatCompletionReasoningEffort = "medium",
|
193
193
|
n_consensus: int = 1,
|
194
194
|
idempotency_key: str | None = None,
|
195
|
-
) -> AsyncGenerator[
|
195
|
+
) -> AsyncGenerator[RetabParsedChatCompletion, None]:
|
196
196
|
"""
|
197
|
-
Parse messages using the
|
197
|
+
Parse messages using the Retab API asynchronously with streaming.
|
198
198
|
|
199
199
|
Args:
|
200
200
|
json_schema: JSON schema defining the expected data structure
|
@@ -206,11 +206,11 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
206
206
|
idempotency_key: Idempotency key for request
|
207
207
|
|
208
208
|
Returns:
|
209
|
-
AsyncGenerator[
|
209
|
+
AsyncGenerator[RetabParsedChatCompletion]: Stream of parsed responses
|
210
210
|
|
211
211
|
Usage:
|
212
212
|
```python
|
213
|
-
async with
|
213
|
+
async with retab.completions.stream(json_schema, messages, model, temperature, reasoning_effort, n_consensus) as stream:
|
214
214
|
async for response in stream:
|
215
215
|
print(response)
|
216
216
|
```
|
@@ -227,16 +227,16 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
227
227
|
)
|
228
228
|
|
229
229
|
# Request the stream and return a context manager
|
230
|
-
ui_parsed_chat_completion_cum_chunk:
|
231
|
-
# Initialize the
|
232
|
-
ui_parsed_completion:
|
230
|
+
ui_parsed_chat_completion_cum_chunk: RetabParsedChatCompletionChunk | None = None
|
231
|
+
# Initialize the RetabParsedChatCompletion object
|
232
|
+
ui_parsed_completion: RetabParsedChatCompletion = RetabParsedChatCompletion(
|
233
233
|
id="",
|
234
234
|
created=0,
|
235
235
|
model="",
|
236
236
|
object="chat.completion",
|
237
237
|
likelihoods={},
|
238
238
|
choices=[
|
239
|
-
|
239
|
+
RetabParsedChoice(
|
240
240
|
index=0,
|
241
241
|
message=ParsedChatCompletionMessage(content="", role="assistant"),
|
242
242
|
finish_reason=None,
|
@@ -247,7 +247,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
|
|
247
247
|
async for chunk_json in self._client._prepared_request_stream(request):
|
248
248
|
if not chunk_json:
|
249
249
|
continue
|
250
|
-
ui_parsed_chat_completion_cum_chunk =
|
250
|
+
ui_parsed_chat_completion_cum_chunk = RetabParsedChatCompletionChunk.model_validate(chunk_json).chunk_accumulator(ui_parsed_chat_completion_cum_chunk)
|
251
251
|
# Basic stuff
|
252
252
|
ui_parsed_completion.id = ui_parsed_chat_completion_cum_chunk.id
|
253
253
|
ui_parsed_completion.created = ui_parsed_chat_completion_cum_chunk.created
|
@@ -8,7 +8,7 @@ from pydantic import BaseModel
|
|
8
8
|
|
9
9
|
from ..._resource import AsyncAPIResource, SyncAPIResource
|
10
10
|
from ..._utils.ai_models import assert_valid_model_extraction
|
11
|
-
from ...types.completions import
|
11
|
+
from ...types.completions import RetabChatResponseCreateRequest
|
12
12
|
from ...types.documents.extractions import UiResponse
|
13
13
|
from ...types.schemas.object import Schema
|
14
14
|
from ...types.standards import PreparedRequest
|
@@ -43,8 +43,8 @@ class BaseResponsesMixin:
|
|
43
43
|
if instructions is None:
|
44
44
|
instructions = schema_obj.developer_system_prompt
|
45
45
|
|
46
|
-
# Create the request object based on the
|
47
|
-
request =
|
46
|
+
# Create the request object based on the RetabChatResponseCreateRequest model
|
47
|
+
request = RetabChatResponseCreateRequest(
|
48
48
|
model=model,
|
49
49
|
input=input,
|
50
50
|
temperature=temperature,
|
@@ -79,8 +79,8 @@ class BaseResponsesMixin:
|
|
79
79
|
if instructions is None:
|
80
80
|
instructions = schema_obj.developer_system_prompt
|
81
81
|
|
82
|
-
# Create the request object based on the
|
83
|
-
request =
|
82
|
+
# Create the request object based on the RetabChatResponseCreateRequest model
|
83
|
+
request = RetabChatResponseCreateRequest(
|
84
84
|
model=model,
|
85
85
|
input=input,
|
86
86
|
temperature=temperature,
|
@@ -100,7 +100,7 @@ class BaseResponsesMixin:
|
|
100
100
|
|
101
101
|
|
102
102
|
class Responses(SyncAPIResource, BaseResponsesMixin):
|
103
|
-
"""
|
103
|
+
"""Retab Responses API compatible with OpenAI Responses API"""
|
104
104
|
|
105
105
|
def create(
|
106
106
|
self,
|
@@ -114,7 +114,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
114
114
|
idempotency_key: Optional[str] = None,
|
115
115
|
) -> Response:
|
116
116
|
"""
|
117
|
-
Create a completion using the
|
117
|
+
Create a completion using the Retab API with OpenAI Responses API compatible interface.
|
118
118
|
|
119
119
|
Args:
|
120
120
|
model: The model to use
|
@@ -156,7 +156,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
156
156
|
idempotency_key: Optional[str] = None,
|
157
157
|
) -> Response:
|
158
158
|
"""
|
159
|
-
Parse content using the
|
159
|
+
Parse content using the Retab API with OpenAI Responses API compatible interface.
|
160
160
|
|
161
161
|
Args:
|
162
162
|
model: The model to use
|
@@ -189,7 +189,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
189
189
|
|
190
190
|
|
191
191
|
class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
192
|
-
"""
|
192
|
+
"""Retab Responses API compatible with OpenAI Responses API for async usage"""
|
193
193
|
|
194
194
|
async def create(
|
195
195
|
self,
|
@@ -203,7 +203,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
|
203
203
|
idempotency_key: Optional[str] = None,
|
204
204
|
) -> UiResponse:
|
205
205
|
"""
|
206
|
-
Create a completion using the
|
206
|
+
Create a completion using the Retab API asynchronously with OpenAI Responses API compatible interface.
|
207
207
|
|
208
208
|
Args:
|
209
209
|
model: The model to use
|
@@ -245,7 +245,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
|
245
245
|
idempotency_key: Optional[str] = None,
|
246
246
|
) -> UiResponse:
|
247
247
|
"""
|
248
|
-
Parse content using the
|
248
|
+
Parse content using the Retab API asynchronously with OpenAI Responses API compatible interface.
|
249
249
|
|
250
250
|
Args:
|
251
251
|
model: The model to use
|
@@ -8,7 +8,7 @@ from pydantic import BaseModel
|
|
8
8
|
from ..._resource import AsyncAPIResource, SyncAPIResource
|
9
9
|
from ..._utils.ai_models import assert_valid_model_extraction
|
10
10
|
from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
|
11
|
-
from ...types.completions import
|
11
|
+
from ...types.completions import RetabChatResponseCreateRequest
|
12
12
|
from ...types.documents.extractions import UiResponse
|
13
13
|
from ...types.schemas.object import Schema
|
14
14
|
from ...types.standards import PreparedRequest
|
@@ -44,8 +44,8 @@ class BaseResponsesMixin:
|
|
44
44
|
if instructions is None:
|
45
45
|
instructions = schema_obj.developer_system_prompt
|
46
46
|
|
47
|
-
# Create the request object based on the
|
48
|
-
request =
|
47
|
+
# Create the request object based on the RetabChatResponseCreateRequest model
|
48
|
+
request = RetabChatResponseCreateRequest(
|
49
49
|
model=model,
|
50
50
|
input=input,
|
51
51
|
temperature=temperature,
|
@@ -81,8 +81,8 @@ class BaseResponsesMixin:
|
|
81
81
|
if instructions is None:
|
82
82
|
instructions = schema_obj.developer_system_prompt
|
83
83
|
|
84
|
-
# Create the request object based on the
|
85
|
-
request =
|
84
|
+
# Create the request object based on the RetabChatResponseCreateRequest model
|
85
|
+
request = RetabChatResponseCreateRequest(
|
86
86
|
model=model,
|
87
87
|
input=input,
|
88
88
|
temperature=temperature,
|
@@ -102,7 +102,7 @@ class BaseResponsesMixin:
|
|
102
102
|
|
103
103
|
|
104
104
|
class Responses(SyncAPIResource, BaseResponsesMixin):
|
105
|
-
"""
|
105
|
+
"""Retab Responses API compatible with OpenAI Responses API"""
|
106
106
|
|
107
107
|
@as_context_manager
|
108
108
|
def stream(
|
@@ -117,7 +117,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
117
117
|
idempotency_key: Optional[str] = None,
|
118
118
|
) -> Generator[UiResponse, None, None]:
|
119
119
|
"""
|
120
|
-
Create a completion using the
|
120
|
+
Create a completion using the Retab API with streaming enabled.
|
121
121
|
|
122
122
|
Args:
|
123
123
|
model: The model to use
|
@@ -134,7 +134,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
134
134
|
|
135
135
|
Usage:
|
136
136
|
```python
|
137
|
-
with
|
137
|
+
with retab.responses.stream(model, input, text, temperature, reasoning) as stream:
|
138
138
|
for response in stream:
|
139
139
|
print(response)
|
140
140
|
```
|
@@ -171,7 +171,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
171
171
|
idempotency_key: Optional[str] = None,
|
172
172
|
) -> Generator[UiResponse, None, None]:
|
173
173
|
"""
|
174
|
-
Parse content using the
|
174
|
+
Parse content using the Retab API with streaming enabled.
|
175
175
|
|
176
176
|
Args:
|
177
177
|
model: The model to use
|
@@ -188,7 +188,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
188
188
|
|
189
189
|
Usage:
|
190
190
|
```python
|
191
|
-
with
|
191
|
+
with retab.responses.stream_parse(model, input, MyModel, temperature, reasoning) as stream:
|
192
192
|
for response in stream:
|
193
193
|
print(response)
|
194
194
|
```
|
@@ -214,7 +214,7 @@ class Responses(SyncAPIResource, BaseResponsesMixin):
|
|
214
214
|
|
215
215
|
|
216
216
|
class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
217
|
-
"""
|
217
|
+
"""Retab Responses API compatible with OpenAI Responses API for async usage"""
|
218
218
|
|
219
219
|
@as_async_context_manager
|
220
220
|
async def stream(
|
@@ -229,7 +229,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
|
229
229
|
idempotency_key: Optional[str] = None,
|
230
230
|
) -> AsyncGenerator[UiResponse, None]:
|
231
231
|
"""
|
232
|
-
Create a completion using the
|
232
|
+
Create a completion using the Retab API asynchronously with streaming enabled.
|
233
233
|
|
234
234
|
Args:
|
235
235
|
model: The model to use
|
@@ -246,7 +246,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
|
246
246
|
|
247
247
|
Usage:
|
248
248
|
```python
|
249
|
-
async with
|
249
|
+
async with retab.responses.async_stream(model, input, text, temperature, reasoning) as stream:
|
250
250
|
async for response in stream:
|
251
251
|
print(response)
|
252
252
|
```
|
@@ -283,7 +283,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
|
283
283
|
idempotency_key: Optional[str] = None,
|
284
284
|
) -> AsyncGenerator[UiResponse, None]:
|
285
285
|
"""
|
286
|
-
Parse content using the
|
286
|
+
Parse content using the Retab API asynchronously with streaming enabled.
|
287
287
|
|
288
288
|
Args:
|
289
289
|
model: The model to use
|
@@ -300,7 +300,7 @@ class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
|
|
300
300
|
|
301
301
|
Usage:
|
302
302
|
```python
|
303
|
-
async with
|
303
|
+
async with retab.responses.async_stream_parse(model, input, MyModel, temperature, reasoning) as stream:
|
304
304
|
async for response in stream:
|
305
305
|
print(response)
|
306
306
|
```
|