retab 0.0.42-py3-none-any.whl → 0.0.43-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- retab/__init__.py +2 -1
- retab/client.py +16 -45
- retab/resources/consensus/client.py +1 -1
- retab/resources/consensus/responses.py +1 -1
- retab/resources/documents/client.py +94 -68
- retab/resources/documents/extractions.py +55 -46
- retab/resources/evaluations/client.py +32 -19
- retab/resources/evaluations/documents.py +12 -11
- retab/resources/evaluations/iterations.py +48 -30
- retab/resources/jsonlUtils.py +3 -4
- retab/resources/processors/automations/endpoints.py +49 -39
- retab/resources/processors/automations/links.py +52 -43
- retab/resources/processors/automations/mailboxes.py +74 -59
- retab/resources/processors/automations/outlook.py +104 -82
- retab/resources/processors/client.py +35 -30
- retab/resources/usage.py +2 -0
- retab/types/ai_models.py +1 -1
- retab/types/deprecated_evals.py +195 -0
- retab/types/evaluations/__init__.py +5 -2
- retab/types/evaluations/iterations.py +9 -43
- retab/types/evaluations/model.py +20 -22
- retab/types/extractions.py +1 -0
- retab/types/logs.py +5 -6
- retab/types/mime.py +1 -10
- retab/types/schemas/enhance.py +22 -5
- retab/types/schemas/evaluate.py +1 -1
- retab/types/schemas/object.py +26 -0
- retab/types/standards.py +2 -2
- retab/utils/__init__.py +3 -0
- retab/utils/ai_models.py +127 -12
- retab/utils/hashing.py +24 -0
- retab/utils/json_schema.py +1 -26
- retab/utils/mime.py +0 -17
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/METADATA +3 -5
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/RECORD +37 -51
- retab/_utils/__init__.py +0 -0
- retab/_utils/_model_cards/anthropic.yaml +0 -59
- retab/_utils/_model_cards/auto.yaml +0 -43
- retab/_utils/_model_cards/gemini.yaml +0 -117
- retab/_utils/_model_cards/openai.yaml +0 -301
- retab/_utils/_model_cards/xai.yaml +0 -28
- retab/_utils/ai_models.py +0 -138
- retab/_utils/benchmarking.py +0 -484
- retab/_utils/chat.py +0 -327
- retab/_utils/display.py +0 -440
- retab/_utils/json_schema.py +0 -2156
- retab/_utils/mime.py +0 -165
- retab/_utils/responses.py +0 -169
- retab/_utils/stream_context_managers.py +0 -52
- retab/_utils/usage/__init__.py +0 -0
- retab/_utils/usage/usage.py +0 -301
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/WHEEL +0 -0
- {retab-0.0.42.dist-info → retab-0.0.43.dist-info}/top_level.txt +0 -0
retab/_utils/chat.py
DELETED
@@ -1,327 +0,0 @@
-import base64
-import logging
-from typing import List, Literal, Optional, Union, cast
-
-import requests
-from anthropic.types.image_block_param import ImageBlockParam
-from anthropic.types.message_param import MessageParam
-from anthropic.types.text_block_param import TextBlockParam
-from google.genai.types import BlobDict, ContentDict, ContentUnionDict, PartDict # type: ignore
-from openai.types.chat.chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
-from openai.types.chat.chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam
-from openai.types.chat.chat_completion_content_part_param import ChatCompletionContentPartParam
-from openai.types.chat.chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
-from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
-
-from ..types.chat import ChatCompletionRetabMessage
-
-MediaType = Literal["image/jpeg", "image/png", "image/gif", "image/webp"]
-
-
-def convert_to_google_genai_format(messages: List[ChatCompletionRetabMessage]) -> tuple[str, list[ContentUnionDict]]:
-    """
-    Converts a list of ChatCompletionRetabMessage to a format compatible with the google.genai SDK.
-
-
-    Example:
-        ```python
-        import google.genai as genai
-
-        # Configure the Gemini client
-        genai.configure(api_key=os.environ["GEMINI_API_KEY"])
-
-        # Initialize the model
-        model = genai.GenerativeModel("gemini-2.0-flash")
-
-        # Get messages in Gemini format
-        gemini_messages = document_message.gemini_messages
-
-        # Generate a response
-        ```
-
-    Args:
-        messages (List[ChatCompletionRetabMessage]): List of chat messages.
-
-    Returns:
-        List[Union[Dict[str, str], str]]: A list of formatted inputs for the google.genai SDK.
-    """
-    system_message: str = ""
-    formatted_content: list[ContentUnionDict] = []
-    for message in messages:
-        # -----------------------
-        # Handle system message
-        # -----------------------
-        if message["role"] in ("system", "developer"):
-            assert isinstance(message["content"], str), "System message content must be a string."
-            if system_message != "":
-                raise ValueError("Only one system message is allowed per chat.")
-            system_message += message["content"]
-            continue
-        parts: list[PartDict] = []
-
-        message_content = message["content"]
-        if isinstance(message_content, str):
-            # Direct string content is treated as the prompt for the SDK
-            parts.append(PartDict(text=message_content))
-        elif isinstance(message_content, list):
-            # Handle structured content
-            for part in message_content:
-                if part["type"] == "text":
-                    parts.append(PartDict(text=part["text"]))
-                elif part["type"] == "image_url":
-                    url = part["image_url"].get("url", "") # type: ignore
-                    if url.startswith("data:image"):
-                        # Extract base64 data and add it to the formatted inputs
-                        media_type, data_content = url.split(";base64,")
-                        media_type = media_type.split("data:")[-1] # => "image/jpeg"
-                        base64_data = data_content
-
-                        # Try to convert to PIL.Image and append it to the formatted inputs
-                        try:
-                            image_bytes = base64.b64decode(base64_data)
-                            parts.append(PartDict(inline_data=BlobDict(data=image_bytes, mime_type=media_type)))
-                        except Exception:
-                            pass
-                elif part["type"] == "input_audio":
-                    pass
-                elif part["type"] == "file":
-                    pass
-                else:
-                    pass
-
-        formatted_content.append(ContentDict(parts=parts, role=("user" if message["role"] == "user" else "model")))
-
-    return system_message, formatted_content
-
-
-def convert_to_anthropic_format(messages: List[ChatCompletionRetabMessage]) -> tuple[str, List[MessageParam]]:
-    """
-    Converts a list of ChatCompletionRetabMessage to a format compatible with the Anthropic SDK.
-
-    Args:
-        messages (List[ChatCompletionRetabMessage]): List of chat messages.
-
-    Returns:
-        (system_message, formatted_messages):
-            system_message (str | NotGiven):
-                The system message if one was found, otherwise NOT_GIVEN.
-            formatted_messages (List[MessageParam]):
-                A list of formatted messages ready for Anthropic.
-    """
-
-    formatted_messages: list[MessageParam] = []
-    system_message: str = ""
-
-    for message in messages:
-        content_blocks: list[Union[TextBlockParam, ImageBlockParam]] = []
-
-        # -----------------------
-        # Handle system message
-        # -----------------------
-        if message["role"] in ("system", "developer"):
-            assert isinstance(message["content"], str), "System message content must be a string."
-            if system_message != "":
-                raise ValueError("Only one system message is allowed per chat.")
-            system_message += message["content"]
-            continue
-
-        # -----------------------
-        # Handle non-system roles
-        # -----------------------
-        if isinstance(message["content"], str):
-            # Direct string content is treated as a single text block
-            content_blocks.append(
-                {
-                    "type": "text",
-                    "text": message["content"],
-                }
-            )
-
-        elif isinstance(message["content"], list):
-            # Handle structured content
-            for part in message["content"]:
-                if part["type"] == "text":
-                    part = cast(ChatCompletionContentPartTextParam, part)
-                    content_blocks.append(
-                        {
-                            "type": "text",
-                            "text": part["text"], # type: ignore
-                        }
-                    )
-
-                elif part["type"] == "input_audio":
-                    part = cast(ChatCompletionContentPartInputAudioParam, part)
-                    logging.warning("Audio input is not supported yet.")
-                    # No blocks appended since not supported
-
-                elif part["type"] == "image_url":
-                    # Handle images that may be either base64 data-URLs or standard remote URLs
-                    part = cast(ChatCompletionContentPartImageParam, part)
-                    image_url = part["image_url"]["url"]
-
-                    if "base64," in image_url:
-                        # The string is already something like: data:image/jpeg;base64,xxxxxxxx...
-                        media_type, data_content = image_url.split(";base64,")
-                        # media_type might look like: "data:image/jpeg"
-                        media_type = media_type.split("data:")[-1] # => "image/jpeg"
-                        base64_data = data_content
-                    else:
-                        # It's a remote URL, so fetch, encode, and derive media type from headers
-                        try:
-                            r = requests.get(image_url)
-                            r.raise_for_status()
-                            content_type = r.headers.get("Content-Type", "image/jpeg")
-                            # fallback "image/jpeg" if no Content-Type given
-
-                            # Only keep recognized image/* for anthropic
-                            if content_type not in ("image/jpeg", "image/png", "image/gif", "image/webp"):
-                                logging.warning(
-                                    "Unrecognized Content-Type '%s' - defaulting to image/jpeg",
-                                    content_type,
-                                )
-                                content_type = "image/jpeg"
-
-                            media_type = content_type
-                            base64_data = base64.b64encode(r.content).decode("utf-8")
-
-                        except Exception:
-                            logging.warning(
-                                "Failed to load image from URL: %s",
-                                image_url,
-                                exc_info=True,
-                                stack_info=True,
-                            )
-                            # Skip adding this block if error
-                            continue
-
-                    # Finally, append to content blocks
-                    content_blocks.append(
-                        {
-                            "type": "image",
-                            "source": {
-                                "type": "base64",
-                                "media_type": cast(MediaType, media_type),
-                                "data": base64_data,
-                            },
-                        }
-                    )
-
-        formatted_messages.append(
-            MessageParam(
-                role=message["role"], # type: ignore
-                content=content_blocks,
-            )
-        )
-
-    return system_message, formatted_messages
-
-
-def convert_from_anthropic_format(messages: list[MessageParam], system_prompt: str) -> list[ChatCompletionRetabMessage]:
-    """
-    Converts a list of Anthropic MessageParam to a list of ChatCompletionRetabMessage.
-    """
-    formatted_messages: list[ChatCompletionRetabMessage] = [ChatCompletionRetabMessage(role="developer", content=system_prompt)]
-
-    for message in messages:
-        role = message["role"]
-        content_blocks = message["content"]
-
-        # Handle different content structures
-        if isinstance(content_blocks, list) and len(content_blocks) == 1 and isinstance(content_blocks[0], dict) and content_blocks[0].get("type") == "text":
-            # Simple text message
-            formatted_messages.append(cast(ChatCompletionRetabMessage, {"role": role, "content": content_blocks[0].get("text", "")}))
-        elif isinstance(content_blocks, list):
-            # Message with multiple content parts or non-text content
-            formatted_content: list[ChatCompletionContentPartParam] = []
-
-            for block in content_blocks:
-                if isinstance(block, dict):
-                    if block.get("type") == "text":
-                        formatted_content.append(cast(ChatCompletionContentPartParam, {"type": "text", "text": block.get("text", "")}))
-                    elif block.get("type") == "image":
-                        source = block.get("source", {})
-                        if isinstance(source, dict) and source.get("type") == "base64":
-                            # Convert base64 image to data URL format
-                            media_type = source.get("media_type", "image/jpeg")
-                            data = source.get("data", "")
-                            image_url = f"data:{media_type};base64,{data}"
-
-                            formatted_content.append(cast(ChatCompletionContentPartParam, {"type": "image_url", "image_url": {"url": image_url}}))
-
-            formatted_messages.append(cast(ChatCompletionRetabMessage, {"role": role, "content": formatted_content}))
-
-    return formatted_messages
-
-
-def convert_to_openai_format(messages: List[ChatCompletionRetabMessage]) -> List[ChatCompletionMessageParam]:
-    return cast(list[ChatCompletionMessageParam], messages)
-
-
-def convert_from_openai_format(messages: list[ChatCompletionMessageParam]) -> list[ChatCompletionRetabMessage]:
-    return cast(list[ChatCompletionRetabMessage], messages)
-
-
-def separate_messages(
-    messages: list[ChatCompletionRetabMessage],
-) -> tuple[Optional[ChatCompletionRetabMessage], list[ChatCompletionRetabMessage], list[ChatCompletionRetabMessage]]:
-    """
-    Separates messages into system, user and assistant messages.
-
-    Args:
-        messages: List of chat messages containing system, user and assistant messages
-
-    Returns:
-        Tuple containing:
-        - The system message if present, otherwise None
-        - List of user messages
-        - List of assistant messages
-    """
-    system_message = None
-    user_messages = []
-    assistant_messages = []
-
-    for message in messages:
-        if message["role"] in ("system", "developer"):
-            system_message = message
-        elif message["role"] == "user":
-            user_messages.append(message)
-        elif message["role"] == "assistant":
-            assistant_messages.append(message)
-
-    return system_message, user_messages, assistant_messages
-
-
-def str_messages(messages: list[ChatCompletionRetabMessage], max_length: int = 100) -> str:
-    """
-    Converts a list of chat messages into a string representation with faithfully serialized structure.
-
-    Args:
-        messages (list[ChatCompletionRetabMessage]): The list of chat messages.
-        max_length (int): Maximum length for content before truncation.
-
-    Returns:
-        str: A string representation of the messages with applied truncation.
-    """
-
-    def truncate(text: str, max_len: int) -> str:
-        """Truncate text to max_len with ellipsis."""
-        return text if len(text) <= max_len else f"{text[:max_len]}..."
-
-    serialized: list[ChatCompletionRetabMessage] = []
-    for message in messages:
-        role = message["role"]
-        content = message["content"]
-
-        if isinstance(content, str):
-            serialized.append({"role": role, "content": truncate(content, max_length)})
-        elif isinstance(content, list):
-            truncated_content: list[ChatCompletionContentPartParam] = []
-            for part in content:
-                if part["type"] == "text" and part["text"]:
-                    truncated_content.append({"type": "text", "text": truncate(part["text"], max_length)})
-                elif part["type"] == "image_url" and part["image_url"]:
-                    image_url = part["image_url"].get("url", "unknown image")
-                    truncated_content.append({"type": "image_url", "image_url": {"url": truncate(image_url, max_length)}})
-            serialized.append({"role": role, "content": truncated_content})

-    return repr(serialized)
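For reference, a minimal usage sketch of two of the helpers deleted above. This is an illustrative example only, not part of the package: it assumes the 0.0.42 import path `retab._utils.chat` and plain-dict messages compatible with `ChatCompletionRetabMessage`.

```python
# Hypothetical usage sketch (assumes retab==0.0.42, where retab/_utils/chat.py still exists).
from retab._utils.chat import convert_to_anthropic_format, separate_messages

messages = [
    {"role": "developer", "content": "You are a document-extraction assistant."},
    {"role": "user", "content": "Summarize the attached invoice."},
]

# Split out the system/developer message from the conversational turns.
system_msg, user_msgs, assistant_msgs = separate_messages(messages)

# Convert the same messages into Anthropic's (system prompt, MessageParam list) shape.
system_prompt, anthropic_messages = convert_to_anthropic_format(messages)
print(system_prompt)       # "You are a document-extraction assistant."
print(anthropic_messages)  # [{"role": "user", "content": [{"type": "text", "text": "..."}]}]
```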