retab 0.0.38-py3-none-any.whl → 0.0.39-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
retab/_utils/ai_models.py CHANGED
@@ -1,74 +1,121 @@
+import os
+import yaml
 from typing import get_args

-from ..types.ai_models import AIProvider, GeminiModel, OpenAIModel, xAI_Model
+from ..types.ai_models import AIProvider, GeminiModel, OpenAIModel, xAI_Model, RetabModel, PureLLMModel, ModelCard

+MODEL_CARDS_DIR = os.path.join(os.path.dirname(__file__), "_model_cards")

-def find_provider_from_model(model: str) -> AIProvider:
-    if model in get_args(OpenAIModel):
-        return "OpenAI"
-    elif ":" in model:
-        # Handle fine-tuned models
-        ft, base_model, model_id = model.split(":", 2)
-        if base_model in get_args(OpenAIModel):
-            return "OpenAI"
-    # elif model in get_args(AnthropicModel):
-    #     return "Anthropic"
-    elif model in get_args(xAI_Model):
-        return "xAI"
-    elif model in get_args(GeminiModel):
-        return "Gemini"
-    raise ValueError(f"Could not determine AI provider for model: {model}")
+def merge_model_cards(base: dict, override: dict) -> dict:
+    result = base.copy()
+    for key, value in override.items():
+        if key == "inherits":
+            continue
+        if isinstance(value, dict) and key in result:
+            result[key] = merge_model_cards(result[key], value)
+        else:
+            result[key] = value
+    return result

+def load_model_cards(yaml_file: str) -> list[ModelCard]:
+    raw_cards = yaml.safe_load(open(yaml_file))
+    name_to_card = {c["model"]: c for c in raw_cards if "inherits" not in c}

-def assert_valid_model_extraction(model: str) -> None:
-    if model in get_args(OpenAIModel):
-        return
-    elif ":" in model:
-        # Handle fine-tuned models
-        ft, base_model, model_id = model.split(":", 2)
-        if base_model in get_args(OpenAIModel):
-            return
-    # elif model in get_args(AnthropicModel):
-    #     return
-    elif model in get_args(xAI_Model):
-        return
-    elif model in get_args(GeminiModel):
-        return
-    raise ValueError(
-        f"Invalid model for extraction: {model}.\nValid OpenAI models: {get_args(OpenAIModel)}\n"
-        # f"Valid Anthropic models: {get_args(AnthropicModel)}\n"
-        # f"Valid xAI models: {get_args(xAI_Model)}\n"
-        # f"Valid Gemini models: {get_args(GeminiModel)}"
-    )
+    final_cards = []
+    for card in raw_cards:
+        if "inherits" in card:
+            parent = name_to_card[card["inherits"]]
+            merged = merge_model_cards(parent, card)
+            final_cards.append(ModelCard(**merged))
+        else:
+            final_cards.append(ModelCard(**card))
+    return final_cards

+# Load all model cards
+model_cards = sum([
+    load_model_cards(os.path.join(MODEL_CARDS_DIR, "openai.yaml")),
+    load_model_cards(os.path.join(MODEL_CARDS_DIR, "anthropic.yaml")),
+    load_model_cards(os.path.join(MODEL_CARDS_DIR, "xai.yaml")),
+    load_model_cards(os.path.join(MODEL_CARDS_DIR, "gemini.yaml")),
+    load_model_cards(os.path.join(MODEL_CARDS_DIR, "auto.yaml")),
+], [])
+model_cards_dict = {card.model: card for card in model_cards}

-def assert_valid_model_batch_processing(model: str) -> None:
-    """Assert that the model is either a standard OpenAI model or a valid fine-tuned model.

-    Valid formats:
-    - Standard model: Must be in OpenAIModel
-    - Fine-tuned model: Must be {base_model}:{id} where base_model is in OpenAIModel
+# Validate that model cards match the PureLLMModel type
+all_model_names = set(model_cards_dict.keys())
+if all_model_names.symmetric_difference(set(get_args(PureLLMModel))):
+    raise ValueError(f"Mismatch between model cards and PureLLMModel type: {all_model_names.symmetric_difference(set(get_args(PureLLMModel)))}")
+
+
+def get_model_from_model_id(model_id: str) -> str:
+    """
+    Get the model name from the model id.
+    """
+    if model_id.startswith("ft:"):
+        parts = model_id.split(":")
+        return parts[1]
+    else:
+        return model_id
+
+
+def get_model_card(model: str) -> ModelCard:
+    """
+    Get the model card for a specific model.
+
+    Args:
+        model: The model name to look up
+
+    Returns:
+        The ModelCard for the specified model

     Raises:
-        ValueError: If the model format is invalid
+        ValueError: If no model card is found for the specified model
     """
-    if model in get_args(OpenAIModel):
-        return
+    model_name = get_model_from_model_id(model)
+    if model_name in model_cards_dict:
+        model_card = ModelCard(**model_cards_dict[model_name].model_dump())
+        if model_name != model:
+            # Fine-tuned model -> Change the name
+            model_card.model = model
+            # Remove the fine-tuning feature (if exists)
+            try:
+                model_card.capabilities.features.remove("fine_tuning")
+            except ValueError:
+                pass
+        return model_card
+
+    raise ValueError(f"No model card found for model: {model_name}")

-    try:
-        ft, base_model, model_id = model.split(":", 2)
-        if base_model not in get_args(OpenAIModel):
-            raise ValueError(f"Invalid base model in fine-tuned model '{model}'. Base model must be one of: {get_args(OpenAIModel)}")
-        if not model_id or not model_id.strip():
-            raise ValueError(f"Model ID cannot be empty in fine-tuned model '{model}'")
+
+def get_provider_for_model(model_id: str) -> AIProvider:
+    """
+    Determine the AI provider associated with the given model identifier.
+    Returns one of: "Anthropic", "xAI", "OpenAI", "Gemini", "Retab"; raises ValueError if the model is unknown.
+    """
+    model_name = get_model_from_model_id(model_id)
+    # if model_name in get_args(AnthropicModel):
+    #     return "Anthropic"
+    # if model_name in get_args(xAI_Model):
+    #     return "xAI"
+    if model_name in get_args(OpenAIModel):
+        return "OpenAI"
+    if model_name in get_args(GeminiModel):
+        return "Gemini"
+    if model_name in get_args(RetabModel):
+        return "Retab"
+    raise ValueError(f"Unknown model: {model_name}")
+
+
+def assert_valid_model_extraction(model: str) -> None:
+    try:
+        get_provider_for_model(model)
     except ValueError:
-        if ":" not in model:
-            raise ValueError(
-                f"Invalid model format: {model}. Must be either:\n"
-                f"1. A standard model: {get_args(OpenAIModel)}\n"
-                f"2. A fine-tuned model in format 'base_model:id' where base_model is one of the standard models"
-            ) from None
-        raise
+        raise ValueError(
+            f"Invalid model for extraction: {model}.\nValid OpenAI models: {get_args(OpenAIModel)}\n"
+            f"Valid xAI models: {get_args(xAI_Model)}\n"
+            f"Valid Gemini models: {get_args(GeminiModel)}"
+        ) from None


 def assert_valid_model_schema_generation(model: str) -> None:
@@ -81,20 +128,11 @@ def assert_valid_model_schema_generation(model: str) -> None:
     Raises:
         ValueError: If the model format is invalid
     """
-    if model in get_args(OpenAIModel):
+    if get_model_from_model_id(model) in get_args(OpenAIModel):
         return
-
-    try:
-        ft, base_model, model_id = model.split(":", 2)
-        if base_model not in get_args(OpenAIModel):
-            raise ValueError(f"Invalid base model in fine-tuned model '{model}'. Base model must be one of: {get_args(OpenAIModel)}")
-        if not model_id or not model_id.strip():
-            raise ValueError(f"Model ID cannot be empty in fine-tuned model '{model}'")
-    except ValueError:
-        if ":" not in model:
-            raise ValueError(
+    else:
+        raise ValueError(
             f"Invalid model format: {model}. Must be either:\n"
             f"1. A standard model: {get_args(OpenAIModel)}\n"
-            f"2. A fine-tuned model in format 'base_model:id' where base_model is one of the standard models"
-        ) from None
-        raise
+            f"2. A fine-tuned model in format 'base_model:id' where base_model is one of the standard openai models"
+        ) from None
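The rewritten `ai_models.py` replaces hard-coded provider checks with model cards loaded from bundled YAML files, where a card can `inherits` from another and override only some fields. A minimal sketch of that merge behavior, with the `merge_model_cards` logic copied from the hunk above and hypothetical card data (the real cards live in the package's `_model_cards/*.yaml` files):

```python
# Deep-merge semantics from the diff above: override values win, nested dicts
# merge recursively, and the "inherits" key is dropped from the result.
def merge_model_cards(base: dict, override: dict) -> dict:
    result = base.copy()
    for key, value in override.items():
        if key == "inherits":
            continue
        if isinstance(value, dict) and key in result:
            result[key] = merge_model_cards(result[key], value)
        else:
            result[key] = value
    return result

# Hypothetical cards, shaped like the YAML entries the loader consumes.
parent = {"model": "gpt-4o", "capabilities": {"features": ["fine_tuning"], "vision": True}}
child = {"model": "gpt-4o-mini", "inherits": "gpt-4o", "capabilities": {"vision": False}}

print(merge_model_cards(parent, child))
# {'model': 'gpt-4o-mini', 'capabilities': {'features': ['fine_tuning'], 'vision': False}}
```

Fine-tuned identifiers are reduced to their base model the same way `get_model_from_model_id` does: `"ft:gpt-4o:acme::abc123".split(":")[1]` is `"gpt-4o"`, so a fine-tune resolves to its base card with the `fine_tuning` feature stripped.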
retab/_utils/responses.py CHANGED
@@ -17,7 +17,7 @@ from openai.types.responses.response_input_param import ResponseInputItemParam
 from openai.types.responses.response_input_text_param import ResponseInputTextParam

 from ..types.chat import ChatCompletionRetabMessage
-from ..types.documents.extractions import UiParsedChatCompletion, UiParsedChoice
+from ..types.documents.extractions import RetabParsedChatCompletion, RetabParsedChoice


 def convert_to_openai_format(messages: list[ChatCompletionRetabMessage]) -> list[ResponseInputItemParam]:
@@ -116,17 +116,17 @@ def convert_from_openai_format(messages: list[ResponseInputItemParam]) -> list[C
     return formatted_messages


-def parse_openai_responses_response(response: Response) -> UiParsedChatCompletion:
+def parse_openai_responses_response(response: Response) -> RetabParsedChatCompletion:
     """
-    Convert an OpenAI Response (Responses API) to UiParsedChatCompletion type.
+    Convert an OpenAI Response (Responses API) to RetabParsedChatCompletion type.

     Args:
         response: Response from OpenAI Responses API

     Returns:
-        Parsed response in UiParsedChatCompletion format
+        Parsed response in RetabParsedChatCompletion format
     """
-    # Create the UiParsedChatCompletion object
+    # Create the RetabParsedChatCompletion object
     if response.usage:
         usage = CompletionUsage(
             prompt_tokens=response.usage.input_tokens,
@@ -148,7 +148,7 @@ def parse_openai_responses_response(response: Response) -> UiParsedChatCompletio
     result_object = from_json(bytes(output_text, "utf-8"), partial_mode=True)  # Attempt to parse the result even if EOF is reached

     choices.append(
-        UiParsedChoice(
+        RetabParsedChoice(
             index=0,
             message=ParsedChatCompletionMessage(
                 role="assistant",
@@ -158,7 +158,7 @@ def parse_openai_responses_response(response: Response) -> UiParsedChatCompletio
             )
         )

-    return UiParsedChatCompletion(
+    return RetabParsedChatCompletion(
         id=response.id,
         choices=choices,
         created=int(datetime.datetime.now().timestamp()),
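`parse_openai_responses_response` parses the accumulated output text with `partial_mode=True`, so a response cut off mid-stream still yields whatever fields are complete. The call signature matches `jiter.from_json` (an assumption; the import sits outside this hunk). A minimal sketch:

```python
import jiter  # assumption: the from_json used above is jiter's

# JSON truncated mid-string, as an interrupted stream might leave it.
truncated = b'{"invoice_number": "INV-001", "customer": "Ac'
print(jiter.from_json(truncated, partial_mode=True))
# expected: {'invoice_number': 'INV-001'} -- the incomplete trailing field is dropped
```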
@@ -4,7 +4,8 @@ from openai.types.completion_usage import CompletionUsage
 from pydantic import BaseModel, Field

 # https://platform.openai.com/docs/guides/prompt-caching
-from ...types.ai_models import Amount, Pricing, get_model_card
+from ...types.ai_models import Amount, Pricing
+from ..._utils.ai_models import get_model_card

 # ─── PRICING MODELS ────────────────────────────────────────────────────────────

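This hunk moves `get_model_card` out of the types module, so pricing code now imports it from `_utils`. For downstream users the change is just the import path; a sketch, assuming `"gpt-4o"` appears among the bundled cards:

```python
# 0.0.38: from retab.types.ai_models import get_model_card
# 0.0.39:
from retab._utils.ai_models import get_model_card

card = get_model_card("gpt-4o")  # hypothetical lookup; raises ValueError for unknown models
```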
@@ -7,8 +7,8 @@ from pydantic import BaseModel as ResponseFormatT
 from ..._resource import AsyncAPIResource, SyncAPIResource
 from ..._utils.ai_models import assert_valid_model_extraction
 from ...types.chat import ChatCompletionRetabMessage
-from ...types.completions import UiChatCompletionsRequest
-from ...types.documents.extractions import UiParsedChatCompletion
+from ...types.completions import RetabChatCompletionsRequest
+from ...types.documents.extractions import RetabParsedChatCompletion
 from ...types.schemas.object import Schema
 from ...types.standards import PreparedRequest

@@ -31,7 +31,7 @@ class BaseCompletionsMixin:

         schema_obj = Schema(json_schema=json_schema)

-        request = UiChatCompletionsRequest(
+        request = RetabChatCompletionsRequest(
             model=model,
             messages=messages,
             response_format={
@@ -66,7 +66,7 @@ class BaseCompletionsMixin:

         schema_obj = Schema(json_schema=json_schema)

-        request = UiChatCompletionsRequest(
+        request = RetabChatCompletionsRequest(
             model=model,
             messages=messages,
             response_format={
@@ -98,7 +98,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
         n_consensus: int = 1,
         idempotency_key: str | None = None,
         stream: bool = False,
-    ) -> UiParsedChatCompletion:
+    ) -> RetabParsedChatCompletion:
         """
         Create a completion using the Retab API.
         """
@@ -116,7 +116,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):

         response = self._client._prepared_request(request)

-        return UiParsedChatCompletion.model_validate(response)
+        return RetabParsedChatCompletion.model_validate(response)

     def parse(
         self,
@@ -127,7 +127,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
         reasoning_effort: ChatCompletionReasoningEffort = "medium",
         n_consensus: int = 1,
         idempotency_key: str | None = None,
-    ) -> UiParsedChatCompletion:
+    ) -> RetabParsedChatCompletion:
         """
         Parse messages using the Retab API to extract structured data according to the provided JSON schema.

@@ -141,7 +141,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
             store: Whether to store the data in the Retab database

         Returns:
-            UiParsedChatCompletion: Parsed response from the API
+            RetabParsedChatCompletion: Parsed response from the API
         """
         request = self.prepare_parse(
             response_format=response_format,
@@ -155,7 +155,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
         )
         response = self._client._prepared_request(request)

-        return UiParsedChatCompletion.model_validate(response)
+        return RetabParsedChatCompletion.model_validate(response)


 class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
@@ -171,7 +171,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
         n_consensus: int = 1,
         idempotency_key: str | None = None,
         stream: bool = False,
-    ) -> UiParsedChatCompletion:
+    ) -> RetabParsedChatCompletion:
         """
         Create a completion using the Retab API.
         """
@@ -188,7 +188,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
         )

         response = await self._client._prepared_request(request)
-        return UiParsedChatCompletion.model_validate(response)
+        return RetabParsedChatCompletion.model_validate(response)

     async def parse(
         self,
@@ -199,7 +199,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
         reasoning_effort: ChatCompletionReasoningEffort = "medium",
         n_consensus: int = 1,
         idempotency_key: str | None = None,
-    ) -> UiParsedChatCompletion:
+    ) -> RetabParsedChatCompletion:
         """
         Parse messages using the Retab API asynchronously.

@@ -213,7 +213,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
             idempotency_key: Idempotency key for request

         Returns:
-            UiParsedChatCompletion: Parsed response from the API
+            RetabParsedChatCompletion: Parsed response from the API
         """
         request = self.prepare_parse(
             response_format=response_format,
@@ -226,4 +226,4 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
             idempotency_key=idempotency_key,
         )
         response = await self._client._prepared_request(request)
-        return UiParsedChatCompletion.model_validate(response)
+        return RetabParsedChatCompletion.model_validate(response)
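Both resource classes validate the raw API payload into the renamed `RetabParsedChatCompletion` via pydantic's `model_validate`. A toy illustration of that pattern with a stand-in model (not the real class, which carries full choice and usage structure):

```python
from pydantic import BaseModel

class ParsedCompletion(BaseModel):  # stand-in for RetabParsedChatCompletion
    id: str
    model: str
    object: str

payload = {"id": "cmpl_123", "model": "gpt-4o", "object": "chat.completion"}
completion = ParsedCompletion.model_validate(payload)  # raises ValidationError on malformed payloads
print(completion.id)  # cmpl_123
```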
@@ -13,8 +13,8 @@ from ..._utils.ai_models import assert_valid_model_extraction
 from ..._utils.json_schema import unflatten_dict
 from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
 from ...types.chat import ChatCompletionRetabMessage
-from ...types.completions import UiChatCompletionsRequest
-from ...types.documents.extractions import UiParsedChatCompletion, UiParsedChatCompletionChunk, UiParsedChoice
+from ...types.completions import RetabChatCompletionsRequest
+from ...types.documents.extractions import RetabParsedChatCompletion, RetabParsedChatCompletionChunk, RetabParsedChoice
 from ...types.schemas.object import Schema
 from ...types.standards import PreparedRequest

@@ -36,7 +36,7 @@ class BaseCompletionsMixin:
         json_schema = response_format.model_json_schema()
         schema_obj = Schema(json_schema=json_schema)

-        request = UiChatCompletionsRequest(
+        request = RetabChatCompletionsRequest(
             messages=messages,
             response_format={
                 "type": "json_schema",
@@ -73,7 +73,7 @@ class BaseCompletionsMixin:
         schema_obj = Schema(json_schema=json_schema)

         # Validate DocumentAPIRequest data (raises exception if invalid)
-        request = UiChatCompletionsRequest(
+        request = RetabChatCompletionsRequest(
             model=model,
             messages=messages,
             response_format={
@@ -106,7 +106,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
         reasoning_effort: ChatCompletionReasoningEffort = "medium",
         n_consensus: int = 1,
         idempotency_key: str | None = None,
-    ) -> Generator[UiParsedChatCompletion, None, None]:
+    ) -> Generator[RetabParsedChatCompletion, None, None]:
         """
         Process messages using the Retab API with streaming enabled.

@@ -119,7 +119,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
             idempotency_key: Idempotency key for request

         Returns:
-            Generator[UiParsedChatCompletion]: Stream of parsed responses
+            Generator[RetabParsedChatCompletion]: Stream of parsed responses

         Usage:
         ```python
@@ -140,16 +140,16 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
         )

         # Request the stream and return a context manager
-        ui_parsed_chat_completion_cum_chunk: UiParsedChatCompletionChunk | None = None
-        # Initialize the UiParsedChatCompletion object
-        ui_parsed_completion: UiParsedChatCompletion = UiParsedChatCompletion(
+        ui_parsed_chat_completion_cum_chunk: RetabParsedChatCompletionChunk | None = None
+        # Initialize the RetabParsedChatCompletion object
+        ui_parsed_completion: RetabParsedChatCompletion = RetabParsedChatCompletion(
             id="",
             created=0,
             model="",
             object="chat.completion",
             likelihoods={},
             choices=[
-                UiParsedChoice(
+                RetabParsedChoice(
                     index=0,
                     message=ParsedChatCompletionMessage(content="", role="assistant"),
                     finish_reason=None,
@@ -160,7 +160,7 @@ class Completions(SyncAPIResource, BaseCompletionsMixin):
         for chunk_json in self._client._prepared_request_stream(request):
             if not chunk_json:
                 continue
-            ui_parsed_chat_completion_cum_chunk = UiParsedChatCompletionChunk.model_validate(chunk_json).chunk_accumulator(ui_parsed_chat_completion_cum_chunk)
+            ui_parsed_chat_completion_cum_chunk = RetabParsedChatCompletionChunk.model_validate(chunk_json).chunk_accumulator(ui_parsed_chat_completion_cum_chunk)
             # Basic stuff
             ui_parsed_completion.id = ui_parsed_chat_completion_cum_chunk.id
             ui_parsed_completion.created = ui_parsed_chat_completion_cum_chunk.created
@@ -192,7 +192,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
         reasoning_effort: ChatCompletionReasoningEffort = "medium",
         n_consensus: int = 1,
         idempotency_key: str | None = None,
-    ) -> AsyncGenerator[UiParsedChatCompletion, None]:
+    ) -> AsyncGenerator[RetabParsedChatCompletion, None]:
         """
         Parse messages using the Retab API asynchronously with streaming.

@@ -206,7 +206,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
             idempotency_key: Idempotency key for request

         Returns:
-            AsyncGenerator[UiParsedChatCompletion]: Stream of parsed responses
+            AsyncGenerator[RetabParsedChatCompletion]: Stream of parsed responses

         Usage:
         ```python
@@ -227,16 +227,16 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
         )

         # Request the stream and return a context manager
-        ui_parsed_chat_completion_cum_chunk: UiParsedChatCompletionChunk | None = None
-        # Initialize the UiParsedChatCompletion object
-        ui_parsed_completion: UiParsedChatCompletion = UiParsedChatCompletion(
+        ui_parsed_chat_completion_cum_chunk: RetabParsedChatCompletionChunk | None = None
+        # Initialize the RetabParsedChatCompletion object
+        ui_parsed_completion: RetabParsedChatCompletion = RetabParsedChatCompletion(
             id="",
             created=0,
             model="",
             object="chat.completion",
             likelihoods={},
             choices=[
-                UiParsedChoice(
+                RetabParsedChoice(
                     index=0,
                     message=ParsedChatCompletionMessage(content="", role="assistant"),
                     finish_reason=None,
@@ -247,7 +247,7 @@ class AsyncCompletions(AsyncAPIResource, BaseCompletionsMixin):
         async for chunk_json in self._client._prepared_request_stream(request):
             if not chunk_json:
                 continue
-            ui_parsed_chat_completion_cum_chunk = UiParsedChatCompletionChunk.model_validate(chunk_json).chunk_accumulator(ui_parsed_chat_completion_cum_chunk)
+            ui_parsed_chat_completion_cum_chunk = RetabParsedChatCompletionChunk.model_validate(chunk_json).chunk_accumulator(ui_parsed_chat_completion_cum_chunk)
             # Basic stuff
             ui_parsed_completion.id = ui_parsed_chat_completion_cum_chunk.id
             ui_parsed_completion.created = ui_parsed_chat_completion_cum_chunk.created
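The streaming loops fold every incoming chunk into a running completion through `chunk_accumulator`, so each yielded object is the best current reconstruction of the final answer. A toy version of that fold with plain dicts standing in for `RetabParsedChatCompletionChunk` (the real accumulation logic is not shown in this diff):

```python
from typing import Optional

def chunk_accumulator(chunk: dict, acc: Optional[dict]) -> dict:
    """Concatenate streamed content onto the accumulated state."""
    if acc is None:
        return dict(chunk)
    merged = dict(acc)
    merged["content"] = acc.get("content", "") + chunk.get("content", "")
    return merged

acc = None
for chunk in ({"id": "c1", "content": '{"total"'}, {"id": "c1", "content": ": 12.5}"}):
    acc = chunk_accumulator(chunk, acc)
print(acc["content"])  # {"total": 12.5}
```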
@@ -8,7 +8,7 @@ from pydantic import BaseModel

 from ..._resource import AsyncAPIResource, SyncAPIResource
 from ..._utils.ai_models import assert_valid_model_extraction
-from ...types.completions import UiChatResponseCreateRequest
+from ...types.completions import RetabChatResponseCreateRequest
 from ...types.documents.extractions import UiResponse
 from ...types.schemas.object import Schema
 from ...types.standards import PreparedRequest
@@ -43,8 +43,8 @@ class BaseResponsesMixin:
         if instructions is None:
             instructions = schema_obj.developer_system_prompt

-        # Create the request object based on the UiChatResponseCreateRequest model
-        request = UiChatResponseCreateRequest(
+        # Create the request object based on the RetabChatResponseCreateRequest model
+        request = RetabChatResponseCreateRequest(
             model=model,
             input=input,
             temperature=temperature,
@@ -79,8 +79,8 @@ class BaseResponsesMixin:
         if instructions is None:
             instructions = schema_obj.developer_system_prompt

-        # Create the request object based on the UiChatResponseCreateRequest model
-        request = UiChatResponseCreateRequest(
+        # Create the request object based on the RetabChatResponseCreateRequest model
+        request = RetabChatResponseCreateRequest(
             model=model,
             input=input,
             temperature=temperature,
@@ -8,7 +8,7 @@ from pydantic import BaseModel
 from ..._resource import AsyncAPIResource, SyncAPIResource
 from ..._utils.ai_models import assert_valid_model_extraction
 from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
-from ...types.completions import UiChatResponseCreateRequest
+from ...types.completions import RetabChatResponseCreateRequest
 from ...types.documents.extractions import UiResponse
 from ...types.schemas.object import Schema
 from ...types.standards import PreparedRequest
@@ -44,8 +44,8 @@ class BaseResponsesMixin:
         if instructions is None:
             instructions = schema_obj.developer_system_prompt

-        # Create the request object based on the RetabChatResponseCreateRequest model
-        request = UiChatResponseCreateRequest(
+        # Create the request object based on the RetabChatResponseCreateRequest model
+        request = RetabChatResponseCreateRequest(
             model=model,
             input=input,
             temperature=temperature,
@@ -81,8 +81,8 @@ class BaseResponsesMixin:
         if instructions is None:
             instructions = schema_obj.developer_system_prompt

-        # Create the request object based on the UiChatResponseCreateRequest model
-        request = UiChatResponseCreateRequest(
+        # Create the request object based on the RetabChatResponseCreateRequest model
+        request = RetabChatResponseCreateRequest(
             model=model,
             input=input,
             temperature=temperature,
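Both responses mixins fall back to the schema's `developer_system_prompt` when no instructions are supplied before building the renamed `RetabChatResponseCreateRequest`. A sketch of that flow with a stand-in request model (field set reduced to what these hunks show):

```python
from typing import Optional
from pydantic import BaseModel

class ChatResponseCreateRequest(BaseModel):  # stand-in for RetabChatResponseCreateRequest
    model: str
    input: str
    temperature: float
    instructions: str

def prepare(model: str, input_text: str, temperature: float,
            instructions: Optional[str], developer_system_prompt: str) -> ChatResponseCreateRequest:
    # Mirror of the mixin logic above: default instructions from the schema object.
    if instructions is None:
        instructions = developer_system_prompt
    return ChatResponseCreateRequest(model=model, input=input_text,
                                     temperature=temperature, instructions=instructions)

req = prepare("gpt-4o", "Extract the invoice fields.", 0.0, None,
              "You are a structured-extraction assistant.")
print(req.instructions)  # You are a structured-extraction assistant.
```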