retab 0.0.35__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (111)
  1. retab-0.0.35.dist-info/METADATA +417 -0
  2. retab-0.0.35.dist-info/RECORD +111 -0
  3. retab-0.0.35.dist-info/WHEEL +5 -0
  4. retab-0.0.35.dist-info/top_level.txt +1 -0
  5. uiform/__init__.py +4 -0
  6. uiform/_resource.py +28 -0
  7. uiform/_utils/__init__.py +0 -0
  8. uiform/_utils/ai_models.py +100 -0
  9. uiform/_utils/benchmarking copy.py +588 -0
  10. uiform/_utils/benchmarking.py +485 -0
  11. uiform/_utils/chat.py +332 -0
  12. uiform/_utils/display.py +443 -0
  13. uiform/_utils/json_schema.py +2161 -0
  14. uiform/_utils/mime.py +168 -0
  15. uiform/_utils/responses.py +163 -0
  16. uiform/_utils/stream_context_managers.py +52 -0
  17. uiform/_utils/usage/__init__.py +0 -0
  18. uiform/_utils/usage/usage.py +300 -0
  19. uiform/client.py +701 -0
  20. uiform/py.typed +0 -0
  21. uiform/resources/__init__.py +0 -0
  22. uiform/resources/consensus/__init__.py +3 -0
  23. uiform/resources/consensus/client.py +114 -0
  24. uiform/resources/consensus/completions.py +252 -0
  25. uiform/resources/consensus/completions_stream.py +278 -0
  26. uiform/resources/consensus/responses.py +325 -0
  27. uiform/resources/consensus/responses_stream.py +373 -0
  28. uiform/resources/deployments/__init__.py +9 -0
  29. uiform/resources/deployments/client.py +78 -0
  30. uiform/resources/deployments/endpoints.py +322 -0
  31. uiform/resources/deployments/links.py +452 -0
  32. uiform/resources/deployments/logs.py +211 -0
  33. uiform/resources/deployments/mailboxes.py +496 -0
  34. uiform/resources/deployments/outlook.py +531 -0
  35. uiform/resources/deployments/tests.py +158 -0
  36. uiform/resources/documents/__init__.py +3 -0
  37. uiform/resources/documents/client.py +255 -0
  38. uiform/resources/documents/extractions.py +441 -0
  39. uiform/resources/evals.py +812 -0
  40. uiform/resources/files.py +24 -0
  41. uiform/resources/finetuning.py +62 -0
  42. uiform/resources/jsonlUtils.py +1046 -0
  43. uiform/resources/models.py +45 -0
  44. uiform/resources/openai_example.py +22 -0
  45. uiform/resources/processors/__init__.py +3 -0
  46. uiform/resources/processors/automations/__init__.py +9 -0
  47. uiform/resources/processors/automations/client.py +78 -0
  48. uiform/resources/processors/automations/endpoints.py +317 -0
  49. uiform/resources/processors/automations/links.py +356 -0
  50. uiform/resources/processors/automations/logs.py +211 -0
  51. uiform/resources/processors/automations/mailboxes.py +435 -0
  52. uiform/resources/processors/automations/outlook.py +444 -0
  53. uiform/resources/processors/automations/tests.py +158 -0
  54. uiform/resources/processors/client.py +474 -0
  55. uiform/resources/prompt_optimization.py +76 -0
  56. uiform/resources/schemas.py +369 -0
  57. uiform/resources/secrets/__init__.py +9 -0
  58. uiform/resources/secrets/client.py +20 -0
  59. uiform/resources/secrets/external_api_keys.py +109 -0
  60. uiform/resources/secrets/webhook.py +62 -0
  61. uiform/resources/usage.py +271 -0
  62. uiform/types/__init__.py +0 -0
  63. uiform/types/ai_models.py +645 -0
  64. uiform/types/automations/__init__.py +0 -0
  65. uiform/types/automations/cron.py +58 -0
  66. uiform/types/automations/endpoints.py +21 -0
  67. uiform/types/automations/links.py +28 -0
  68. uiform/types/automations/mailboxes.py +60 -0
  69. uiform/types/automations/outlook.py +68 -0
  70. uiform/types/automations/webhooks.py +21 -0
  71. uiform/types/chat.py +8 -0
  72. uiform/types/completions.py +93 -0
  73. uiform/types/consensus.py +10 -0
  74. uiform/types/db/__init__.py +0 -0
  75. uiform/types/db/annotations.py +24 -0
  76. uiform/types/db/files.py +36 -0
  77. uiform/types/deployments/__init__.py +0 -0
  78. uiform/types/deployments/cron.py +59 -0
  79. uiform/types/deployments/endpoints.py +28 -0
  80. uiform/types/deployments/links.py +36 -0
  81. uiform/types/deployments/mailboxes.py +67 -0
  82. uiform/types/deployments/outlook.py +76 -0
  83. uiform/types/deployments/webhooks.py +21 -0
  84. uiform/types/documents/__init__.py +0 -0
  85. uiform/types/documents/correct_orientation.py +13 -0
  86. uiform/types/documents/create_messages.py +226 -0
  87. uiform/types/documents/extractions.py +297 -0
  88. uiform/types/evals.py +207 -0
  89. uiform/types/events.py +76 -0
  90. uiform/types/extractions.py +85 -0
  91. uiform/types/jobs/__init__.py +0 -0
  92. uiform/types/jobs/base.py +150 -0
  93. uiform/types/jobs/batch_annotation.py +22 -0
  94. uiform/types/jobs/evaluation.py +133 -0
  95. uiform/types/jobs/finetune.py +6 -0
  96. uiform/types/jobs/prompt_optimization.py +41 -0
  97. uiform/types/jobs/webcrawl.py +6 -0
  98. uiform/types/logs.py +231 -0
  99. uiform/types/mime.py +257 -0
  100. uiform/types/modalities.py +68 -0
  101. uiform/types/pagination.py +6 -0
  102. uiform/types/schemas/__init__.py +0 -0
  103. uiform/types/schemas/enhance.py +53 -0
  104. uiform/types/schemas/evaluate.py +55 -0
  105. uiform/types/schemas/generate.py +32 -0
  106. uiform/types/schemas/layout.py +58 -0
  107. uiform/types/schemas/object.py +631 -0
  108. uiform/types/schemas/templates.py +107 -0
  109. uiform/types/secrets/__init__.py +0 -0
  110. uiform/types/secrets/external_api_keys.py +22 -0
  111. uiform/types/standards.py +39 -0
@@ -0,0 +1,325 @@
1
+ import json
2
+ from pathlib import Path
3
+ from typing import Any, AsyncGenerator, Generator, TypeVar, Generic, Optional, Union, List, Sequence, cast
4
+
5
+ from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
6
+ from openai.types.chat.parsed_chat_completion import ParsedChatCompletionMessage
7
+ from openai.types.responses.response import Response
8
+ from openai.types.responses.response_input_param import ResponseInputParam, ResponseInputItemParam
9
+ from openai.types.responses.response_output_item import ResponseOutputItem
10
+ from openai.types.shared_params.response_format_json_schema import ResponseFormatJSONSchema
11
+ from pydantic import BaseModel
12
+
13
+ from ..._resource import AsyncAPIResource, SyncAPIResource
14
+ from ..._utils.ai_models import assert_valid_model_extraction
15
+ from ..._utils.json_schema import load_json_schema, unflatten_dict
16
+ from ..._utils.responses import convert_to_openai_format, convert_from_openai_format, parse_openai_responses_response
17
+ from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
18
+ from ...types.chat import ChatCompletionUiformMessage
19
+ from ...types.completions import UiChatResponseCreateRequest, UiChatCompletionsRequest
20
+ from ...types.documents.extractions import UiParsedChatCompletion, UiParsedChatCompletionChunk, UiParsedChoice, UiResponse
21
+ from ...types.standards import PreparedRequest
22
+ from ...types.schemas.object import Schema
23
+
24
+ from typing import Optional, Union
25
+ from openai.types.shared_params.reasoning import Reasoning
26
+ from openai.types.responses.response_input_param import ResponseInputParam
27
+ from openai.types.responses.response_text_config_param import ResponseTextConfigParam
28
+ from openai.types.shared_params.response_format_json_schema import ResponseFormatJSONSchema
29
+
30
+ T = TypeVar('T', bound=BaseModel)
31
+
32
class BaseResponsesMixin:
    """Shared request preparation for the sync and async Responses resources.

    Both ``Responses`` and ``AsyncResponses`` delegate to these helpers to build
    a validated ``PreparedRequest`` targeting the ``/v1/responses`` endpoint.
    """

    def prepare_create(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text: ResponseTextConfigParam,
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> PreparedRequest:
        """Prepare a request for the Responses API create method.

        Args:
            model: The model to use.
            input: The input text or message array.
            text: The response format configuration; ``text["format"]["schema"]``
                is required and is used to build the JSON schema sent upstream.
            temperature: Model temperature setting (0-1).
            reasoning: The effort level for the model to reason about the input data.
            n_consensus: Number of consensus models to use.
            instructions: Optional system instructions; defaults to the schema's
                developer system prompt when omitted.
            idempotency_key: Idempotency key for the request.

        Returns:
            PreparedRequest: A POST request for ``/v1/responses``.
        """
        assert_valid_model_extraction(model)

        text_format = text.get("format", None)
        assert text_format is not None, "text.format is required"
        json_schema = text_format.get("schema", None)
        assert json_schema is not None, "text.format.schema is required"

        schema_obj = Schema(json_schema=json_schema)

        # Fall back to the schema-derived system prompt when the caller did not
        # provide explicit instructions.
        if instructions is None:
            instructions = schema_obj.developer_system_prompt

        # Constructing the pydantic model already validates the payload, so no
        # extra model_validate() round-trip is needed.
        ui_chat_response_create_request = UiChatResponseCreateRequest(
            model=model,
            input=input,
            temperature=temperature,
            stream=False,
            reasoning=reasoning,
            n_consensus=n_consensus,
            text={
                "format": {
                    "type": "json_schema",
                    "name": schema_obj.id,
                    "schema": schema_obj.inference_json_schema,
                    "strict": True,
                }
            },
            instructions=instructions,
        )

        return PreparedRequest(
            method="POST",
            url="/v1/responses",
            data=ui_chat_response_create_request.model_dump(),
            idempotency_key=idempotency_key,
        )

    def prepare_parse(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text_format: type[BaseModel],
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> PreparedRequest:
        """Prepare a request for the Responses API parse method.

        Args:
            model: The model to use.
            input: The input text or message array.
            text_format: The pydantic model defining the expected output format.
            temperature: Model temperature setting (0-1).
            reasoning: The effort level for the model to reason about the input data.
            n_consensus: Number of consensus models to use.
            instructions: Optional system instructions; defaults to the schema's
                developer system prompt when omitted.
            idempotency_key: Idempotency key for the request.

        Returns:
            PreparedRequest: A POST request for ``/v1/responses``.
        """
        assert_valid_model_extraction(model)

        schema_obj = Schema(pydantic_model=text_format)

        if instructions is None:
            instructions = schema_obj.developer_system_prompt

        ui_chat_response_create_request = UiChatResponseCreateRequest(
            model=model,
            input=input,
            temperature=temperature,
            stream=False,
            reasoning=reasoning,
            n_consensus=n_consensus,
            text={
                "format": {
                    "type": "json_schema",
                    "name": schema_obj.id,
                    "schema": schema_obj.inference_json_schema,
                    "strict": True,
                }
            },
            instructions=instructions,
        )

        # The unreachable trailing return targeting "/v1/completions" (which
        # referenced an undefined `ui_chat_completions_request`) was removed.
        return PreparedRequest(
            method="POST",
            url="/v1/responses",
            data=ui_chat_response_create_request.model_dump(),
            idempotency_key=idempotency_key,
        )
146
+
147
+
148
class Responses(SyncAPIResource, BaseResponsesMixin):
    """UiForm Responses API compatible with OpenAI Responses API"""

    def create(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text: ResponseTextConfigParam,
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> UiResponse:
        """
        Create a completion using the UiForm API with OpenAI Responses API compatible interface.

        Args:
            model: The model to use
            input: The input text or message array
            text: The response format configuration
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            UiResponse: OpenAI Responses API compatible response
        """
        request = self.prepare_create(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            text=text,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        result = self._client._prepared_request(request)
        # Validate the raw payload into the UiForm response model.
        response = UiResponse.model_validate(result)

        return response

    def parse(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text_format: type[T],
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> UiResponse:
        """
        Parse content using the UiForm API with OpenAI Responses API compatible interface.

        Args:
            model: The model to use
            input: The input text or message array
            text_format: The Pydantic model defining the expected output format
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            UiResponse: OpenAI Responses API compatible response with parsed content
        """
        request = self.prepare_parse(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            text_format=text_format,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        result = self._client._prepared_request(request)
        # Validate the raw payload into the UiForm response model.
        response = UiResponse.model_validate(result)

        return response
235
+
236
+
237
class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
    """UiForm Responses API compatible with OpenAI Responses API for async usage"""

    async def create(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text: ResponseTextConfigParam,
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> UiResponse:
        """
        Create a completion using the UiForm API asynchronously with OpenAI Responses API compatible interface.

        Args:
            model: The model to use
            input: The input text or message array
            text: The response format configuration
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            UiResponse: OpenAI Responses API compatible response
        """
        # Build the validated request payload via the shared mixin helper.
        prepared = self.prepare_create(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            text=text,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        raw = await self._client._prepared_request(prepared)
        return UiResponse.model_validate(raw)

    async def parse(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text_format: type[BaseModel],
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> UiResponse:
        """
        Parse content using the UiForm API asynchronously with OpenAI Responses API compatible interface.

        Args:
            model: The model to use
            input: The input text or message array
            text_format: The Pydantic model defining the expected output format
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            UiResponse: OpenAI Responses API compatible response with parsed content
        """
        # Build the validated request payload via the shared mixin helper.
        prepared = self.prepare_parse(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            text_format=text_format,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        raw = await self._client._prepared_request(prepared)
        return UiResponse.model_validate(raw)
325
+
@@ -0,0 +1,373 @@
1
+ import json
2
+ from pathlib import Path
3
+ from typing import Any, AsyncGenerator, Generator, TypeVar, Generic, Optional, Union, List, Sequence, cast
4
+
5
+ from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
6
+ from openai.types.chat.parsed_chat_completion import ParsedChatCompletionMessage
7
+ from openai.types.responses.response import Response
8
+ from openai.types.responses.response_input_param import ResponseInputParam, ResponseInputItemParam
9
+ from openai.types.responses.response_output_item import ResponseOutputItem
10
+ from openai.types.shared_params.response_format_json_schema import ResponseFormatJSONSchema
11
+ from pydantic import BaseModel
12
+
13
+ from ..._resource import AsyncAPIResource, SyncAPIResource
14
+ from ..._utils.ai_models import assert_valid_model_extraction
15
+ from ..._utils.json_schema import load_json_schema, unflatten_dict
16
+ from ..._utils.responses import convert_to_openai_format, convert_from_openai_format, parse_openai_responses_response
17
+ from ..._utils.stream_context_managers import as_async_context_manager, as_context_manager
18
+ from ...types.chat import ChatCompletionUiformMessage
19
+ from ...types.completions import UiChatResponseCreateRequest, UiChatCompletionsRequest
20
+ from ...types.documents.extractions import UiParsedChatCompletion, UiParsedChatCompletionChunk, UiParsedChoice, UiResponse
21
+ from ...types.standards import PreparedRequest
22
+ from ...types.schemas.object import Schema
23
+
24
+ from typing import Optional, Union
25
+ from openai.types.shared_params.reasoning import Reasoning
26
+ from openai.types.responses.response_input_param import ResponseInputParam
27
+ from openai.types.responses.response_text_config_param import ResponseTextConfigParam
28
+ from openai.types.shared_params.response_format_json_schema import ResponseFormatJSONSchema
29
+
30
+ T = TypeVar('T', bound=BaseModel)
31
+
32
class BaseResponsesMixin:
    """Shared request preparation for the sync and async streaming Responses resources.

    Both ``Responses`` and ``AsyncResponses`` delegate to these helpers to build
    a validated ``PreparedRequest`` targeting the ``/v1/responses`` endpoint.
    """

    def prepare_create(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text: ResponseTextConfigParam,
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        stream: bool = False,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> PreparedRequest:
        """Prepare a request for the Responses API create method.

        Args:
            model: The model to use.
            input: The input text or message array.
            text: The response format configuration; ``text["format"]["schema"]``
                is required and is used to build the JSON schema sent upstream.
            temperature: Model temperature setting (0-1).
            reasoning: The effort level for the model to reason about the input data.
            stream: Whether the server should stream the response.
            n_consensus: Number of consensus models to use.
            instructions: Optional system instructions; defaults to the schema's
                developer system prompt when omitted.
            idempotency_key: Idempotency key for the request.

        Returns:
            PreparedRequest: A POST request for ``/v1/responses``.
        """
        assert_valid_model_extraction(model)

        text_format = text.get("format", None)
        assert text_format is not None, "text.format is required"
        json_schema = text_format.get("schema", None)
        assert json_schema is not None, "text.format.schema is required"

        schema_obj = Schema(json_schema=json_schema)

        # Fall back to the schema-derived system prompt when the caller did not
        # provide explicit instructions.
        if instructions is None:
            instructions = schema_obj.developer_system_prompt

        # Constructing the pydantic model already validates the payload, so no
        # extra model_validate() round-trip is needed.
        ui_chat_response_create_request = UiChatResponseCreateRequest(
            model=model,
            input=input,
            temperature=temperature,
            stream=stream,
            reasoning=reasoning,
            n_consensus=n_consensus,
            text={
                "format": {
                    "type": "json_schema",
                    "name": schema_obj.id,
                    "schema": schema_obj.inference_json_schema,
                    "strict": True,
                }
            },
            instructions=instructions,
        )

        return PreparedRequest(
            method="POST",
            url="/v1/responses",
            data=ui_chat_response_create_request.model_dump(),
            idempotency_key=idempotency_key,
        )

    def prepare_parse(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text_format: type[BaseModel],
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        stream: bool = False,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> PreparedRequest:
        """Prepare a request for the Responses API parse method.

        Args:
            model: The model to use.
            input: The input text or message array.
            text_format: The pydantic model defining the expected output format.
            temperature: Model temperature setting (0-1).
            reasoning: The effort level for the model to reason about the input data.
            stream: Whether the server should stream the response.
            n_consensus: Number of consensus models to use.
            instructions: Optional system instructions; defaults to the schema's
                developer system prompt when omitted.
            idempotency_key: Idempotency key for the request.

        Returns:
            PreparedRequest: A POST request for ``/v1/responses``.
        """
        assert_valid_model_extraction(model)

        schema_obj = Schema(pydantic_model=text_format)

        if instructions is None:
            instructions = schema_obj.developer_system_prompt

        ui_chat_response_create_request = UiChatResponseCreateRequest(
            model=model,
            input=input,
            temperature=temperature,
            stream=stream,
            reasoning=reasoning,
            n_consensus=n_consensus,
            text={
                "format": {
                    "type": "json_schema",
                    "name": schema_obj.id,
                    "schema": schema_obj.inference_json_schema,
                    "strict": True,
                }
            },
            instructions=instructions,
        )

        # The unreachable trailing return targeting "/v1/completions" (which
        # referenced an undefined `ui_chat_completions_request`) was removed.
        return PreparedRequest(
            method="POST",
            url="/v1/responses",
            data=ui_chat_response_create_request.model_dump(),
            idempotency_key=idempotency_key,
        )
148
+
149
+
150
class Responses(SyncAPIResource, BaseResponsesMixin):
    """UiForm Responses API compatible with OpenAI Responses API"""

    @as_context_manager
    def stream(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text: ResponseTextConfigParam,
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> Generator[UiResponse, None, None]:
        """
        Create a completion using the UiForm API with streaming enabled.

        Args:
            model: The model to use
            input: The input text or message array
            text: The response format configuration
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            Generator[UiResponse]: Stream of responses

        Usage:
        ```python
        with uiform.responses.stream(model, input, text, temperature, reasoning) as stream:
            for response in stream:
                print(response)
        ```
        """
        prepared = self.prepare_create(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            stream=True,
            text=text,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        # Yield one validated response per non-empty chunk; the decorator
        # wraps this generator in a context manager for the caller.
        for raw in self._client._prepared_request_stream(prepared):
            if raw:
                yield UiResponse.model_validate(raw)

    @as_context_manager
    def stream_parse(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text_format: type[T],
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> Generator[UiResponse, None, None]:
        """
        Parse content using the UiForm API with streaming enabled.

        Args:
            model: The model to use
            input: The input text or message array
            text_format: The Pydantic model defining the expected output format
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            Generator[UiResponse]: Stream of parsed responses

        Usage:
        ```python
        with uiform.responses.stream_parse(model, input, MyModel, temperature, reasoning) as stream:
            for response in stream:
                print(response)
        ```
        """
        prepared = self.prepare_parse(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            stream=True,
            text_format=text_format,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        # Yield one validated response per non-empty chunk; the decorator
        # wraps this generator in a context manager for the caller.
        for raw in self._client._prepared_request_stream(prepared):
            if raw:
                yield UiResponse.model_validate(raw)
260
+
261
+
262
+
263
class AsyncResponses(AsyncAPIResource, BaseResponsesMixin):
    """UiForm Responses API compatible with OpenAI Responses API for async usage"""

    @as_async_context_manager
    async def stream(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text: ResponseTextConfigParam,
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> AsyncGenerator[UiResponse, None]:
        """
        Create a completion using the UiForm API asynchronously with streaming enabled.

        Args:
            model: The model to use
            input: The input text or message array
            text: The response format configuration
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            AsyncGenerator[UiResponse]: Async stream of responses

        Usage:
        ```python
        async with uiform.responses.async_stream(model, input, text, temperature, reasoning) as stream:
            async for response in stream:
                print(response)
        ```
        """
        prepared = self.prepare_create(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            stream=True,
            text=text,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        # Yield one validated response per non-empty chunk; the decorator
        # wraps this async generator in an async context manager.
        async for raw in self._client._prepared_request_stream(prepared):
            if raw:
                yield UiResponse.model_validate(raw)

    @as_async_context_manager
    async def stream_parse(
        self,
        model: str,
        input: Union[str, ResponseInputParam],
        text_format: type[T],
        temperature: float = 0,
        reasoning: Optional[Reasoning] = None,
        n_consensus: int = 1,
        instructions: Optional[str] = None,
        idempotency_key: Optional[str] = None,
    ) -> AsyncGenerator[UiResponse, None]:
        """
        Parse content using the UiForm API asynchronously with streaming enabled.

        Args:
            model: The model to use
            input: The input text or message array
            text_format: The Pydantic model defining the expected output format
            temperature: Model temperature setting (0-1)
            reasoning: The effort level for the model to reason about the input data
            n_consensus: Number of consensus models to use
            instructions: Optional system instructions
            idempotency_key: Idempotency key for request

        Returns:
            AsyncGenerator[UiResponse]: Async stream of parsed responses

        Usage:
        ```python
        async with uiform.responses.async_stream_parse(model, input, MyModel, temperature, reasoning) as stream:
            async for response in stream:
                print(response)
        ```
        """
        prepared = self.prepare_parse(
            model=model,
            input=input,
            temperature=temperature,
            reasoning=reasoning,
            stream=True,
            text_format=text_format,
            instructions=instructions,
            n_consensus=n_consensus,
            idempotency_key=idempotency_key,
        )

        # Yield one validated response per non-empty chunk; the decorator
        # wraps this async generator in an async context manager.
        async for raw in self._client._prepared_request_stream(prepared):
            if raw:
                yield UiResponse.model_validate(raw)
373
+
@@ -0,0 +1,9 @@
1
"""Public entry points for the deployments resource package."""

from .client import AsyncDeployments, Deployments

__all__ = [
    "Deployments",
    "AsyncDeployments",
]
+ ]