retab 0.0.36-py3-none-any.whl → 0.0.38-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. retab/__init__.py +4 -0
  2. {uiform → retab}/_resource.py +5 -5
  3. {uiform → retab}/_utils/ai_models.py +2 -2
  4. {uiform → retab}/_utils/benchmarking.py +15 -16
  5. {uiform → retab}/_utils/chat.py +29 -34
  6. {uiform → retab}/_utils/display.py +0 -3
  7. {uiform → retab}/_utils/json_schema.py +9 -14
  8. {uiform → retab}/_utils/mime.py +11 -14
  9. {uiform → retab}/_utils/responses.py +16 -10
  10. {uiform → retab}/_utils/stream_context_managers.py +1 -1
  11. {uiform → retab}/_utils/usage/usage.py +31 -31
  12. {uiform → retab}/client.py +54 -53
  13. {uiform → retab}/resources/consensus/client.py +19 -38
  14. {uiform → retab}/resources/consensus/completions.py +36 -59
  15. {uiform → retab}/resources/consensus/completions_stream.py +35 -47
  16. {uiform → retab}/resources/consensus/responses.py +37 -86
  17. {uiform → retab}/resources/consensus/responses_stream.py +41 -89
  18. retab/resources/documents/client.py +455 -0
  19. {uiform → retab}/resources/documents/extractions.py +192 -101
  20. {uiform → retab}/resources/evals.py +56 -43
  21. retab/resources/evaluations/__init__.py +3 -0
  22. retab/resources/evaluations/client.py +301 -0
  23. retab/resources/evaluations/documents.py +233 -0
  24. retab/resources/evaluations/iterations.py +452 -0
  25. {uiform → retab}/resources/files.py +2 -2
  26. {uiform → retab}/resources/jsonlUtils.py +225 -221
  27. retab/resources/models.py +73 -0
  28. retab/resources/processors/automations/client.py +244 -0
  29. {uiform → retab}/resources/processors/automations/endpoints.py +79 -120
  30. retab/resources/processors/automations/links.py +294 -0
  31. {uiform → retab}/resources/processors/automations/logs.py +30 -19
  32. retab/resources/processors/automations/mailboxes.py +397 -0
  33. retab/resources/processors/automations/outlook.py +337 -0
  34. {uiform → retab}/resources/processors/automations/tests.py +22 -25
  35. {uiform → retab}/resources/processors/client.py +181 -166
  36. {uiform → retab}/resources/schemas.py +78 -66
  37. {uiform → retab}/resources/secrets/external_api_keys.py +1 -5
  38. retab/resources/secrets/webhook.py +64 -0
  39. {uiform → retab}/resources/usage.py +41 -4
  40. {uiform → retab}/types/ai_models.py +17 -17
  41. {uiform → retab}/types/automations/cron.py +19 -12
  42. {uiform → retab}/types/automations/endpoints.py +7 -4
  43. {uiform → retab}/types/automations/links.py +7 -3
  44. {uiform → retab}/types/automations/mailboxes.py +10 -10
  45. {uiform → retab}/types/automations/outlook.py +15 -11
  46. {uiform → retab}/types/automations/webhooks.py +1 -1
  47. retab/types/browser_canvas.py +3 -0
  48. retab/types/chat.py +8 -0
  49. {uiform → retab}/types/completions.py +12 -15
  50. retab/types/consensus.py +19 -0
  51. {uiform → retab}/types/db/annotations.py +3 -3
  52. {uiform → retab}/types/db/files.py +8 -6
  53. {uiform → retab}/types/documents/create_messages.py +20 -22
  54. {uiform → retab}/types/documents/extractions.py +71 -26
  55. {uiform → retab}/types/evals.py +5 -5
  56. retab/types/evaluations/__init__.py +31 -0
  57. retab/types/evaluations/documents.py +30 -0
  58. retab/types/evaluations/iterations.py +112 -0
  59. retab/types/evaluations/model.py +73 -0
  60. retab/types/events.py +79 -0
  61. {uiform → retab}/types/extractions.py +36 -13
  62. retab/types/inference_settings.py +15 -0
  63. retab/types/jobs/base.py +54 -0
  64. retab/types/jobs/batch_annotation.py +12 -0
  65. {uiform → retab}/types/jobs/evaluation.py +1 -2
  66. {uiform → retab}/types/logs.py +37 -34
  67. retab/types/metrics.py +32 -0
  68. {uiform → retab}/types/mime.py +22 -20
  69. {uiform → retab}/types/modalities.py +10 -10
  70. retab/types/predictions.py +19 -0
  71. {uiform → retab}/types/schemas/enhance.py +4 -2
  72. {uiform → retab}/types/schemas/evaluate.py +7 -4
  73. {uiform → retab}/types/schemas/generate.py +6 -3
  74. {uiform → retab}/types/schemas/layout.py +1 -1
  75. {uiform → retab}/types/schemas/object.py +16 -17
  76. {uiform → retab}/types/schemas/templates.py +1 -3
  77. {uiform → retab}/types/secrets/external_api_keys.py +0 -1
  78. {uiform → retab}/types/standards.py +18 -1
  79. {retab-0.0.36.dist-info → retab-0.0.38.dist-info}/METADATA +78 -77
  80. retab-0.0.38.dist-info/RECORD +107 -0
  81. retab-0.0.38.dist-info/top_level.txt +1 -0
  82. retab-0.0.36.dist-info/RECORD +0 -96
  83. retab-0.0.36.dist-info/top_level.txt +0 -1
  84. uiform/__init__.py +0 -4
  85. uiform/_utils/benchmarking copy.py +0 -588
  86. uiform/resources/documents/client.py +0 -255
  87. uiform/resources/models.py +0 -45
  88. uiform/resources/processors/automations/client.py +0 -78
  89. uiform/resources/processors/automations/links.py +0 -356
  90. uiform/resources/processors/automations/mailboxes.py +0 -435
  91. uiform/resources/processors/automations/outlook.py +0 -444
  92. uiform/resources/secrets/webhook.py +0 -62
  93. uiform/types/chat.py +0 -8
  94. uiform/types/consensus.py +0 -10
  95. uiform/types/events.py +0 -76
  96. uiform/types/jobs/base.py +0 -150
  97. uiform/types/jobs/batch_annotation.py +0 -22
  98. {uiform → retab}/_utils/__init__.py +0 -0
  99. {uiform → retab}/_utils/usage/__init__.py +0 -0
  100. {uiform → retab}/py.typed +0 -0
  101. {uiform → retab}/resources/__init__.py +0 -0
  102. {uiform → retab}/resources/consensus/__init__.py +0 -0
  103. {uiform → retab}/resources/documents/__init__.py +0 -0
  104. {uiform → retab}/resources/finetuning.py +0 -0
  105. {uiform → retab}/resources/openai_example.py +0 -0
  106. {uiform → retab}/resources/processors/__init__.py +0 -0
  107. {uiform → retab}/resources/processors/automations/__init__.py +0 -0
  108. {uiform → retab}/resources/prompt_optimization.py +0 -0
  109. {uiform → retab}/resources/secrets/__init__.py +0 -0
  110. {uiform → retab}/resources/secrets/client.py +0 -0
  111. {uiform → retab}/types/__init__.py +0 -0
  112. {uiform → retab}/types/automations/__init__.py +0 -0
  113. {uiform → retab}/types/db/__init__.py +0 -0
  114. {uiform → retab}/types/documents/__init__.py +0 -0
  115. {uiform → retab}/types/documents/correct_orientation.py +0 -0
  116. {uiform → retab}/types/jobs/__init__.py +0 -0
  117. {uiform → retab}/types/jobs/finetune.py +0 -0
  118. {uiform → retab}/types/jobs/prompt_optimization.py +0 -0
  119. {uiform → retab}/types/jobs/webcrawl.py +0 -0
  120. {uiform → retab}/types/pagination.py +0 -0
  121. {uiform → retab}/types/schemas/__init__.py +0 -0
  122. {uiform → retab}/types/secrets/__init__.py +0 -0
  123. {retab-0.0.36.dist-info → retab-0.0.38.dist-info}/WHEEL +0 -0
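The dominant change in this release is the rename of the top-level package from uiform to retab: every module in the list above moves to the new import path, and Uiform-named classes follow (for example ChatCompletionUiformMessage becomes ChatCompletionRetabMessage, as the schemas/object.py hunks below show). A minimal sketch of the import change, assuming only the package prefix differs for modules that were moved unchanged:

# 0.0.36 and earlier (old top-level package):
# from uiform.types.mime import MIMEData

# 0.0.38 (same module, new top-level package):
from retab.types.mime import MIMEData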
@@ -58,7 +58,7 @@ class TextBox(BaseModel):
     vertices: tuple[Point, Point, Point, Point] = Field(description="(top-left, top-right, bottom-right, bottom-left)")
     text: str
 
-    @field_validator('width', 'height')
+    @field_validator("width", "height")
     @classmethod
     def check_positive_dimensions(cls, v: int) -> int:
         if not isinstance(v, int) or v <= 0:
@@ -76,7 +76,7 @@ class Page(BaseModel):
     tokens: list[TextBox]
     transforms: list[Matrix] = Field(default=[], description="Transformation matrices applied to the original document image")
 
-    @field_validator('width', 'height')
+    @field_validator("width", "height")
     @classmethod
     def check_positive_dimensions(cls, v: int) -> int:
         if not isinstance(v, int) or v <= 0:
@@ -98,21 +98,21 @@ class MIMEData(BaseModel):
 
     @property
     def extension(self) -> str:
-        return self.filename.split('.')[-1].lower()
+        return self.filename.split(".")[-1].lower()
 
     @property
     def content(self) -> str:
-        if self.url.startswith('data:'):
+        if self.url.startswith("data:"):
             # Extract base64 content from data URL
-            base64_content = self.url.split(',')[1]
+            base64_content = self.url.split(",")[1]
             return base64_content
         else:
             raise ValueError("Content is not available for this file")
 
     @property
     def mime_type(self) -> str:
-        if self.url.startswith('data:'):
-            return self.url.split(';')[0].split(':')[1]
+        if self.url.startswith("data:"):
+            return self.url.split(";")[0].split(":")[1]
         else:
             return mimetypes.guess_type(self.filename)[0] or "application/octet-stream"
 
@@ -126,7 +126,7 @@ class MIMEData(BaseModel):
         return len(base64.b64decode(self.content))
 
     def __str__(self) -> str:
-        truncated_url = self.url[:50] + '...' if len(self.url) > 50 else self.url
+        truncated_url = self.url[:50] + "..." if len(self.url) > 50 else self.url
         # truncated_content = self.content[:50] + '...' if len(self.content) > 50 else self.content
         return f"MIMEData(filename='{self.filename}', url='{truncated_url}', mime_type='{self.mime_type}', size='{self.size}', extension='{self.extension}')"
 
@@ -136,34 +136,36 @@ class MIMEData(BaseModel):
 
 class BaseMIMEData(MIMEData):
     @classmethod
-    def model_validate(cls, obj: Any, *, strict: bool | None = None, from_attributes: bool | None = None, context: Any | None = None) -> Self:
+    def model_validate(
+        cls, obj: Any, *, strict: bool | None = None, from_attributes: bool | None = None, context: Any | None = None, by_alias: bool | None = None, by_name: bool | None = None
+    ) -> Self:
         if isinstance(obj, MIMEData):
             # Convert MIMEData instance to dict
             obj = obj.model_dump()
-        if isinstance(obj, dict) and 'url' in obj:
+        if isinstance(obj, dict) and "url" in obj:
             # Truncate URL to 1000 chars or less, ensuring it's a valid base64 string
-            if len(obj['url']) > 1000:
+            if len(obj["url"]) > 1000:
                 # Find the position of the base64 data
-                if ',' in obj['url']:
-                    prefix, base64_data = obj['url'].split(',', 1)
+                if "," in obj["url"]:
+                    prefix, base64_data = obj["url"].split(",", 1)
                     # Calculate how many characters we can keep (must be a multiple of 4)
                     max_base64_len = 1000 - len(prefix) - 1 # -1 for the comma
                     # Ensure the length is a multiple of 4
                     max_base64_len = max_base64_len - (max_base64_len % 4)
                     # Truncate and reassemble
-                    obj['url'] = prefix + ',' + base64_data[:max_base64_len]
+                    obj["url"] = prefix + "," + base64_data[:max_base64_len]
                 else:
                     # If there's no comma (unexpected format), truncate to 996 chars (multiple of 4)
-                    obj['url'] = obj['url'][:996]
-        return super().model_validate(obj, strict=strict, from_attributes=from_attributes, context=context)
+                    obj["url"] = obj["url"][:996]
+        return super().model_validate(obj, strict=strict, from_attributes=from_attributes, context=context, by_alias=by_alias, by_name=by_name)
 
     @property
     def id(self) -> str:
         raise NotImplementedError("id is not implemented for BaseMIMEData - id is the hash of the content, so it's not possible to generate it from the base class")
 
     def __str__(self) -> str:
-        truncated_url = self.url[:50] + '...' if len(self.url) > 50 else self.url
-        truncated_content = self.content[:50] + '...' if len(self.content) > 50 else self.content
+        truncated_url = self.url[:50] + "..." if len(self.url) > 50 else self.url
+        truncated_content = self.content[:50] + "..." if len(self.content) > 50 else self.content
         return f"BaseMIMEData(filename='{self.filename}', url='{truncated_url}', content='{truncated_content}', mime_type='{self.mime_type}', extension='{self.extension}')"
 
     def __repr__(self) -> str:
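The model_validate override above shortens oversized data: URLs while keeping the base64 payload decodable, which is why the truncation length is forced to a multiple of 4. A small standalone illustration of that constraint, using only the standard library (sample data, not from the package):

import base64

# A long data URL payload of the kind MIMEData.url holds (contents made up).
prefix = "data:application/pdf;base64"
payload = base64.b64encode(b"retab " * 400).decode()

keep = 1000 - len(prefix) - 1          # room left after the prefix and the comma
keep -= keep % 4                       # round down to a multiple of 4, as the hunk does

base64.b64decode(payload[:keep])       # decodes cleanly
try:
    base64.b64decode(payload[:keep + 1], validate=True)
except Exception as exc:               # off a 4-char boundary -> padding error
    print("truncating off a 4-char boundary fails:", exc)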
@@ -227,7 +229,7 @@ class BaseEmailData(BaseModel):
 
     @property
     def unique_filename(self) -> str:
-        cleaned_id = re.sub(r'[\s<>]', '', self.id)
+        cleaned_id = re.sub(r"[\s<>]", "", self.id)
         return f"{cleaned_id}.eml"
 
     def __repr__(self) -> str:
@@ -235,7 +237,7 @@ class BaseEmailData(BaseModel):
         attachment_count = len(self.attachments)
 
         subject_preview = self.subject
-        body_preview = self.body_plain[:5000] + '...' if self.body_plain and len(self.body_plain) > 5000 else self.body_plain
+        body_preview = self.body_plain[:5000] + "..." if self.body_plain and len(self.body_plain) > 5000 else self.body_plain
 
         return (
             f"BaseEmailData("
@@ -4,16 +4,16 @@ BaseModality = Literal["text", "image"] # "video" , "audio"
 Modality = Literal[BaseModality, "native", "image+text"]
 TYPE_FAMILIES = Literal["excel", "word", "powerpoint", "pdf", "image", "text", "email", "audio", "html", "web"]
 NativeModalities: dict[TYPE_FAMILIES, Modality] = {
-    'excel': 'image',
-    'word': 'image',
-    'html': 'text',
-    'powerpoint': 'image',
-    'pdf': 'image',
-    'image': 'image',
-    'web': 'image',
-    'text': 'text',
-    'email': 'native',
-    'audio': 'text',
+    "excel": "image",
+    "word": "image",
+    "html": "text",
+    "powerpoint": "image",
+    "pdf": "image",
+    "image": "image",
+    "web": "image",
+    "text": "text",
+    "email": "native",
+    "audio": "text",
 }
 
 EXCEL_TYPES = Literal[".xls", ".xlsx", ".ods"]
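For reference, NativeModalities is presumably the lookup table the SDK consults when a document is loaded with the "native" modality: each file-type family maps to the modality it is actually rendered as. A quick illustrative lookup; the import path is inferred from entry 69 in the file list above:

from retab.types.modalities import NativeModalities  # path inferred from the file list

print(NativeModalities["pdf"])    # "image"  -> PDFs are rendered to images
print(NativeModalities["text"])   # "text"
print(NativeModalities["email"])  # "native" -> e-mails keep their own handling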
@@ -0,0 +1,19 @@
+import datetime
+from typing import Any, Optional
+from pydantic import BaseModel, Field
+from .ai_models import Amount
+
+
+class PredictionMetadata(BaseModel):
+    extraction_id: Optional[str] = Field(default=None, description="The ID of the extraction")
+    likelihoods: Optional[dict[str, Any]] = Field(default=None, description="The likelihoods of the extraction")
+    field_locations: Optional[dict[str, Any]] = Field(default=None, description="The field locations of the extraction")
+    agentic_field_locations: Optional[dict[str, Any]] = Field(default=None, description="The field locations of the extraction extracted by an llm")
+    consensus_details: Optional[list[dict[str, Any]]] = Field(default=None, description="The consensus details of the extraction")
+    api_cost: Optional[Amount] = Field(default=None, description="The cost of the API call for this document (if any -- ground truth for example)")
+
+
+class PredictionData(BaseModel):
+    prediction: dict[str, Any] = Field(default={}, description="The result of the extraction or manual annotation")
+    metadata: Optional[PredictionMetadata] = Field(default=None, description="The metadata of the prediction")
+    updated_at: Optional[datetime.datetime] = Field(default=None, description="The creation date of the prediction")
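retab/types/predictions.py is one of the new modules in 0.0.38. A short usage sketch of how the two models compose; only the field names come from the file above, the values are invented:

import datetime
from retab.types.predictions import PredictionData, PredictionMetadata  # new module in 0.0.38

metadata = PredictionMetadata(
    extraction_id="extr_123",                 # sample ID
    likelihoods={"invoice_number": 0.97},     # per-field confidence
)
prediction = PredictionData(
    prediction={"invoice_number": "INV-001"},  # extracted payload
    metadata=metadata,
    updated_at=datetime.datetime.now(datetime.timezone.utc),
)
print(prediction.model_dump_json(indent=2))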
@@ -1,9 +1,11 @@
-from typing import Any, Self, TypedDict, Literal
+from typing import Any, Self, TypedDict
+
 from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
 from pydantic import BaseModel, Field, model_validator
 
 from ..mime import MIMEData
 from ..modalities import Modality
+from ..browser_canvas import BrowserCanvas
 
 
 class EnhanceSchemaConfig(BaseModel):
@@ -40,7 +42,7 @@ class EnhanceSchemaRequest(BaseModel):
     """The modality of the document to load."""
 
     image_resolution_dpi: int = 96
-    browser_canvas: Literal['A3', 'A4', 'A5'] = 'A4'
+    browser_canvas: BrowserCanvas = "A4"
     """The image operations to apply to the document."""
 
     stream: bool = False
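This hunk, and the EvaluateSchemaRequest and GenerateSchemaRequest hunks that follow, replace the inline Literal['A3', 'A4', 'A5'] annotation with the shared BrowserCanvas type from the new retab/types/browser_canvas.py (+3 lines in the file list). That file is not shown in this diff; given the annotation it replaces, it is presumably just a literal alias along these lines (an assumption, not the actual file contents):

# Hypothetical reconstruction of retab/types/browser_canvas.py -- not shown in this diff.
from typing import Literal

BrowserCanvas = Literal["A3", "A4", "A5"]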
@@ -1,9 +1,12 @@
+from typing import Any, Self
+
+from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
 from pydantic import BaseModel, Field, model_validator
-from typing import Any, Literal, Self
+
+from ..evals import ItemMetric
 from ..mime import MIMEData
 from ..modalities import Modality
-from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
-from ..evals import ItemMetric
+from ..browser_canvas import BrowserCanvas
 
 
 class EvaluateSchemaRequest(BaseModel):
@@ -21,7 +24,7 @@ class EvaluateSchemaRequest(BaseModel):
     reasoning_effort: ChatCompletionReasoningEffort = "medium"
     modality: Modality
     image_resolution_dpi: int = Field(default=96, description="Resolution of the image sent to the LLM")
-    browser_canvas: Literal["A3", "A4", "A5"] = Field(
+    browser_canvas: BrowserCanvas = Field(
         default="A4", description="Sets the size of the browser canvas for rendering documents in browser-based processing. Choose a size that matches the document type."
     )
     n_consensus: int = 1
@@ -1,9 +1,11 @@
-from typing import Any, Literal
+from typing import Any
+
 from openai.types.chat.chat_completion_reasoning_effort import ChatCompletionReasoningEffort
 from pydantic import BaseModel, Field
 
 from ..mime import MIMEData
 from ..modalities import Modality
+from ..browser_canvas import BrowserCanvas
 
 
 class GenerateSchemaRequest(BaseModel):
@@ -15,7 +17,9 @@ class GenerateSchemaRequest(BaseModel):
     instructions: str | None = None
     """The modality of the document to load."""
     image_resolution_dpi: int = Field(default=96, description="Resolution of the image sent to the LLM")
-    browser_canvas: Literal['A3', 'A4', 'A5'] = Field(default='A4', description="Sets the size of the browser canvas for rendering documents in browser-based processing. Choose a size that matches the document type.")
+    browser_canvas: BrowserCanvas = Field(
+        default="A4", description="Sets the size of the browser canvas for rendering documents in browser-based processing. Choose a size that matches the document type."
+    )
 
     """The image operations to apply to the document."""
 
@@ -29,4 +33,3 @@ class GenerateSystemPromptRequest(GenerateSchemaRequest):
     """
 
     json_schema: dict[str, Any]
-
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Dict, List, Literal, Optional, Union
 
 from pydantic import BaseModel
 from pydantic import Field as PydanticField
@@ -2,10 +2,9 @@ import copy
 import datetime
 import json
 from pathlib import Path
-from typing import Any, Iterable, Literal, Self, cast
+from typing import Any, Literal, Self
 
 from anthropic.types.message_param import MessageParam
-
 from google.genai.types import ContentUnionDict # type: ignore
 from openai.types.chat.chat_completion_message_param import ChatCompletionMessageParam
 from openai.types.responses.response_input_param import ResponseInputItemParam
@@ -28,7 +27,7 @@ from ..._utils.json_schema import (
 )
 from ..._utils.responses import convert_to_openai_format as convert_to_openai_responses_api_format
 from ...types.standards import StreamingBaseModel
-from ..chat import ChatCompletionUiformMessage
+from ..chat import ChatCompletionRetabMessage
 
 
 class PartialSchema(BaseModel):
@@ -220,7 +219,7 @@ class Schema(PartialSchema):
 
     @property
     def developer_system_prompt(self) -> str:
-        return '''
+        return """
 
 # General Instructions
 You are an expert in data extraction and structured data outputs.
@@ -379,7 +378,7 @@
 
 # User Defined System Prompt
 
-'''
+"""
 
     @property
     def user_system_prompt(self) -> str:
@@ -463,7 +462,7 @@ You can easily identify the fields that require a source by the `quote___[attrib
         rec_remove_required(_validation_object_schema_)
         return _validation_object_schema_
 
-    def _get_pattern_attribute(self, pattern: str, attribute: Literal['X-FieldPrompt', 'X-ReasoningPrompt', 'type']) -> str | None:
+    def _get_pattern_attribute(self, pattern: str, attribute: Literal["X-FieldPrompt", "X-ReasoningPrompt", "type"]) -> str | None:
        """
        Given a JSON Schema and a pattern (like "my_object.my_array.*.my_property"),
        navigate the schema and return the specified attribute of the identified node.
@@ -506,7 +505,7 @@ You can easily identify the fields that require a source by the `quote___[attrib
             return schema_to_ts_type(current_schema, {}, {}, 0, 0, add_field_description=False)
         return current_schema.get(attribute)
 
-    def _set_pattern_attribute(self, pattern: str, attribute: Literal['X-FieldPrompt', 'X-ReasoningPrompt', 'X-SystemPrompt', 'description'], value: str) -> None:
+    def _set_pattern_attribute(self, pattern: str, attribute: Literal["X-FieldPrompt", "X-ReasoningPrompt", "X-SystemPrompt", "description"], value: str) -> None:
        """Sets an attribute value at a specific path in the schema.
 
        Args:
@@ -551,7 +550,7 @@ You can easily identify the fields that require a source by the `quote___[attrib
         assert ref_name in definitions, "Validation Error: The $ref is not a definition reference"
 
         # Count how many times this ref is used in the entire schema
-        ref_count = json.dumps(self.json_schema).count(f"\"{ref}\"")
+        ref_count = json.dumps(self.json_schema).count(f'"{ref}"')
 
         if ref_count > 1:
             # Create a unique copy name by appending a number
@@ -589,8 +588,8 @@ You can easily identify the fields that require a source by the `quote___[attrib
     def validate_schema_and_model(cls, data: Any) -> Any:
         """Validate schema and model logic."""
         # Extract from data
-        json_schema: dict[str, Any] | None = data.get('json_schema', None)
-        pydantic_model: type[BaseModel] | None = data.get('pydantic_model', None)
+        json_schema: dict[str, Any] | None = data.get("json_schema", None)
+        pydantic_model: type[BaseModel] | None = data.get("pydantic_model", None)
 
         # Check if either json_schema or pydantic_model is provided
         if json_schema and pydantic_model:
@@ -601,17 +600,17 @@ You can easily identify the fields that require a source by the `quote___[attrib
 
         if json_schema:
             json_schema = load_json_schema(json_schema)
-            data['pydantic_model'] = convert_json_schema_to_basemodel(json_schema)
-            data['json_schema'] = json_schema
+            data["pydantic_model"] = convert_json_schema_to_basemodel(json_schema)
+            data["json_schema"] = json_schema
         if pydantic_model:
-            data['pydantic_model'] = pydantic_model
-            data['json_schema'] = pydantic_model.model_json_schema()
+            data["pydantic_model"] = pydantic_model
+            data["json_schema"] = pydantic_model.model_json_schema()
 
         return data
 
     @property
-    def messages(self) -> list[ChatCompletionUiformMessage]:
-        return [ChatCompletionUiformMessage(role="developer", content=self.system_prompt)]
+    def messages(self) -> list[ChatCompletionRetabMessage]:
+        return [ChatCompletionRetabMessage(role="developer", content=self.system_prompt)]
 
     @model_validator(mode="after")
     def model_after_validator(self) -> Self:
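The validate_schema_and_model hook above lets Schema be constructed from either a raw JSON schema or a Pydantic model, filling in whichever representation is missing. A rough usage sketch; the Schema import path and keyword arguments are inferred from this hunk, not confirmed elsewhere in the diff:

from pydantic import BaseModel
from retab.types.schemas.object import Schema  # path inferred from the file list

class Invoice(BaseModel):
    invoice_number: str
    total: float

# Either construction should end up with both json_schema and pydantic_model populated.
from_model = Schema(pydantic_model=Invoice)
from_dict = Schema(json_schema=Invoice.model_json_schema())
print(from_model.json_schema["title"], list(from_dict.json_schema.get("properties", {})))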
@@ -627,5 +626,5 @@ You can easily identify the fields that require a source by the `quote___[attrib
             json_schema: The JSON schema to save, can be a dict, Path, or string
             schema_path: Output path for the schema file
         """
-        with open(path, 'w', encoding='utf-8') as f:
+        with open(path, "w", encoding="utf-8") as f:
             json.dump(self.json_schema, f, ensure_ascii=False, indent=2)
@@ -5,6 +5,7 @@ import nanoid # type: ignore
 from pydantic import BaseModel, Field, PrivateAttr, computed_field
 
 from ..._utils.json_schema import generate_schema_data_id, generate_schema_id
+from ...types.mime import MIMEData
 
 
 class TemplateSchema(BaseModel):
@@ -58,9 +59,6 @@ class TemplateSchema(BaseModel):
     """The Pydantic model to use for loading."""
 
 
-from ...types.mime import MIMEData
-
-
 class UpdateTemplateRequest(BaseModel):
     """Request model for updating a template."""
 
@@ -1,5 +1,4 @@
 from datetime import datetime
-from enum import Enum
 from typing import Optional
 
 from pydantic import BaseModel
@@ -1,12 +1,15 @@
-from typing import Any, Generic, List, Literal, Optional, Tuple, TypeVar
+from typing import Any, List, Literal, Optional, Tuple, TypeVar, TypedDict
 
 from pydantic import BaseModel, Field
+from pydantic.fields import _Unset
 
 # API Standards
 
 # Define a type variable to represent the content type
 T = TypeVar("T")
 
+FieldUnset = _Unset
+
 
 # Define the ErrorDetail model
 class ErrorDetail(BaseModel):
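FieldUnset re-exports pydantic's internal _Unset sentinel, which makes it possible to tell "argument not supplied" apart from an explicit None when building partial-update payloads. A rough sketch of that pattern; the build_update_payload helper below is hypothetical, not part of the package:

from pydantic.fields import _Unset as FieldUnset  # the sentinel re-exported by retab.types.standards

def build_update_payload(name: str = FieldUnset, description: str | None = FieldUnset) -> dict:
    # Hypothetical helper: only arguments the caller actually passed end up in the payload,
    # so an explicit None survives while "not provided" is dropped.
    candidates = {"name": name, "description": description}
    return {key: value for key, value in candidates.items() if value is not FieldUnset}

print(build_update_payload(name="My processor"))   # {'name': 'My processor'}
print(build_update_payload(description=None))      # {'description': None}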
@@ -37,3 +40,17 @@ class PreparedRequest(BaseModel):
     files: dict | List[Tuple[str, Tuple[str, bytes, str]]] | None = None
     idempotency_key: str | None = None
     raise_for_status: bool = False
+
+
+class DeleteResponse(TypedDict):
+    """Response from a delete operation"""
+
+    success: bool
+    id: str
+
+
+class ExportResponse(TypedDict):
+    """Response from an export operation"""
+
+    success: bool
+    path: str
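DeleteResponse and ExportResponse are plain TypedDicts, so they annotate the JSON payloads of delete and export endpoints without adding pydantic validation. A brief sketch of how such a return type is typically used; the delete_document stub is illustrative only:

from typing import TypedDict

class DeleteResponse(TypedDict):
    """Response from a delete operation"""
    success: bool
    id: str

def delete_document(document_id: str) -> DeleteResponse:
    # Illustrative stub; a real client would perform the HTTP DELETE here.
    return {"success": True, "id": document_id}

resp = delete_document("doc_123")
print(resp["success"], resp["id"])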