stackraise-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- stackraise/__init__.py +6 -0
- stackraise/ai/__init__.py +2 -0
- stackraise/ai/rpa.py +380 -0
- stackraise/ai/toolset.py +227 -0
- stackraise/app.py +23 -0
- stackraise/auth/__init__.py +2 -0
- stackraise/auth/model.py +24 -0
- stackraise/auth/service.py +240 -0
- stackraise/ctrl/__init__.py +4 -0
- stackraise/ctrl/change_stream.py +40 -0
- stackraise/ctrl/crud_controller.py +63 -0
- stackraise/ctrl/file_storage.py +68 -0
- stackraise/db/__init__.py +11 -0
- stackraise/db/adapter.py +60 -0
- stackraise/db/collection.py +292 -0
- stackraise/db/cursor.py +229 -0
- stackraise/db/document.py +282 -0
- stackraise/db/exceptions.py +9 -0
- stackraise/db/id.py +79 -0
- stackraise/db/index.py +84 -0
- stackraise/db/persistence.py +238 -0
- stackraise/db/pipeline.py +245 -0
- stackraise/db/protocols.py +141 -0
- stackraise/di.py +36 -0
- stackraise/event.py +150 -0
- stackraise/inflection.py +28 -0
- stackraise/io/__init__.py +3 -0
- stackraise/io/imap_client.py +400 -0
- stackraise/io/smtp_client.py +102 -0
- stackraise/logging.py +22 -0
- stackraise/model/__init__.py +11 -0
- stackraise/model/core.py +16 -0
- stackraise/model/dto.py +12 -0
- stackraise/model/email_message.py +88 -0
- stackraise/model/file.py +154 -0
- stackraise/model/name_email.py +45 -0
- stackraise/model/query_filters.py +231 -0
- stackraise/model/time_range.py +285 -0
- stackraise/model/validation.py +8 -0
- stackraise/templating/__init__.py +4 -0
- stackraise/templating/exceptions.py +23 -0
- stackraise/templating/image/__init__.py +2 -0
- stackraise/templating/image/model.py +51 -0
- stackraise/templating/image/processor.py +154 -0
- stackraise/templating/parser.py +156 -0
- stackraise/templating/pptx/__init__.py +3 -0
- stackraise/templating/pptx/pptx_engine.py +204 -0
- stackraise/templating/pptx/slide_renderer.py +181 -0
- stackraise/templating/tracer.py +57 -0
- stackraise-0.1.0.dist-info/METADATA +37 -0
- stackraise-0.1.0.dist-info/RECORD +52 -0
- stackraise-0.1.0.dist-info/WHEEL +4 -0
stackraise/model/file.py
ADDED
@@ -0,0 +1,154 @@
from __future__ import annotations

import base64
from datetime import datetime, timezone
from mimetypes import guess_type
from pathlib import Path
from typing import Any, ClassVar, Optional, Self

import stackraise.db as db
from fastapi.responses import StreamingResponse
from pydantic import Field

from .core import Base

# """
# GridFS implementation in stackraise.
# """

_GRIDFS_DEFAULT_CHUNK_SIZE = 256 * 1024  # Default chunk size of 256 KB


class File(db.Document, collection="fs.files"):
    __slots__ = ("_sync_content",)

    length: int
    chunk_size: int = _GRIDFS_DEFAULT_CHUNK_SIZE
    filename: Optional[str] = None
    content_type: Optional[str] = None
    upload_date: datetime = Field(default_factory=lambda: datetime.now(timezone.utc))
    metadata: Optional[dict[str, Any]] = None

    # _sync_content: ClassVar[bytes | None] = None  # Cached content for sync operations
    # _sync_modified: ClassVar[bool] = False  # Flag to track if content has been modified

    class Chunk(db.Document, collection="fs.chunks"):
        file_id: File.Ref
        n: int  # NOTE: holds the chunk's byte offset, not a sequential index as in stock GridFS
        data: bytes

        @classmethod
        def find_by_file_id(cls, file_id: db.Id):
            """
            Returns an async iterator over all chunks for the given file ID.
            """
            return cls.collection.find({"fileId": file_id}).sort("n")

    @classmethod
    def new(
        cls,
        content: bytes,
        content_type: str,
        filename: Optional[Path | str],
        chunk_size: int = _GRIDFS_DEFAULT_CHUNK_SIZE,
        **metadata: dict[str, str],
    ) -> Self:
        file = cls(
            length=len(content),
            chunk_size=chunk_size,
            content_type=content_type,
            filename=str(filename) if filename else None,
            metadata=metadata,
        )

        setattr(file, "_sync_content", content)  # Cache the content for synchronous operations

        return file

    @classmethod
    async def from_local_path(cls, path: Path | str) -> File:
        """
        Reads the file content from the given path and returns a File object.
        """
        with open(path, "rb") as f:
            content = f.read()

        content_type, _encoding = guess_type(path)

        return cls.new(
            filename=Path(path).name,
            content_type=content_type or "application/octet-stream",
            content=content,
        )

    async def content(self) -> bytes:
        """
        Reads the file content from the database.
        """
        if content := getattr(self, "_sync_content", None):
            return content

        assert self.id is not None, "The file must be saved before reading."

        # Fetch all chunks associated with this file and combine them
        # into a single bytes object.
        cursor = self.Chunk.find_by_file_id(self.id)
        chunk_docs = await cursor.as_list()
        return b"".join(chunk.data for chunk in chunk_docs)

    def as_stream(
        self,
        headers: Optional[dict[str, str]] = None,
    ) -> StreamingResponse:
        """
        Returns a StreamingResponse for the file content.
        This is useful for serving large files, although the current
        implementation buffers all chunks before streaming them.
        """
        assert self.id is not None, "The file must be saved before streaming."

        async def file_stream():
            chunks = await self.Chunk.find_by_file_id(self.id).as_list()
            for chunk in chunks:
                yield chunk.data

        if headers is None:
            headers = {"Content-Disposition": f'attachment; filename="{self.filename}"'}

        return StreamingResponse(
            file_stream(), media_type=self.content_type, headers=headers
        )

    async def __prepare_for_storage__(self):
        # If content is cached in memory, persist it as chunks below.
        if self.id is None and getattr(self, "_sync_content", None) is None:
            raise ValueError("Cannot create a file without content or id.")

        if self.id is None:
            self.id = db.Id.new()

        if (content := getattr(self, "_sync_content", None)) is not None:
            # Insert all chunks into the database
            for n, data in _iter_chunked_content(content, self.chunk_size):
                await self.Chunk(file_id=self.id, n=n, data=data).insert()

    @classmethod
    async def __handle_post_deletion__(cls, file_id: db.Id):
        """
        Delete all chunks associated with this file.
        """
        await cls.Chunk.collection._delete_many({"fileId": file_id})


def _iter_chunked_content(content: bytes, chunk_size: int = _GRIDFS_DEFAULT_CHUNK_SIZE):
    for n in range(0, len(content), chunk_size):
        yield n, content[n : n + chunk_size]
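The model above mirrors MongoDB's GridFS layout: metadata lives in fs.files and the payload is split across fs.chunks rows tied back to the file by file_id, written out by __prepare_for_storage__ on save. A minimal usage sketch follows; file.insert() is grounded in the Chunk(...).insert() call above, while File.get(...) and the route paths are assumptions about the surrounding stackraise API rather than confirmed interfaces:

from fastapi import FastAPI, UploadFile

from stackraise.model.file import File

app = FastAPI()

@app.post("/files")
async def upload(upload: UploadFile):
    # new() caches the raw bytes; __prepare_for_storage__ writes the
    # chunk rows when the document is persisted.
    file = File.new(
        content=await upload.read(),
        content_type=upload.content_type or "application/octet-stream",
        filename=upload.filename,
    )
    await file.insert()  # assumed: db.Document exposes insert(), as Chunk uses above
    return {"id": str(file.id)}

@app.get("/files/{file_id}")
async def download(file_id: str):
    file = await File.get(file_id)  # hypothetical accessor; the exact stackraise API may differ
    return file.as_stream()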
stackraise/model/name_email.py
ADDED
@@ -0,0 +1,45 @@
from typing import Optional, Self
from pydantic import validate_email, EmailStr
from pydantic_core import core_schema
from dataclasses import dataclass

@dataclass
class NameEmail:
    email: EmailStr
    name: Optional[str] = None

    @classmethod
    def from_str(cls, value: str) -> Self:
        name, email = validate_email(value)
        return cls(name=name, email=email)

    def __str__(self):
        if not self.name:
            return self.email
        return f"{self.name} <{self.email}>"


    # @classmethod
    # def __get_pydantic_core_schema__(cls, filter_alias, handler):

    #     def validate(val: str | dict | NameEmail) -> NameEmail:

    #         if isinstance(val, str):
    #             return NameEmail.from_str(val)

    #         if isinstance(val, dict):
    #             return cls(name=val.get("name", None), email=EmailStr(val.get("email")))

    #         return val

    #     def serialize(name_email: NameEmail) -> str:
    #         return str(name_email)

    #     schema = core_schema.json_or_python_schema(
    #         json_schema=core_schema.no_info_plain_validator_function(validate),
    #         python_schema=core_schema.no_info_plain_validator_function(validate),
    #         serialization=core_schema.plain_serializer_function_ser_schema(serialize),
    #     )

    #     return schema
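For reference, the round trip implemented by from_str and __str__ (pydantic's validate_email also accepts bare addresses, in which case it reports the local part as the name):

ne = NameEmail.from_str("Ada Lovelace <ada@example.com>")
assert ne.name == "Ada Lovelace"
assert ne.email == "ada@example.com"
assert str(ne) == "Ada Lovelace <ada@example.com>"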
stackraise/model/query_filters.py
ADDED
@@ -0,0 +1,231 @@
from __future__ import annotations

import re
from datetime import date
from enum import Enum
from functools import cache
from typing import Annotated, Optional, Self, get_args

from fastapi import Query
from pydantic import TypeAdapter, create_model
from pydantic.fields import FieldInfo
from pydantic_core import core_schema

import stackraise.model as model

_QUERY_FILTER_RE = re.compile(r"(?P<op>\w+):(?P<val>.*)")


class QueryFilter[T]:

    class Operator(str, Enum):
        EQ = "eq"
        NE = "neq"
        LT = "lt"
        LTE = "lte"
        GT = "gt"
        GTE = "gte"
        IN = "in"
        LIKE = "like"
        ILIKE = "ilike"

    @classmethod
    def eq(cls, value: T) -> "QueryFilter[T]":
        return cls(cls.Operator.EQ, value)

    @classmethod
    def neq(cls, value: T) -> "QueryFilter[T]":
        return cls(cls.Operator.NE, value)

    @classmethod
    def lt(cls, value: T) -> "QueryFilter[T]":
        return cls(cls.Operator.LT, value)

    @classmethod
    def lte(cls, value: T) -> "QueryFilter[T]":
        return cls(cls.Operator.LTE, value)

    @classmethod
    def gt(cls, value: T) -> "QueryFilter[T]":
        return cls(cls.Operator.GT, value)

    @classmethod
    def gte(cls, value: T) -> "QueryFilter[T]":
        return cls(cls.Operator.GTE, value)

    @classmethod
    def in_(cls, value: list[T]) -> "QueryFilter[list[T]]":
        return cls(cls.Operator.IN, value)

    @classmethod
    def like(cls, value: str) -> "QueryFilter[str]":
        return cls(cls.Operator.LIKE, value)

    operator: Operator
    value: Optional[T | list[T]]

    def __init__(self, operator: Operator, value: T | list[T]):
        self.operator = operator
        self.value = value

    def to_mongo_query_operator(self, annotation: type):
        type_args = get_args(annotation)
        bson = model.TypeAdapter(type_args[0]).dump_python(self.value)
        return _MONGO_QUERY_OPERATOR_MAP[self.operator](bson)

    @classmethod
    def __get_pydantic_core_schema__(cls, filter_alias, handler):
        filter_args = get_args(filter_alias)
        if len(filter_args) != 1:
            raise ValueError(
                f"QueryFilter '{filter_alias}' must have exactly one type argument, got {len(filter_args)}"
            )

        inner_type = filter_args[0]

        single_type_adapter = model.TypeAdapter(inner_type)
        list_type_adapter = model.TypeAdapter(list[inner_type])

        def validate(val: str | QueryFilter | None) -> Optional[QueryFilter]:
            if isinstance(val, QueryFilter):
                return val

            # Handle None or empty values
            if val is None:
                return None

            # Convert to string if it is not one already
            if not isinstance(val, str):
                val = str(val)

            val = val.strip()

            # Handle empty strings
            if val == "":
                return None

            m = _QUERY_FILTER_RE.match(val)
            if not m:
                raise ValueError(f"Invalid filter string: {val}")

            op = QueryFilter.Operator(m.group("op"))
            val = m.group("val")

            if val == "":
                val = None
            elif op == QueryFilter.Operator.IN:
                val = val.split(",")
                val = list_type_adapter.validate_python(val)  # TODO: verify this is correct
            # TODO: restrict the like filter to str
            else:
                val = single_type_adapter.validate_strings(val)

            return cls(operator=op, value=val)

        def serialize(filter: QueryFilter) -> str:
            if filter.value is None:
                val = ""
            elif filter.operator == QueryFilter.Operator.IN:
                val = list_type_adapter.serialize(filter.value)
            else:
                val = single_type_adapter.serialize(filter.value)

            return f"{filter.operator.value}:{val}"

        schema = core_schema.json_or_python_schema(
            json_schema=core_schema.no_info_plain_validator_function(validate),
            python_schema=core_schema.no_info_plain_validator_function(validate),
            serialization=core_schema.plain_serializer_function_ser_schema(serialize),
        )

        return schema

    @classmethod
    def __get_pydantic_json_schema__(cls, _, handler):
        return handler(core_schema.str_schema())

    # __pydantic_serializer__ = SchemaSerializer(core_schema.json_schema({"type": "string"}))


_MONGO_QUERY_OPERATOR_MAP = {
    QueryFilter.Operator.EQ: lambda v: {"$eq": v},
    QueryFilter.Operator.NE: lambda v: {"$ne": v},
    QueryFilter.Operator.LT: lambda v: {"$lt": v},
    QueryFilter.Operator.LTE: lambda v: {"$lte": v},
    QueryFilter.Operator.GT: lambda v: {"$gt": v},
    QueryFilter.Operator.GTE: lambda v: {"$gte": v},
    QueryFilter.Operator.IN: lambda v: {"$in": v if isinstance(v, list) else [v]},
    QueryFilter.Operator.LIKE: lambda v: {"$regex": v},
    QueryFilter.Operator.ILIKE: lambda v: {"$regex": v, "$options": "i"},
}


class QueryFilters(model.Base):

    qs_: Annotated[Optional[str], model.Field(alias="qs")] = None

    @classmethod
    @cache
    def for_model(cls, model_class: type[model.Base]) -> type[QueryFilters]:
        # TODO: support for GenericAlias

        def query_filter_of_field(field_info: FieldInfo):
            return Annotated[
                QueryFilter[field_info.annotation],
                Query(
                    None,
                    alias=field_info.alias,
                    description=f"Filter for {field_info.alias} property with {field_info.annotation} type",
                ),
            ]

        fields = {nm: query_filter_of_field(fi) for nm, fi in model_class.model_fields.items()}

        filters_class = create_model(
            "QueryFilters",
            __base__=QueryFilters,
            __module__=model_class.__module__,
            **fields,
        )

        filters_class.__qualname__ = f"{model_class.__qualname__}.QueryFilters"

        return filters_class

    def to_mongo_query(self, *, prefix: Optional[list[str]] = None):
        prefix = prefix or []

        def mk_field_name(field: FieldInfo, field_name):
            return ".".join(prefix + [field.alias or field_name])

        where = [{
            mk_field_name(field, field_name): query_filter.to_mongo_query_operator(field.annotation)
            for field_name, field in type(self).model_fields.items()
            if (query_filter := getattr(self, field_name, None)) and isinstance(query_filter, QueryFilter)
        }]

        if self.qs_ is not None:
            where.append({"$or": [
                {mk_field_name(field, field_name): {"$regex": f"^{self.qs_}", "$options": "i"}}
                for field_name, field in type(self).model_fields.items()
                if field.annotation == QueryFilter[str]
            ]})

            # TEXT SEARCH
            # search['$or'].append({ '$text': { '$search': self.qs_, '$caseSensitive': False, }})

        return {"$and": where}


if __name__ == "__main__":
    # from stackraise.persistence.id import Id
    ta = TypeAdapter(QueryFilter[date])
    filter = TypeAdapter(QueryFilter[float]).validate_python("lte:.45")
    print(filter.to_mongo_query_operator(QueryFilter[float]))

    class MyModel(model.Base):
        name: Annotated[str, model.Field(alias="n")]