llama-stack 0.4.3__py3-none-any.whl → 0.4.4__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- llama_stack/distributions/dell/doc_template.md +209 -0
- llama_stack/distributions/meta-reference-gpu/doc_template.md +119 -0
- llama_stack/distributions/nvidia/doc_template.md +170 -0
- llama_stack/distributions/oci/doc_template.md +140 -0
- llama_stack/models/llama/llama3/dog.jpg +0 -0
- llama_stack/models/llama/llama3/pasta.jpeg +0 -0
- llama_stack/models/llama/resources/dog.jpg +0 -0
- llama_stack/models/llama/resources/pasta.jpeg +0 -0
- llama_stack/models/llama/resources/small_dog.jpg +0 -0
- llama_stack/providers/inline/agents/meta_reference/responses/openai_responses.py +136 -11
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl/LocalInference.h +9 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl/LocalInference.swift +189 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl/Parsing.swift +238 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl/PromptTemplate.swift +12 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl/SystemPrompts.swift +89 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl.xcodeproj/project.pbxproj +550 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl.xcodeproj/project.xcworkspace/contents.xcworkspacedata +7 -0
- llama_stack/providers/inline/ios/inference/LocalInferenceImpl.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist +8 -0
- llama_stack/providers/remote/datasetio/nvidia/README.md +74 -0
- llama_stack/providers/remote/eval/nvidia/README.md +134 -0
- llama_stack/providers/remote/files/s3/README.md +266 -0
- llama_stack/providers/remote/inference/nvidia/NVIDIA.md +203 -0
- llama_stack/providers/remote/post_training/nvidia/README.md +151 -0
- llama_stack/providers/remote/safety/nvidia/README.md +78 -0
- llama_stack/providers/utils/responses/responses_store.py +34 -0
- {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/METADATA +2 -2
- {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/RECORD +31 -142
- llama_stack-0.4.4.dist-info/top_level.txt +1 -0
- llama_stack-0.4.3.dist-info/top_level.txt +0 -2
- llama_stack_api/__init__.py +0 -945
- llama_stack_api/admin/__init__.py +0 -45
- llama_stack_api/admin/api.py +0 -72
- llama_stack_api/admin/fastapi_routes.py +0 -117
- llama_stack_api/admin/models.py +0 -113
- llama_stack_api/agents.py +0 -173
- llama_stack_api/batches/__init__.py +0 -40
- llama_stack_api/batches/api.py +0 -53
- llama_stack_api/batches/fastapi_routes.py +0 -113
- llama_stack_api/batches/models.py +0 -78
- llama_stack_api/benchmarks/__init__.py +0 -43
- llama_stack_api/benchmarks/api.py +0 -39
- llama_stack_api/benchmarks/fastapi_routes.py +0 -109
- llama_stack_api/benchmarks/models.py +0 -109
- llama_stack_api/common/__init__.py +0 -5
- llama_stack_api/common/content_types.py +0 -101
- llama_stack_api/common/errors.py +0 -95
- llama_stack_api/common/job_types.py +0 -38
- llama_stack_api/common/responses.py +0 -77
- llama_stack_api/common/training_types.py +0 -47
- llama_stack_api/common/type_system.py +0 -146
- llama_stack_api/connectors.py +0 -146
- llama_stack_api/conversations.py +0 -270
- llama_stack_api/datasetio.py +0 -55
- llama_stack_api/datasets/__init__.py +0 -61
- llama_stack_api/datasets/api.py +0 -35
- llama_stack_api/datasets/fastapi_routes.py +0 -104
- llama_stack_api/datasets/models.py +0 -152
- llama_stack_api/datatypes.py +0 -373
- llama_stack_api/eval.py +0 -137
- llama_stack_api/file_processors/__init__.py +0 -27
- llama_stack_api/file_processors/api.py +0 -64
- llama_stack_api/file_processors/fastapi_routes.py +0 -78
- llama_stack_api/file_processors/models.py +0 -42
- llama_stack_api/files/__init__.py +0 -35
- llama_stack_api/files/api.py +0 -51
- llama_stack_api/files/fastapi_routes.py +0 -124
- llama_stack_api/files/models.py +0 -107
- llama_stack_api/inference.py +0 -1169
- llama_stack_api/inspect_api/__init__.py +0 -37
- llama_stack_api/inspect_api/api.py +0 -25
- llama_stack_api/inspect_api/fastapi_routes.py +0 -76
- llama_stack_api/inspect_api/models.py +0 -28
- llama_stack_api/internal/__init__.py +0 -9
- llama_stack_api/internal/kvstore.py +0 -28
- llama_stack_api/internal/sqlstore.py +0 -81
- llama_stack_api/llama_stack_api/__init__.py +0 -945
- llama_stack_api/llama_stack_api/admin/__init__.py +0 -45
- llama_stack_api/llama_stack_api/admin/api.py +0 -72
- llama_stack_api/llama_stack_api/admin/fastapi_routes.py +0 -117
- llama_stack_api/llama_stack_api/admin/models.py +0 -113
- llama_stack_api/llama_stack_api/agents.py +0 -173
- llama_stack_api/llama_stack_api/batches/__init__.py +0 -40
- llama_stack_api/llama_stack_api/batches/api.py +0 -53
- llama_stack_api/llama_stack_api/batches/fastapi_routes.py +0 -113
- llama_stack_api/llama_stack_api/batches/models.py +0 -78
- llama_stack_api/llama_stack_api/benchmarks/__init__.py +0 -43
- llama_stack_api/llama_stack_api/benchmarks/api.py +0 -39
- llama_stack_api/llama_stack_api/benchmarks/fastapi_routes.py +0 -109
- llama_stack_api/llama_stack_api/benchmarks/models.py +0 -109
- llama_stack_api/llama_stack_api/common/__init__.py +0 -5
- llama_stack_api/llama_stack_api/common/content_types.py +0 -101
- llama_stack_api/llama_stack_api/common/errors.py +0 -95
- llama_stack_api/llama_stack_api/common/job_types.py +0 -38
- llama_stack_api/llama_stack_api/common/responses.py +0 -77
- llama_stack_api/llama_stack_api/common/training_types.py +0 -47
- llama_stack_api/llama_stack_api/common/type_system.py +0 -146
- llama_stack_api/llama_stack_api/connectors.py +0 -146
- llama_stack_api/llama_stack_api/conversations.py +0 -270
- llama_stack_api/llama_stack_api/datasetio.py +0 -55
- llama_stack_api/llama_stack_api/datasets/__init__.py +0 -61
- llama_stack_api/llama_stack_api/datasets/api.py +0 -35
- llama_stack_api/llama_stack_api/datasets/fastapi_routes.py +0 -104
- llama_stack_api/llama_stack_api/datasets/models.py +0 -152
- llama_stack_api/llama_stack_api/datatypes.py +0 -373
- llama_stack_api/llama_stack_api/eval.py +0 -137
- llama_stack_api/llama_stack_api/file_processors/__init__.py +0 -27
- llama_stack_api/llama_stack_api/file_processors/api.py +0 -64
- llama_stack_api/llama_stack_api/file_processors/fastapi_routes.py +0 -78
- llama_stack_api/llama_stack_api/file_processors/models.py +0 -42
- llama_stack_api/llama_stack_api/files/__init__.py +0 -35
- llama_stack_api/llama_stack_api/files/api.py +0 -51
- llama_stack_api/llama_stack_api/files/fastapi_routes.py +0 -124
- llama_stack_api/llama_stack_api/files/models.py +0 -107
- llama_stack_api/llama_stack_api/inference.py +0 -1169
- llama_stack_api/llama_stack_api/inspect_api/__init__.py +0 -37
- llama_stack_api/llama_stack_api/inspect_api/api.py +0 -25
- llama_stack_api/llama_stack_api/inspect_api/fastapi_routes.py +0 -76
- llama_stack_api/llama_stack_api/inspect_api/models.py +0 -28
- llama_stack_api/llama_stack_api/internal/__init__.py +0 -9
- llama_stack_api/llama_stack_api/internal/kvstore.py +0 -28
- llama_stack_api/llama_stack_api/internal/sqlstore.py +0 -81
- llama_stack_api/llama_stack_api/models.py +0 -171
- llama_stack_api/llama_stack_api/openai_responses.py +0 -1468
- llama_stack_api/llama_stack_api/post_training.py +0 -370
- llama_stack_api/llama_stack_api/prompts.py +0 -203
- llama_stack_api/llama_stack_api/providers/__init__.py +0 -33
- llama_stack_api/llama_stack_api/providers/api.py +0 -16
- llama_stack_api/llama_stack_api/providers/fastapi_routes.py +0 -57
- llama_stack_api/llama_stack_api/providers/models.py +0 -24
- llama_stack_api/llama_stack_api/py.typed +0 -0
- llama_stack_api/llama_stack_api/rag_tool.py +0 -168
- llama_stack_api/llama_stack_api/resource.py +0 -37
- llama_stack_api/llama_stack_api/router_utils.py +0 -160
- llama_stack_api/llama_stack_api/safety.py +0 -132
- llama_stack_api/llama_stack_api/schema_utils.py +0 -208
- llama_stack_api/llama_stack_api/scoring.py +0 -93
- llama_stack_api/llama_stack_api/scoring_functions.py +0 -211
- llama_stack_api/llama_stack_api/shields.py +0 -93
- llama_stack_api/llama_stack_api/tools.py +0 -226
- llama_stack_api/llama_stack_api/vector_io.py +0 -941
- llama_stack_api/llama_stack_api/vector_stores.py +0 -53
- llama_stack_api/llama_stack_api/version.py +0 -9
- llama_stack_api/models.py +0 -171
- llama_stack_api/openai_responses.py +0 -1468
- llama_stack_api/post_training.py +0 -370
- llama_stack_api/prompts.py +0 -203
- llama_stack_api/providers/__init__.py +0 -33
- llama_stack_api/providers/api.py +0 -16
- llama_stack_api/providers/fastapi_routes.py +0 -57
- llama_stack_api/providers/models.py +0 -24
- llama_stack_api/py.typed +0 -0
- llama_stack_api/rag_tool.py +0 -168
- llama_stack_api/resource.py +0 -37
- llama_stack_api/router_utils.py +0 -160
- llama_stack_api/safety.py +0 -132
- llama_stack_api/schema_utils.py +0 -208
- llama_stack_api/scoring.py +0 -93
- llama_stack_api/scoring_functions.py +0 -211
- llama_stack_api/shields.py +0 -93
- llama_stack_api/tools.py +0 -226
- llama_stack_api/vector_io.py +0 -941
- llama_stack_api/vector_stores.py +0 -53
- llama_stack_api/version.py +0 -9
- {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/WHEEL +0 -0
- {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/entry_points.txt +0 -0
- {llama_stack-0.4.3.dist-info → llama_stack-0.4.4.dist-info}/licenses/LICENSE +0 -0
@@ -1,37 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
"""Inspect API protocol and models.
|
|
8
|
-
|
|
9
|
-
This module contains the Inspect protocol definition.
|
|
10
|
-
Pydantic models are defined in llama_stack_api.inspect.models.
|
|
11
|
-
The FastAPI router is defined in llama_stack_api.inspect.fastapi_routes.
|
|
12
|
-
"""
|
|
13
|
-
|
|
14
|
-
# Import fastapi_routes for router factory access
|
|
15
|
-
from . import fastapi_routes
|
|
16
|
-
|
|
17
|
-
# Import protocol for re-export
|
|
18
|
-
from .api import Inspect
|
|
19
|
-
|
|
20
|
-
# Import models for re-export
|
|
21
|
-
from .models import (
|
|
22
|
-
ApiFilter,
|
|
23
|
-
HealthInfo,
|
|
24
|
-
ListRoutesResponse,
|
|
25
|
-
RouteInfo,
|
|
26
|
-
VersionInfo,
|
|
27
|
-
)
|
|
28
|
-
|
|
29
|
-
__all__ = [
|
|
30
|
-
"Inspect",
|
|
31
|
-
"ApiFilter",
|
|
32
|
-
"HealthInfo",
|
|
33
|
-
"ListRoutesResponse",
|
|
34
|
-
"RouteInfo",
|
|
35
|
-
"VersionInfo",
|
|
36
|
-
"fastapi_routes",
|
|
37
|
-
]
|
|
@@ -1,25 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from typing import Protocol, runtime_checkable
|
|
8
|
-
|
|
9
|
-
from .models import (
|
|
10
|
-
ApiFilter,
|
|
11
|
-
HealthInfo,
|
|
12
|
-
ListRoutesResponse,
|
|
13
|
-
VersionInfo,
|
|
14
|
-
)
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
@runtime_checkable
|
|
18
|
-
class Inspect(Protocol):
|
|
19
|
-
"""APIs for inspecting the Llama Stack service, including health status, available API routes with methods and implementing providers."""
|
|
20
|
-
|
|
21
|
-
async def list_routes(self, api_filter: ApiFilter | None = None) -> ListRoutesResponse: ...
|
|
22
|
-
|
|
23
|
-
async def health(self) -> HealthInfo: ...
|
|
24
|
-
|
|
25
|
-
async def version(self) -> VersionInfo: ...
|
|
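The deleted `Inspect` protocol above is structural and `runtime_checkable`, so any object exposing the three async methods satisfies it. Below is a minimal sketch of a conforming stub against the 0.4.3 package layout; the constructor fields (`data`, `status`, `version`) are assumptions, since the model definitions live in `llama_stack_api.admin.models` and are not shown in this hunk.

```python
# Hypothetical stub satisfying the removed Inspect protocol (0.4.3 layout).
from llama_stack_api.inspect_api.api import Inspect
from llama_stack_api.inspect_api.models import (
    ApiFilter,
    HealthInfo,
    ListRoutesResponse,
    VersionInfo,
)


class StubInspect:
    async def list_routes(self, api_filter: ApiFilter | None = None) -> ListRoutesResponse:
        return ListRoutesResponse(data=[])  # assumed field name

    async def health(self) -> HealthInfo:
        return HealthInfo(status="OK")  # assumed field name and value

    async def version(self) -> VersionInfo:
        return VersionInfo(version="0.4.3")  # assumed field name


# Structural check enabled by @runtime_checkable on the protocol.
assert isinstance(StubInspect(), Inspect)
```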
@@ -1,76 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
"""FastAPI router for the Inspect API.
|
|
8
|
-
|
|
9
|
-
This module defines the FastAPI router for the Inspect API using standard
|
|
10
|
-
FastAPI route decorators.
|
|
11
|
-
"""
|
|
12
|
-
|
|
13
|
-
from typing import Annotated
|
|
14
|
-
|
|
15
|
-
from fastapi import APIRouter, Query
|
|
16
|
-
|
|
17
|
-
from llama_stack_api.router_utils import PUBLIC_ROUTE_KEY, standard_responses
|
|
18
|
-
from llama_stack_api.version import LLAMA_STACK_API_V1
|
|
19
|
-
|
|
20
|
-
from .api import Inspect
|
|
21
|
-
from .models import (
|
|
22
|
-
ApiFilter,
|
|
23
|
-
HealthInfo,
|
|
24
|
-
ListRoutesResponse,
|
|
25
|
-
VersionInfo,
|
|
26
|
-
)
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
def create_router(impl: Inspect) -> APIRouter:
|
|
30
|
-
"""Create a FastAPI router for the Inspect API."""
|
|
31
|
-
router = APIRouter(
|
|
32
|
-
prefix=f"/{LLAMA_STACK_API_V1}",
|
|
33
|
-
tags=["Inspect"],
|
|
34
|
-
responses=standard_responses,
|
|
35
|
-
)
|
|
36
|
-
|
|
37
|
-
@router.get(
|
|
38
|
-
"/inspect/routes",
|
|
39
|
-
response_model=ListRoutesResponse,
|
|
40
|
-
summary="List routes.",
|
|
41
|
-
description="List all available API routes with their methods and implementing providers.",
|
|
42
|
-
responses={200: {"description": "Response containing information about all available routes."}},
|
|
43
|
-
)
|
|
44
|
-
async def list_routes(
|
|
45
|
-
api_filter: Annotated[
|
|
46
|
-
ApiFilter | None,
|
|
47
|
-
Query(
|
|
48
|
-
description="Optional filter to control which routes are returned. Can be an API level ('v1', 'v1alpha', 'v1beta') to show non-deprecated routes at that level, or 'deprecated' to show deprecated routes across all levels. If not specified, returns all non-deprecated routes."
|
|
49
|
-
),
|
|
50
|
-
] = None,
|
|
51
|
-
) -> ListRoutesResponse:
|
|
52
|
-
return await impl.list_routes(api_filter)
|
|
53
|
-
|
|
54
|
-
@router.get(
|
|
55
|
-
"/health",
|
|
56
|
-
response_model=HealthInfo,
|
|
57
|
-
summary="Get health status.",
|
|
58
|
-
description="Get the current health status of the service.",
|
|
59
|
-
responses={200: {"description": "Health information indicating if the service is operational."}},
|
|
60
|
-
openapi_extra={PUBLIC_ROUTE_KEY: True},
|
|
61
|
-
)
|
|
62
|
-
async def health() -> HealthInfo:
|
|
63
|
-
return await impl.health()
|
|
64
|
-
|
|
65
|
-
@router.get(
|
|
66
|
-
"/version",
|
|
67
|
-
response_model=VersionInfo,
|
|
68
|
-
summary="Get version.",
|
|
69
|
-
description="Get the version of the service.",
|
|
70
|
-
responses={200: {"description": "Version information containing the service version number."}},
|
|
71
|
-
openapi_extra={PUBLIC_ROUTE_KEY: True},
|
|
72
|
-
)
|
|
73
|
-
async def version() -> VersionInfo:
|
|
74
|
-
return await impl.version()
|
|
75
|
-
|
|
76
|
-
return router
|
|
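`create_router` is a plain factory: it accepts any `Inspect` implementation and returns a standard `APIRouter`, so mounting it is a single `include_router` call. A sketch, reusing the hypothetical `StubInspect` from the previous example and assuming `LLAMA_STACK_API_V1` resolves to `"v1"`:

```python
# Hypothetical wiring of the removed Inspect router (StubInspect is the
# illustrative stub from the previous sketch, not part of llama-stack).
from fastapi import FastAPI

from llama_stack_api.inspect_api.fastapi_routes import create_router

app = FastAPI()
app.include_router(create_router(StubInspect()))
# Exposes GET /v1/inspect/routes, /v1/health and /v1/version (assuming the v1 prefix).
```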
@@ -1,28 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
"""Pydantic models for Inspect API requests and responses.
|
|
8
|
-
|
|
9
|
-
This module re-exports models from llama_stack_api.admin.models to ensure
|
|
10
|
-
a single source of truth and avoid type conflicts.
|
|
11
|
-
"""
|
|
12
|
-
|
|
13
|
-
# Import and re-export shared models from admin
|
|
14
|
-
from llama_stack_api.admin.models import (
|
|
15
|
-
ApiFilter,
|
|
16
|
-
HealthInfo,
|
|
17
|
-
ListRoutesResponse,
|
|
18
|
-
RouteInfo,
|
|
19
|
-
VersionInfo,
|
|
20
|
-
)
|
|
21
|
-
|
|
22
|
-
__all__ = [
|
|
23
|
-
"ApiFilter",
|
|
24
|
-
"RouteInfo",
|
|
25
|
-
"HealthInfo",
|
|
26
|
-
"VersionInfo",
|
|
27
|
-
"ListRoutesResponse",
|
|
28
|
-
]
|
|
@@ -1,9 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
# Internal subpackage for shared interfaces that are not part of the public API.
|
|
8
|
-
|
|
9
|
-
__all__: list[str] = []
|
|
@@ -1,28 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from datetime import datetime
|
|
8
|
-
from typing import Protocol
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
class KVStore(Protocol):
|
|
12
|
-
"""Protocol for simple key/value storage backends."""
|
|
13
|
-
|
|
14
|
-
# TODO: make the value type bytes instead of str
|
|
15
|
-
async def set(self, key: str, value: str, expiration: datetime | None = None) -> None: ...
|
|
16
|
-
|
|
17
|
-
async def get(self, key: str) -> str | None: ...
|
|
18
|
-
|
|
19
|
-
async def delete(self, key: str) -> None: ...
|
|
20
|
-
|
|
21
|
-
async def values_in_range(self, start_key: str, end_key: str) -> list[str]: ...
|
|
22
|
-
|
|
23
|
-
async def keys_in_range(self, start_key: str, end_key: str) -> list[str]: ...
|
|
24
|
-
|
|
25
|
-
async def shutdown(self) -> None: ...
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
__all__ = ["KVStore"]
|
|
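The removed `KVStore` protocol is small enough that a toy backend makes its contract clear. A minimal in-memory sketch follows; it ignores `expiration` and treats the range queries as inclusive, neither of which the protocol itself specifies.

```python
# Minimal in-memory sketch satisfying the removed KVStore protocol.
from datetime import datetime


class InMemoryKVStore:
    def __init__(self) -> None:
        self._data: dict[str, str] = {}

    async def set(self, key: str, value: str, expiration: datetime | None = None) -> None:
        self._data[key] = value  # expiration ignored in this sketch

    async def get(self, key: str) -> str | None:
        return self._data.get(key)

    async def delete(self, key: str) -> None:
        self._data.pop(key, None)

    async def values_in_range(self, start_key: str, end_key: str) -> list[str]:
        # Assumes an inclusive key range ordered lexicographically.
        return [v for k, v in sorted(self._data.items()) if start_key <= k <= end_key]

    async def keys_in_range(self, start_key: str, end_key: str) -> list[str]:
        return [k for k in sorted(self._data) if start_key <= k <= end_key]

    async def shutdown(self) -> None:
        self._data.clear()
```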
@@ -1,81 +0,0 @@
|
|
|
1
|
-
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
|
2
|
-
# All rights reserved.
|
|
3
|
-
#
|
|
4
|
-
# This source code is licensed under the terms described in the LICENSE file in
|
|
5
|
-
# the root directory of this source tree.
|
|
6
|
-
|
|
7
|
-
from collections.abc import Mapping, Sequence
|
|
8
|
-
from enum import Enum
|
|
9
|
-
from typing import Any, Literal, Protocol
|
|
10
|
-
|
|
11
|
-
from pydantic import BaseModel
|
|
12
|
-
|
|
13
|
-
from llama_stack_api import PaginatedResponse
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
class ColumnType(Enum):
|
|
17
|
-
INTEGER = "INTEGER"
|
|
18
|
-
STRING = "STRING"
|
|
19
|
-
TEXT = "TEXT"
|
|
20
|
-
FLOAT = "FLOAT"
|
|
21
|
-
BOOLEAN = "BOOLEAN"
|
|
22
|
-
JSON = "JSON"
|
|
23
|
-
DATETIME = "DATETIME"
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
class ColumnDefinition(BaseModel):
|
|
27
|
-
type: ColumnType
|
|
28
|
-
primary_key: bool = False
|
|
29
|
-
nullable: bool = True
|
|
30
|
-
default: Any = None
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
class SqlStore(Protocol):
|
|
34
|
-
"""Protocol for common SQL-store functionality."""
|
|
35
|
-
|
|
36
|
-
async def create_table(self, table: str, schema: Mapping[str, ColumnType | ColumnDefinition]) -> None: ...
|
|
37
|
-
|
|
38
|
-
async def insert(self, table: str, data: Mapping[str, Any] | Sequence[Mapping[str, Any]]) -> None: ...
|
|
39
|
-
|
|
40
|
-
async def upsert(
|
|
41
|
-
self,
|
|
42
|
-
table: str,
|
|
43
|
-
data: Mapping[str, Any],
|
|
44
|
-
conflict_columns: list[str],
|
|
45
|
-
update_columns: list[str] | None = None,
|
|
46
|
-
) -> None: ...
|
|
47
|
-
|
|
48
|
-
async def fetch_all(
|
|
49
|
-
self,
|
|
50
|
-
table: str,
|
|
51
|
-
where: Mapping[str, Any] | None = None,
|
|
52
|
-
where_sql: str | None = None,
|
|
53
|
-
limit: int | None = None,
|
|
54
|
-
order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
|
|
55
|
-
cursor: tuple[str, str] | None = None,
|
|
56
|
-
) -> PaginatedResponse: ...
|
|
57
|
-
|
|
58
|
-
async def fetch_one(
|
|
59
|
-
self,
|
|
60
|
-
table: str,
|
|
61
|
-
where: Mapping[str, Any] | None = None,
|
|
62
|
-
where_sql: str | None = None,
|
|
63
|
-
order_by: list[tuple[str, Literal["asc", "desc"]]] | None = None,
|
|
64
|
-
) -> dict[str, Any] | None: ...
|
|
65
|
-
|
|
66
|
-
async def update(self, table: str, data: Mapping[str, Any], where: Mapping[str, Any]) -> None: ...
|
|
67
|
-
|
|
68
|
-
async def delete(self, table: str, where: Mapping[str, Any]) -> None: ...
|
|
69
|
-
|
|
70
|
-
async def add_column_if_not_exists(
|
|
71
|
-
self,
|
|
72
|
-
table: str,
|
|
73
|
-
column_name: str,
|
|
74
|
-
column_type: ColumnType,
|
|
75
|
-
nullable: bool = True,
|
|
76
|
-
) -> None: ...
|
|
77
|
-
|
|
78
|
-
async def shutdown(self) -> None: ...
|
|
79
|
-
|
|
80
|
-
|
|
81
|
-
__all__ = ["ColumnDefinition", "ColumnType", "SqlStore"]
|
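The `SqlStore` protocol accepts either a bare `ColumnType` or a full `ColumnDefinition` per column. The sketch below shows how a caller might have declared a table against the removed 0.4.3 interface; the table and column names are made up for illustration, and `store` stands in for whatever concrete backend implements the protocol.

```python
# Hypothetical schema declaration using the removed ColumnType / ColumnDefinition models.
from llama_stack_api.internal.sqlstore import ColumnDefinition, ColumnType, SqlStore


async def create_responses_table(store: SqlStore) -> None:
    # Columns can be a bare ColumnType (defaults apply) or a full ColumnDefinition.
    await store.create_table(
        "responses",  # illustrative table name
        {
            "id": ColumnDefinition(type=ColumnType.STRING, primary_key=True, nullable=False),
            "created_at": ColumnType.DATETIME,
            "payload": ColumnType.JSON,
        },
    )
    # Insert-or-update keyed on the primary key column.
    await store.upsert(
        "responses",
        {"id": "resp_1", "payload": {"status": "ok"}},
        conflict_columns=["id"],
    )
```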