seekrai-0.0.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- seekrai/__init__.py +64 -0
- seekrai/abstract/__init__.py +1 -0
- seekrai/abstract/api_requestor.py +710 -0
- seekrai/cli/__init__.py +0 -0
- seekrai/cli/api/__init__.py +0 -0
- seekrai/cli/api/chat.py +245 -0
- seekrai/cli/api/completions.py +107 -0
- seekrai/cli/api/files.py +125 -0
- seekrai/cli/api/finetune.py +175 -0
- seekrai/cli/api/images.py +82 -0
- seekrai/cli/api/models.py +42 -0
- seekrai/cli/cli.py +77 -0
- seekrai/client.py +154 -0
- seekrai/constants.py +32 -0
- seekrai/error.py +188 -0
- seekrai/filemanager.py +393 -0
- seekrai/legacy/__init__.py +0 -0
- seekrai/legacy/base.py +27 -0
- seekrai/legacy/complete.py +91 -0
- seekrai/legacy/embeddings.py +25 -0
- seekrai/legacy/files.py +140 -0
- seekrai/legacy/finetune.py +173 -0
- seekrai/legacy/images.py +25 -0
- seekrai/legacy/models.py +44 -0
- seekrai/resources/__init__.py +25 -0
- seekrai/resources/chat/__init__.py +24 -0
- seekrai/resources/chat/completions.py +241 -0
- seekrai/resources/completions.py +205 -0
- seekrai/resources/embeddings.py +100 -0
- seekrai/resources/files.py +173 -0
- seekrai/resources/finetune.py +425 -0
- seekrai/resources/images.py +156 -0
- seekrai/resources/models.py +75 -0
- seekrai/seekrflow_response.py +50 -0
- seekrai/types/__init__.py +67 -0
- seekrai/types/abstract.py +26 -0
- seekrai/types/chat_completions.py +151 -0
- seekrai/types/common.py +64 -0
- seekrai/types/completions.py +86 -0
- seekrai/types/embeddings.py +35 -0
- seekrai/types/error.py +16 -0
- seekrai/types/files.py +88 -0
- seekrai/types/finetune.py +218 -0
- seekrai/types/images.py +42 -0
- seekrai/types/models.py +43 -0
- seekrai/utils/__init__.py +28 -0
- seekrai/utils/_log.py +61 -0
- seekrai/utils/api_helpers.py +84 -0
- seekrai/utils/files.py +204 -0
- seekrai/utils/tools.py +75 -0
- seekrai/version.py +6 -0
- seekrai-0.0.1.dist-info/LICENSE +201 -0
- seekrai-0.0.1.dist-info/METADATA +401 -0
- seekrai-0.0.1.dist-info/RECORD +56 -0
- seekrai-0.0.1.dist-info/WHEEL +4 -0
- seekrai-0.0.1.dist-info/entry_points.txt +3 -0
seekrai/resources/completions.py +205 -0

@@ -0,0 +1,205 @@
+from __future__ import annotations
+
+from typing import AsyncGenerator, Iterator, List
+
+from seekrai.abstract import api_requestor
+from seekrai.seekrflow_response import SeekrFlowResponse
+from seekrai.types import (
+    CompletionChunk,
+    CompletionRequest,
+    CompletionResponse,
+    SeekrFlowClient,
+    SeekrFlowRequest,
+)
+
+
+class Completions:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    def create(
+        self,
+        *,
+        prompt: str,
+        model: str,
+        max_tokens: int | None = 512,
+        stop: List[str] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        top_k: int | None = None,
+        repetition_penalty: float | None = None,
+        stream: bool = False,
+        logprobs: int | None = None,
+        echo: bool | None = None,
+        n: int | None = None,
+        safety_model: str | None = None,
+    ) -> CompletionResponse | Iterator[CompletionChunk]:
+        """
+        Method to generate completions based on a given prompt using a specified model.
+
+        Args:
+            prompt (str): A string providing context for the model to complete.
+            model (str): The name of the model to query.
+            max_tokens (int, optional): The maximum number of tokens to generate.
+                Defaults to 512.
+            stop (List[str], optional): List of strings at which to stop generation.
+                Defaults to None.
+            temperature (float, optional): A decimal number that determines the degree of randomness in the response.
+                Defaults to None.
+            top_p (float, optional): The top_p (nucleus) parameter is used to dynamically adjust the number
+                of choices for each predicted token based on the cumulative probabilities.
+                Defaults to None.
+            top_k (int, optional): The top_k parameter is used to limit the number of choices for the
+                next predicted word or token.
+                Defaults to None.
+            repetition_penalty (float, optional): A number that controls the diversity of generated text
+                by reducing the likelihood of repeated sequences. Higher values decrease repetition.
+                Defaults to None.
+            stream (bool, optional): Flag indicating whether to stream the generated completions.
+                Defaults to False.
+            logprobs (int, optional): Number of top-k logprobs to return.
+                Defaults to None.
+            echo (bool, optional): Echo prompt in output. Can be used with logprobs to return prompt logprobs.
+                Defaults to None.
+            n (int, optional): Number of completions to generate. Setting to None will return a single generation.
+                Defaults to None.
+            safety_model (str, optional): A moderation model to validate tokens. Choice between available moderation
+                models found [here](https://docs.seekrflow.ai/docs/inference-models#moderation-models).
+                Defaults to None.
+
+        Returns:
+            CompletionResponse | Iterator[CompletionChunk]: Object containing the completions
+                or an iterator over completion chunks.
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = CompletionRequest(
+            model=model,
+            prompt=prompt,
+            top_p=top_p,
+            top_k=top_k,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            stop=stop,
+            repetition_penalty=repetition_penalty,
+            stream=stream,
+            logprobs=logprobs,
+            echo=echo,
+            n=n,
+            safety_model=safety_model,
+        ).model_dump()
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="completions",
+                params=parameter_payload,
+            ),
+            stream=stream,
+        )
+
+        if stream:
+            # must be an iterator
+            assert not isinstance(response, SeekrFlowResponse)
+            return (CompletionChunk(**line.data) for line in response)
+        assert isinstance(response, SeekrFlowResponse)
+        return CompletionResponse(**response.data)
+
+
+class AsyncCompletions:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    async def create(
+        self,
+        *,
+        prompt: str,
+        model: str,
+        max_tokens: int | None = 512,
+        stop: List[str] | None = None,
+        temperature: float | None = None,
+        top_p: float | None = None,
+        top_k: int | None = None,
+        repetition_penalty: float | None = None,
+        stream: bool = False,
+        logprobs: int | None = None,
+        echo: bool | None = None,
+        n: int | None = None,
+        safety_model: str | None = None,
+    ) -> AsyncGenerator[CompletionChunk, None] | CompletionResponse:
+        """
+        Async method to generate completions based on a given prompt using a specified model.
+
+        Args:
+            prompt (str): A string providing context for the model to complete.
+            model (str): The name of the model to query.
+            max_tokens (int, optional): The maximum number of tokens to generate.
+                Defaults to 512.
+            stop (List[str], optional): List of strings at which to stop generation.
+                Defaults to None.
+            temperature (float, optional): A decimal number that determines the degree of randomness in the response.
+                Defaults to None.
+            top_p (float, optional): The top_p (nucleus) parameter is used to dynamically adjust the number
+                of choices for each predicted token based on the cumulative probabilities.
+                Defaults to None.
+            top_k (int, optional): The top_k parameter is used to limit the number of choices for the
+                next predicted word or token.
+                Defaults to None.
+            repetition_penalty (float, optional): A number that controls the diversity of generated text
+                by reducing the likelihood of repeated sequences. Higher values decrease repetition.
+                Defaults to None.
+            stream (bool, optional): Flag indicating whether to stream the generated completions.
+                Defaults to False.
+            logprobs (int, optional): Number of top-k logprobs to return.
+                Defaults to None.
+            echo (bool, optional): Echo prompt in output. Can be used with logprobs to return prompt logprobs.
+                Defaults to None.
+            n (int, optional): Number of completions to generate. Setting to None will return a single generation.
+                Defaults to None.
+            safety_model (str, optional): A moderation model to validate tokens. Choice between available moderation
+                models found [here](https://docs.seekrflow.ai/docs/inference-models#moderation-models).
+                Defaults to None.
+
+        Returns:
+            AsyncGenerator[CompletionChunk, None] | CompletionResponse: Object containing the completions
+                or an iterator over completion chunks.
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = CompletionRequest(
+            model=model,
+            prompt=prompt,
+            top_p=top_p,
+            top_k=top_k,
+            temperature=temperature,
+            max_tokens=max_tokens,
+            stop=stop,
+            repetition_penalty=repetition_penalty,
+            stream=stream,
+            logprobs=logprobs,
+            echo=echo,
+            n=n,
+            safety_model=safety_model,
+        ).model_dump()
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="completions",
+                params=parameter_payload,
+            ),
+            stream=stream,
+        )
+
+        if stream:
+            # must be an iterator
+            assert not isinstance(response, SeekrFlowResponse)
+            return (CompletionChunk(**line.data) async for line in response)
+        assert isinstance(response, SeekrFlowResponse)
+        return CompletionResponse(**response.data)
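For orientation, a minimal usage sketch of this resource follows. It is not part of the package diff: the `SeekrFlow` entry-point class, its constructor argument, and the `client.completions` attribute are assumptions inferred from `seekrai/client.py` in the manifest above, and the model name is a placeholder.

# Hypothetical wiring: `SeekrFlow` and `client.completions` are assumed from
# seekrai/client.py (not shown in this diff); the model name is a placeholder.
from seekrai import SeekrFlow

client = SeekrFlow(api_key="your-api-key")

# Non-streaming call: create() returns a CompletionResponse.
response = client.completions.create(
    prompt="The capital of France is",
    model="placeholder-model",
    max_tokens=32,
    temperature=0.7,
)
print(response)

# Streaming call: create(stream=True) returns a generator of CompletionChunk.
for chunk in client.completions.create(
    prompt="The capital of France is",
    model="placeholder-model",
    stream=True,
):
    print(chunk)

The async variant, `AsyncCompletions.create`, mirrors this surface via `await`; with `stream=True` it returns an async generator consumed with `async for`.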
seekrai/resources/embeddings.py +100 -0

@@ -0,0 +1,100 @@
+from __future__ import annotations
+
+from typing import List
+
+from seekrai.abstract import api_requestor
+from seekrai.seekrflow_response import SeekrFlowResponse
+from seekrai.types import (
+    EmbeddingRequest,
+    EmbeddingResponse,
+    SeekrFlowClient,
+    SeekrFlowRequest,
+)
+
+
+class Embeddings:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    def create(
+        self,
+        *,
+        input: str | List[str],
+        model: str,
+    ) -> EmbeddingResponse:
+        """
+        Method to generate embeddings for a given input using a specified model.
+
+        Args:
+            input (str | List[str]): A string or list of strings to embed.
+            model (str): The name of the model to query.
+
+        Returns:
+            EmbeddingResponse: Object containing the embeddings.
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = EmbeddingRequest(
+            input=input,
+            model=model,
+        ).model_dump()
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="embeddings",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return EmbeddingResponse(**response.data)
+
+
+class AsyncEmbeddings:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    async def create(
+        self,
+        *,
+        input: str | List[str],
+        model: str,
+    ) -> EmbeddingResponse:
+        """
+        Async method to generate embeddings for a given input using a specified model.
+
+        Args:
+            input (str | List[str]): A string or list of strings to embed.
+            model (str): The name of the model to query.
+
+        Returns:
+            EmbeddingResponse: Object containing the embeddings.
+        """
+
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = EmbeddingRequest(
+            input=input,
+            model=model,
+        ).model_dump()
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="embeddings",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return EmbeddingResponse(**response.data)
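As above, a minimal sketch for this resource; the client wiring is assumed rather than shown in this diff, and the model name is a placeholder.

# Hypothetical wiring as above; `client.embeddings` is assumed from the
# resource layout, and the model name is a placeholder.
from seekrai import SeekrFlow

client = SeekrFlow(api_key="your-api-key")

# `input` accepts a single string or a list of strings; the response wraps
# one embedding per input.
resp = client.embeddings.create(
    input=["first document", "second document"],
    model="placeholder-embedding-model",
)
print(resp)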
seekrai/resources/files.py +173 -0

@@ -0,0 +1,173 @@
+from __future__ import annotations
+
+from pathlib import Path
+
+from seekrai.abstract import api_requestor
+from seekrai.filemanager import DownloadManager, UploadManager
+from seekrai.seekrflow_response import SeekrFlowResponse
+from seekrai.types import (
+    FileDeleteResponse,
+    FileList,
+    FileObject,
+    FilePurpose,
+    FileResponse,
+    SeekrFlowClient,
+    SeekrFlowRequest,
+)
+from seekrai.utils import normalize_key
+
+
+class Files:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    def upload(
+        self, file: Path | str, *, purpose: FilePurpose | str = FilePurpose.FineTune
+    ) -> FileResponse:
+        upload_manager = UploadManager(self._client)
+
+        if isinstance(file, str):
+            file = Path(file)
+
+        if isinstance(purpose, str):
+            purpose = FilePurpose(purpose)
+
+        assert isinstance(purpose, FilePurpose)
+
+        return upload_manager.upload("files", file, purpose=purpose, redirect=True)
+
+    def list(self) -> FileList:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url="files",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        files = [FileResponse(id=file["id"], filename=file["filename"], created_at=file["created_at"], object="file") for file in response.data["data"]]
+        return FileList(object="list", data=files)
+
+    def retrieve(self, id: str) -> FileResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"files/{id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return FileResponse(**response.data)
+
+    def retrieve_content(
+        self, id: str, *, output: Path | str | None = None
+    ) -> FileObject:
+        download_manager = DownloadManager(self._client)
+
+        if isinstance(output, str):
+            output = Path(output)
+
+        downloaded_filename, file_size = download_manager.download(
+            f"files/{id}/content", output, normalize_key(f"{id}.jsonl")
+        )
+
+        return FileObject(
+            object="local",
+            id=id,
+            filename=downloaded_filename,
+            size=file_size,
+        )
+
+    def delete(self, id: str) -> FileDeleteResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="DELETE",
+                url=f"files/{id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return FileDeleteResponse(**response.data)
+
+
+class AsyncFiles:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    async def upload(
+        self, file: Path | str, *, purpose: FilePurpose | str = FilePurpose.FineTune
+    ) -> None:
+        raise NotImplementedError()
+
+    async def list(self) -> FileList:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url="files",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return FileList(**response.data)
+
+    async def retrieve(self, id: str) -> FileResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"files/{id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return FileResponse(**response.data)
+
+    async def retrieve_content(
+        self, id: str, *, output: Path | str | None = None
+    ) -> FileObject:
+        raise NotImplementedError()
+
+    async def delete(self, id: str) -> FileDeleteResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="DELETE",
+                url=f"files/{id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+
+        return FileDeleteResponse(**response.data)
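A final sketch for the file lifecycle; `SeekrFlow` and `client.files` are assumed wiring as above, while `FilePurpose` and the method signatures come directly from the hunk. The file names are placeholders.

# Hypothetical wiring as above; `client.files` is assumed, file names are
# placeholders. Signatures match the Files resource shown in this diff.
from seekrai import SeekrFlow
from seekrai.types import FilePurpose

client = SeekrFlow(api_key="your-api-key")

# Upload a local JSONL file for fine-tuning; `purpose` accepts the enum or
# its string value.
uploaded = client.files.upload("train.jsonl", purpose=FilePurpose.FineTune)

# Enumerate files, fetch metadata, download content to a local path, delete.
listing = client.files.list()
meta = client.files.retrieve(uploaded.id)
local = client.files.retrieve_content(uploaded.id, output="train-copy.jsonl")
client.files.delete(uploaded.id)

Note that in this release the async counterpart stubs `upload` and `retrieve_content` with `NotImplementedError`, so those two operations are sync-only.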