seekrai 0.0.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- seekrai/__init__.py +64 -0
- seekrai/abstract/__init__.py +1 -0
- seekrai/abstract/api_requestor.py +710 -0
- seekrai/cli/__init__.py +0 -0
- seekrai/cli/api/__init__.py +0 -0
- seekrai/cli/api/chat.py +245 -0
- seekrai/cli/api/completions.py +107 -0
- seekrai/cli/api/files.py +125 -0
- seekrai/cli/api/finetune.py +175 -0
- seekrai/cli/api/images.py +82 -0
- seekrai/cli/api/models.py +42 -0
- seekrai/cli/cli.py +77 -0
- seekrai/client.py +154 -0
- seekrai/constants.py +32 -0
- seekrai/error.py +188 -0
- seekrai/filemanager.py +393 -0
- seekrai/legacy/__init__.py +0 -0
- seekrai/legacy/base.py +27 -0
- seekrai/legacy/complete.py +91 -0
- seekrai/legacy/embeddings.py +25 -0
- seekrai/legacy/files.py +140 -0
- seekrai/legacy/finetune.py +173 -0
- seekrai/legacy/images.py +25 -0
- seekrai/legacy/models.py +44 -0
- seekrai/resources/__init__.py +25 -0
- seekrai/resources/chat/__init__.py +24 -0
- seekrai/resources/chat/completions.py +241 -0
- seekrai/resources/completions.py +205 -0
- seekrai/resources/embeddings.py +100 -0
- seekrai/resources/files.py +173 -0
- seekrai/resources/finetune.py +425 -0
- seekrai/resources/images.py +156 -0
- seekrai/resources/models.py +75 -0
- seekrai/seekrflow_response.py +50 -0
- seekrai/types/__init__.py +67 -0
- seekrai/types/abstract.py +26 -0
- seekrai/types/chat_completions.py +151 -0
- seekrai/types/common.py +64 -0
- seekrai/types/completions.py +86 -0
- seekrai/types/embeddings.py +35 -0
- seekrai/types/error.py +16 -0
- seekrai/types/files.py +88 -0
- seekrai/types/finetune.py +218 -0
- seekrai/types/images.py +42 -0
- seekrai/types/models.py +43 -0
- seekrai/utils/__init__.py +28 -0
- seekrai/utils/_log.py +61 -0
- seekrai/utils/api_helpers.py +84 -0
- seekrai/utils/files.py +204 -0
- seekrai/utils/tools.py +75 -0
- seekrai/version.py +6 -0
- seekrai-0.0.1.dist-info/LICENSE +201 -0
- seekrai-0.0.1.dist-info/METADATA +401 -0
- seekrai-0.0.1.dist-info/RECORD +56 -0
- seekrai-0.0.1.dist-info/WHEEL +4 -0
- seekrai-0.0.1.dist-info/entry_points.txt +3 -0
seekrai/types/__init__.py
ADDED

@@ -0,0 +1,67 @@
from seekrai.types.abstract import SeekrFlowClient
from seekrai.types.chat_completions import (
    ChatCompletionChunk,
    ChatCompletionRequest,
    ChatCompletionResponse,
)
from seekrai.types.common import SeekrFlowRequest
from seekrai.types.completions import (
    CompletionChunk,
    CompletionRequest,
    CompletionResponse,
)
from seekrai.types.embeddings import EmbeddingRequest, EmbeddingResponse
from seekrai.types.files import (
    FileDeleteResponse,
    FileList,
    FileObject,
    FilePurpose,
    FileRequest,
    FileResponse,
    FileType,
)
from seekrai.types.finetune import (
    FinetuneDownloadResult,
    FinetuneList,
    FinetuneListEvents,
    FinetuneRequest,
    FinetuneResponse,
    InfrastructureConfig,
    TrainingConfig,
)
from seekrai.types.images import (
    ImageRequest,
    ImageResponse,
)
from seekrai.types.models import ModelObject


__all__ = [
    "SeekrFlowClient",
    "SeekrFlowRequest",
    "CompletionChunk",
    "CompletionRequest",
    "CompletionResponse",
    "ChatCompletionChunk",
    "ChatCompletionRequest",
    "ChatCompletionResponse",
    "EmbeddingRequest",
    "EmbeddingResponse",
    "FinetuneRequest",
    "FinetuneResponse",
    "FinetuneList",
    "FinetuneListEvents",
    "FinetuneDownloadResult",
    "InfrastructureConfig",
    "TrainingConfig",
    "FileRequest",
    "FileResponse",
    "FileList",
    "FileDeleteResponse",
    "FileObject",
    "FilePurpose",
    "FileType",
    "ImageRequest",
    "ImageResponse",
    "ModelObject",
]
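This module only re-exports the public type names from the submodules below. A minimal sketch, not part of the published package, of how downstream code could rely on those re-exports:

# Illustrative only: every name in __all__ can be imported from the
# seekrai.types subpackage root instead of the individual modules.
from seekrai.types import (
    ChatCompletionRequest,
    CompletionRequest,
    EmbeddingRequest,
    FileList,
    ModelObject,
)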
seekrai/types/abstract.py
ADDED

@@ -0,0 +1,26 @@
from __future__ import annotations

from dataclasses import dataclass
from typing import Dict

import pydantic
from pydantic import ConfigDict
from typing_extensions import ClassVar

from seekrai.constants import BASE_URL, MAX_RETRIES, TIMEOUT_SECS


PYDANTIC_V2 = pydantic.VERSION.startswith("2.")


@dataclass
class SeekrFlowClient:
    api_key: str | None = None
    base_url: str | None = BASE_URL
    timeout: float | None = TIMEOUT_SECS
    max_retries: int | None = MAX_RETRIES
    supplied_headers: Dict[str, str] | None = None


class BaseModel(pydantic.BaseModel):
    model_config: ClassVar[ConfigDict] = ConfigDict(extra="allow")
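SeekrFlowClient is a plain dataclass of connection settings whose defaults come from seekrai.constants. A minimal sketch of filling it in; the SEEKR_API_KEY variable name is an assumption for illustration, not something defined in this diff:

import os

from seekrai.types.abstract import SeekrFlowClient

# Fields left unset fall back to BASE_URL, TIMEOUT_SECS and MAX_RETRIES
# from seekrai.constants.
settings = SeekrFlowClient(
    api_key=os.environ.get("SEEKR_API_KEY"),  # assumed env var name, purely illustrative
    timeout=30.0,
    max_retries=2,
)
print(settings.base_url)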
seekrai/types/chat_completions.py
ADDED

@@ -0,0 +1,151 @@
from __future__ import annotations

from enum import Enum
from typing import Any, Dict, List

from pydantic import Field

from seekrai.types.abstract import BaseModel
from seekrai.types.common import (
    DeltaContent,
    FinishReason,
    LogprobsPart,
    ObjectType,
    PromptPart,
    UsageData,
)


class MessageRole(str, Enum):
    ASSISTANT = "assistant"
    SYSTEM = "system"
    USER = "user"


class ResponseFormatType(str, Enum):
    JSON_OBJECT = "json_object"


class FunctionCall(BaseModel):
    name: str | None = None
    arguments: str | None = None


class ToolCalls(BaseModel):
    id: str | None = None
    type: str | None = None
    function: FunctionCall | None = None


class ChatCompletionMessage(BaseModel):
    role: MessageRole
    content: str | None = None
    # tool_calls: List[ToolCalls] | None = None


class ResponseFormat(BaseModel):
    type: ResponseFormatType
    schema_: Dict[str, Any] | None = Field(None, alias="schema")


class FunctionTool(BaseModel):
    description: str | None = None
    name: str
    parameters: Dict[str, Any] | None = None


class FunctionToolChoice(BaseModel):
    name: str


class Tools(BaseModel):
    type: str
    function: FunctionTool


class ToolChoice(BaseModel):
    type: str
    function: FunctionToolChoice


class ToolChoiceEnum(str, Enum):
    Auto = "auto"


class ChatCompletionRequest(BaseModel):
    # list of messages
    messages: List[ChatCompletionMessage]
    # model name
    model: str
    # stopping criteria: max tokens to generate
    max_tokens: int | None = None
    # stopping criteria: list of strings to stop generation
    stop: List[str] | None = None
    # sampling hyperparameters
    temperature: float = 0.7
    top_p: float = 1
    top_k: int = -1
    repetition_penalty: float = 1
    # stream SSE token chunks
    stream: bool = False
    # return logprobs
    logprobs: int = 0
    # echo prompt.
    # can be used with logprobs to return prompt logprobs
    echo: bool = False
    # number of output generations
    n: int = 1
    # moderation model
    safety_model: str | None = None
    # constraints
    response_format: ResponseFormat | None = None
    # tools: List[Tools] | None = None
    # tool_choice: ToolChoice | ToolChoiceEnum | None = None


class ChatCompletionChoicesData(BaseModel):
    index: int | None = None
    logprobs: LogprobsPart | None = None
    finish_reason: FinishReason | None = None
    message: ChatCompletionMessage | None = None


class ChatCompletionResponse(BaseModel):
    # request id
    id: str | None = None
    # object type
    object: ObjectType | None = None
    # created timestamp
    created: int | None = None
    # model name
    model: str | None = None
    # choices list
    choices: List[ChatCompletionChoicesData] | None = None
    # prompt list
    prompt: List[PromptPart] | List[None] | None = None
    # token usage data
    usage: UsageData | None = None


class ChatCompletionChoicesChunk(BaseModel):
    index: int | None = None
    logprobs: float | None = None
    finish_reason: FinishReason | None = None
    delta: DeltaContent | None = None


class ChatCompletionChunk(BaseModel):
    # request id
    id: str | None = None
    # object type
    object: ObjectType | None = None
    # created timestamp
    created: int | None = None
    # model name
    model: str | None = None
    # delta content
    choices: List[ChatCompletionChoicesChunk] | None = None
    # finish reason
    finish_reason: FinishReason | None = None
    # token usage data
    usage: UsageData | None = None
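A short sketch of building a chat request from these models, assuming pydantic v2 (which the package probes via PYDANTIC_V2); the model id is a placeholder, not a real SeekrFlow model:

from seekrai.types.chat_completions import (
    ChatCompletionMessage,
    ChatCompletionRequest,
    MessageRole,
)

request = ChatCompletionRequest(
    model="example-model",  # placeholder model id
    messages=[
        ChatCompletionMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
        ChatCompletionMessage(role=MessageRole.USER, content="Hello!"),
    ],
    max_tokens=128,
)
# Optional fields left unset are dropped from the serialized payload.
print(request.model_dump(exclude_none=True))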
seekrai/types/common.py
ADDED

@@ -0,0 +1,64 @@
from __future__ import annotations

from enum import Enum
from typing import Any, Dict, List

from pydantic import ConfigDict
from tqdm.utils import CallbackIOWrapper

from seekrai.types.abstract import BaseModel


# Generation finish reason
class FinishReason(str, Enum):
    Length = "length"
    StopSequence = "stop"
    EOS = "eos"
    ToolCalls = "tool_calls"


class UsageData(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int


class ObjectType(str, Enum):
    Completion = "text.completion"
    CompletionChunk = "completion.chunk"
    ChatCompletion = "chat.completion"
    ChatCompletionChunk = "chat.completion.chunk"
    Embedding = "embedding"
    FinetuneEvent = "fine-tune-event"
    File = "file"
    Model = "model"


class LogprobsPart(BaseModel):
    # token list
    tokens: List[str | None] | None = None
    # token logprob list
    token_logprobs: List[float | None] | None = None


class PromptPart(BaseModel):
    # prompt string
    text: str | None = None
    # list of prompt logprobs
    logprobs: LogprobsPart | None = None


class DeltaContent(BaseModel):
    content: str | None = None


class SeekrFlowRequest(BaseModel):
    model_config = ConfigDict(arbitrary_types_allowed=True)

    method: str
    url: str
    headers: Dict[str, str] | None = None
    params: Dict[str, Any] | CallbackIOWrapper | None = None
    files: Dict[str, Any] | None = None
    allow_redirects: bool = True
    override_headers: bool = False
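SeekrFlowRequest is the internal request envelope the API layer passes around; arbitrary_types_allowed lets params carry a tqdm CallbackIOWrapper for upload progress. A hedged sketch of constructing one, with an illustrative relative path (the real paths live in the resource modules):

from seekrai.types.common import SeekrFlowRequest

req = SeekrFlowRequest(
    method="GET",
    url="models",  # hypothetical relative path, illustration only
    headers={"Accept": "application/json"},
)
print(req.model_dump(exclude_none=True))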
seekrai/types/completions.py
ADDED

@@ -0,0 +1,86 @@
from __future__ import annotations

from typing import List

from seekrai.types.abstract import BaseModel
from seekrai.types.common import (
    DeltaContent,
    FinishReason,
    LogprobsPart,
    ObjectType,
    PromptPart,
    UsageData,
)


class CompletionRequest(BaseModel):
    # prompt to complete
    prompt: str
    # query model
    model: str
    # stopping criteria: max tokens to generate
    max_tokens: int | None = None
    # stopping criteria: list of strings to stop generation
    stop: List[str] | None = None
    # sampling hyperparameters
    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    repetition_penalty: float | None = None
    # stream SSE token chunks
    stream: bool = False
    # return logprobs
    logprobs: int | None = None
    # echo prompt.
    # can be used with logprobs to return prompt logprobs
    echo: bool | None = None
    # number of output generations
    n: int | None = None
    # moderation model
    safety_model: str | None = None


class CompletionChoicesData(BaseModel):
    index: int
    logprobs: LogprobsPart | None = None
    finish_reason: FinishReason
    text: str


class CompletionChoicesChunk(BaseModel):
    index: int | None = None
    logprobs: float | None = None
    finish_reason: FinishReason | None = None
    delta: DeltaContent | None = None


class CompletionResponse(BaseModel):
    # request id
    id: str | None = None
    # object type
    object: ObjectType | None = None
    # created timestamp
    created: int | None = None
    # model name
    model: str | None = None
    # choices list
    choices: List[CompletionChoicesData] | None = None
    # prompt list
    prompt: List[PromptPart] | None = None
    # token usage data
    usage: UsageData | None = None


class CompletionChunk(BaseModel):
    # request id
    id: str | None = None
    # object type
    object: ObjectType | None = None
    # created timestamp
    created: int | None = None
    # model name
    model: str | None = None
    # choices list
    choices: List[CompletionChoicesChunk] | None = None
    # token usage data
    usage: UsageData | None = None
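Unlike ChatCompletionRequest, the sampling fields here default to None rather than concrete values, so the server-side defaults apply when they are omitted. A sketch with a placeholder model id, assuming pydantic v2:

from seekrai.types.completions import CompletionRequest

request = CompletionRequest(
    prompt="Once upon a time",
    model="example-model",  # placeholder model id
    max_tokens=64,
    stop=["\n\n"],
)
print(request.model_dump(exclude_none=True))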
seekrai/types/embeddings.py
ADDED

@@ -0,0 +1,35 @@
from __future__ import annotations

from typing import List, Literal

from seekrai.types.abstract import BaseModel
from seekrai.types.common import (
    ObjectType,
)


class EmbeddingRequest(BaseModel):
    # input or list of inputs
    input: str | List[str]
    # model to query
    model: str


class EmbeddingChoicesData(BaseModel):
    # response index
    index: int
    # object type
    object: ObjectType
    # embedding response
    embedding: List[float] | None = None


class EmbeddingResponse(BaseModel):
    # job id
    id: str | None = None
    # query model
    model: str | None = None
    # object type
    object: Literal["list"] | None = None
    # list of embedding choices
    data: List[EmbeddingChoicesData] | None = None
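EmbeddingRequest accepts either a single string or a list of strings as input. An illustrative sketch with a placeholder model name:

from seekrai.types.embeddings import EmbeddingRequest

single = EmbeddingRequest(input="hello world", model="example-embedding-model")  # placeholder model
batch = EmbeddingRequest(input=["hello", "world"], model="example-embedding-model")
print(single.input, len(batch.input))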
seekrai/types/error.py
ADDED

@@ -0,0 +1,16 @@
from __future__ import annotations

from pydantic import Field

from seekrai.types.abstract import BaseModel


class SeekrFlowErrorResponse(BaseModel):
    # error message
    message: str | None = None
    # error type
    type_: str | None = Field(None, alias="type")
    # param causing error
    param: str | None = None
    # error code
    code: str | None = None
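Because the reserved word type is aliased to type_, a raw error payload validates directly against this model. A sketch with made-up values, assuming pydantic v2's model_validate:

from seekrai.types.error import SeekrFlowErrorResponse

payload = {"message": "Invalid API key", "type": "authentication_error", "code": "401"}
error = SeekrFlowErrorResponse.model_validate(payload)
print(error.type_, error.message)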
seekrai/types/files.py
ADDED

@@ -0,0 +1,88 @@
from __future__ import annotations

from enum import Enum
from typing import List, Literal

from pydantic import Field

from seekrai.types.abstract import BaseModel
from seekrai.types.common import (
    ObjectType,
)
from datetime import datetime

class FilePurpose(str, Enum):
    FineTune = "fine-tune"


class FileType(str, Enum):
    jsonl = "jsonl"
    parquet = "parquet"


class FileRequest(BaseModel):
    """
    Files request type
    """

    # training file ID
    training_file: str
    # base model string
    model: str
    # number of epochs to train for
    n_epochs: int
    # training learning rate
    learning_rate: float
    # number of checkpoints to save
    n_checkpoints: int | None = None
    # training batch size
    batch_size: int | None = None
    # up to 40 character suffix for output model name
    suffix: str | None = None
    # weights & biases api key
    wandb_api_key: str | None = None


class FileResponse(BaseModel):
    """
    Files API response type
    """

    id: str
    object: Literal[ObjectType.File]
    # created timestamp
    created_at: datetime | None = None
    type: FileType | None = None
    purpose: FilePurpose | None = None
    filename: str | None = None
    # file byte size
    bytes: int | None = None
    # JSONL/Parquet line count
    line_count: int | None = None


class FileList(BaseModel):
    # object type
    object: Literal["list"] | None = None
    # list of fine-tune job objects
    data: List[FileResponse] | None = None


class FileDeleteResponse(BaseModel):
    # file id
    id: str
    # object type
    object: Literal[ObjectType.File]
    # is deleted
    deleted: bool


class FileObject(BaseModel):
    # object type
    object: Literal["local"] | None = None
    # fine-tune job id
    id: str | None = None
    # local path filename
    filename: str | None = None
    # size in bytes
    size: int | None = None