mistralai 1.5.2rc1__py3-none-any.whl → 1.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mistralai/_version.py +2 -2
- mistralai/agents.py +12 -0
- mistralai/chat.py +12 -0
- mistralai/classifiers.py +435 -23
- mistralai/embeddings.py +6 -2
- mistralai/jobs.py +84 -38
- mistralai/mistral_jobs.py +2 -2
- mistralai/models/__init__.py +197 -46
- mistralai/models/agentscompletionrequest.py +4 -0
- mistralai/models/agentscompletionstreamrequest.py +4 -0
- mistralai/models/archiveftmodelout.py +3 -11
- mistralai/models/batchjobout.py +3 -9
- mistralai/models/batchjobsout.py +3 -9
- mistralai/models/chatclassificationrequest.py +20 -0
- mistralai/models/chatcompletionrequest.py +4 -0
- mistralai/models/chatcompletionstreamrequest.py +4 -0
- mistralai/models/chatmoderationrequest.py +4 -7
- mistralai/models/classificationresponse.py +12 -9
- mistralai/models/classificationtargetresult.py +14 -0
- mistralai/models/classifierdetailedjobout.py +156 -0
- mistralai/models/classifierftmodelout.py +101 -0
- mistralai/models/classifierjobout.py +165 -0
- mistralai/models/classifiertargetin.py +55 -0
- mistralai/models/classifiertargetout.py +24 -0
- mistralai/models/classifiertrainingparameters.py +73 -0
- mistralai/models/classifiertrainingparametersin.py +85 -0
- mistralai/models/{detailedjobout.py → completiondetailedjobout.py} +34 -34
- mistralai/models/{ftmodelout.py → completionftmodelout.py} +12 -12
- mistralai/models/{jobout.py → completionjobout.py} +25 -24
- mistralai/models/{trainingparameters.py → completiontrainingparameters.py} +7 -7
- mistralai/models/{trainingparametersin.py → completiontrainingparametersin.py} +7 -7
- mistralai/models/embeddingrequest.py +6 -4
- mistralai/models/finetuneablemodeltype.py +7 -0
- mistralai/models/ftclassifierlossfunction.py +7 -0
- mistralai/models/ftmodelcapabilitiesout.py +3 -0
- mistralai/models/function.py +2 -2
- mistralai/models/githubrepositoryin.py +3 -11
- mistralai/models/githubrepositoryout.py +3 -11
- mistralai/models/inputs.py +54 -0
- mistralai/models/instructrequest.py +42 -0
- mistralai/models/jobin.py +52 -12
- mistralai/models/jobs_api_routes_batch_get_batch_jobsop.py +3 -3
- mistralai/models/jobs_api_routes_fine_tuning_cancel_fine_tuning_jobop.py +29 -2
- mistralai/models/jobs_api_routes_fine_tuning_create_fine_tuning_jobop.py +21 -4
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobop.py +29 -2
- mistralai/models/jobs_api_routes_fine_tuning_get_fine_tuning_jobsop.py +8 -0
- mistralai/models/jobs_api_routes_fine_tuning_start_fine_tuning_jobop.py +29 -2
- mistralai/models/jobs_api_routes_fine_tuning_update_fine_tuned_modelop.py +28 -2
- mistralai/models/jobsout.py +24 -13
- mistralai/models/jsonschema.py +1 -1
- mistralai/models/legacyjobmetadataout.py +3 -12
- mistralai/models/{classificationobject.py → moderationobject.py} +6 -6
- mistralai/models/moderationresponse.py +21 -0
- mistralai/models/unarchiveftmodelout.py +3 -11
- mistralai/models/wandbintegration.py +3 -11
- mistralai/models/wandbintegrationout.py +8 -13
- mistralai/models_.py +10 -4
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/METADATA +4 -2
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/RECORD +81 -63
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/WHEEL +1 -1
- mistralai_azure/_version.py +2 -2
- mistralai_azure/chat.py +12 -0
- mistralai_azure/models/__init__.py +15 -0
- mistralai_azure/models/chatcompletionrequest.py +4 -0
- mistralai_azure/models/chatcompletionstreamrequest.py +4 -0
- mistralai_azure/models/contentchunk.py +6 -2
- mistralai_azure/models/function.py +2 -2
- mistralai_azure/models/imageurl.py +53 -0
- mistralai_azure/models/imageurlchunk.py +33 -0
- mistralai_azure/models/jsonschema.py +1 -1
- mistralai_gcp/_version.py +2 -2
- mistralai_gcp/chat.py +12 -0
- mistralai_gcp/models/__init__.py +15 -0
- mistralai_gcp/models/chatcompletionrequest.py +4 -0
- mistralai_gcp/models/chatcompletionstreamrequest.py +4 -0
- mistralai_gcp/models/contentchunk.py +6 -2
- mistralai_gcp/models/function.py +2 -2
- mistralai_gcp/models/imageurl.py +53 -0
- mistralai_gcp/models/imageurlchunk.py +33 -0
- mistralai_gcp/models/jsonschema.py +1 -1
- {mistralai-1.5.2rc1.dist-info → mistralai-1.7.0.dist-info}/LICENSE +0 -0
```diff
--- a/mistralai/models/chatmoderationrequest.py
+++ b/mistralai/models/chatmoderationrequest.py
@@ -9,8 +9,8 @@ from mistralai.types import BaseModel
 from mistralai.utils import get_discriminator
 import pydantic
 from pydantic import Discriminator, Tag
-from typing import List,
-from typing_extensions import Annotated,
+from typing import List, Union
+from typing_extensions import Annotated, TypeAliasType, TypedDict
 
 
 TwoTypedDict = TypeAliasType(
@@ -71,16 +71,13 @@ r"""Chat to classify"""
 
 
 class ChatModerationRequestTypedDict(TypedDict):
-    model: str
     inputs: ChatModerationRequestInputsTypedDict
     r"""Chat to classify"""
-
+    model: str
 
 
 class ChatModerationRequest(BaseModel):
-    model: str
-
     inputs: Annotated[ChatModerationRequestInputs, pydantic.Field(alias="input")]
     r"""Chat to classify"""
 
-
+    model: str
```
```diff
--- a/mistralai/models/classificationresponse.py
+++ b/mistralai/models/classificationresponse.py
@@ -1,21 +1,24 @@
 """Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
 
 from __future__ import annotations
-from .
+from .classificationtargetresult import (
+    ClassificationTargetResult,
+    ClassificationTargetResultTypedDict,
+)
 from mistralai.types import BaseModel
-from typing import
-from typing_extensions import
+from typing import Dict, List
+from typing_extensions import TypedDict
 
 
 class ClassificationResponseTypedDict(TypedDict):
-    id:
-    model:
-    results:
+    id: str
+    model: str
+    results: List[Dict[str, ClassificationTargetResultTypedDict]]
 
 
 class ClassificationResponse(BaseModel):
-    id:
+    id: str
 
-    model:
+    model: str
 
-    results:
+    results: List[Dict[str, ClassificationTargetResult]]
```
```diff
--- /dev/null
+++ b/mistralai/models/classificationtargetresult.py
@@ -0,0 +1,14 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel
+from typing import Dict
+from typing_extensions import TypedDict
+
+
+class ClassificationTargetResultTypedDict(TypedDict):
+    scores: Dict[str, float]
+
+
+class ClassificationTargetResult(BaseModel):
+    scores: Dict[str, float]
```
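The two models above define the new classification result shape: `results` is now a list of target-name → `ClassificationTargetResult` maps, each carrying a label → score dictionary. A minimal consumption sketch, not part of the diff; the IDs, target name, and labels are invented, and it assumes the new models are re-exported from `mistralai.models` as the `__init__.py` changes suggest:

```python
# Illustrative sketch only: parse the new response shape.
from mistralai.models import ClassificationResponse

resp = ClassificationResponse.model_validate({
    "id": "cls-123",                      # hypothetical ID
    "model": "my-classifier-model",       # hypothetical model name
    "results": [
        {"sentiment": {"scores": {"positive": 0.92, "negative": 0.08}}},
    ],
})
# Each result maps a target name to a ClassificationTargetResult
# holding a label -> score dictionary.
print(resp.results[0]["sentiment"].scores["positive"])
```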
```diff
--- /dev/null
+++ b/mistralai/models/classifierdetailedjobout.py
@@ -0,0 +1,156 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .checkpointout import CheckpointOut, CheckpointOutTypedDict
+from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict
+from .classifiertrainingparameters import (
+    ClassifierTrainingParameters,
+    ClassifierTrainingParametersTypedDict,
+)
+from .eventout import EventOut, EventOutTypedDict
+from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict
+from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ClassifierDetailedJobOutStatus = Literal[
+    "QUEUED",
+    "STARTED",
+    "VALIDATING",
+    "VALIDATED",
+    "RUNNING",
+    "FAILED_VALIDATION",
+    "FAILED",
+    "SUCCESS",
+    "CANCELLED",
+    "CANCELLATION_REQUESTED",
+]
+
+ClassifierDetailedJobOutObject = Literal["job"]
+
+ClassifierDetailedJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict
+
+
+ClassifierDetailedJobOutIntegrations = WandbIntegrationOut
+
+
+ClassifierDetailedJobOutJobType = Literal["classifier"]
+
+
+class ClassifierDetailedJobOutTypedDict(TypedDict):
+    id: str
+    auto_start: bool
+    model: str
+    r"""The name of the model to fine-tune."""
+    status: ClassifierDetailedJobOutStatus
+    created_at: int
+    modified_at: int
+    training_files: List[str]
+    hyperparameters: ClassifierTrainingParametersTypedDict
+    classifier_targets: List[ClassifierTargetOutTypedDict]
+    validation_files: NotRequired[Nullable[List[str]]]
+    object: NotRequired[ClassifierDetailedJobOutObject]
+    fine_tuned_model: NotRequired[Nullable[str]]
+    suffix: NotRequired[Nullable[str]]
+    integrations: NotRequired[
+        Nullable[List[ClassifierDetailedJobOutIntegrationsTypedDict]]
+    ]
+    trained_tokens: NotRequired[Nullable[int]]
+    metadata: NotRequired[Nullable[JobMetadataOutTypedDict]]
+    job_type: NotRequired[ClassifierDetailedJobOutJobType]
+    events: NotRequired[List[EventOutTypedDict]]
+    r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here."""
+    checkpoints: NotRequired[List[CheckpointOutTypedDict]]
+
+
+class ClassifierDetailedJobOut(BaseModel):
+    id: str
+
+    auto_start: bool
+
+    model: str
+    r"""The name of the model to fine-tune."""
+
+    status: ClassifierDetailedJobOutStatus
+
+    created_at: int
+
+    modified_at: int
+
+    training_files: List[str]
+
+    hyperparameters: ClassifierTrainingParameters
+
+    classifier_targets: List[ClassifierTargetOut]
+
+    validation_files: OptionalNullable[List[str]] = UNSET
+
+    object: Optional[ClassifierDetailedJobOutObject] = "job"
+
+    fine_tuned_model: OptionalNullable[str] = UNSET
+
+    suffix: OptionalNullable[str] = UNSET
+
+    integrations: OptionalNullable[List[ClassifierDetailedJobOutIntegrations]] = UNSET
+
+    trained_tokens: OptionalNullable[int] = UNSET
+
+    metadata: OptionalNullable[JobMetadataOut] = UNSET
+
+    job_type: Optional[ClassifierDetailedJobOutJobType] = "classifier"
+
+    events: Optional[List[EventOut]] = None
+    r"""Event items are created every time the status of a fine-tuning job changes. The timestamped list of all events is accessible here."""
+
+    checkpoints: Optional[List[CheckpointOut]] = None
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "validation_files",
+            "object",
+            "fine_tuned_model",
+            "suffix",
+            "integrations",
+            "trained_tokens",
+            "metadata",
+            "job_type",
+            "events",
+            "checkpoints",
+        ]
+        nullable_fields = [
+            "validation_files",
+            "fine_tuned_model",
+            "suffix",
+            "integrations",
+            "trained_tokens",
+            "metadata",
+        ]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
```
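For orientation, a hedged sketch of hydrating the new detailed-job model and of what its custom serializer does with fields left at `UNSET`. The payload values are invented, and the `loss_function` value `"single_class"` is an assumption about `ftclassifierlossfunction.py`, whose body is not shown in this excerpt:

```python
# Sketch only; assumes the model is re-exported from mistralai.models.
from mistralai.models import ClassifierDetailedJobOut

payload = {
    "id": "ft-job-123",                 # hypothetical values throughout
    "auto_start": True,
    "model": "ministral-3b-latest",
    "status": "QUEUED",
    "created_at": 1714000000,
    "modified_at": 1714000000,
    "training_files": ["file-abc"],
    "hyperparameters": {"training_steps": 100},
    "classifier_targets": [
        {"name": "sentiment", "labels": ["positive", "negative"],
         "weight": 1.0, "loss_function": "single_class"}  # assumed literal
    ],
}
job = ClassifierDetailedJobOut.model_validate(payload)
assert job.object == "job" and job.job_type == "classifier"  # defaults applied
# serialize_model drops optional fields still at UNSET, e.g. fine_tuned_model:
assert "fine_tuned_model" not in job.model_dump()
```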
```diff
--- /dev/null
+++ b/mistralai/models/classifierftmodelout.py
@@ -0,0 +1,101 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .classifiertargetout import ClassifierTargetOut, ClassifierTargetOutTypedDict
+from .ftmodelcapabilitiesout import (
+    FTModelCapabilitiesOut,
+    FTModelCapabilitiesOutTypedDict,
+)
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ClassifierFTModelOutObject = Literal["model"]
+
+ClassifierFTModelOutModelType = Literal["classifier"]
+
+
+class ClassifierFTModelOutTypedDict(TypedDict):
+    id: str
+    created: int
+    owned_by: str
+    root: str
+    archived: bool
+    capabilities: FTModelCapabilitiesOutTypedDict
+    job: str
+    classifier_targets: List[ClassifierTargetOutTypedDict]
+    object: NotRequired[ClassifierFTModelOutObject]
+    name: NotRequired[Nullable[str]]
+    description: NotRequired[Nullable[str]]
+    max_context_length: NotRequired[int]
+    aliases: NotRequired[List[str]]
+    model_type: NotRequired[ClassifierFTModelOutModelType]
+
+
+class ClassifierFTModelOut(BaseModel):
+    id: str
+
+    created: int
+
+    owned_by: str
+
+    root: str
+
+    archived: bool
+
+    capabilities: FTModelCapabilitiesOut
+
+    job: str
+
+    classifier_targets: List[ClassifierTargetOut]
+
+    object: Optional[ClassifierFTModelOutObject] = "model"
+
+    name: OptionalNullable[str] = UNSET
+
+    description: OptionalNullable[str] = UNSET
+
+    max_context_length: Optional[int] = 32768
+
+    aliases: Optional[List[str]] = None
+
+    model_type: Optional[ClassifierFTModelOutModelType] = "classifier"
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "object",
+            "name",
+            "description",
+            "max_context_length",
+            "aliases",
+            "model_type",
+        ]
+        nullable_fields = ["name", "description"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
```
```diff
--- /dev/null
+++ b/mistralai/models/classifierjobout.py
@@ -0,0 +1,165 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .classifiertrainingparameters import (
+    ClassifierTrainingParameters,
+    ClassifierTrainingParametersTypedDict,
+)
+from .jobmetadataout import JobMetadataOut, JobMetadataOutTypedDict
+from .wandbintegrationout import WandbIntegrationOut, WandbIntegrationOutTypedDict
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Literal, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+ClassifierJobOutStatus = Literal[
+    "QUEUED",
+    "STARTED",
+    "VALIDATING",
+    "VALIDATED",
+    "RUNNING",
+    "FAILED_VALIDATION",
+    "FAILED",
+    "SUCCESS",
+    "CANCELLED",
+    "CANCELLATION_REQUESTED",
+]
+r"""The current status of the fine-tuning job."""
+
+ClassifierJobOutObject = Literal["job"]
+r"""The object type of the fine-tuning job."""
+
+ClassifierJobOutIntegrationsTypedDict = WandbIntegrationOutTypedDict
+
+
+ClassifierJobOutIntegrations = WandbIntegrationOut
+
+
+ClassifierJobOutJobType = Literal["classifier"]
+r"""The type of job (`FT` for fine-tuning)."""
+
+
+class ClassifierJobOutTypedDict(TypedDict):
+    id: str
+    r"""The ID of the job."""
+    auto_start: bool
+    model: str
+    r"""The name of the model to fine-tune."""
+    status: ClassifierJobOutStatus
+    r"""The current status of the fine-tuning job."""
+    created_at: int
+    r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created."""
+    modified_at: int
+    r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified."""
+    training_files: List[str]
+    r"""A list containing the IDs of uploaded files that contain training data."""
+    hyperparameters: ClassifierTrainingParametersTypedDict
+    validation_files: NotRequired[Nullable[List[str]]]
+    r"""A list containing the IDs of uploaded files that contain validation data."""
+    object: NotRequired[ClassifierJobOutObject]
+    r"""The object type of the fine-tuning job."""
+    fine_tuned_model: NotRequired[Nullable[str]]
+    r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running."""
+    suffix: NotRequired[Nullable[str]]
+    r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+    integrations: NotRequired[Nullable[List[ClassifierJobOutIntegrationsTypedDict]]]
+    r"""A list of integrations enabled for your fine-tuning job."""
+    trained_tokens: NotRequired[Nullable[int]]
+    r"""Total number of tokens trained."""
+    metadata: NotRequired[Nullable[JobMetadataOutTypedDict]]
+    job_type: NotRequired[ClassifierJobOutJobType]
+    r"""The type of job (`FT` for fine-tuning)."""
+
+
+class ClassifierJobOut(BaseModel):
+    id: str
+    r"""The ID of the job."""
+
+    auto_start: bool
+
+    model: str
+    r"""The name of the model to fine-tune."""
+
+    status: ClassifierJobOutStatus
+    r"""The current status of the fine-tuning job."""
+
+    created_at: int
+    r"""The UNIX timestamp (in seconds) for when the fine-tuning job was created."""
+
+    modified_at: int
+    r"""The UNIX timestamp (in seconds) for when the fine-tuning job was last modified."""
+
+    training_files: List[str]
+    r"""A list containing the IDs of uploaded files that contain training data."""
+
+    hyperparameters: ClassifierTrainingParameters
+
+    validation_files: OptionalNullable[List[str]] = UNSET
+    r"""A list containing the IDs of uploaded files that contain validation data."""
+
+    object: Optional[ClassifierJobOutObject] = "job"
+    r"""The object type of the fine-tuning job."""
+
+    fine_tuned_model: OptionalNullable[str] = UNSET
+    r"""The name of the fine-tuned model that is being created. The value will be `null` if the fine-tuning job is still running."""
+
+    suffix: OptionalNullable[str] = UNSET
+    r"""Optional text/code that adds more context for the model. When given a `prompt` and a `suffix` the model will fill what is between them. When `suffix` is not provided, the model will simply execute completion starting with `prompt`."""
+
+    integrations: OptionalNullable[List[ClassifierJobOutIntegrations]] = UNSET
+    r"""A list of integrations enabled for your fine-tuning job."""
+
+    trained_tokens: OptionalNullable[int] = UNSET
+    r"""Total number of tokens trained."""
+
+    metadata: OptionalNullable[JobMetadataOut] = UNSET
+
+    job_type: Optional[ClassifierJobOutJobType] = "classifier"
+    r"""The type of job (`FT` for fine-tuning)."""
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "validation_files",
+            "object",
+            "fine_tuned_model",
+            "suffix",
+            "integrations",
+            "trained_tokens",
+            "metadata",
+            "job_type",
+        ]
+        nullable_fields = [
+            "validation_files",
+            "fine_tuned_model",
+            "suffix",
+            "integrations",
+            "trained_tokens",
+            "metadata",
+        ]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
```
```diff
--- /dev/null
+++ b/mistralai/models/classifiertargetin.py
@@ -0,0 +1,55 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .ftclassifierlossfunction import FTClassifierLossFunction
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import List, Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ClassifierTargetInTypedDict(TypedDict):
+    name: str
+    labels: List[str]
+    weight: NotRequired[float]
+    loss_function: NotRequired[Nullable[FTClassifierLossFunction]]
+
+
+class ClassifierTargetIn(BaseModel):
+    name: str
+
+    labels: List[str]
+
+    weight: Optional[float] = 1
+
+    loss_function: OptionalNullable[FTClassifierLossFunction] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = ["weight", "loss_function"]
+        nullable_fields = ["loss_function"]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
```
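A small illustrative sketch of the request-side target (assumptions: the model is re-exported from `mistralai.models`; the target name and labels are invented), showing the defaults and how `serialize_model` drops fields left at `UNSET`:

```python
# Sketch only, not part of the diff.
from mistralai.models import ClassifierTargetIn

target = ClassifierTargetIn(name="sentiment", labels=["positive", "negative"])
assert target.weight == 1            # default applied
dump = target.model_dump()
assert "loss_function" not in dump   # UNSET -> omitted from the payload
```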
```diff
--- /dev/null
+++ b/mistralai/models/classifiertargetout.py
@@ -0,0 +1,24 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from .ftclassifierlossfunction import FTClassifierLossFunction
+from mistralai.types import BaseModel
+from typing import List
+from typing_extensions import TypedDict
+
+
+class ClassifierTargetOutTypedDict(TypedDict):
+    name: str
+    labels: List[str]
+    weight: float
+    loss_function: FTClassifierLossFunction
+
+
+class ClassifierTargetOut(BaseModel):
+    name: str
+
+    labels: List[str]
+
+    weight: float
+
+    loss_function: FTClassifierLossFunction
```
```diff
--- /dev/null
+++ b/mistralai/models/classifiertrainingparameters.py
@@ -0,0 +1,73 @@
+"""Code generated by Speakeasy (https://speakeasy.com). DO NOT EDIT."""
+
+from __future__ import annotations
+from mistralai.types import BaseModel, Nullable, OptionalNullable, UNSET, UNSET_SENTINEL
+from pydantic import model_serializer
+from typing import Optional
+from typing_extensions import NotRequired, TypedDict
+
+
+class ClassifierTrainingParametersTypedDict(TypedDict):
+    training_steps: NotRequired[Nullable[int]]
+    learning_rate: NotRequired[float]
+    weight_decay: NotRequired[Nullable[float]]
+    warmup_fraction: NotRequired[Nullable[float]]
+    epochs: NotRequired[Nullable[float]]
+    seq_len: NotRequired[Nullable[int]]
+
+
+class ClassifierTrainingParameters(BaseModel):
+    training_steps: OptionalNullable[int] = UNSET
+
+    learning_rate: Optional[float] = 0.0001
+
+    weight_decay: OptionalNullable[float] = UNSET
+
+    warmup_fraction: OptionalNullable[float] = UNSET
+
+    epochs: OptionalNullable[float] = UNSET
+
+    seq_len: OptionalNullable[int] = UNSET
+
+    @model_serializer(mode="wrap")
+    def serialize_model(self, handler):
+        optional_fields = [
+            "training_steps",
+            "learning_rate",
+            "weight_decay",
+            "warmup_fraction",
+            "epochs",
+            "seq_len",
+        ]
+        nullable_fields = [
+            "training_steps",
+            "weight_decay",
+            "warmup_fraction",
+            "epochs",
+            "seq_len",
+        ]
+        null_default_fields = []
+
+        serialized = handler(self)
+
+        m = {}
+
+        for n, f in self.model_fields.items():
+            k = f.alias or n
+            val = serialized.get(k)
+            serialized.pop(k, None)
+
+            optional_nullable = k in optional_fields and k in nullable_fields
+            is_set = (
+                self.__pydantic_fields_set__.intersection({n})
+                or k in null_default_fields
+            )  # pylint: disable=no-member
+
+            if val is not None and val != UNSET_SENTINEL:
+                m[k] = val
+            elif val != UNSET_SENTINEL and (
+                not k in optional_fields or (optional_nullable and is_set)
+            ):
+                m[k] = val
+
+        return m
```
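Finally, a hedged sketch of the `OptionalNullable` semantics these hyperparameters share with the other new models: a field explicitly set to `None` survives serialization as `null`, while a field left at `UNSET` is omitted entirely. The parameter values are invented.

```python
# Sketch only; assumes the model is re-exported from mistralai.models.
from mistralai.models import ClassifierTrainingParameters

params = ClassifierTrainingParameters(training_steps=50, epochs=None)
dump = params.model_dump()
assert dump["training_steps"] == 50
assert dump["epochs"] is None            # explicitly set to None -> kept as null
assert "weight_decay" not in dump        # left at UNSET -> omitted
assert dump["learning_rate"] == 0.0001   # default retained
```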