seekrai-0.2.3-py3-none-any.whl → seekrai-0.3.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
seekrai/abstract/api_requestor.py CHANGED
@@ -442,7 +442,9 @@ class APIRequestor:
             ) from e
 
         # retry on 5XX error or rate-limit
-        if result.status_code > 300:
+        if result.status_code > 300 and result.status_code < 500:
+            raise httpx.HTTPError(result.content.decode())
+        elif result.status_code >= 500:
             raise httpx.HTTPError("Error communicating with API: {}".format(result))
 
         utils.log_debug(
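
Note: the hunk above changes error handling so that responses with a status code above 300 and below 500 raise httpx.HTTPError carrying the decoded response body, while 5xx responses keep the generic message. The following is an illustrative sketch, not part of the diff; it assumes SeekrFlow is importable from the package root and that client.models.list() exists as in prior releases.

import httpx

from seekrai import SeekrFlow  # assumed top-level export

client = SeekrFlow()  # hypothetical setup; credentials/configuration omitted

try:
    client.models.list()  # any call routed through APIRequestor
except httpx.HTTPError as exc:
    # 3xx/4xx responses now surface the decoded response body;
    # 5xx responses still raise "Error communicating with API: ...".
    print(f"Request failed: {exc}")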
seekrai/client.py CHANGED
@@ -19,6 +19,8 @@ class SeekrFlow:
     models: resources.Models
     fine_tuning: resources.FineTuning
     alignment: resources.Alignment
+    projects: resources.Projects
+    deployments: resources.Deployments
 
     # client options
     client: SeekrFlowClient
@@ -79,6 +81,8 @@ class SeekrFlow:
         self.models = resources.Models(self.client)
         self.fine_tuning = resources.FineTuning(self.client)
         self.alignment = resources.Alignment(self.client)
+        self.projects = resources.Projects(self.client)
+        self.deployments = resources.Deployments(self.client)
 
 
 class AsyncSeekrFlow:
@@ -90,6 +94,8 @@ class AsyncSeekrFlow:
     models: resources.AsyncModels
     fine_tuning: resources.AsyncFineTuning
     alignment: resources.AsyncAlignment
+    projects: resources.AsyncProjects
+    deployments: resources.AsyncDeployments
 
     # client options
     client: SeekrFlowClient
@@ -150,6 +156,8 @@ class AsyncSeekrFlow:
         self.models = resources.AsyncModels(self.client)
         self.fine_tuning = resources.AsyncFineTuning(self.client)
         self.alignment = resources.AsyncAlignment(self.client)
+        self.projects = resources.AsyncProjects(self.client)
+        self.deployments = resources.AsyncDeployments(self.client)
 
 
 Client = SeekrFlow
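
Illustrative sketch (not part of the diff) of the new attributes on the sync and async clients; client construction details are assumed, as they are not shown in this hunk.

import asyncio

from seekrai import AsyncSeekrFlow, SeekrFlow  # assumed top-level exports

sync_client = SeekrFlow()  # hypothetical setup

# new in 0.3.1: projects and deployments resources on the client
print(type(sync_client.projects).__name__)     # Projects
print(type(sync_client.deployments).__name__)  # Deployments


async def main() -> None:
    async_client = AsyncSeekrFlow()
    print(type(async_client.projects).__name__)     # AsyncProjects
    print(type(async_client.deployments).__name__)  # AsyncDeployments


asyncio.run(main())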
seekrai/resources/__init__.py CHANGED
@@ -1,11 +1,13 @@
 from seekrai.resources.alignment import Alignment, AsyncAlignment
 from seekrai.resources.chat import AsyncChat, Chat
 from seekrai.resources.completions import AsyncCompletions, Completions
+from seekrai.resources.deployments import AsyncDeployments, Deployments
 from seekrai.resources.embeddings import AsyncEmbeddings, Embeddings
 from seekrai.resources.files import AsyncFiles, Files
 from seekrai.resources.finetune import AsyncFineTuning, FineTuning
 from seekrai.resources.images import AsyncImages, Images
 from seekrai.resources.models import AsyncModels, Models
+from seekrai.resources.projects import AsyncProjects, Projects
 
 
 __all__ = [
@@ -25,4 +27,8 @@ __all__ = [
     "Images",
     "AsyncModels",
     "Models",
+    "AsyncProjects",
+    "Projects",
+    "AsyncDeployments",
+    "Deployments",
 ]
seekrai/resources/chat/completions.py CHANGED
@@ -29,7 +29,8 @@ class ChatCompletions:
         top_k: int = 5,
         repetition_penalty: float = 1,
         stream: bool = False,
-        logprobs: int | None = None,
+        logprobs: bool | None = False,
+        top_logprobs: int | None = 0,
         echo: bool = False,
         n: int = 1,
         safety_model: str | None = None,
@@ -61,8 +62,10 @@ class ChatCompletions:
                 Defaults to None.
             stream (bool, optional): Flag indicating whether to stream the generated completions.
                 Defaults to False.
-            logprobs (int, optional): Number of top-k logprobs to return
-                Defaults to None.
+            logprobs (bool, optional): Whether to return log probabilities of the output tokens.
+                Defaults to False.
+            top_logprobs (int, optional): Number of most likely tokens to return the log probabilities for.
+                If greater than 0, it implies logprobs=True. Defaults to 0.
             echo (bool, optional): Echo prompt in output. Can be used with logprobs to return prompt logprobs.
                 Defaults to None.
             n (int, optional): Number of completions to generate. Setting to None will return a single generation.
@@ -102,6 +105,7 @@ class ChatCompletions:
             repetition_penalty=repetition_penalty,
             stream=stream,
             logprobs=logprobs,
+            top_logprobs=top_logprobs,
             echo=echo,
             n=n,
             safety_model=safety_model,
@@ -143,7 +147,8 @@ class AsyncChatCompletions:
         top_k: int = 5,
         repetition_penalty: float = 1,
         stream: bool = False,
-        logprobs: int | None = None,
+        logprobs: bool | None = False,
+        top_logprobs: int | None = 0,
         echo: bool = False,
         n: int = 1,
         safety_model: str | None = None,
@@ -175,8 +180,10 @@ class AsyncChatCompletions:
                 Defaults to None.
             stream (bool, optional): Flag indicating whether to stream the generated completions.
                 Defaults to False.
-            logprobs (int, optional): Number of top-k logprobs to return
-                Defaults to None.
+            logprobs (bool, optional): Whether to return log probabilities of the output tokens.
+                Defaults to False.
+            top_logprobs (int, optional): Number of most likely tokens to return the log probabilities for.
+                If greater than 0, it implies logprobs=True. Defaults to 0.
             echo (bool, optional): Echo prompt in output. Can be used with logprobs to return prompt logprobs.
                 Defaults to None.
             n (int, optional): Number of completions to generate. Setting to None will return a single generation.
@@ -216,6 +223,7 @@ class AsyncChatCompletions:
             repetition_penalty=repetition_penalty,
             stream=stream,
             logprobs=logprobs,
+            top_logprobs=top_logprobs,
            echo=echo,
             n=n,
             safety_model=safety_model,
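
Illustrative call with the revised parameters (not part of the diff). The model and messages arguments and client setup are assumed from the existing chat-completions interface, which this hunk does not show.

from seekrai import SeekrFlow  # assumed top-level export

client = SeekrFlow()  # hypothetical setup

response = client.chat.completions.create(
    model="example-model",  # placeholder model id
    messages=[{"role": "user", "content": "Hello"}],
    logprobs=True,   # 0.3.1: boolean flag (was an int count in 0.2.3)
    top_logprobs=3,  # 0.3.1: number of most likely tokens; > 0 implies logprobs=True
)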
seekrai/resources/deployments.py ADDED
@@ -0,0 +1,203 @@
+from seekrai.abstract import api_requestor
+from seekrai.seekrflow_response import SeekrFlowResponse
+from seekrai.types import SeekrFlowClient, SeekrFlowRequest
+from seekrai.types.deployments import Deployment as DeploymentSchema
+from seekrai.types.deployments import GetDeploymentsResponse
+
+
+class Deployments:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    def list(self) -> GetDeploymentsResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url="flow/deployments",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return GetDeploymentsResponse(**response.data)
+
+    def retrieve(self, deployment_id: str) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"flow/deployments/{deployment_id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+    def create(
+        self,
+        name: str,
+        description: str,
+        model_type: str,
+        model_id: str,
+        n_instances: int,
+    ) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="flow/deployments",
+                params={
+                    "name": name,
+                    "description": description,
+                    "model_type": model_type,
+                    "model_id": model_id,
+                    "n_instances": n_instances,
+                },
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+    def promote(self, deployment_id: str) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="PUT",
+                url=f"flow/deployments/{deployment_id}/promote",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+    def demote(self, deployment_id: str) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="PUT",
+                url=f"flow/deployments/{deployment_id}/demote",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+
+class AsyncDeployments:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    async def list(self) -> GetDeploymentsResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url="flow/deployments",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return GetDeploymentsResponse(**response.data)
+
+    async def retrieve(self, deployment_id: str) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"flow/deployments/{deployment_id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+    async def create(
+        self,
+        name: str,
+        description: str,
+        model_type: str,
+        model_id: str,
+        n_instances: int,
+    ) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="flow/deployments",
+                params={
+                    "name": name,
+                    "description": description,
+                    "model_type": model_type,
+                    "model_id": model_id,
+                    "n_instances": n_instances,
+                },
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+    async def promote(self, deployment_id: str) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="PUT",
+                url=f"flow/deployments/{deployment_id}/promote",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
+
+    async def demote(self, deployment_id: str) -> DeploymentSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="PUT",
+                url=f"flow/deployments/{deployment_id}/demote",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return DeploymentSchema(**response.data)
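
Illustrative usage of the new Deployments resource (not part of the diff); client setup and the concrete model values are placeholders, and the model_type string mirrors the DeploymentType values defined later in seekrai/types/deployments.py.

from seekrai import SeekrFlow  # assumed top-level export

client = SeekrFlow()  # hypothetical setup

deployment = client.deployments.create(
    name="demo-deployment",
    description="Example deployment created via the SDK",
    model_type="Fine-tuned Run",  # placeholder; see DeploymentType
    model_id="ft-placeholder-id",
    n_instances=1,
)

client.deployments.promote(deployment.id)  # PUT flow/deployments/{id}/promote
for d in client.deployments.list().data:   # GET flow/deployments
    print(d.id, d.status)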
seekrai/resources/finetune.py CHANGED
@@ -24,6 +24,7 @@ class FineTuning:
     def create(
         self,
         *,
+        project_id: int,
         training_config: TrainingConfig,
         infrastructure_config: InfrastructureConfig,
         # wandb_api_key: str | None = None,
@@ -42,7 +43,9 @@ class FineTuning:
         )
 
         parameter_payload = FinetuneRequest(
-            training_config=training_config, infrastructure_config=infrastructure_config
+            project_id=project_id,
+            training_config=training_config,
+            infrastructure_config=infrastructure_config,
         ).model_dump()
 
         response, _, _ = requestor.request(
@@ -250,6 +253,7 @@ class AsyncFineTuning:
     async def create(
         self,
         *,
+        project_id: int,
         training_config: TrainingConfig,
         infrastructure_config: InfrastructureConfig,
     ) -> FinetuneResponse:
@@ -266,7 +270,9 @@ class AsyncFineTuning:
         )
 
         parameter_payload = FinetuneRequest(
-            training_config=training_config, infrastructure_config=infrastructure_config
+            project_id=project_id,
+            training_config=training_config,
+            infrastructure_config=infrastructure_config,
         ).model_dump()
 
         response, _, _ = await requestor.arequest(
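
The create methods now require a project_id, tying fine-tune runs to the new Projects resource. A minimal sketch (not part of the diff); the TrainingConfig and InfrastructureConfig contents are not shown in this diff, so they are passed through unchanged.

def launch_finetune(client, project_id, training_config, infrastructure_config):
    # project_id is newly required in 0.3.1 and links the run to a project
    return client.fine_tuning.create(
        project_id=project_id,
        training_config=training_config,
        infrastructure_config=infrastructure_config,
    )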
seekrai/resources/projects.py ADDED
@@ -0,0 +1,129 @@
+from seekrai.abstract import api_requestor
+from seekrai.seekrflow_response import SeekrFlowResponse
+from seekrai.types import SeekrFlowClient, SeekrFlowRequest
+from seekrai.types.projects import (
+    GetProjectsResponse,
+    PostProjectRequest,
+)
+from seekrai.types.projects import (
+    Project as ProjectSchema,
+)
+
+
+class Projects:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    def list(self, skip: int = 0, limit: int = 100) -> GetProjectsResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url="flow/projects",
+                params={"skip": skip, "limit": limit},
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return GetProjectsResponse(**response.data)
+
+    def retrieve(self, project_id: int) -> ProjectSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"flow/projects/{project_id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return ProjectSchema(**response.data)
+
+    def create(self, name: str, description: str) -> ProjectSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = PostProjectRequest(
+            name=name,
+            description=description,
+        ).model_dump()
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="flow/projects",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return ProjectSchema(**response.data)
+
+
+class AsyncProjects:
+    def __init__(self, client: SeekrFlowClient) -> None:
+        self._client = client
+
+    async def list(self, skip: int = 0, limit: int = 100) -> GetProjectsResponse:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url="flow/projects",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return GetProjectsResponse(**response.data)
+
+    async def retrieve(self, project_id: int) -> ProjectSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"flow/projects/{project_id}",
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return ProjectSchema(**response.data)
+
+    async def create(self, name: str, description: str) -> ProjectSchema:
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+
+        parameter_payload = PostProjectRequest(
+            name=name,
+            description=description,
+        ).model_dump()
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="POST",
+                url="flow/projects",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return ProjectSchema(**response.data)
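
Illustrative usage of the new Projects resource (not part of the diff); client setup is assumed. Name and description lengths must satisfy the PostProjectRequest constraints defined later in seekrai/types/projects.py.

from seekrai import SeekrFlow  # assumed top-level export

client = SeekrFlow()  # hypothetical setup

project = client.projects.create(
    name="demo-project",                        # 5-100 characters
    description="Example project for SDK use",  # 5-1000 characters
)

page = client.projects.list(skip=0, limit=20)   # GET flow/projects with pagination
fetched = client.projects.retrieve(project.id)  # GET flow/projects/{project_id}
print(fetched.name, fetched.created_at)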
seekrai/types/__init__.py CHANGED
@@ -16,6 +16,15 @@ from seekrai.types.completions import (
     CompletionRequest,
     CompletionResponse,
 )
+from seekrai.types.deployments import (
+    Deployment,
+    DeploymentProcessor,
+    DeploymentStatus,
+    DeploymentType,
+    GetDeploymentsResponse,
+    HardwareType,
+    NewDeploymentRequest,
+)
 from seekrai.types.embeddings import EmbeddingRequest, EmbeddingResponse
 from seekrai.types.files import (
     FileDeleteResponse,
@@ -40,6 +49,12 @@ from seekrai.types.images import (
     ImageResponse,
 )
 from seekrai.types.models import ModelList, ModelResponse
+from seekrai.types.projects import (
+    GetProjectsResponse,
+    PostProjectRequest,
+    Project,
+    ProjectWithRuns,
+)
 
 
 __all__ = [
@@ -75,4 +90,15 @@ __all__ = [
     "AlignmentResponse",
     "AlignmentJobStatus",
     "AlignmentList",
+    "Project",
+    "ProjectWithRuns",
+    "GetProjectsResponse",
+    "PostProjectRequest",
+    "Deployment",
+    "DeploymentProcessor",
+    "DeploymentStatus",
+    "DeploymentType",
+    "GetDeploymentsResponse",
+    "HardwareType",
+    "NewDeploymentRequest",
 ]
seekrai/types/alignment.py CHANGED
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 from datetime import datetime
 from enum import Enum
 from typing import List, Literal, Optional
seekrai/types/chat_completions.py CHANGED
@@ -90,7 +90,8 @@ class ChatCompletionRequest(BaseModel):
     # stream SSE token chunks
     stream: bool = False
     # return logprobs
-    logprobs: int | None = None
+    logprobs: bool | None = False
+    top_logprobs: int | None = 0
     # echo prompt.
     # can be used with logprobs to return prompt logprobs (is this supported in Seekr API/worker implementation?)
     echo: bool = False
seekrai/types/deployments.py ADDED
@@ -0,0 +1,69 @@
+import enum
+from datetime import datetime
+from typing import Optional
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class DeploymentType(str, enum.Enum):
+    # Matches UI
+    FINE_TUNED_RUN = "Fine-tuned Run"
+    BASE_MODEL = "Base Model"  # TODO - clean up spacing, capital, etc.
+
+
+class DeploymentStatus(str, enum.Enum):
+    # dedicated
+    INACTIVE = "Inactive"  # Shared with serverless.
+    PENDING = "Pending"
+    ACTIVE = "Active"  # Shared with serverless.
+    FAILED = "Failed"
+    STARTED = "Started"
+    SUCCESS = "Success"
+
+
+class HardwareType(str, enum.Enum):
+    # Matches UI
+    SERVERLESS = "Serverless"
+    DEDICATED = "Dedicated"
+
+
+class DeploymentProcessor(str, enum.Enum):
+    GAUDI2 = "GAUDI2"
+    GAUDI3 = "GAUDI3"
+    A100 = "A100"
+    H100 = "H100"
+    XEON = "XEON"
+    NVIDIA = "NVIDIA"  # TODO - this doesnt make sense with A100, etc.
+
+
+class NewDeploymentRequest(BaseModel):
+    model_type: DeploymentType
+    model_id: str
+    name: str = Field(min_length=5, max_length=100)
+    description: str = Field(min_length=5, max_length=1000)
+    n_instances: int = Field(..., ge=1, le=50)
+
+
+class Deployment(BaseModel):
+    model_config = ConfigDict(from_attributes=True)
+
+    id: str
+    model_type: DeploymentType
+    model_id: str
+    name: str
+    description: str
+    status: DeploymentStatus
+    memory: Optional[str] = None
+    hardware_type: HardwareType = HardwareType.DEDICATED
+    total_input_tokens: int
+    total_output_tokens: int
+    created_at: datetime
+    last_deployed_at: Optional[datetime] = None
+    updated_at: datetime
+    processor: DeploymentProcessor = DeploymentProcessor.GAUDI2
+    n_instances: int
+    user_id: int
+
+
+class GetDeploymentsResponse(BaseModel):
+    data: list[Deployment]
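
Illustrative validation of the new request schema (not part of the diff); all values are placeholders.

from pydantic import ValidationError

from seekrai.types.deployments import DeploymentType, NewDeploymentRequest

# satisfies the Field constraints above
ok = NewDeploymentRequest(
    model_type=DeploymentType.BASE_MODEL,
    model_id="base-placeholder",
    name="demo-deployment",
    description="Example request body",
    n_instances=1,
)

try:
    NewDeploymentRequest(
        model_type=DeploymentType.BASE_MODEL,
        model_id="base-placeholder",
        name="abc",           # rejected: min_length=5
        description="Example request body",
        n_instances=0,        # rejected: ge=1
    )
except ValidationError as exc:
    print(exc)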
seekrai/types/finetune.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 from datetime import datetime
 from enum import Enum
-from typing import List, Literal
+from typing import List, Literal, Optional
 
 from seekrai.types.abstract import BaseModel
 from seekrai.types.common import (
@@ -124,6 +124,7 @@ class FinetuneRequest(BaseModel):
     Fine-tune request type
     """
 
+    project_id: int
     training_config: TrainingConfig
     infrastructure_config: InfrastructureConfig
 
@@ -169,6 +170,9 @@ class FinetuneResponse(BaseModel):
     # list of fine-tune events
     events: List[FinetuneEvent] | None = None
     inference_available: bool = False
+    project_id: Optional[int] = None  # TODO - fix this
+    completed_at: datetime | None = None
+
     # dataset token count
     # TODO
     # token_count: int | None = None
seekrai/types/projects.py ADDED
@@ -0,0 +1,31 @@
+from datetime import datetime
+from typing import Optional
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class Project(BaseModel):
+    model_config = ConfigDict(from_attributes=True)
+
+    id: int
+    name: str
+    description: str
+    user_id: int
+    created_at: datetime
+    updated_at: datetime
+
+
+class ProjectWithRuns(Project):
+    runs: int
+    runs_deployed: int
+    last_modified: datetime
+
+
+class GetProjectsResponse(BaseModel):
+    data: list[ProjectWithRuns]
+
+
+class PostProjectRequest(BaseModel):
+    id: Optional[int] = Field(default=None)
+    name: str = Field(min_length=5, max_length=100)
+    description: str = Field(min_length=5, max_length=1000)
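
Similarly, a short sketch of the project request schema's validation (not part of the diff):

from pydantic import ValidationError

from seekrai.types.projects import PostProjectRequest

PostProjectRequest(name="demo-project", description="Example project request")  # valid

try:
    PostProjectRequest(name="abc", description="hi")  # both below min_length=5
except ValidationError as exc:
    print(exc)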
seekrai-0.2.3.dist-info/METADATA → seekrai-0.3.1.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: seekrai
-Version: 0.2.3
+Version: 0.3.1
 Summary: Python client for SeekrAI
 Home-page: https://gitlab.cb.ntent.com/ml/seekr-py
 License: Apache-2.0
@@ -31,7 +31,7 @@ Project-URL: Homepage, https://www.seekr.com/
 Project-URL: Repository, https://gitlab.cb.ntent.com/ml/seekr-py
 Description-Content-Type: text/markdown
 
-The Seekr Python Library is the official Python client for SeekrFlow's API platform, providing a convenient way for interacting with the REST APIs and enables easy integrations with Python 3.8+ applications with easy to use synchronous and asynchronous clients.
+The Seekr Python Library is the official Python client for SeekrFlow's API platform, providing a convenient way for interacting with the REST APIs and enables easy integrations with Python 3.9+ applications with easy to use synchronous and asynchronous clients.
 
 # Installation
 
seekrai-0.2.3.dist-info/RECORD → seekrai-0.3.1.dist-info/RECORD RENAMED
@@ -1,41 +1,45 @@
 seekrai/__init__.py,sha256=HC6iy-IdwqecabH-6a80Lsy9qO2PBToAI0WqEErV41c,935
 seekrai/abstract/__init__.py,sha256=wNiOTW9TJpUgfCJCG-wAbhhWWH2PtoVpAuL3nxvQGps,56
-seekrai/abstract/api_requestor.py,sha256=Bij__JWVSmr4AYnrh6ugFYo-48_uYGQodcTDp38pnf4,18169
-seekrai/client.py,sha256=Z7T7cc0_Ess8pchceNdhYyT5RvP9luAuGmTPDz5Y0ss,4976
+seekrai/abstract/api_requestor.py,sha256=tI43JdbUfNQHWjvRrenUM7C7Ea-zD-7yd2JNS-Lki04,18297
+seekrai/client.py,sha256=Yhejl-2a-Uoc8nWi-XxETZT4a4Ou_t_TRLcp0e9APIY,5376
 seekrai/constants.py,sha256=hoR2iF5te5Ydjt_lxIOSGID4vESIakG4F-3xAWdwxaU,1854
 seekrai/error.py,sha256=rAYL8qEd8INwYMMKvhS-HKeC3QkWL4Wq-zfazFU-zBg,4861
 seekrai/filemanager.py,sha256=8RuSzJvELD-fCI2Wd_t0jSKeVrmFwF7E5AzXIgDFxNA,9572
-seekrai/resources/__init__.py,sha256=QpK4dsJoXdMvlA_53fOS2S19Ua0OyLGFBZw_fQrOOvk,799
+seekrai/resources/__init__.py,sha256=-OpvrZp0c_ro_e1G14gjOILtcSfpSOeRZeK8kt41WLo,1014
 seekrai/resources/alignment.py,sha256=6qQm9w0Em0q3zVeOzs8cX3wulr-B-wh4Pcr3pboIeTE,4468
 seekrai/resources/chat/__init__.py,sha256=KmtPupgECtEN80NyvcnSmieTAFXhwmVxhMHP0qhspA4,618
-seekrai/resources/chat/completions.py,sha256=vMLTc_aP6EaS1kvVNMmGz61ViU0BhhfyuNo73madrPg,11067
+seekrai/resources/chat/completions.py,sha256=pGBVrIvUfY-wr0ooiywzuhIjZ4GwiojH3ofmRE6bYlw,11643
 seekrai/resources/completions.py,sha256=w3La3zPMlN00y-b-tJwLgvZVH-xK_dKC6ktI5Ggn1us,8564
+seekrai/resources/deployments.py,sha256=HmP7MuxAlLUoQl6z705_d1Y53MDvGgSQXhlgN3DKX2A,6078
 seekrai/resources/embeddings.py,sha256=3lohUrkdFqzSg8FgS7p4r87jwjE0NXU1PilWv278quk,2705
 seekrai/resources/files.py,sha256=16FfJFZJjZ10Q38AHvTwE-CtIPpKA0d4zyB52YN14e4,6876
-seekrai/resources/finetune.py,sha256=fXLkVCTnfoeq4TvjhEWQsdIwO247tcpfWCVTlWZczDw,11271
+seekrai/resources/finetune.py,sha256=B3UpzdqH3tUjS9ZwMiEv6XUNULmYaMOubqJg2BY17Os,11417
 seekrai/resources/images.py,sha256=E48lAe7YsZ2WXBHR_qz4SF7P4Y-U7t61m_bWNS91pM0,4802
 seekrai/resources/models.py,sha256=Pdd0S0gZdratWcHJPKNb7LkEdUGjr3xNR06W6GDiyxk,5000
+seekrai/resources/projects.py,sha256=AWJUeUDSzkbxBksHjJ4a3c83UR62TlMGHutm2NdV6Xk,3790
 seekrai/seekrflow_response.py,sha256=5RFEQzamDy7sTSDkxSsZQThZ3biNmeCPeHWdrFId5Go,1320
-seekrai/types/__init__.py,sha256=YUXzhcpwGGDeiV9LxKzwaYne7zI5vl99OMLmCtdSXwY,1779
+seekrai/types/__init__.py,sha256=E41__vAq-tnm5aVYW0Ef1Ky-GMPnMpUKlyLr3pYhLSM,2358
 seekrai/types/abstract.py,sha256=TqWFQV_6bPblywfCH-r8FCkXWvPkc9KlJ4QVgyrnaMc,642
-seekrai/types/alignment.py,sha256=FCPrACbjDPZ02Q1-1hURehcz2Nu5kPiEFlS_CSbrWUw,1093
-seekrai/types/chat_completions.py,sha256=LfZk4tZ0bSm4s1DWUeTEZVpBlfemTWdDNXNUUVqbP9Q,3656
+seekrai/types/alignment.py,sha256=-lxF0Cyj3w4brMft8WAoc2EDMAlbX7zdRYD04ot98-Y,1129
+seekrai/types/chat_completions.py,sha256=xRTHBbDJDbz0HgW042WX3csQDolhjEuO81w0rzFSeBU,3691
 seekrai/types/common.py,sha256=OH3l3u-0_5oz1KYrcHMybFESzivDySocYlJAsLSLOWU,1940
 seekrai/types/completions.py,sha256=lm9AFdZR3Xg5AHPkV-qETHikkwMJmkHrLGr5GG-YR-M,2171
+seekrai/types/deployments.py,sha256=n7_t7DEeBSC8cDJSjIfvWtcgUql1DaEn89zuGYN_RaI,1744
 seekrai/types/embeddings.py,sha256=OANoLNOs0aceS8NppVvvcNYQbF7-pAOAmcr30pw64OU,749
 seekrai/types/error.py,sha256=uTKISs9aRC4_6zwirtNkanxepN8KY-SqCq0kNbfZylQ,370
 seekrai/types/files.py,sha256=XmtiM6d9i3tnYS-Kii3QpxZJRqemJi2rvLJ32GsECXQ,2602
-seekrai/types/finetune.py,sha256=IczMdaMUub4tcNzfW25UBZqbLDZ0wAdBAoEll-QEiqA,5961
+seekrai/types/finetune.py,sha256=Izh7NfaW7pGzSN5GpWYSaZpkHV2TilE1CF62DrB8vE0,6089
 seekrai/types/images.py,sha256=Fusj8OhVYFsT8kz636lRGGivLbPXo_ZNgakKwmzJi3U,914
 seekrai/types/models.py,sha256=1ZfW9WwayApkISRizDntjkWhYNv-wkbrRVIfHn2QuC4,1242
+seekrai/types/projects.py,sha256=JFgpZdovia8Orcnhp6QkIEAXzyPCfKT_bUiwjxUaHHQ,670
 seekrai/utils/__init__.py,sha256=dfbiYEc47EBVRkq6C4O9y6tTGuPuV3LbV3__v01Mbds,658
 seekrai/utils/_log.py,sha256=Cayw5B394H2WGVTXPXS2AN8znQdxsgrLqADXgqmokvU,1649
 seekrai/utils/api_helpers.py,sha256=0Y8BblNIr9h_R12zdmhkxgTlxgoRkbq84QNi4nNWGu8,2385
 seekrai/utils/files.py,sha256=B61Pwra49MVVWjPtdkx4hBtAuUe9UI63hdNus87Uq0o,7164
 seekrai/utils/tools.py,sha256=jgJTL-dOIouDbEJLdQpQfpXhqaz_poQYS52adyUtBjo,1781
 seekrai/version.py,sha256=q6iGQVFor8zXiPP5F-3vy9TndOxKv5JXbaNJ2kdOQws,125
-seekrai-0.2.3.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
-seekrai-0.2.3.dist-info/METADATA,sha256=z9WhvFSI8PwVKHpudNwzo0F2XoBXEfmrkN_ewAhKLzY,4748
-seekrai-0.2.3.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-seekrai-0.2.3.dist-info/entry_points.txt,sha256=N49yOEGi1sK7Xr13F_rkkcOxQ88suyiMoOmRhUHTZ_U,48
-seekrai-0.2.3.dist-info/RECORD,,
+seekrai-0.3.1.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+seekrai-0.3.1.dist-info/METADATA,sha256=mRcvLQlduWbKblvlUqQ3BMZ7cpwBHoLEG6H6PRbFDHI,4748
+seekrai-0.3.1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+seekrai-0.3.1.dist-info/entry_points.txt,sha256=N49yOEGi1sK7Xr13F_rkkcOxQ88suyiMoOmRhUHTZ_U,48
+seekrai-0.3.1.dist-info/RECORD,,
seekrai-0.2.3.dist-info/WHEEL → seekrai-0.3.1.dist-info/WHEEL RENAMED
@@ -1,4 +1,4 @@
 Wheel-Version: 1.0
-Generator: poetry-core 1.8.1
+Generator: poetry-core 1.9.0
 Root-Is-Purelib: true
 Tag: py3-none-any