seekrai 0.4.4__py3-none-any.whl → 0.5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. seekrai/__init__.py +0 -1
  2. seekrai/abstract/api_requestor.py +108 -251
  3. seekrai/abstract/response_parsing.py +99 -0
  4. seekrai/client.py +6 -2
  5. seekrai/filemanager.py +92 -3
  6. seekrai/resources/__init__.py +6 -1
  7. seekrai/resources/agents/__init__.py +11 -6
  8. seekrai/resources/agents/agent_inference.py +236 -29
  9. seekrai/resources/agents/agents.py +272 -0
  10. seekrai/resources/agents/threads.py +454 -0
  11. seekrai/resources/alignment.py +3 -9
  12. seekrai/resources/completions.py +3 -9
  13. seekrai/resources/deployments.py +4 -9
  14. seekrai/resources/embeddings.py +3 -9
  15. seekrai/resources/files.py +118 -53
  16. seekrai/resources/finetune.py +3 -9
  17. seekrai/resources/images.py +3 -5
  18. seekrai/resources/ingestion.py +3 -9
  19. seekrai/resources/models.py +35 -124
  20. seekrai/resources/projects.py +4 -9
  21. seekrai/resources/resource_base.py +10 -0
  22. seekrai/resources/vectordb.py +482 -0
  23. seekrai/types/__init__.py +87 -0
  24. seekrai/types/agents/__init__.py +89 -0
  25. seekrai/types/agents/agent.py +42 -0
  26. seekrai/types/agents/runs.py +117 -0
  27. seekrai/types/agents/threads.py +265 -0
  28. seekrai/types/agents/tools/__init__.py +16 -0
  29. seekrai/types/agents/tools/env_model_config.py +7 -0
  30. seekrai/types/agents/tools/schemas/__init__.py +8 -0
  31. seekrai/types/agents/tools/schemas/file_search.py +9 -0
  32. seekrai/types/agents/tools/schemas/file_search_env.py +11 -0
  33. seekrai/types/agents/tools/tool.py +14 -0
  34. seekrai/types/agents/tools/tool_env_types.py +4 -0
  35. seekrai/types/agents/tools/tool_types.py +10 -0
  36. seekrai/types/alignment.py +6 -2
  37. seekrai/types/files.py +3 -0
  38. seekrai/types/finetune.py +1 -0
  39. seekrai/types/models.py +3 -0
  40. seekrai/types/vectordb.py +78 -0
  41. {seekrai-0.4.4.dist-info → seekrai-0.5.0.dist-info}/METADATA +3 -3
  42. seekrai-0.5.0.dist-info/RECORD +67 -0
  43. {seekrai-0.4.4.dist-info → seekrai-0.5.0.dist-info}/WHEEL +1 -1
  44. seekrai-0.4.4.dist-info/RECORD +0 -49
  45. {seekrai-0.4.4.dist-info → seekrai-0.5.0.dist-info}/LICENSE +0 -0
  46. {seekrai-0.4.4.dist-info → seekrai-0.5.0.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,117 @@
1
+ import datetime
2
+ from enum import Enum
3
+ from typing import Any, Optional, Union
4
+
5
+ from pydantic import Field
6
+
7
+ from seekrai.types.abstract import BaseModel
8
+
9
+
10
class RunRequest(BaseModel):
    """Request model for creating a run.

    Attributes:
        agent_id (str): ID of the agent that should execute the run;
            defaults to "default_agent".
    """

    agent_id: str = Field(default="default_agent")
14
+
15
+
16
class RunResponse(BaseModel):
    """Response model for run creation.

    Attributes:
        run_id (str): Identifier of the newly created run.
        thread_id (str): Thread the run was created on.
        status (str): Current run status as a raw string.
    """

    run_id: str
    thread_id: str
    status: str
22
+
23
+
24
class RunStatus(str, Enum):
    """Available status for a run."""

    QUEUED = "queued"
    # NOTE(review): member is named IN_PROGRESS but the wire value is
    # "running" — confirm against the API before changing either side.
    IN_PROGRESS = "running"
    COMPLETED = "completed"
    FAILED = "failed"
    CANCELED = "canceled"
32
+
33
+
34
class RunUsage(BaseModel):
    """Aggregated usage metrics for a complete run execution.

    Tracks token consumption for both prompts and completions.
    """

    prompt_tokens: int  # tokens consumed by prompts over the whole run
    completion_tokens: int  # tokens generated by completions over the whole run
    total_tokens: int  # combined token count as reported by the API
43
+
44
+
45
class RunStepUsage(BaseModel):
    """Usage metrics for a single step within a run.

    Tracks token consumption at the individual step level.
    Mirrors the field layout of RunUsage, but scoped to one step.
    """

    prompt_tokens: int  # tokens consumed by this step's prompt
    completion_tokens: int  # tokens generated by this step's completion
    total_tokens: int  # combined token count for the step
54
+
55
+
56
class Run(BaseModel):
    """Represents a single execution within a thread.

    A run encompasses the entire lifecycle of processing, from receiving
    the initial prompt to delivering the final response. Runs track
    execution status, timing, model parameters, and resource usage.
    """

    id: str
    object: str = "thread.run"  # constant object-type tag
    created_at: datetime.datetime
    agent_id: str  # agent executing this run
    thread_id: str  # thread the run belongs to
    status: RunStatus  # see RunStatus for the possible values
    is_active: bool = False  # Indicates if this run is actively executing on its thread

    # Lifecycle timestamps; each is populated only once the matching event occurs.
    started_at: Optional[datetime.datetime] = None
    expires_at: Optional[datetime.datetime] = None
    cancelled_at: Optional[datetime.datetime] = None
    failed_at: Optional[datetime.datetime] = None
    completed_at: Optional[datetime.datetime] = None

    model: str  # model identifier used for the run
    instructions: Optional[str] = None  # system-level instructions, if any
    tools: list[dict[str, Any]] = Field(default_factory=list)  # tool definitions available to the run
    meta_data: dict[str, Any] = Field(default_factory=dict)  # arbitrary caller-supplied metadata

    usage: Optional[RunUsage] = None  # set once token accounting is available

    # Sampling / generation parameters for the run.
    temperature: float
    top_p: float
    max_completion_tokens: Optional[int] = None
    truncation_strategy: dict[str, Any] = Field(default_factory=dict)
    response_format: Union[str, dict[str, Any]] = "auto"
    tool_choice: Union[str, dict[str, Any]] = "auto"
    parallel_tool_calls: bool
92
+
93
+
94
class RunStep(BaseModel):
    """A single atomic operation within a run.

    Steps can include message creation, tool calls, or other internal actions.
    Each step is associated with a specific run, assistant, and thread, and
    includes details about its type, status, and resource usage.
    """

    id: str
    object: str = "thread.run.step"  # constant object-type tag
    created_at: datetime.datetime
    run_id: str  # run this step belongs to
    agent_id: str  # agent that produced the step
    thread_id: str  # thread the parent run executes on

    # E.g. 'message_creation', 'tool_call', etc.
    type: str

    # E.g. 'completed', 'failed', etc.
    status: str

    completed_at: Optional[datetime.datetime] = None  # set when the step finishes
    meta_data: dict[str, Any] = Field(default_factory=dict)
    usage: Optional[RunStepUsage] = None  # per-step token accounting, if reported
@@ -0,0 +1,265 @@
1
+ import datetime
2
+ from enum import Enum
3
+ from typing import Any, Literal, Optional, Sequence, Union
4
+ from uuid import uuid4
5
+
6
+ from pydantic import Field
7
+
8
+ from seekrai.types.abstract import BaseModel
9
+
10
+
11
class ThreadCreateRequest(BaseModel):
    """Pydantic model for creating a thread request.

    Attributes:
        meta_data (Optional[dict[str, Any]]): Optional metadata dictionary
            attached to the new thread.
    """

    meta_data: Optional[dict[str, Any]] = None
19
+
20
+
21
class MessageUpdateRequest(BaseModel):
    """Pydantic model for updating a message request.

    Both fields are optional; only the supplied ones are updated.

    Attributes:
        content (Optional[str]): Optional new content for the message.
        meta_data (Optional[dict[str, Any]]): Optional metadata dictionary.
    """

    content: Optional[str] = None
    meta_data: Optional[dict[str, Any]] = None
31
+
32
+
33
class ThreadStatus(str, Enum):
    """Available status for a thread."""

    AVAILABLE = "available"  # thread can accept a new run
    LOCKED = "locked"  # thread is held, e.g. by an active run
38
+
39
+
40
class Thread(BaseModel):
    """A thread is a top-level conversation container.

    Threads can contain multiple messages from various assistants and users,
    providing a complete history of an interaction sequence.
    """

    id: str
    object: str = "thread"  # constant object-type tag
    created_at: datetime.datetime
    status: ThreadStatus = ThreadStatus.AVAILABLE
    active_run_id: Optional[str] = None  # run currently executing on this thread, if any
    meta_data: dict[str, Any] = Field(default_factory=dict)
53
+
54
+
55
class StreamReasoningChunk(BaseModel):
    """A chunk of reasoning output from a streaming tool."""

    type: Literal["stream_reasoning"]  # discriminating tag for this chunk kind
    reasoning: str  # partial reasoning text
    meta_data: dict[str, Any]
61
+
62
+
63
class StreamTextChunk(BaseModel):
    """A chunk of text output from a streaming tool."""

    text: str  # partial text payload
    # NOTE(review): this tag is identical to StreamChunk.type and does not
    # follow the sibling naming pattern (e.g. "stream_reasoning") — confirm
    # the intended wire value before relying on it for discrimination.
    type: Literal["streaming_chunk"]
    meta_data: dict[str, Any]
69
+
70
+
71
class StreamingToolChunk(BaseModel):
    """A chunk of output from a streaming tool."""

    # NOTE(review): this model is not included in StreamChunkDataTypes —
    # confirm whether that omission is intentional.
    type: Literal["streaming_tool"]
    tool_id: str  # identifier of the emitting tool invocation
    tool_name: str
    tool_output: dict[str, Any]  # structured output produced so far
    meta_data: dict[str, Any]
79
+
80
+
81
class StreamingToolRequest(BaseModel):
    """A tool invocation request emitted during streaming."""

    type: Literal["streaming_tool_request"]
    tool_id: str  # identifier of the tool invocation
    tool_args: dict[str, Any]  # arguments the tool is being called with
    meta_data: dict[str, Any]
    tool_name: str
89
+
90
+
91
class StreamingToolResponse(BaseModel):
    """A tool invocation result emitted during streaming."""

    type: Literal["streaming_tool_response"]
    tool_id: str  # identifier of the tool invocation this responds to
    tool_output: dict[str, Any]  # structured result returned by the tool
    meta_data: dict[str, Any]
98
+
99
+
100
class StreamNodeHeaderChunk(BaseModel):
    """Represents a special 'header' announcement that we're at a particular Node."""

    type: Literal["node_header"]
    node_type: str  # e.g. "ModelRequestNode" or "CallToolsNode"
    description: str  # e.g. "streaming partial request tokens"
    meta_data: dict[str, Any]
107
+
108
+
109
class StreamUserPromptChunk(BaseModel):
    """Represents the user prompt node."""

    type: Literal["user_prompt"]
    # Plain text, or a sequence of content parts for multimodal prompts.
    user_prompt: Union[str, Sequence[Any]]
    meta_data: dict[str, Any]
115
+
116
+
117
class StreamPartStartEventChunk(BaseModel):
    """Represents a PartStartEvent."""

    type: Literal["part_start_event"]
    part_index: int  # position of the part within the response
    part_content: str  # whatever event.part!r is
    meta_data: dict[str, Any]
124
+
125
+
126
class StreamToolCallPartDeltaChunk(BaseModel):
    """Represents a ToolCallPartDelta."""

    type: Literal["tool_call_part_delta"]
    part_index: int  # position of the part the delta applies to
    args_delta: Any  # or dict[str, Any] depending on your usage
    meta_data: dict[str, Any]
133
+
134
+
135
class StreamFinalResultEventChunk(BaseModel):
    """Represents a FinalResultEvent."""

    type: Literal["final_result_event"]
    tool_name: Optional[str]  # tool that produced the final result, if any
    meta_data: dict[str, Any]
141
+
142
+
143
class StreamEndNodeChunk(BaseModel):
    """Represents the final agent output at the EndNode."""

    type: Literal["end_node"]
    final_output: str  # the complete final answer text
    meta_data: dict[str, Any]
149
+
150
+
151
class StreamTextDeltaChunk(BaseModel):
    """Represents partial text tokens returned by the model (TextPartDelta)."""

    type: Literal["text_delta"]
    text: str  # chunk of partial text
    meta_data: dict[str, Any]
157
+
158
+
159
# Union of every chunk payload that may appear in StreamChunk.data.
# NOTE(review): StreamingToolChunk ("streaming_tool") is defined above but
# absent from this union — confirm whether the omission is intentional.
StreamChunkDataTypes = Union[
    StreamReasoningChunk,
    StreamTextChunk,
    StreamingToolRequest,
    StreamingToolResponse,
    StreamNodeHeaderChunk,
    StreamUserPromptChunk,
    StreamPartStartEventChunk,
    StreamToolCallPartDeltaChunk,
    StreamFinalResultEventChunk,
    StreamEndNodeChunk,
    StreamTextDeltaChunk,
]
172
+
173
+
174
class StreamChunk(BaseModel):
    """A single chunk of streaming output from a tool."""

    data: StreamChunkDataTypes  # the typed payload; see StreamChunkDataTypes
    type: Literal["streaming_chunk"]  # envelope tag, constant for all chunks
    meta_data: dict[str, Any]
180
+
181
+
182
class InputText(BaseModel):
    """A text input to be sent to the model."""

    text: str  # the text content
    type: Literal["input_text"]  # discriminating tag for this input kind
187
+
188
+
189
class InputImage(BaseModel):
    """An image input to be sent to the model.

    The image may be referenced either by an uploaded file's ID or by a URL.
    """

    # Fix: the docstring below already promised a default of `auto`, but the
    # field previously had no default and was therefore required.
    detail: Literal["high", "low", "auto"] = "auto"
    """The detail level of the image to be sent to the model.
    One of `high`, `low`, or `auto`. Defaults to `auto`.
    """

    type: Literal["input_image"]

    # Fix: previously required-but-nullable; default to None so callers can
    # supply just one of file_id / image_url.
    file_id: Optional[str] = None
    """The ID of the file to be sent to the model."""

    image_url: Optional[str] = None
    """The URL of the image to be sent to the model.

    A fully qualified URL or base64 encoded image in a data URL.
    """
207
+
208
+
209
class InputFile(BaseModel):
    """A file input to be sent to the model."""

    type: Literal["input_file"]
    """The type of the input item. Always `input_file`."""

    file_id: str
    """The ID of the file to be sent to the model."""
217
+
218
+
219
# Any single content item accepted as model input.
InputMessage = Union[InputText, InputImage, InputFile]
220
+
221
+
222
class OutputText(BaseModel):
    """A text output from the model."""

    text: str  # generated text
    type: Literal["output_text"]
    annotations: list[str]  # annotations attached to the text
228
+
229
+
230
class OutputGuardrail(BaseModel):
    """A guardrail output from the model."""

    type: Literal["output_guardrail"]
    text: str  # guardrail-processed text
    guardrail: list[dict[str, Any]]  # guardrail evaluation details
236
+
237
+
238
# Any single content item produced as model output.
OutputMessage = Union[OutputText, OutputGuardrail]
239
+
240
+
241
# Message content: plain text, a list of input items, or a list of output items.
ThreadMessageContentType = Union[str, list[InputMessage], list[OutputMessage]]
242
+
243
+
244
class ThreadMessage(BaseModel):
    """A single piece of content within a thread.

    Messages can be either requests to a model (user prompts, system prompts,
    or tool returns) or responses from the model. Each message is associated
    with a specific thread and optionally with an assistant and run.
    """

    id: str = Field(default_factory=lambda: str(uuid4()))  # random ID when not supplied
    object: str = "thread.message"  # constant object-type tag
    thread_id: str  # FK to Thread
    role: str  # e.g. 'user', 'assistant', 'system', 'tool'

    # content can be either
    # - a single string (for text messages)
    # - a list of InputMessage objects (for model inputs)
    # - a list of OutputMessage objects (for model outputs)
    content: ThreadMessageContentType

    agent_id: Optional[str] = None  # If this message was sent by an assistant
    run_id: Optional[str] = None  # If it's part of a Run
    meta_data: dict[str, Any] = Field(default_factory=dict)
@@ -0,0 +1,16 @@
1
+ from seekrai.types.agents.tools.env_model_config import EnvConfig
2
+ from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
3
+ from seekrai.types.agents.tools.tool import ToolBase, ToolType
4
+ from seekrai.types.agents.tools.tool_env_types import Env
5
+ from seekrai.types.agents.tools.tool_types import Tool
6
+
7
+
8
# Public API of the seekrai.types.agents.tools package.
__all__ = [
    "ToolBase",
    "ToolType",
    "EnvConfig",
    "Env",
    "Tool",
    "FileSearch",
    "FileSearchEnv",
]
@@ -0,0 +1,7 @@
1
+ from pydantic import BaseModel, ConfigDict
2
+
3
+
4
class EnvConfig(BaseModel):
    """Base model for tool environment configurations.

    Every field is aliased to its upper-case form (environment-variable
    style), while populate_by_name still accepts the original field names.
    """

    model_config = ConfigDict(
        alias_generator=lambda k: k.upper(), populate_by_name=True
    )
@@ -0,0 +1,8 @@
1
+ from seekrai.types.agents.tools.schemas.file_search import FileSearch
2
+ from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
3
+
4
+
5
# Public API of the tool schemas subpackage.
__all__ = [
    "FileSearch",
    "FileSearchEnv",
]
@@ -0,0 +1,9 @@
1
+ from typing import Literal
2
+
3
+ from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
4
+ from seekrai.types.agents.tools.tool import ToolBase, ToolType
5
+
6
+
7
class FileSearch(ToolBase):
    """File-search tool definition.

    `name` is pinned to a single literal so the Tool union can discriminate
    on it; `tool_env` narrows ToolBase.tool_env to the file-search env.
    """

    name: Literal[ToolType.FILE_SEARCH] = ToolType.FILE_SEARCH
    tool_env: FileSearchEnv
@@ -0,0 +1,11 @@
1
+ from pydantic import Field
2
+
3
+ from seekrai.types.agents.tools.env_model_config import EnvConfig
4
+
5
+
6
# TODO: figure out better way of creating tool environment models (within tool ideally), but retaining separate model_configs
class FileSearchEnv(EnvConfig):
    """Environment configuration for the file-search tool.

    Field aliases are upper-cased by EnvConfig, so these populate from
    FILE_SEARCH_INDEX, DOCUMENT_TOOL_DESC, TOP_K and SCORE_THRESHOLD.
    """

    file_search_index: str  # identifier of the index to search
    document_tool_desc: str  # description of the document tool
    top_k: int = Field(default=10)  # number of results to return
    # Fix: similarity thresholds are fractional, so an int-typed field would
    # reject values such as 0.5; float still accepts the old int default 0.
    score_threshold: float = Field(default=0)
@@ -0,0 +1,14 @@
1
+ import enum
2
+
3
+ from pydantic import BaseModel
4
+
5
+ from seekrai.types.agents.tools.tool_env_types import Env
6
+
7
+
8
class ToolType(str, enum.Enum):
    """Closed set of tool identifiers; used as the discriminator value."""

    FILE_SEARCH = "file_search"
10
+
11
+
12
class ToolBase(BaseModel):
    """Common shape shared by all tool definitions."""

    name: ToolType  # discriminator field; subclasses pin it to one literal
    tool_env: Env  # environment/configuration payload for the tool
@@ -0,0 +1,4 @@
1
+ from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
2
+
3
+
4
# Alias for the single tool environment supported today.
Env = FileSearchEnv  # will be a Union of tool envs when more are added
@@ -0,0 +1,10 @@
1
+ from typing import Annotated
2
+
3
+ from pydantic import Field
4
+
5
+ from seekrai.types.agents.tools.schemas.file_search import FileSearch
6
+
7
+
8
# Discriminated on the `name` literal so pydantic selects the right tool model.
Tool = Annotated[
    FileSearch, Field(discriminator="name")
]  # will be a Union of tools when more are added
@@ -12,7 +12,8 @@ from seekrai.types.abstract import BaseModel
12
12
  class AlignmentType(str, Enum):
13
13
  PRINCIPLE = "principle"
14
14
  CHAIN_OF_THOUGHT = "chain_of_thought"
15
- RAFT = "raft"
15
+ RAFT = "raft" # deprecated - use CONTEXT_DATA instead
16
+ CONTEXT_DATA = "context_data"
16
17
 
17
18
 
18
19
  class AlignmentRequest(BaseModel):
@@ -24,7 +25,7 @@ class AlignmentRequest(BaseModel):
24
25
  )
25
26
  type: AlignmentType = Field(
26
27
  default=AlignmentType.PRINCIPLE,
27
- description="Type of alignment task (principle, chain_of_thought, or raft)",
28
+ description="Type of alignment task (principle, chain_of_thought, or context_data)",
28
29
  )
29
30
 
30
31
 
@@ -59,7 +60,10 @@ class AlignmentJobStatus(str, Enum):
59
60
  class AlignmentResponse(BaseModel):
60
61
  id: Optional[str] = Field(default=..., description="Alignment job ID")
61
62
  created_at: datetime | None = None
63
+ updated_at: datetime | None = None
62
64
  status: AlignmentJobStatus | None = None
65
+ current_step: str | None = None
66
+ progress: str | None = None
63
67
 
64
68
 
65
69
  class AlignmentList(BaseModel):
seekrai/types/files.py CHANGED
@@ -14,6 +14,8 @@ class FilePurpose(str, Enum):
14
14
  FineTune = "fine-tune"
15
15
  PreTrain = "pre-train"
16
16
  Alignment = "alignment"
17
+ Ingestion = "ingestion"
18
+ VectorIngestion = "vector-ingestion"
17
19
 
18
20
 
19
21
  class TrainingFileType(str, Enum):
@@ -94,6 +96,7 @@ class FileResponse(BaseModel):
94
96
  # file byte size
95
97
  bytes: int | None = None
96
98
  created_by: str | None = None # TODO - fix this later
99
+ deleted: bool | None = None
97
100
 
98
101
 
99
102
  class FileList(BaseModel):
seekrai/types/finetune.py CHANGED
@@ -113,6 +113,7 @@ class AcceleratorType(str, Enum):
113
113
  A100 = "A100"
114
114
  A10 = "A10"
115
115
  H100 = "H100"
116
+ MI300X = "MI300X"
116
117
 
117
118
 
118
119
  class InfrastructureConfig(BaseModel):
seekrai/types/models.py CHANGED
@@ -1,5 +1,6 @@
1
1
  from __future__ import annotations
2
2
 
3
+ from datetime import datetime
3
4
  from enum import Enum
4
5
  from typing import List, Literal
5
6
 
@@ -29,10 +30,12 @@ class ModelResponse(BaseModel):
29
30
  # object type
30
31
  object: Literal[ObjectType.Model]
31
32
  created: int | None = None
33
+ created_at: datetime | None = None
32
34
  # model type
33
35
  type: ModelType | None = None
34
36
  name: str | None = None
35
37
  bytes: int | None = None
38
+ model_type: str | None = None
36
39
  # # model creator organization
37
40
  # organization: str | None = None
38
41
  # # link to model resource
@@ -0,0 +1,78 @@
1
+ from datetime import datetime
2
+ from typing import List, Literal, Optional
3
+
4
+ from pydantic import BaseModel, Field
5
+
6
+
7
# NOTE(review): this module inherits from pydantic.BaseModel directly,
# unlike sibling type modules that use seekrai.types.abstract.BaseModel —
# confirm whether that divergence is intentional.
class VectorDatabaseCreate(BaseModel):
    """Request model for creating a new vector database."""

    name: str = Field(..., description="Name of the vector database")
    model: str = Field(..., description="Model used to generate the vectors")
    description: Optional[str] = Field(None, description="Optional description")
13
+
14
+
15
class VectorDatabaseResponse(BaseModel):
    """Response model for a vector database."""

    id: str
    name: str
    model: str  # embedding model used to generate the vectors
    dimension: int  # dimensionality of the stored vectors
    description: Optional[str]
    created_at: datetime
    updated_at: datetime
    file_count: int  # number of files ingested into the database
    size_in_bytes: Optional[int] = None  # total storage size, when reported
27
+
28
+
29
class VectorDatabaseList(BaseModel):
    """Response model for a list of vector databases."""

    object: Literal["list"]  # constant object-type tag
    data: List[VectorDatabaseResponse]
34
+
35
+
36
class VectorDatabaseIngestionRequest(BaseModel):
    """Request model for creating a new vector database ingestion job."""

    file_ids: List[str] = Field(..., description="List of file IDs to ingest")
    method: str = Field(..., description="Method to use for ingestion")
    token_count: int = Field(default=800, description="Token count for ingestion")
    overlap_tokens: int = Field(default=100, description="Overlap tokens for ingestion")
43
+
44
+
45
class VectorDatabaseIngestionResponse(BaseModel):
    """Response model for a vector database ingestion job."""

    id: str
    vector_database_id: str  # database the job ingests into
    status: str  # job status as a raw string
    created_at: datetime
    updated_at: datetime
    error_message: Optional[str]  # populated when the job fails
    file_ids: List[str]  # files processed by this job
    metaflow_run_id: Optional[str]  # backing workflow run, when available
56
+
57
+
58
class VectorDatabaseIngestionList(BaseModel):
    """Response model for a list of vector database ingestion jobs."""

    object: Literal["list"]  # constant object-type tag
    data: List[VectorDatabaseIngestionResponse]
63
+
64
+
65
class VectorDatabaseFileResponse(BaseModel):
    """Response model for a vector database file."""

    id: str
    vector_database_id: str  # database the file belongs to
    filename: str
    created_at: datetime
72
+
73
+
74
class VectorDatabaseFileList(BaseModel):
    """Response model for a list of vector database files."""

    object: Literal["list"]  # constant object-type tag
    data: List[VectorDatabaseFileResponse]
@@ -1,8 +1,7 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.3
2
2
  Name: seekrai
3
- Version: 0.4.4
3
+ Version: 0.5.0
4
4
  Summary: Python client for SeekrAI
5
- Home-page: https://gitlab.cb.ntent.com/ml/seekr-py
6
5
  License: Apache-2.0
7
6
  Author: SeekrFlow
8
7
  Author-email: support@seekr.com
@@ -14,6 +13,7 @@ Classifier: Programming Language :: Python :: 3.9
14
13
  Classifier: Programming Language :: Python :: 3.10
15
14
  Classifier: Programming Language :: Python :: 3.11
16
15
  Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Programming Language :: Python :: 3.13
17
17
  Requires-Dist: click (>=8.1.7,<9.0.0)
18
18
  Requires-Dist: eval-type-backport (>=0.1.3,<0.3.0)
19
19
  Requires-Dist: filelock (>=3.13.1,<4.0.0)