seekrai 0.5.2__py3-none-any.whl → 0.5.24__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. seekrai/abstract/response_parsing.py +2 -0
  2. seekrai/client.py +8 -0
  3. seekrai/resources/__init__.py +20 -2
  4. seekrai/resources/agents/__init__.py +12 -0
  5. seekrai/resources/agents/agent_inference.py +37 -10
  6. seekrai/resources/agents/agent_observability.py +135 -0
  7. seekrai/resources/agents/agents.py +51 -0
  8. seekrai/resources/agents/python_functions.py +295 -0
  9. seekrai/resources/alignment.py +460 -1
  10. seekrai/resources/chat/completions.py +17 -8
  11. seekrai/resources/embeddings.py +2 -2
  12. seekrai/resources/explainability.py +92 -0
  13. seekrai/resources/finetune.py +44 -0
  14. seekrai/resources/ingestion.py +5 -7
  15. seekrai/resources/models.py +0 -3
  16. seekrai/resources/vectordb.py +36 -2
  17. seekrai/types/__init__.py +30 -3
  18. seekrai/types/agents/__init__.py +25 -3
  19. seekrai/types/agents/agent.py +11 -0
  20. seekrai/types/agents/observability.py +34 -0
  21. seekrai/types/agents/python_functions.py +29 -0
  22. seekrai/types/agents/runs.py +51 -1
  23. seekrai/types/agents/tools/__init__.py +12 -2
  24. seekrai/types/agents/tools/schemas/__init__.py +8 -0
  25. seekrai/types/agents/tools/schemas/file_search.py +1 -1
  26. seekrai/types/agents/tools/schemas/file_search_env.py +0 -1
  27. seekrai/types/agents/tools/schemas/run_python.py +9 -0
  28. seekrai/types/agents/tools/schemas/run_python_env.py +8 -0
  29. seekrai/types/agents/tools/schemas/web_search.py +9 -0
  30. seekrai/types/agents/tools/schemas/web_search_env.py +7 -0
  31. seekrai/types/agents/tools/tool.py +9 -3
  32. seekrai/types/agents/tools/tool_types.py +4 -4
  33. seekrai/types/alignment.py +36 -0
  34. seekrai/types/chat_completions.py +1 -0
  35. seekrai/types/deployments.py +2 -0
  36. seekrai/types/explainability.py +26 -0
  37. seekrai/types/files.py +2 -1
  38. seekrai/types/finetune.py +40 -7
  39. seekrai/types/vectordb.py +6 -1
  40. {seekrai-0.5.2.dist-info → seekrai-0.5.24.dist-info}/METADATA +3 -6
  41. seekrai-0.5.24.dist-info/RECORD +76 -0
  42. {seekrai-0.5.2.dist-info → seekrai-0.5.24.dist-info}/WHEEL +1 -1
  43. seekrai/types/agents/tools/tool_env_types.py +0 -4
  44. seekrai-0.5.2.dist-info/RECORD +0 -67
  45. {seekrai-0.5.2.dist-info → seekrai-0.5.24.dist-info}/LICENSE +0 -0
  46. {seekrai-0.5.2.dist-info → seekrai-0.5.24.dist-info}/entry_points.txt +0 -0
seekrai/resources/explainability.py ADDED
@@ -0,0 +1,92 @@
+ from typing import Optional
+
+ from seekrai.abstract import api_requestor
+ from seekrai.resources.resource_base import ResourceBase
+ from seekrai.types import (
+     SeekrFlowRequest,
+ )
+ from seekrai.types.explainability import (
+     InfluentialFinetuningDataRequest,
+     InfluentialFinetuningDataResponse,
+ )
+
+
+ class Explainability(ResourceBase):
+     def get_influential_finetuning_data(
+         self,
+         model_id: str,
+         question: str,
+         system_prompt: Optional[str] = None,
+         answer: Optional[str] = None,
+     ) -> InfluentialFinetuningDataResponse:
+         """
+         Retrieve influential QA-pair fine-tuning data for a specific model.
+         Args:
+             model_id (str): ID of the model to explain.
+             question (str): Question from the user.
+             system_prompt (str | None): System prompt for the user's question.
+             answer (str | None): Answer of the fine-tuned model to the question; if None, the answer is retrieved from the fine-tuned model specified by model_id.
+         Returns:
+             InfluentialFinetuningDataResponse: Object containing the influential fine-tuning data.
+         """
+         requestor = api_requestor.APIRequestor(
+             client=self._client,
+         )
+         # Create query parameters dictionary
+         parameter_payload = InfluentialFinetuningDataRequest(
+             question=question, system_prompt=system_prompt, answer=answer
+         ).model_dump()
+
+         # if limit is not None:
+         #     params["limit"] = limit
+         # TODO limits =? timeout: float | None = None, max_retries: int | None = None,
+
+         response, _, _ = requestor.request(
+             options=SeekrFlowRequest(
+                 method="GET",
+                 url=f"flow/explain/models/{model_id}/influential-finetuning-data",
+                 params=parameter_payload,
+             ),
+             stream=False,
+         )
+         return InfluentialFinetuningDataResponse(**response.data)
+
+
+ class AsyncExplainability(ResourceBase):
+     async def get_influential_finetuning_data(
+         self,
+         model_id: str,
+         question: str,
+         system_prompt: Optional[str] = None,
+         answer: Optional[str] = None,
+     ) -> InfluentialFinetuningDataResponse:
+         """
+         Retrieve influential QA-pair fine-tuning data for a specific model asynchronously.
+         Args:
+             model_id (str): ID of the model to explain.
+             question (str): Question from the user.
+             system_prompt (str | None): System prompt for the user's question.
+             answer (str | None): Answer of the fine-tuned model to the question; if None, the answer is retrieved from the fine-tuned model specified by model_id.
+         Returns:
+             InfluentialFinetuningDataResponse: Object containing the influential fine-tuning data.
+         """
+         requestor = api_requestor.APIRequestor(
+             client=self._client,
+         )
+         # Create query parameters dictionary
+         parameter_payload = InfluentialFinetuningDataRequest(
+             model_id=model_id,
+             question=question,
+             system_prompt=system_prompt,
+             answer=answer,
+         ).model_dump()
+
+         response, _, _ = await requestor.arequest(
+             options=SeekrFlowRequest(
+                 method="GET",
+                 url=f"flow/explain/models/{model_id}/influential-finetuning-data",
+                 params=parameter_payload,
+             ),
+             stream=False,
+         )
+         return InfluentialFinetuningDataResponse(**response.data)
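
For orientation, the new resource can be exercised roughly like this. A minimal sketch, assuming the usual `SeekrFlow` client entry point and that the resource is exposed as `client.explainability` (neither is shown in this diff):

    from seekrai import SeekrFlow  # assumed client class

    client = SeekrFlow(api_key="...")  # placeholder credentials

    # Ask which fine-tuning QA pairs most influenced an answer. When `answer`
    # is omitted, the service queries the fine-tuned model itself.
    result = client.explainability.get_influential_finetuning_data(
        model_id="ft-1234",  # hypothetical model ID
        question="What is our refund policy?",
        system_prompt="You are a support assistant.",
    )
    print(result)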
seekrai/resources/finetune.py CHANGED
@@ -3,6 +3,7 @@ from __future__ import annotations
  from pathlib import Path

  from seekrai.abstract import api_requestor
+ from seekrai.error import InvalidRequestError
  from seekrai.resources.resource_base import ResourceBase
  from seekrai.seekrflow_response import SeekrFlowResponse
  from seekrai.types import (
@@ -17,6 +18,29 @@ from seekrai.types import (
  )


+ def validate_lora_support(
+     models_response: SeekrFlowResponse, training_config: TrainingConfig
+ ) -> None:
+     assert isinstance(models_response, SeekrFlowResponse)
+     model_entry = None
+     for model in models_response.data.get("data", []):
+         model_id = str(model.get("id")) if model.get("id") is not None else None
+         if (
+             model_id == training_config.model
+             or model.get("name") == training_config.model
+         ):
+             model_entry = model
+             break
+     if not model_entry:
+         raise InvalidRequestError(
+             f"Model '{training_config.model}' not found; cannot enable LoRA."
+         )
+     if not model_entry.get("supports_lora", False):
+         raise InvalidRequestError(
+             f"Model '{training_config.model}' does not support LoRA fine-tuning."
+         )
+
+
  class FineTuning(ResourceBase):
      def create(
          self,
@@ -39,6 +63,16 @@ class FineTuning(ResourceBase):
              client=self._client,
          )

+         if training_config.lora_config is not None:
+             models_response, _, _ = requestor.request(
+                 options=SeekrFlowRequest(
+                     method="GET",
+                     url="flow/models",
+                 ),
+                 stream=False,
+             )
+             validate_lora_support(models_response, training_config)
+
          parameter_payload = FinetuneRequest(
              project_id=project_id,
              training_config=training_config,
@@ -263,6 +297,16 @@ class AsyncFineTuning(ResourceBase):
              client=self._client,
          )

+         if training_config.lora_config is not None:
+             models_response, _, _ = await requestor.arequest(
+                 options=SeekrFlowRequest(
+                     method="GET",
+                     url="flow/models",
+                 ),
+                 stream=False,
+             )
+             validate_lora_support(models_response, training_config)
+
          parameter_payload = FinetuneRequest(
              project_id=project_id,
              training_config=training_config,
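
The change adds a client-side guard: when `training_config.lora_config` is set, both `FineTuning.create` and its async twin fetch `flow/models` first and fail fast with `InvalidRequestError` unless the target model advertises `supports_lora`. The lookup logic is easy to exercise in isolation; a self-contained sketch over plain dicts (the SDK wraps the same payload in `SeekrFlowResponse`, and raises `InvalidRequestError` rather than `ValueError`), with a hypothetical model entry:

    # Mirrors validate_lora_support over a raw GET flow/models payload.
    models_payload = {
        "data": [
            {"id": 42, "name": "example-base-model", "supports_lora": True},  # hypothetical entry
        ]
    }

    target = "example-base-model"  # stands in for training_config.model
    entry = next(
        (
            m
            for m in models_payload.get("data", [])
            if str(m.get("id")) == target or m.get("name") == target
        ),
        None,
    )
    if entry is None:
        raise ValueError(f"Model '{target}' not found; cannot enable LoRA.")
    if not entry.get("supports_lora", False):
        raise ValueError(f"Model '{target}' does not support LoRA fine-tuning.")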
seekrai/resources/ingestion.py CHANGED
@@ -1,4 +1,4 @@
- from typing import List
+ from typing import List, Optional

  from seekrai.abstract import api_requestor
  from seekrai.resources.resource_base import ResourceBase
@@ -13,6 +13,7 @@ class Ingestion(ResourceBase):
      def ingest(
          self,
          files: List[str],
+         method: Optional[str] = "accuracy-optimized",
      ) -> IngestionResponse:
          """
          Start an ingestion job for the specified files.
@@ -27,9 +28,7 @@
              client=self._client,
          )

-         parameter_payload = IngestionRequest(
-             files=files,
-         ).model_dump()
+         parameter_payload = IngestionRequest(files=files, method=method).model_dump()

          response, _, _ = requestor.request(
              options=SeekrFlowRequest(
@@ -95,6 +94,7 @@ class AsyncIngestion(ResourceBase):
      async def ingest(
          self,
          files: List[str],
+         method: Optional[str] = "accuracy-optimized",
      ) -> IngestionResponse:
          """
          Start an ingestion job for the specified files asynchronously.
@@ -109,9 +109,7 @@
              client=self._client,
          )

-         parameter_payload = IngestionRequest(
-             files=files,
-         ).model_dump()
+         parameter_payload = IngestionRequest(files=files, method=method).model_dump()

          response, _, _ = await requestor.arequest(
              options=SeekrFlowRequest(
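
Callers can now pick an ingestion method; omitting it keeps the new "accuracy-optimized" default. A hedged usage sketch, reusing the client assumed earlier (the `ingestion` attribute name is likewise an assumption):

    job = client.ingestion.ingest(
        files=["file-abc123"],  # hypothetical uploaded-file IDs
        method="accuracy-optimized",  # the default; other accepted values are not listed in this diff
    )
    print(job)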
seekrai/resources/models.py CHANGED
@@ -5,7 +5,6 @@ from seekrai.resources.resource_base import ResourceBase
  from seekrai.seekrflow_response import SeekrFlowResponse
  from seekrai.types import ModelList, ModelResponse, SeekrFlowRequest
  from seekrai.types.common import ObjectType
- from seekrai.utils import parse_timestamp


  class Models(ResourceBase):
@@ -38,7 +37,6 @@ class Models(ResourceBase):
                  object=ObjectType.Model,
                  name=model["name"],
                  bytes=model["size"],
-                 created_at=parse_timestamp(model["created_at"]),
                  model_type=model["model_type"],
              )
              for model in response.data["data"]
@@ -79,7 +77,6 @@ class AsyncModels(ResourceBase):
                  object=ObjectType.Model,
                  name=model["name"],
                  bytes=model["size"],
-                 created_at=parse_timestamp(model["created_at"]),
                  model_type=model["model_type"],
              )
              for model in response.data["data"]
seekrai/resources/vectordb.py CHANGED
@@ -107,7 +107,8 @@ class VectorDatabase(ResourceBase):
          self,
          database_id: str,
          files: List[str],
-         method: str,
+         method: Optional[str] = "accuracy-optimized",
+         chunking_method: Optional[str] = "markdown",
          token_count: int = 800,
          overlap_tokens: int = 100,
      ) -> VectorDatabaseIngestionResponse:
@@ -129,6 +130,7 @@
          parameter_payload = VectorDatabaseIngestionRequest(
              file_ids=files,
              method=method,
+             chunking_method=chunking_method,
              token_count=token_count,
              overlap_tokens=overlap_tokens,
          ).model_dump()
@@ -248,6 +250,21 @@
          # The endpoint returns 204 No Content
          return None

+     def delete_file(self, database_id: str, file_id: str) -> None:
+         """Delete a file from a vector database."""
+         requestor = api_requestor.APIRequestor(client=self._client)
+
+         response, _, _ = requestor.request(
+             options=SeekrFlowRequest(
+                 method="DELETE",
+                 url=f"flow/vectordb/{database_id}/files/{file_id}",
+             ),
+             stream=False,
+         )
+
+         # The endpoint returns 204 No Content
+         return None
+

  class AsyncVectorDatabase(ResourceBase):
      async def create(
@@ -338,7 +355,8 @@
          self,
          database_id: str,
          files: List[str],
-         method: str,
+         method: Optional[str] = "accuracy-optimized",
+         chunking_method: Optional[str] = "markdown",
          token_count: int = 800,
          overlap_tokens: int = 100,
      ) -> VectorDatabaseIngestionResponse:
@@ -360,6 +378,7 @@
          parameter_payload = VectorDatabaseIngestionRequest(
              file_ids=files,
              method=method,
+             chunking_method=chunking_method,
              token_count=token_count,
              overlap_tokens=overlap_tokens,
          ).model_dump()
@@ -480,3 +499,18 @@ class AsyncVectorDatabase(ResourceBase):

          # The endpoint returns 204 No Content
          return None
+
+     async def delete_file(self, database_id: str, file_id: str) -> None:
+         """Delete a file from a vector database asynchronously."""
+         requestor = api_requestor.APIRequestor(client=self._client)
+
+         response, _, _ = await requestor.arequest(
+             options=SeekrFlowRequest(
+                 method="DELETE",
+                 url=f"flow/vectordb/{database_id}/files/{file_id}",
+             ),
+             stream=False,
+         )
+
+         # The endpoint returns 204 No Content
+         return None
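
Vector-database ingestion gains the same optional `method` default plus a `chunking_method` knob, and both classes pick up a `delete_file` helper mapping to `DELETE flow/vectordb/{database_id}/files/{file_id}`. A hedged sketch, again assuming the client from above and a `vector_database` attribute name:

    client.vector_database.ingest(
        database_id="vdb-123",  # hypothetical IDs throughout
        files=["file-abc123"],
        chunking_method="markdown",  # new parameter; "markdown" is the default
        token_count=800,
        overlap_tokens=100,
    )
    client.vector_database.delete_file("vdb-123", "file-abc123")  # returns None (204 No Content)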
seekrai/types/__init__.py CHANGED
@@ -4,7 +4,6 @@ from seekrai.types.agents import (
      AgentDeleteResponse,
      AgentStatus,
      CreateAgentRequest,
-     Env,
      EnvConfig,
      FileSearch,
      FileSearchEnv,
@@ -13,10 +12,14 @@ from seekrai.types.agents import (
      InputMessage,
      InputText,
      MessageUpdateRequest,
+     ModelSettings,
      OutputGuardrail,
      OutputMessage,
      OutputText,
+     ReasoningEffort,
      Run,
+     RunPython,
+     RunPythonEnv,
      RunRequest,
      RunResponse,
      RunStatus,
@@ -42,16 +45,29 @@ from seekrai.types.agents import (
      Tool,
      ToolBase,
      ToolType,
+     WebSearch,
+     WebSearchEnv,
+ )
+ from seekrai.types.agents.tools.schemas import (
+     FileSearch,
+     FileSearchEnv,
+     RunPython,
+     RunPythonEnv,
+     WebSearch,
+     WebSearchEnv,
  )
- from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
  from seekrai.types.alignment import (
      AlignmentEstimationRequest,
      AlignmentEstimationResponse,
      AlignmentJobStatus,
      AlignmentList,
+     AlignmentOutput,
      AlignmentRequest,
      AlignmentResponse,
      AlignmentType,
+     SystemPrompt,
+     SystemPromptCreateRequest,
+     SystemPromptUpdateRequest,
  )
  from seekrai.types.chat_completions import (
      ChatCompletionChunk,
@@ -90,6 +106,7 @@ from seekrai.types.finetune import (
      FinetuneRequest,
      FinetuneResponse,
      InfrastructureConfig,
+     LoRAConfig,
      TrainingConfig,
  )
  from seekrai.types.images import (
@@ -123,6 +140,7 @@ __all__ = [
      "FinetuneDownloadResult",
      "InfrastructureConfig",
      "TrainingConfig",
+     "LoRAConfig",
      "FileRequest",
      "FileResponse",
      "FileList",
@@ -139,8 +157,12 @@ __all__ = [
      "AlignmentEstimationResponse",
      "AlignmentResponse",
      "AlignmentJobStatus",
+     "AlignmentOutput",
      "AlignmentList",
      "AlignmentType",
+     "SystemPrompt",
+     "SystemPromptCreateRequest",
+     "SystemPromptUpdateRequest",
      "Project",
      "ProjectWithRuns",
      "GetProjectsResponse",
@@ -183,15 +205,20 @@ __all__ = [
      "RunUsage",
      "RunStatus",
      "RunStepUsage",
+     "ModelSettings",
      "Agent",
      "AgentStatus",
      "CreateAgentRequest",
+     "ReasoningEffort",
      "AgentDeleteResponse",
      "ToolBase",
      "ToolType",
      "EnvConfig",
-     "Env",
      "Tool",
      "FileSearch",
      "FileSearchEnv",
+     "RunPython",
+     "RunPythonEnv",
+     "WebSearch",
+     "WebSearchEnv",
  ]
seekrai/types/agents/__init__.py CHANGED
@@ -3,8 +3,15 @@ from seekrai.types.agents.agent import (
      AgentDeleteResponse,
      AgentStatus,
      CreateAgentRequest,
+     ReasoningEffort,
+ )
+ from seekrai.types.agents.python_functions import (
+     DeletePythonFunctionResponse,
+     PythonFunctionBase,
+     PythonFunctionResponse,
  )
  from seekrai.types.agents.runs import (
+     ModelSettings,
      Run,
      RunRequest,
      RunResponse,
@@ -39,8 +46,15 @@ from seekrai.types.agents.threads import (
      ThreadMessageContentType,
      ThreadStatus,
  )
- from seekrai.types.agents.tools import Env, EnvConfig, Tool, ToolBase, ToolType
- from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
+ from seekrai.types.agents.tools import EnvConfig, Tool, ToolBase, ToolType
+ from seekrai.types.agents.tools.schemas import (
+     FileSearch,
+     FileSearchEnv,
+     RunPython,
+     RunPythonEnv,
+     WebSearch,
+     WebSearchEnv,
+ )


  __all__ = [
@@ -51,6 +65,7 @@ __all__ = [
      "RunUsage",
      "RunStatus",
      "RunStepUsage",
+     "ModelSettings",
      "MessageUpdateRequest",
      "ThreadCreateRequest",
      "ThreadStatus",
@@ -78,12 +93,19 @@ __all__ = [
      "Agent",
      "AgentStatus",
      "CreateAgentRequest",
+     "ReasoningEffort",
      "AgentDeleteResponse",
      "ToolBase",
      "ToolType",
      "EnvConfig",
-     "Env",
      "Tool",
      "FileSearch",
      "FileSearchEnv",
+     "RunPython",
+     "RunPythonEnv",
+     "WebSearch",
+     "WebSearchEnv",
+     "PythonFunctionBase",
+     "PythonFunctionResponse",
+     "DeletePythonFunctionResponse",
  ]
seekrai/types/agents/agent.py CHANGED
@@ -14,11 +14,17 @@ class AgentStatus(str, enum.Enum):
      FAILED = "Failed"


+ class ReasoningEffort(str, enum.Enum):
+     PERFORMANCE_OPTIMIZED = "performance_optimized"
+     SPEED_OPTIMIZED = "speed_optimized"
+
+
  class CreateAgentRequest(BaseModel):
      name: str
      instructions: str
      tools: list[Tool]
      model_id: str
+     reasoning_effort: Optional[ReasoningEffort] = None


  class Agent(BaseModel):
@@ -35,8 +41,13 @@
      updated_at: datetime
      last_deployed_at: Optional[datetime] = None
      active_duration: int = Field(default=0, ge=0)
+     reasoning_effort: ReasoningEffort


  class AgentDeleteResponse(BaseModel):
      id: str
      deleted: bool
+
+
+ class UpdateAgentRequest(CreateAgentRequest):
+     pass
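
The request model now carries the optional reasoning knob end to end. A runnable sketch built only from types this diff exports (IDs are placeholders):

    from seekrai.types import CreateAgentRequest, ReasoningEffort

    req = CreateAgentRequest(
        name="research-agent",
        instructions="Answer questions using the attached files.",
        tools=[],  # e.g. a FileSearch tool; left empty to stay self-contained
        model_id="model-123",  # placeholder
        reasoning_effort=ReasoningEffort.SPEED_OPTIMIZED,  # optional, defaults to None
    )
    print(req.model_dump())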
seekrai/types/agents/observability.py ADDED
@@ -0,0 +1,34 @@
+ from datetime import datetime
+ from typing import Any, Dict, List, Optional
+
+ from pydantic import field_serializer
+
+ from seekrai.types.abstract import BaseModel
+
+
+ class ObservabilitySpansRequest(BaseModel):
+     """Request model for requesting observability spans."""
+
+     min_start_datetime: Optional[datetime]
+     max_start_datetime: Optional[datetime]
+     agent_id: Optional[str]
+     run_id: Optional[str]
+     trace_id: Optional[str]
+     thread_id: Optional[str]
+     group: Optional[str]
+     metadata: Optional[dict[str, str]]
+     limit: int = 100
+     order: str = "desc"
+     offset: int = 0
+
+     @field_serializer("min_start_datetime", "max_start_datetime")
+     def serialize_dt(self, dt: Optional[datetime], _info: Any) -> Optional[str]:
+         if dt is not None:
+             return dt.isoformat()
+         return None
+
+
+ class ObservabilitySpansResponse(BaseModel):
+     """Response model for requesting observability spans."""
+
+     spans: List[Dict[str, Any]]
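
Because every filter field is `Optional` without a default, Pydantic v2 still requires each one to be passed explicitly; the serializer then renders datetimes as ISO-8601 strings. A sketch (the module path is inferred from the file list above, the agent ID is a placeholder):

    from datetime import datetime, timezone

    from seekrai.types.agents.observability import ObservabilitySpansRequest

    req = ObservabilitySpansRequest(
        min_start_datetime=datetime(2025, 1, 1, tzinfo=timezone.utc),
        max_start_datetime=None,
        agent_id="agent-123",
        run_id=None,
        trace_id=None,
        thread_id=None,
        group=None,
        metadata=None,
    )
    # min_start_datetime serializes to "2025-01-01T00:00:00+00:00"
    print(req.model_dump())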
seekrai/types/agents/python_functions.py ADDED
@@ -0,0 +1,29 @@
+ from datetime import datetime
+
+ from pydantic import BaseModel, ConfigDict
+
+
+ class PythonFunctionBase(BaseModel):
+     """Base model for a Python function, including metadata fields."""
+
+     model_config = ConfigDict(from_attributes=True)
+     id: str
+     version: int
+     name: str
+     description: str
+     active: bool
+
+
+ class PythonFunctionResponse(PythonFunctionBase):
+     """Response model for a Python function, including code and user info."""
+
+     code: str
+     user_id: str
+     created_at: datetime
+     updated_at: datetime
+
+
+ class DeletePythonFunctionResponse(BaseModel):
+     """Response model for Python function deletion."""
+
+     deleted: bool
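
These models only describe the payloads; the create/list/delete calls live in the new seekrai/resources/agents/python_functions.py. Constructing the response model directly shows the shape (all values are placeholders):

    from datetime import datetime, timezone

    from seekrai.types.agents.python_functions import PythonFunctionResponse

    fn = PythonFunctionResponse(
        id="fn-123",  # placeholder values throughout
        version=1,
        name="add",
        description="Add two numbers.",
        active=True,
        code="def add(a: int, b: int) -> int:\n    return a + b\n",
        user_id="user-1",
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
    )
    print(fn.name, fn.version, fn.active)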
seekrai/types/agents/runs.py CHANGED
@@ -1,16 +1,65 @@
  import datetime
  from enum import Enum
- from typing import Any, Optional, Union
+ from typing import Any, Dict, Optional, Union

+ import pydantic
  from pydantic import Field

  from seekrai.types.abstract import BaseModel


+ class ModelSettings(BaseModel):
+     """Settings to use when calling an LLM.
+
+     This class holds optional model configuration parameters (e.g. temperature,
+     top_p, penalties, truncation, etc.).
+
+     Not all models/providers support all of these parameters, so please check the API
+     documentation for the specific model and provider you are using.
+     """
+
+     temperature: float = Field(default=1.0, ge=0.0, le=2.0)
+     top_p: float = Field(default=1.0, ge=0.0, le=1.0)
+     frequency_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+     presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+     max_tokens: Optional[int] = None
+
+
+ class ResponseFormat(BaseModel):
+     """Specifies a JSON schema for the response format.
+
+     When provided, the LLM will be constrained to return a JSON response
+     that matches the specified schema.
+
+     Can be instantiated with:
+     - A JSON schema dictionary
+     - A Pydantic model class
+     - An existing ResponseFormat instance
+     """
+
+     json_schema: Dict[str, Any]
+
+     @classmethod
+     def from_value(cls, value: Any) -> "ResponseFormat":
+         if isinstance(value, cls):
+             return value
+         if isinstance(value, dict):
+             return cls(json_schema=value)
+         if isinstance(value, type) and issubclass(value, pydantic.BaseModel):
+             return cls(json_schema=value.model_json_schema())
+         raise ValueError(
+             "ResponseFormat configuration is invalid. Expected ResponseFormat, a valid schema or a Pydantic BaseModel."
+         )
+
+
  class RunRequest(BaseModel):
      """Request model for creating a run."""

      agent_id: str = Field(default="default_agent")
+     model_settings: ModelSettings = ModelSettings()
+     response_format: Optional[Union[ResponseFormat, Dict[str, Any], type]] = None
+     group: Optional[str] = None
+     metadata: Optional[Dict[str, str]] = None


  class RunResponse(BaseModel):
@@ -19,6 +68,7 @@ class RunResponse(BaseModel):
      run_id: str
      thread_id: str
      status: str
+     group: Optional[str] = None


  class RunStatus(str, Enum):
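
`ResponseFormat.from_value` normalizes all three accepted inputs to the same wrapper, so `RunRequest.response_format` can take a schema dict, a Pydantic model class, or a prebuilt instance. A runnable sketch (module path inferred from the file list; the agent ID is a placeholder):

    import pydantic

    from seekrai.types.agents.runs import ResponseFormat, RunRequest

    class Answer(pydantic.BaseModel):
        text: str
        confidence: float

    # All three forms normalize to an equivalent ResponseFormat:
    rf_model = ResponseFormat.from_value(Answer)
    rf_dict = ResponseFormat.from_value(Answer.model_json_schema())
    rf_same = ResponseFormat.from_value(rf_model)

    req = RunRequest(agent_id="agent-123", response_format=rf_model)
    print(req.model_settings.temperature)  # defaults to 1.0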
seekrai/types/agents/tools/__init__.py CHANGED
@@ -1,7 +1,13 @@
  from seekrai.types.agents.tools.env_model_config import EnvConfig
- from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
+ from seekrai.types.agents.tools.schemas import (
+     FileSearch,
+     FileSearchEnv,
+     RunPython,
+     RunPythonEnv,
+     WebSearch,
+     WebSearchEnv,
+ )
  from seekrai.types.agents.tools.tool import ToolBase, ToolType
- from seekrai.types.agents.tools.tool_env_types import Env
  from seekrai.types.agents.tools.tool_types import Tool


@@ -13,4 +19,8 @@ __all__ = [
      "Tool",
      "FileSearch",
      "FileSearchEnv",
+     "RunPython",
+     "RunPythonEnv",
+     "WebSearch",
+     "WebSearchEnv",
  ]
seekrai/types/agents/tools/schemas/__init__.py CHANGED
@@ -1,8 +1,16 @@
  from seekrai.types.agents.tools.schemas.file_search import FileSearch
  from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
+ from seekrai.types.agents.tools.schemas.run_python import RunPython
+ from seekrai.types.agents.tools.schemas.run_python_env import RunPythonEnv
+ from seekrai.types.agents.tools.schemas.web_search import WebSearch
+ from seekrai.types.agents.tools.schemas.web_search_env import WebSearchEnv


  __all__ = [
      "FileSearch",
      "FileSearchEnv",
+     "RunPython",
+     "RunPythonEnv",
+     "WebSearch",
+     "WebSearchEnv",
  ]
seekrai/types/agents/tools/schemas/file_search.py CHANGED
@@ -4,6 +4,6 @@ from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
  from seekrai.types.agents.tools.tool import ToolBase, ToolType


- class FileSearch(ToolBase):
+ class FileSearch(ToolBase[FileSearchEnv]):
      name: Literal[ToolType.FILE_SEARCH] = ToolType.FILE_SEARCH
      tool_env: FileSearchEnv