seekrai 0.5.2__tar.gz → 0.5.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (75)
  1. {seekrai-0.5.2 → seekrai-0.5.6}/PKG-INFO +2 -2
  2. {seekrai-0.5.2 → seekrai-0.5.6}/pyproject.toml +2 -2
  3. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/abstract/response_parsing.py +2 -0
  4. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/__init__.py +3 -0
  5. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/agents/agent_inference.py +10 -8
  6. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/agents/agents.py +45 -0
  7. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/chat/completions.py +11 -2
  8. seekrai-0.5.6/src/seekrai/resources/explainability.py +84 -0
  9. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/__init__.py +20 -3
  10. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/__init__.py +17 -3
  11. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/agent.py +11 -0
  12. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/runs.py +17 -0
  13. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/tools/__init__.py +12 -2
  14. seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/__init__.py +16 -0
  15. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/tools/schemas/file_search.py +1 -1
  16. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/tools/schemas/file_search_env.py +0 -1
  17. seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/run_python.py +9 -0
  18. seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/run_python_env.py +7 -0
  19. seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/web_search.py +9 -0
  20. seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/web_search_env.py +7 -0
  21. seekrai-0.5.6/src/seekrai/types/agents/tools/tool.py +20 -0
  22. seekrai-0.5.6/src/seekrai/types/agents/tools/tool_types.py +10 -0
  23. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/chat_completions.py +1 -0
  24. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/deployments.py +1 -0
  25. seekrai-0.5.6/src/seekrai/types/explainability.py +57 -0
  26. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/finetune.py +9 -0
  27. seekrai-0.5.2/src/seekrai/types/agents/tools/schemas/__init__.py +0 -8
  28. seekrai-0.5.2/src/seekrai/types/agents/tools/tool.py +0 -14
  29. seekrai-0.5.2/src/seekrai/types/agents/tools/tool_env_types.py +0 -4
  30. seekrai-0.5.2/src/seekrai/types/agents/tools/tool_types.py +0 -10
  31. {seekrai-0.5.2 → seekrai-0.5.6}/LICENSE +0 -0
  32. {seekrai-0.5.2 → seekrai-0.5.6}/README.md +0 -0
  33. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/__init__.py +0 -0
  34. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/abstract/__init__.py +0 -0
  35. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/abstract/api_requestor.py +0 -0
  36. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/client.py +0 -0
  37. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/constants.py +0 -0
  38. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/error.py +0 -0
  39. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/filemanager.py +0 -0
  40. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/agents/__init__.py +0 -0
  41. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/agents/threads.py +0 -0
  42. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/alignment.py +0 -0
  43. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/chat/__init__.py +0 -0
  44. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/completions.py +0 -0
  45. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/deployments.py +0 -0
  46. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/embeddings.py +0 -0
  47. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/files.py +0 -0
  48. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/finetune.py +0 -0
  49. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/images.py +0 -0
  50. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/ingestion.py +0 -0
  51. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/models.py +0 -0
  52. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/projects.py +0 -0
  53. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/resource_base.py +0 -0
  54. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/resources/vectordb.py +0 -0
  55. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/seekrflow_response.py +0 -0
  56. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/abstract.py +0 -0
  57. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/threads.py +0 -0
  58. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/agents/tools/env_model_config.py +0 -0
  59. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/alignment.py +0 -0
  60. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/common.py +0 -0
  61. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/completions.py +0 -0
  62. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/embeddings.py +0 -0
  63. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/error.py +0 -0
  64. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/files.py +0 -0
  65. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/images.py +0 -0
  66. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/ingestion.py +0 -0
  67. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/models.py +0 -0
  68. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/projects.py +0 -0
  69. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/types/vectordb.py +0 -0
  70. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/utils/__init__.py +0 -0
  71. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/utils/_log.py +0 -0
  72. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/utils/api_helpers.py +0 -0
  73. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/utils/files.py +0 -0
  74. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/utils/tools.py +0 -0
  75. {seekrai-0.5.2 → seekrai-0.5.6}/src/seekrai/version.py +0 -0
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: seekrai
-Version: 0.5.2
+Version: 0.5.6
 Summary: Python client for SeekrAI
 License: Apache-2.0
 Author: SeekrFlow
@@ -17,7 +17,7 @@ Classifier: Programming Language :: Python :: 3.13
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: eval-type-backport (>=0.1.3,<0.3.0)
 Requires-Dist: filelock (>=3.13.1,<4.0.0)
-Requires-Dist: httpx[http2] (>=0.27.0,<0.28.0)
+Requires-Dist: httpx[http2] (>=0.28.0,<0.29.0)
 Requires-Dist: numpy (>=1.23.5) ; python_version < "3.12"
 Requires-Dist: numpy (>=1.26.0) ; python_version >= "3.12"
 Requires-Dist: pillow (>=10.3.0,<11.0.0)
pyproject.toml
@@ -14,7 +14,7 @@ build-backend = "poetry.core.masonry.api"
 
 [tool.poetry]
 name = "seekrai"
-version = "0.5.2"
+version = "0.5.6"
 authors = [
     "SeekrFlow <support@seekr.com>"
 ]
@@ -46,7 +46,7 @@ numpy = [
     { version = ">=1.23.5", python = "<3.12" },
     { version = ">=1.26.0", python = ">=3.12" },
 ]
-httpx = {extras = ["http2"], version = "^0.27.0"}
+httpx = {extras = ["http2"], version = "^0.28.0"}
 
 [tool.poetry.group.quality]
 optional = true
src/seekrai/abstract/response_parsing.py
@@ -22,6 +22,8 @@ def parse_data_line(line: str) -> str:
 def parse_stream(chunks: Iterator[str]) -> Iterator[Any]:
     buffer = []
     for chunk in chunks:
+        if chunk == "data: [DONE]":
+            break
         content = parse_data_line(chunk)
 
         if content:
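The new guard handles the OpenAI-style SSE terminator: servers end a stream with a literal "data: [DONE]" line, which is not JSON and would otherwise reach the JSON parser. A minimal sketch of the pattern, with parse_data_line reimplemented as a stand-in (the real helper, and the buffering elided here, live in the file above):

import json
from typing import Any, Iterator

def parse_data_line(line: str) -> str:
    # Stand-in for the SDK helper: strip the SSE "data: " framing.
    return line.removeprefix("data: ").strip()

def parse_stream(chunks: Iterator[str]) -> Iterator[Any]:
    for chunk in chunks:
        if chunk == "data: [DONE]":  # sentinel line, not JSON; stop cleanly
            break
        content = parse_data_line(chunk)
        if content:
            yield json.loads(content)

print(list(parse_stream(iter(['data: {"delta": "Hi"}', 'data: [DONE]']))))
# [{'delta': 'Hi'}]; without the guard, json.loads("[DONE]") raises JSONDecodeError.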
src/seekrai/resources/__init__.py
@@ -4,6 +4,7 @@ from seekrai.resources.chat import AsyncChat, Chat
 from seekrai.resources.completions import AsyncCompletions, Completions
 from seekrai.resources.deployments import AsyncDeployments, Deployments
 from seekrai.resources.embeddings import AsyncEmbeddings, Embeddings
+from seekrai.resources.explainability import AsyncExplainability, Explainability
 from seekrai.resources.files import AsyncFiles, Files
 from seekrai.resources.finetune import AsyncFineTuning, FineTuning
 from seekrai.resources.images import AsyncImages, Images
@@ -41,4 +42,6 @@ __all__ = [
    "VectorDatabase",
    "AsyncVectorDatabase",
    "AgentInference",
+    "AsyncExplainability",
+    "Explainability",
 ]
src/seekrai/resources/agents/agent_inference.py
@@ -1,8 +1,8 @@
-from typing import Any, AsyncGenerator, Iterator, Union
+from typing import Any, AsyncGenerator, Iterator, Optional, Union
 
 from seekrai.abstract import api_requestor
 from seekrai.seekrflow_response import SeekrFlowResponse
-from seekrai.types import Run, RunRequest, RunResponse, SeekrFlowRequest
+from seekrai.types import ModelSettings, Run, RunRequest, RunResponse, SeekrFlowRequest
 
 
 class AgentInference:
@@ -16,7 +16,7 @@ class AgentInference:
         thread_id: str,
         *,
         stream: bool = False,
-        **model_settings: Any,
+        model_settings: Optional[ModelSettings] = None,
     ) -> Union[RunResponse, Iterator[Any]]:
         """
         Run an inference call on a deployed agent.
@@ -25,13 +25,14 @@ class AgentInference:
             agent_id (str): The unique identifier of the deployed agent.
             thread_id (str): A thread identifier.
             stream (bool, optional): Whether to stream the response. Defaults to False.
-            **model_settings: Additional parameters (such as temperature, max_tokens, etc).
+            model_settings (optional): Additional parameters (such as temperature, max_tokens, etc).
 
         Returns:
             A dictionary with the response (if non-streaming) or an iterator over response chunks.
         """
         payload = RunRequest(agent_id=agent_id).model_dump()
-        payload.update(model_settings)
+        if model_settings is not None:
+            payload["model_settings"] = model_settings.model_dump()
         endpoint = f"threads/{thread_id}/runs"
         if stream:
             endpoint += "/stream"
@@ -146,7 +147,7 @@ class AsyncAgentInference:
         thread_id: str,
         *,
         stream: bool = False,
-        **model_settings: Any,
+        model_settings: Optional[ModelSettings] = None,
     ) -> Union[RunResponse, AsyncGenerator[Any, None]]:
         """
         Run an inference call on a deployed agent.
@@ -155,13 +156,14 @@ class AsyncAgentInference:
             agent_id (str): The unique identifier of the deployed agent.
             thread_id (str): A thread identifier.
             stream (bool, optional): Whether to stream the response. Defaults to False.
-            **model_settings: Additional parameters (such as temperature, max_tokens, etc).
+            model_settings (optional): Additional parameters (such as temperature, max_tokens, etc).
 
         Returns:
             A dictionary with the response (if non-streaming) or an iterator over response chunks.
         """
         payload = RunRequest(agent_id=agent_id).model_dump()
-        payload.update(model_settings)
+        if model_settings is not None:
+            payload["model_settings"] = model_settings.model_dump()
         endpoint = f"threads/{thread_id}/runs"
         if stream:
             endpoint += "/stream"
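Callers that previously spread sampling parameters as loose keyword arguments now pass a typed ModelSettings object, and the payload nests it under a "model_settings" key instead of merging it flat. A hypothetical call; the instance accessor and method name are assumptions, since the def line sits outside the hunks above:

from seekrai.types import ModelSettings

# 0.5.2 style (no longer accepted):
#   agent_inference.run("agent_123", "thread_456", temperature=0.2)
# 0.5.6 style:
run = agent_inference.run(   # method name assumed
    "agent_123",             # agent_id
    "thread_456",            # thread_id
    stream=False,
    model_settings=ModelSettings(temperature=0.2, top_p=0.9, max_tokens=256),
)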
src/seekrai/resources/agents/agents.py
@@ -7,6 +7,7 @@ from seekrai.types.agents.agent import (
     Agent,
     AgentDeleteResponse,
     CreateAgentRequest,
+    UpdateAgentRequest,
 )
 
 
@@ -140,6 +141,28 @@ class Agents:
         assert isinstance(response, SeekrFlowResponse)
         return AgentDeleteResponse(**response.data)
 
+    def update(self, agent_id: str, request: UpdateAgentRequest) -> Agent:
+        """
+        Update an existing agent's configuration.
+
+        Args:
+            agent_id: The ID of the agent to update.
+            request: The request object containing updated agent config.
+
+        Returns:
+            The updated agent.
+        """
+        response, _, _ = self._requestor.request(
+            options=SeekrFlowRequest(
+                method="PUT",
+                url=f"flow/agents/{agent_id}/update",
+                params=request.model_dump(),
+            ),
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return Agent(**response.data)
+
 
 class AsyncAgents:
     def __init__(self, client: SeekrFlowClient) -> None:
@@ -270,3 +293,25 @@ class AsyncAgents:
 
         assert isinstance(response, SeekrFlowResponse)
         return AgentDeleteResponse(**response.data)
+
+    async def update(self, agent_id: str, request: UpdateAgentRequest) -> Agent:
+        """
+        Update an existing agent's configuration.
+
+        Args:
+            agent_id: The ID of the agent to update.
+            request: The request object containing updated agent config.
+
+        Returns:
+            The updated agent.
+        """
+        response, _, _ = await self._requestor.arequest(
+            options=SeekrFlowRequest(
+                method="PUT",
+                url=f"flow/agents/{agent_id}/update",
+                params=request.model_dump(),
+            ),
+        )
+
+        assert isinstance(response, SeekrFlowResponse)
+        return Agent(**response.data)
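UpdateAgentRequest subclasses CreateAgentRequest unchanged (see the types diff further down), so an update resends the full agent definition rather than a partial patch. A hedged sketch, assuming agents is an Agents instance:

from seekrai.types.agents.agent import UpdateAgentRequest

req = UpdateAgentRequest(
    name="support-bot",                            # illustrative values throughout
    instructions="Answer billing questions only.",
    tools=[],                                      # the full tool list is resent, not merged
    model_id="model_abc",
    reasoning_effort=None,                         # optional; see ReasoningEffort below
)
agent = agents.update("agent_123", req)            # PUT flow/agents/agent_123/update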
src/seekrai/resources/chat/completions.py
@@ -22,6 +22,7 @@ class ChatCompletions:
         *,
         messages: List[Dict[str, str]],
         model: str,
+        max_completion_tokens: int | None = None,
         max_tokens: int | None = 512,
         stop: List[str] | None = None,
         temperature: float = 0.7,
@@ -36,7 +37,7 @@ class ChatCompletions:
         safety_model: str | None = None,
         response_format: Dict[str, str | Dict[str, Any]] | None = None,
         tools: Dict[str, str | Dict[str, Any]] | None = None,
-        tool_choice: str | Dict[str, str | Dict[str, str]] | None = "auto",
+        tool_choice: str | Dict[str, str | Dict[str, str]] | None = None,
     ) -> ChatCompletionResponse | Iterator[ChatCompletionChunk]:
         """
         Method to generate completions based on a given prompt using a specified model.
@@ -45,6 +46,7 @@ class ChatCompletions:
             messages (List[Dict[str, str]]): A list of messages in the format
                 `[{"role": seekrai.types.chat_completions.MessageRole, "content": TEXT}, ...]`
             model (str): The name of the model to query.
+            max_completion_tokens (int, optional): The maximum number of tokens the output can contain.
             max_tokens (int, optional): The maximum number of tokens to generate.
                 Defaults to 512.
             stop (List[str], optional): List of strings at which to stop generation.
@@ -99,6 +101,7 @@ class ChatCompletions:
             top_p=top_p,
             top_k=top_k,
             temperature=temperature,
+            max_completion_tokens=max_completion_tokens,
             max_tokens=max_tokens,
             stop=stop,
             repetition_penalty=repetition_penalty,
@@ -110,14 +113,16 @@ class ChatCompletions:
             safety_model=safety_model,
             response_format=response_format,
             tools=tools or [],
-            tool_choice=tool_choice,
         ).model_dump()
+        if tool_choice is not None:
+            parameter_payload["tool_choice"] = tool_choice
 
         response, _, _ = requestor.request(
             options=SeekrFlowRequest(
                 method="POST",
                 url="inference/chat/completions",
                 params=parameter_payload,
+                headers={"content-type": "application/json"},
             ),
             stream=stream,
         )
@@ -139,6 +144,7 @@ class AsyncChatCompletions:
         *,
         messages: List[Dict[str, str]],
         model: str,
+        max_completion_tokens: int | None = None,
         max_tokens: int | None = 512,
         stop: List[str] | None = None,
         temperature: float = 0.7,
@@ -162,6 +168,7 @@ class AsyncChatCompletions:
             messages (List[Dict[str, str]]): A list of messages in the format
                 `[{"role": seekrai.types.chat_completions.MessageRole, "content": TEXT}, ...]`
             model (str): The name of the model to query.
+            max_completion_tokens (int, optional): The maximum number of tokens the output can contain.
             max_tokens (int, optional): The maximum number of tokens to generate.
                 Defaults to 512.
             stop (List[str], optional): List of strings at which to stop generation.
@@ -217,6 +224,7 @@ class AsyncChatCompletions:
             top_p=top_p,
             top_k=top_k,
             temperature=temperature,
+            max_completion_tokens=max_completion_tokens,
             max_tokens=max_tokens,
             stop=stop,
             repetition_penalty=repetition_penalty,
@@ -236,6 +244,7 @@ class AsyncChatCompletions:
                 method="POST",
                 url="inference/chat/completions",
                 params=parameter_payload,
+                headers={"content-type": "application/json"},
             ),
             stream=stream,
         )
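Two behavioral changes fall out of these hunks: tool_choice now defaults to None and is only serialized when set explicitly (0.5.2 always sent "auto", even with no tools defined), and requests now carry an explicit JSON content-type header. A hypothetical call, assuming the usual client.chat.completions accessor:

resp = client.chat.completions.create(
    model="example-model-id",                        # placeholder model name
    messages=[{"role": "user", "content": "Hello"}],
    max_completion_tokens=128,   # new in 0.5.6; max_tokens (default 512) is unchanged
)
# tool_choice is now omitted from the wire payload unless passed explicitly,
# e.g. tool_choice="auto" alongside a tools=... definition.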
seekrai-0.5.6/src/seekrai/resources/explainability.py (new file)
@@ -0,0 +1,84 @@
+from typing import Optional
+
+from seekrai.abstract import api_requestor
+from seekrai.resources.resource_base import ResourceBase
+from seekrai.seekrflow_response import SeekrFlowResponse
+from seekrai.types import (
+    SeekrFlowRequest,
+)
+from seekrai.types.explainability import (
+    InfluentialFinetuningDataRequest,
+    InfluentialFinetuningDataResponse,
+)
+
+
+class Explainability(ResourceBase):
+    def get_influential_finetuning_data(
+        self, model_id: str, question: str, answer: Optional[str], k: int = 5
+    ) -> InfluentialFinetuningDataResponse:
+        """
+        Retrieve influential QA pair fine tuning data for a specific model.
+        Args:
+            - model_id (str): ID of the model to explain.
+            - question (str): question from user,
+            - answer (str | None): answer of the finetuned model to the question; if None, the answer is retrieved from the finetuned model specified by model_id,
+            - k (int): the number of results to be retrieved (5 by default)
+        Returns:
+            InfluentialFinetuningDataResponse: Object containing the influential fine tuning data.
+        """
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+        # Create query parameters dictionary
+        parameter_payload = InfluentialFinetuningDataRequest(
+            question=question, answer=answer, k=k
+        ).model_dump()
+
+        # if limit is not None:
+        #     params["limit"] = limit
+        # TODO limits =? timeout: float | None = None, max_retries: int | None = None,
+
+        response, _, _ = requestor.request(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"v1/flow/explain/models/{model_id}/influential-finetuning-data",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+        assert isinstance(response, SeekrFlowResponse)
+        return InfluentialFinetuningDataResponse(**response.data)
+
+
+class AsyncExplainability(ResourceBase):
+    async def get_influential_finetuning_data(
+        self, model_id: str, question: str, answer: Optional[str], k: int = 5
+    ) -> InfluentialFinetuningDataResponse:
+        """
+        Retrieve influential QA pair finetuning data for a specific model asynchronously.
+        Args:
+            - model_id (str): ID of the model to explain.
+            - question (str): question from user,
+            - answer (str | None): answer of the finetuned model to the question; if None, the answer is retrieved from the finetuned model specified by model_id,
+            - k (int): the number of results to be retrieved (5 by default),
+        Returns:
+            InfluentialFinetuningDataResponse: Object containing the influential finetuning data.
+        """
+        requestor = api_requestor.APIRequestor(
+            client=self._client,
+        )
+        # Create query parameters dictionary
+        parameter_payload = InfluentialFinetuningDataRequest(
+            model_id=model_id, question=question, answer=answer, k=k
+        ).model_dump()
+
+        response, _, _ = await requestor.arequest(
+            options=SeekrFlowRequest(
+                method="GET",
+                url=f"v1/flow/explain/models/{model_id}/influential-finetuning-data",
+                params=parameter_payload,
+            ),
+            stream=False,
+        )
+        assert isinstance(response, SeekrFlowResponse)
+        return InfluentialFinetuningDataResponse(**response.data)
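A usage sketch for the new resource, assuming it is exposed on the client as client.explainability (the wiring beyond seekrai.resources.__init__ is not shown in this diff):

influential = client.explainability.get_influential_finetuning_data(
    model_id="ft-model-123",        # placeholder id
    question="What is the refund window?",
    answer=None,                    # None lets the service answer from the fine-tuned model
    k=5,
)
for row in influential.results:     # list of dicts per InfluentialFinetuningDataResponse
    print(row)

Note that the async variant above also passes model_id= into InfluentialFinetuningDataRequest, while the model defined later in this diff declares only question, answer, and k.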
src/seekrai/types/__init__.py
@@ -4,7 +4,6 @@ from seekrai.types.agents import (
     AgentDeleteResponse,
     AgentStatus,
     CreateAgentRequest,
-    Env,
     EnvConfig,
     FileSearch,
     FileSearchEnv,
@@ -13,10 +12,14 @@ from seekrai.types.agents import (
     InputMessage,
     InputText,
     MessageUpdateRequest,
+    ModelSettings,
     OutputGuardrail,
     OutputMessage,
     OutputText,
+    ReasoningEffort,
     Run,
+    RunPython,
+    RunPythonEnv,
     RunRequest,
     RunResponse,
     RunStatus,
@@ -42,8 +45,17 @@ from seekrai.types.agents import (
     Tool,
     ToolBase,
     ToolType,
+    WebSearch,
+    WebSearchEnv,
+)
+from seekrai.types.agents.tools.schemas import (
+    FileSearch,
+    FileSearchEnv,
+    RunPython,
+    RunPythonEnv,
+    WebSearch,
+    WebSearchEnv,
 )
-from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
 from seekrai.types.alignment import (
     AlignmentEstimationRequest,
     AlignmentEstimationResponse,
@@ -183,15 +195,20 @@ __all__ = [
    "RunUsage",
    "RunStatus",
    "RunStepUsage",
+    "ModelSettings",
    "Agent",
    "AgentStatus",
    "CreateAgentRequest",
+    "ReasoningEffort",
    "AgentDeleteResponse",
    "ToolBase",
    "ToolType",
    "EnvConfig",
-    "Env",
    "Tool",
    "FileSearch",
    "FileSearchEnv",
+    "RunPython",
+    "RunPythonEnv",
+    "WebSearch",
+    "WebSearchEnv",
 ]
src/seekrai/types/agents/__init__.py
@@ -3,8 +3,10 @@ from seekrai.types.agents.agent import (
     AgentDeleteResponse,
     AgentStatus,
     CreateAgentRequest,
+    ReasoningEffort,
 )
 from seekrai.types.agents.runs import (
+    ModelSettings,
     Run,
     RunRequest,
     RunResponse,
@@ -39,8 +41,15 @@ from seekrai.types.agents.threads import (
     ThreadMessageContentType,
     ThreadStatus,
 )
-from seekrai.types.agents.tools import Env, EnvConfig, Tool, ToolBase, ToolType
-from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
+from seekrai.types.agents.tools import EnvConfig, Tool, ToolBase, ToolType
+from seekrai.types.agents.tools.schemas import (
+    FileSearch,
+    FileSearchEnv,
+    RunPython,
+    RunPythonEnv,
+    WebSearch,
+    WebSearchEnv,
+)
 
 
 __all__ = [
@@ -51,6 +60,7 @@ __all__ = [
    "RunUsage",
    "RunStatus",
    "RunStepUsage",
+    "ModelSettings",
    "MessageUpdateRequest",
    "ThreadCreateRequest",
    "ThreadStatus",
@@ -78,12 +88,16 @@ __all__ = [
    "Agent",
    "AgentStatus",
    "CreateAgentRequest",
+    "ReasoningEffort",
    "AgentDeleteResponse",
    "ToolBase",
    "ToolType",
    "EnvConfig",
-    "Env",
    "Tool",
    "FileSearch",
    "FileSearchEnv",
+    "RunPython",
+    "RunPythonEnv",
+    "WebSearch",
+    "WebSearchEnv",
 ]
src/seekrai/types/agents/agent.py
@@ -14,11 +14,17 @@ class AgentStatus(str, enum.Enum):
     FAILED = "Failed"
 
 
+class ReasoningEffort(str, enum.Enum):
+    PERFORMANCE_OPTIMIZED = "performance_optimized"
+    SPEED_OPTIMIZED = "speed_optimized"
+
+
 class CreateAgentRequest(BaseModel):
     name: str
     instructions: str
     tools: list[Tool]
     model_id: str
+    reasoning_effort: Optional[ReasoningEffort] = None
 
 
 class Agent(BaseModel):
@@ -35,8 +41,13 @@ class Agent(BaseModel):
     updated_at: datetime
     last_deployed_at: Optional[datetime] = None
     active_duration: int = Field(default=0, ge=0)
+    reasoning_effort: ReasoningEffort
 
 
 class AgentDeleteResponse(BaseModel):
     id: str
     deleted: bool
+
+
+class UpdateAgentRequest(CreateAgentRequest):
+    pass
src/seekrai/types/agents/runs.py
@@ -115,3 +115,20 @@ class RunStep(BaseModel):
     completed_at: Optional[datetime.datetime] = None
     meta_data: dict[str, Any] = Field(default_factory=dict)
     usage: Optional[RunStepUsage] = None
+
+
+class ModelSettings(BaseModel):
+    """Settings to use when calling an LLM.
+
+    This class holds optional model configuration parameters (e.g. temperature,
+    top_p, penalties, truncation, etc.).
+
+    Not all models/providers support all of these parameters, so please check the API documentation
+    for the specific model and provider you are using.
+    """
+
+    temperature: float = Field(default=1.0, ge=0.0, le=2.0)
+    top_p: float = Field(default=1.0, ge=0.0, le=1.0)
+    frequency_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+    presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+    max_tokens: Optional[int] = None
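Every field has a default, so an empty ModelSettings() is valid, and the Field constraints are enforced by pydantic at construction time:

from seekrai.types import ModelSettings

print(ModelSettings().model_dump())
# {'temperature': 1.0, 'top_p': 1.0, 'frequency_penalty': 0.0,
#  'presence_penalty': 0.0, 'max_tokens': None}

ModelSettings(temperature=3.0)   # raises pydantic.ValidationError (le=2.0)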
src/seekrai/types/agents/tools/__init__.py
@@ -1,7 +1,13 @@
 from seekrai.types.agents.tools.env_model_config import EnvConfig
-from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv
+from seekrai.types.agents.tools.schemas import (
+    FileSearch,
+    FileSearchEnv,
+    RunPython,
+    RunPythonEnv,
+    WebSearch,
+    WebSearchEnv,
+)
 from seekrai.types.agents.tools.tool import ToolBase, ToolType
-from seekrai.types.agents.tools.tool_env_types import Env
 from seekrai.types.agents.tools.tool_types import Tool
 
 
@@ -13,4 +19,8 @@ __all__ = [
    "Tool",
    "FileSearch",
    "FileSearchEnv",
+    "RunPython",
+    "RunPythonEnv",
+    "WebSearch",
+    "WebSearchEnv",
 ]
seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/__init__.py (new file)
@@ -0,0 +1,16 @@
+from seekrai.types.agents.tools.schemas.file_search import FileSearch
+from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
+from seekrai.types.agents.tools.schemas.run_python import RunPython
+from seekrai.types.agents.tools.schemas.run_python_env import RunPythonEnv
+from seekrai.types.agents.tools.schemas.web_search import WebSearch
+from seekrai.types.agents.tools.schemas.web_search_env import WebSearchEnv
+
+
+__all__ = [
+    "FileSearch",
+    "FileSearchEnv",
+    "RunPython",
+    "RunPythonEnv",
+    "WebSearch",
+    "WebSearchEnv",
+]
src/seekrai/types/agents/tools/schemas/file_search.py
@@ -4,6 +4,6 @@ from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
 from seekrai.types.agents.tools.tool import ToolBase, ToolType
 
 
-class FileSearch(ToolBase):
+class FileSearch(ToolBase[FileSearchEnv]):
     name: Literal[ToolType.FILE_SEARCH] = ToolType.FILE_SEARCH
     tool_env: FileSearchEnv
src/seekrai/types/agents/tools/schemas/file_search_env.py
@@ -13,4 +13,3 @@ class FileSearchEnv(EnvConfig):
     score_threshold: float = Field(
         default=0, ge=0, lt=1.0, description="Score must be ≥ 0.0 and < 1.0"
     )
-
seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/run_python.py (new file)
@@ -0,0 +1,9 @@
+from typing import Literal
+
+from seekrai.types.agents.tools.schemas.run_python_env import RunPythonEnv
+from seekrai.types.agents.tools.tool import ToolBase, ToolType
+
+
+class RunPython(ToolBase[RunPythonEnv]):
+    name: Literal[ToolType.RUN_PYTHON] = ToolType.RUN_PYTHON
+    tool_env: RunPythonEnv
seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/run_python_env.py (new file)
@@ -0,0 +1,7 @@
+from typing import Optional
+
+from seekrai.types.agents.tools.env_model_config import EnvConfig
+
+
+class RunPythonEnv(EnvConfig):
+    run_python_tool_desc: Optional[str] = None
seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/web_search.py (new file)
@@ -0,0 +1,9 @@
+from typing import Literal
+
+from seekrai.types.agents.tools.schemas.web_search_env import WebSearchEnv
+from seekrai.types.agents.tools.tool import ToolBase, ToolType
+
+
+class WebSearch(ToolBase[WebSearchEnv]):
+    name: Literal[ToolType.WEB_SEARCH] = ToolType.WEB_SEARCH
+    tool_env: WebSearchEnv
seekrai-0.5.6/src/seekrai/types/agents/tools/schemas/web_search_env.py (new file)
@@ -0,0 +1,7 @@
+from typing import Optional
+
+from seekrai.types.agents.tools.env_model_config import EnvConfig
+
+
+class WebSearchEnv(EnvConfig):
+    web_search_tool_description: Optional[str] = None
seekrai-0.5.6/src/seekrai/types/agents/tools/tool.py (new file)
@@ -0,0 +1,20 @@
+import enum
+from typing import Generic, TypeVar
+
+from pydantic import BaseModel
+
+from seekrai.types.agents.tools.env_model_config import EnvConfig
+
+
+class ToolType(str, enum.Enum):
+    FILE_SEARCH = "file_search"
+    WEB_SEARCH = "web_search"
+    RUN_PYTHON = "run_python"
+
+
+TEnv = TypeVar("TEnv", bound=EnvConfig)
+
+
+class ToolBase(BaseModel, Generic[TEnv]):
+    name: ToolType
+    tool_env: TEnv
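The generic base lets each tool subclass pin its env type statically instead of going through the shared Env alias deleted from 0.5.2 below. A small sketch (assuming EnvConfig, whose definition is not in this diff, has no required fields):

from seekrai.types.agents.tools.schemas import FileSearch, FileSearchEnv

fs = FileSearch(tool_env=FileSearchEnv(score_threshold=0.5))
print(fs.name.value)   # "file_search"; the Literal default doubles as the discriminator value
# Passing a RunPythonEnv here would be flagged by a static type checker,
# which the old non-generic ToolBase (tool_env: Env) could not express.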
seekrai-0.5.6/src/seekrai/types/agents/tools/tool_types.py (new file)
@@ -0,0 +1,10 @@
+from typing import Annotated, Union
+
+from pydantic import Field
+
+from seekrai.types.agents.tools.schemas.file_search import FileSearch
+from seekrai.types.agents.tools.schemas.run_python import RunPython
+from seekrai.types.agents.tools.schemas.web_search import WebSearch
+
+
+Tool = Annotated[Union[FileSearch, WebSearch, RunPython], Field(discriminator="name")]
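With the discriminated union, pydantic routes a raw tool dict to the right class by its "name" field. A sketch using pydantic v2's TypeAdapter (again assuming EnvConfig imposes no required fields):

from pydantic import TypeAdapter
from seekrai.types.agents.tools.tool_types import Tool

tool = TypeAdapter(Tool).validate_python({"name": "web_search", "tool_env": {}})
print(type(tool).__name__)   # WebSearch, selected via the "name" discriminator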
src/seekrai/types/chat_completions.py
@@ -78,6 +78,7 @@ class ChatCompletionRequest(BaseModel):
     messages: List[ChatCompletionMessage]
     # model name
     model: str
+    max_completion_tokens: int | None = None
     # stopping criteria: max tokens to generate
     max_tokens: int | None = None
     # stopping criteria: list of strings to stop generation
src/seekrai/types/deployments.py
@@ -35,6 +35,7 @@ class DeploymentProcessor(str, enum.Enum):
     H100 = "H100"
     XEON = "XEON"
     NVIDIA = "NVIDIA"  # TODO - this doesnt make sense with A100, etc.
+    AMD = "AMD"
 
 
 class NewDeploymentRequest(BaseModel):
seekrai-0.5.6/src/seekrai/types/explainability.py (new file)
@@ -0,0 +1,57 @@
+from __future__ import annotations
+
+from datetime import datetime
+from enum import Enum
+from typing import Any, Dict, List, Literal
+
+from pydantic import Field
+
+from seekrai.types.abstract import BaseModel
+
+
+class InfluentialFinetuningDataResponse(BaseModel):
+    results: List[Dict[str, Any]]
+    version: str
+
+
+class InfluentialFinetuningDataRequest(BaseModel):
+    question: str
+    answer: str = Field(
+        default="",
+        description="Response could be generated or given",
+    )
+    k: int
+
+
+class ExplainabilityJobStatus(Enum):
+    QUEUED = "queued"
+    RUNNING = "running"
+    COMPLETED = "completed"
+    FAILED = "failed"
+
+    # TODO should titles along the following get added:
+    # create_index
+    # populate_index
+    # delete_index
+    # influential-finetuning-data
+
+
+class ExplainabilityRequest(BaseModel):
+    files: List[str] = Field(
+        default=..., description="List of file ids to use for fine tuning"
+    )
+    method: str = Field(default="best", description="Method to use for explainability")
+
+
+class ExplainabilityResponse(BaseModel):
+    id: str = Field(default=..., description="Explainability job ID")
+    created_at: datetime
+    status: ExplainabilityJobStatus
+    output_files: List[str]
+
+
+class ExplainabilityList(BaseModel):
+    # object type
+    object: Literal["list"] | None = None
+    # list of fine-tune job objects
+    data: List[ExplainabilityResponse] | None = None
@@ -72,6 +72,11 @@ class FinetuneEventType(str, Enum):
72
72
  WARNING = "WARNING"
73
73
 
74
74
 
75
+ class FineTuneType(str, Enum):
76
+ STANDARD = "STANDARD"
77
+ DPO = "DPO"
78
+
79
+
75
80
  class FinetuneEvent(BaseModel):
76
81
  """
77
82
  Fine-tune event type
@@ -105,6 +110,8 @@ class TrainingConfig(BaseModel):
105
110
  # wandb_key: str | None = None
106
111
  # IFT by default
107
112
  pre_train: bool = False
113
+ # fine-tune type
114
+ fine_tune_type: FineTuneType = FineTuneType.STANDARD
108
115
 
109
116
 
110
117
  class AcceleratorType(str, Enum):
@@ -138,6 +145,8 @@ class FinetuneResponse(BaseModel):
138
145
 
139
146
  # job ID
140
147
  id: str | None = None
148
+ # fine-tune type
149
+ fine_tune_type: FineTuneType = FineTuneType.STANDARD
141
150
  # training file id
142
151
  training_files: List[str] | None = None
143
152
  # validation file id
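Because fine_tune_type defaults to FineTuneType.STANDARD on both TrainingConfig and FinetuneResponse, payloads from jobs created before 0.5.6 that omit the field still validate; DPO jobs opt in explicitly:

from seekrai.types.finetune import FineTuneType

print([m.value for m in FineTuneType])      # ['STANDARD', 'DPO']
print(FineTuneType("DPO"))                  # round-trips from the wire value
# e.g. TrainingConfig(..., fine_tune_type=FineTuneType.DPO) for a DPO run.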
seekrai-0.5.2/src/seekrai/types/agents/tools/schemas/__init__.py (removed)
@@ -1,8 +0,0 @@
-from seekrai.types.agents.tools.schemas.file_search import FileSearch
-from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
-
-
-__all__ = [
-    "FileSearch",
-    "FileSearchEnv",
-]
seekrai-0.5.2/src/seekrai/types/agents/tools/tool.py (removed)
@@ -1,14 +0,0 @@
-import enum
-
-from pydantic import BaseModel
-
-from seekrai.types.agents.tools.tool_env_types import Env
-
-
-class ToolType(str, enum.Enum):
-    FILE_SEARCH = "file_search"
-
-
-class ToolBase(BaseModel):
-    name: ToolType
-    tool_env: Env
seekrai-0.5.2/src/seekrai/types/agents/tools/tool_env_types.py (removed)
@@ -1,4 +0,0 @@
-from seekrai.types.agents.tools.schemas.file_search_env import FileSearchEnv
-
-
-Env = FileSearchEnv  # will be a Union of tool envs when more are added
seekrai-0.5.2/src/seekrai/types/agents/tools/tool_types.py (removed)
@@ -1,10 +0,0 @@
-from typing import Annotated
-
-from pydantic import Field
-
-from seekrai.types.agents.tools.schemas.file_search import FileSearch
-
-
-Tool = Annotated[
-    FileSearch, Field(discriminator="name")
-]  # will be a Union of tools when more are added