seekrai 0.5.11__py3-none-any.whl → 0.5.12__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
seekrai/resources/agents/agent_inference.py CHANGED
@@ -3,6 +3,7 @@ from typing import Any, AsyncGenerator, Iterator, Optional, Union
  from seekrai.abstract import api_requestor
  from seekrai.seekrflow_response import SeekrFlowResponse
  from seekrai.types import ModelSettings, Run, RunRequest, RunResponse, SeekrFlowRequest
+ from seekrai.types.agents.runs import ResponseFormat


  class AgentInference:
@@ -16,7 +17,8 @@ class AgentInference:
          thread_id: str,
          *,
          stream: bool = False,
-         model_settings: Optional[ModelSettings] = None,
+         model_settings: ModelSettings = ModelSettings(),
+         response_format: Optional[Any] = None,
      ) -> Union[RunResponse, Iterator[Any]]:
          """
          Run an inference call on a deployed agent.
@@ -26,13 +28,18 @@ class AgentInference:
              thread_id (str): A thread identifier.
              stream (bool, optional): Whether to stream the response. Defaults to False.
              model_settings (optional): Additional parameters (such as temperature, max_tokens, etc).
+             response_format: Optional structured output specification. If provided, the LLM will be constrained to return JSON matching this schema.

          Returns:
              A dictionary with the response (if non-streaming) or an iterator over response chunks.
          """
-         payload = RunRequest(agent_id=agent_id).model_dump()
-         if model_settings is not None:
-             payload["model_settings"] = model_settings.model_dump()
+         payload = RunRequest(
+             agent_id=agent_id,
+             model_settings=model_settings,
+             response_format=ResponseFormat.from_value(response_format)
+             if response_format
+             else None,
+         ).model_dump()
          endpoint = f"threads/{thread_id}/runs"
          if stream:
              endpoint += "/stream"
@@ -147,7 +154,8 @@ class AsyncAgentInference:
          thread_id: str,
          *,
          stream: bool = False,
-         model_settings: Optional[ModelSettings] = None,
+         model_settings: ModelSettings = ModelSettings(),
+         response_format: Optional[Any] = None,
      ) -> Union[RunResponse, AsyncGenerator[Any, None]]:
          """
          Run an inference call on a deployed agent.
@@ -157,13 +165,18 @@ class AsyncAgentInference:
              thread_id (str): A thread identifier.
              stream (bool, optional): Whether to stream the response. Defaults to False.
              model_settings (optional): Additional parameters (such as temperature, max_tokens, etc).
+             response_format: Optional structured output specification. If provided, the LLM will be constrained to return JSON matching this schema.

          Returns:
              A dictionary with the response (if non-streaming) or an iterator over response chunks.
          """
-         payload = RunRequest(agent_id=agent_id).model_dump()
-         if model_settings is not None:
-             payload["model_settings"] = model_settings.model_dump()
+         payload = RunRequest(
+             agent_id=agent_id,
+             model_settings=model_settings,
+             response_format=ResponseFormat.from_value(response_format)
+             if response_format
+             else None,
+         ).model_dump()
          endpoint = f"threads/{thread_id}/runs"
          if stream:
              endpoint += "/stream"
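In both the sync and async clients, run() now always serializes a ModelSettings instance and, when response_format is given, normalizes it through ResponseFormat.from_value before the request is built. A standalone sketch of that payload construction follows; the agent id, schema, and settings values are illustrative only, while the class and function names come from this diff:

    # Mirrors the RunRequest construction added above; literal values are examples only.
    from seekrai.types import ModelSettings, RunRequest
    from seekrai.types.agents.runs import ResponseFormat

    schema = {"type": "object", "properties": {"answer": {"type": "string"}}}
    payload = RunRequest(
        agent_id="my-agent",  # illustrative agent id
        model_settings=ModelSettings(temperature=0.2, max_tokens=512),
        response_format=ResponseFormat.from_value(schema),
    ).model_dump()
    # payload now carries "model_settings" and "response_format" alongside "agent_id".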
seekrai/types/agents/runs.py CHANGED
@@ -1,16 +1,63 @@
  import datetime
  from enum import Enum
- from typing import Any, Optional, Union
+ from typing import Any, Dict, Optional, Union

+ import pydantic
  from pydantic import Field

  from seekrai.types.abstract import BaseModel


+ class ModelSettings(BaseModel):
+     """Settings to use when calling an LLM.
+
+     This class holds optional model configuration parameters (e.g. temperature,
+     top_p, penalties, truncation, etc.).
+
+     Not all models/providers support all of these parameters, so please check the API documentation
+     for the specific model and provider you are using.
+     """
+
+     temperature: float = Field(default=1.0, ge=0.0, le=2.0)
+     top_p: float = Field(default=1.0, ge=0.0, le=1.0)
+     frequency_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+     presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
+     max_tokens: Optional[int] = None
+
+
+ class ResponseFormat(BaseModel):
+     """Specifies a JSON schema for the response format.
+
+     When provided, the LLM will be constrained to return a JSON response
+     that matches the specified schema.
+
+     Can be instantiated with:
+     - A JSON schema dictionary
+     - A Pydantic model class
+     - An existing ResponseFormat instance
+     """
+
+     json_schema: Dict[str, Any]
+
+     @classmethod
+     def from_value(cls, value: Any) -> "ResponseFormat":
+         if isinstance(value, cls):
+             return value
+         if isinstance(value, dict):
+             return cls(json_schema=value)
+         if isinstance(value, type) and issubclass(value, pydantic.BaseModel):
+             return cls(json_schema=value.model_json_schema())
+         raise ValueError(
+             "ResponseFormat configuration is invalid. Expected ResponseFormat, a valid schema or a Pydantic BaseModel."
+         )
+
+
  class RunRequest(BaseModel):
      """Request model for creating a run."""

      agent_id: str = Field(default="default_agent")
+     model_settings: ModelSettings = ModelSettings()
+     response_format: Optional[Union[ResponseFormat, Dict[str, Any], type]] = None


  class RunResponse(BaseModel):
@@ -115,20 +162,3 @@ class RunStep(BaseModel):
      completed_at: Optional[datetime.datetime] = None
      meta_data: dict[str, Any] = Field(default_factory=dict)
      usage: Optional[RunStepUsage] = None
-
-
- class ModelSettings(BaseModel):
-     """Settings to use when calling an LLM.
-
-     This class holds optional model configuration parameters (e.g. temperature,
-     top_p, penalties, truncation, etc.).
-
-     Not all models/providers support all of these parameters, so please check the API documentation
-     for the specific model and provider you are using.
-     """
-
-     temperature: float = Field(default=1.0, ge=0.0, le=2.0)
-     top_p: float = Field(default=1.0, ge=0.0, le=1.0)
-     frequency_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
-     presence_penalty: float = Field(default=0.0, ge=-2.0, le=2.0)
-     max_tokens: Optional[int] = None
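The three accepted inputs listed in the ResponseFormat docstring map one-to-one onto the branches of from_value, and ModelSettings now supplies defaults instead of being optional. A short sketch exercising both; the Answer model is invented for illustration:

    # Exercises the from_value branches shown above; "Answer" is an illustrative model.
    import pydantic
    from seekrai.types.agents.runs import ModelSettings, ResponseFormat

    class Answer(pydantic.BaseModel):
        text: str
        confidence: float

    rf_model = ResponseFormat.from_value(Answer)             # Pydantic class -> Answer.model_json_schema()
    rf_dict = ResponseFormat.from_value({"type": "object"})  # plain dict is wrapped as json_schema
    assert ResponseFormat.from_value(rf_dict) is rf_dict     # an existing instance passes through unchanged

    settings = ModelSettings()  # defaults: temperature=1.0, top_p=1.0, penalties=0.0, max_tokens=None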
seekrai/types/files.py CHANGED
@@ -11,6 +11,7 @@ from seekrai.types.common import (


  class FilePurpose(str, Enum):
+     ReinforcementFineTune = "reinforcement-fine-tune"
      FineTune = "fine-tune"
      PreTrain = "pre-train"
      Alignment = "alignment"
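Because FilePurpose is a str-based Enum, the new member compares and round-trips as the plain string the API expects:

    from seekrai.types.files import FilePurpose

    purpose = FilePurpose.ReinforcementFineTune
    assert purpose.value == "reinforcement-fine-tune"
    assert FilePurpose("reinforcement-fine-tune") is purpose  # lookup by value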
seekrai-0.5.11.dist-info/METADATA → seekrai-0.5.12.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: seekrai
- Version: 0.5.11
+ Version: 0.5.12
  Summary: Python client for SeekrAI
  License: Apache-2.0
  Author: SeekrFlow
seekrai-0.5.11.dist-info/RECORD → seekrai-0.5.12.dist-info/RECORD CHANGED
@@ -8,7 +8,7 @@ seekrai/error.py,sha256=rAYL8qEd8INwYMMKvhS-HKeC3QkWL4Wq-zfazFU-zBg,4861
  seekrai/filemanager.py,sha256=bO2OvjZ9Cx5r2vr3Ocpd_0qVc3owRDT2LCU4Zmp2uDY,15489
  seekrai/resources/__init__.py,sha256=EPMOqI3mvpzMjNJIr2Da2tkDqSq_lntMph2O-j4pyIA,1501
  seekrai/resources/agents/__init__.py,sha256=qPdo3vMZUaGZPdZCNYL0hjtX-T6yAlnpE_zc5otkjak,373
- seekrai/resources/agents/agent_inference.py,sha256=oNk8UK39XY9mRaBdE5icpi-nSWiRKHgQBcq-DIhSht4,9219
+ seekrai/resources/agents/agent_inference.py,sha256=4dZNeKBHWhv22J4ADSfr_0HXR6B4Vbz3syGnatmLkPM,9834
  seekrai/resources/agents/agents.py,sha256=d3C4VaXq4q5WlUr0f8txdRD3hvEKKvz4UQA-a_sLgso,9385
  seekrai/resources/agents/threads.py,sha256=BwZ2_6wlezsb12PQjEw1fgdJh5S83SPgD6qZQoGvyIM,14544
  seekrai/resources/alignment.py,sha256=IOKlKK2I9_NhS9pwcrsd9-5OO7lVT8Uw0y_wuGHOnyA,5839
@@ -31,7 +31,7 @@ seekrai/types/__init__.py,sha256=N72VsnmOdvLxnhBFLgfugIkT0tqr-M7WVFpAFtXEjLI,466
  seekrai/types/abstract.py,sha256=TqWFQV_6bPblywfCH-r8FCkXWvPkc9KlJ4QVgyrnaMc,642
  seekrai/types/agents/__init__.py,sha256=akuEDZDs4-OzvPIt0nupF63vBmI_CPfSLKsQUHTl1s8,2163
  seekrai/types/agents/agent.py,sha256=85D4GeHF-bYYnPirJSi1MbFg_2uFE2fSEmAHV9LxZfQ,1132
- seekrai/types/agents/runs.py,sha256=BZRwrdRWizOcoMJ1r8FKq06znIfd25MNVTP7DxZBQWA,3780
+ seekrai/types/agents/runs.py,sha256=hXH8F6BjF6MoKPWiZY-QtgNTjr6Y256peaW5ASXLWxA,4823
  seekrai/types/agents/threads.py,sha256=TinCMKv1bi5LzboDyCx1XI4Zzd8UzUZos4VOrTNhmEc,6835
  seekrai/types/agents/tools/__init__.py,sha256=4MmlL13JLhWgMUlL3TKfegiA-IXGG06YellZTSTVFC8,537
  seekrai/types/agents/tools/env_model_config.py,sha256=9POx2DPwfSXgoaziJv7QvKeMrhMsYD1exnanSRK48vw,177
@@ -52,7 +52,7 @@ seekrai/types/deployments.py,sha256=CQd7uhnURnhXE4_W_1FDv3p8IpZBSgHIG1JCOgz8VA0,
  seekrai/types/embeddings.py,sha256=OANoLNOs0aceS8NppVvvcNYQbF7-pAOAmcr30pw64OU,749
  seekrai/types/error.py,sha256=uTKISs9aRC4_6zwirtNkanxepN8KY-SqCq0kNbfZylQ,370
  seekrai/types/explainability.py,sha256=l9dp9DJ_GPkHzNw_3zwiGkpAWDETMDN8NSoymhKvdgc,1420
- seekrai/types/files.py,sha256=yjJYT8twY-cNh9AY9qlcN_moTeCfR0tJSSCQsOVB02Y,2708
+ seekrai/types/files.py,sha256=Oo5nYy2jw3ksziL7e6FBtStvV8Tm-SfKEyJVAA2pR-Q,2762
  seekrai/types/finetune.py,sha256=pt6lJYXnkHDgMo61zPWvgB04I4sdlijCb0velkrT7TY,6502
  seekrai/types/images.py,sha256=Fusj8OhVYFsT8kz636lRGGivLbPXo_ZNgakKwmzJi3U,914
  seekrai/types/ingestion.py,sha256=uUdKOR4xqSfAXWQOR1UOltSlOnuyAwKVA1Q2a6Yslk8,919
@@ -65,8 +65,8 @@ seekrai/utils/api_helpers.py,sha256=0Y8BblNIr9h_R12zdmhkxgTlxgoRkbq84QNi4nNWGu8,
  seekrai/utils/files.py,sha256=7ixn_hgV-6pEhYqLyOp-EN0o8c1CzUwJzX9n3PQ5oqo,7164
  seekrai/utils/tools.py,sha256=jgJTL-dOIouDbEJLdQpQfpXhqaz_poQYS52adyUtBjo,1781
  seekrai/version.py,sha256=q6iGQVFor8zXiPP5F-3vy9TndOxKv5JXbaNJ2kdOQws,125
- seekrai-0.5.11.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
- seekrai-0.5.11.dist-info/METADATA,sha256=pPqdBDfPu-dJ7GozIqK1lWoxODbSl_tX-X0FKmsSbyg,4781
- seekrai-0.5.11.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
- seekrai-0.5.11.dist-info/entry_points.txt,sha256=N49yOEGi1sK7Xr13F_rkkcOxQ88suyiMoOmRhUHTZ_U,48
- seekrai-0.5.11.dist-info/RECORD,,
+ seekrai-0.5.12.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ seekrai-0.5.12.dist-info/METADATA,sha256=7n2YKQjR7WVH9rShRYO5ZcdxW4kN9moWX0siQMmFp1o,4781
+ seekrai-0.5.12.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
+ seekrai-0.5.12.dist-info/entry_points.txt,sha256=N49yOEGi1sK7Xr13F_rkkcOxQ88suyiMoOmRhUHTZ_U,48
+ seekrai-0.5.12.dist-info/RECORD,,