latitude-sdk 0.1.0b8__tar.gz → 1.0.0__tar.gz
This diff compares the contents of two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the changes between those versions as they appear in the public registry.
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/PKG-INFO +3 -2
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/pyproject.toml +3 -2
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/client/payloads.py +3 -1
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/evaluations.py +6 -5
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/latitude.py +8 -4
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/logs.py +10 -18
- latitude_sdk-1.0.0/src/latitude_sdk/sdk/prompts.py +453 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/types.py +42 -83
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/util/utils.py +8 -6
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/evaluations/trigger_test.py +1 -2
- latitude_sdk-1.0.0/tests/prompts/chat_test.py +559 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/prompts/get_test.py +3 -6
- latitude_sdk-1.0.0/tests/prompts/render_chain_test.py +156 -0
- latitude_sdk-1.0.0/tests/prompts/render_test.py +83 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/prompts/run_test.py +289 -2
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/utils/fixtures.py +393 -28
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/utils/utils.py +9 -6
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/uv.lock +50 -5
- latitude_sdk-0.1.0b8/src/latitude_sdk/sdk/prompts.py +0 -302
- latitude_sdk-0.1.0b8/tests/prompts/chat_test.py +0 -294
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/.gitignore +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/.python-version +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/README.md +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/scripts/format.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/scripts/lint.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/scripts/test.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/client/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/client/client.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/client/router.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/env/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/env/env.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/py.typed +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/errors.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/src/latitude_sdk/util/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/evaluations/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/evaluations/create_result_test.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/logs/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/logs/create_test.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/prompts/__init__.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/prompts/get_or_create_test.py +0 -0
- {latitude_sdk-0.1.0b8 → latitude_sdk-1.0.0}/tests/utils/__init__.py +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: latitude-sdk
-Version: 0.1.0b8
+Version: 1.0.0
 Summary: Latitude SDK for Python
 Project-URL: repository, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python
 Project-URL: homepage, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python#readme
@@ -11,7 +11,8 @@ License-Expression: LGPL-3.0
 Requires-Python: <3.13,>=3.9
 Requires-Dist: httpx-sse>=0.4.0
 Requires-Dist: httpx>=0.28.1
-Requires-Dist: latitude-telemetry>=0.
+Requires-Dist: latitude-telemetry>=1.0.0
+Requires-Dist: promptl-ai>=1.0.1
 Requires-Dist: pydantic>=2.10.3
 Requires-Dist: typing-extensions>=4.12.2
 Description-Content-Type: text/markdown
pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "latitude-sdk"
-version = "0.1.0b8"
+version = "1.0.0"
 description = "Latitude SDK for Python"
 authors = [{ name = "Latitude Data SL", email = "hello@latitude.so" }]
 maintainers = [{ name = "Latitude Data SL", email = "hello@latitude.so" }]
@@ -15,7 +15,8 @@ dependencies = [
     "httpx-sse>=0.4.0",
     "pydantic>=2.10.3",
     "typing-extensions>=4.12.2",
-    "latitude-telemetry>=0.
+    "latitude-telemetry>=1.0.0",
+    "promptl-ai>=1.0.1",
 ]

 [dependency-groups]
src/latitude_sdk/sdk/evaluations.py

@@ -8,10 +8,7 @@ from latitude_sdk.client import (
     TriggerEvaluationRequestBody,
     TriggerEvaluationRequestParams,
 )
-from latitude_sdk.sdk.types import (
-    EvaluationResult,
-    SdkOptions,
-)
+from latitude_sdk.sdk.types import EvaluationResult, SdkOptions
 from latitude_sdk.util import Model


@@ -40,7 +37,9 @@ class Evaluations:
         self._options = options
         self._client = client

-    async def trigger(self, uuid: str, options: TriggerEvaluationOptions) -> TriggerEvaluationResult:
+    async def trigger(self, uuid: str, options: Optional[TriggerEvaluationOptions] = None) -> TriggerEvaluationResult:
+        options = TriggerEvaluationOptions(**{**dict(self._options), **dict(options or {})})
+
         async with self._client.request(
             handler=RequestHandler.TriggerEvaluation,
             params=TriggerEvaluationRequestParams(
@@ -55,6 +54,8 @@ class Evaluations:
     async def create_result(
         self, uuid: str, evaluation_uuid: str, options: CreateEvaluationResultOptions
     ) -> CreateEvaluationResultResult:
+        options = CreateEvaluationResultOptions(**{**dict(self._options), **dict(options)})
+
         async with self._client.request(
             handler=RequestHandler.CreateEvaluationResult,
             params=CreateEvaluationResultRequestParams(
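With this change, `trigger` merges the SDK-wide defaults into the per-call options, so the options argument becomes optional. A minimal usage sketch under assumptions: `Latitude` is re-exported from the package root, and the API key and conversation UUID are placeholders:

```python
import asyncio

from latitude_sdk import Latitude  # assumed re-export from the package root


async def main():
    sdk = Latitude("my-api-key")  # placeholder API key

    # The options argument can now be omitted; SDK-wide defaults are merged in
    # via TriggerEvaluationOptions(**{**dict(self._options), **dict(options or {})}).
    await sdk.evaluations.trigger("00000000-0000-0000-0000-000000000000")  # placeholder UUID


asyncio.run(main())
```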
src/latitude_sdk/sdk/latitude.py

@@ -1,6 +1,7 @@
 from typing import Optional

 from latitude_telemetry import Telemetry, TelemetryOptions
+from promptl_ai import Promptl, PromptlOptions

 from latitude_sdk.client import Client, ClientOptions, RouterOptions
 from latitude_sdk.env import env
@@ -20,6 +21,7 @@ class InternalOptions(Model):


 class LatitudeOptions(SdkOptions, Model):
+    promptl: Optional[PromptlOptions] = None
     telemetry: Optional[TelemetryOptions] = None
     internal: Optional[InternalOptions] = None

@@ -39,7 +41,7 @@ DEFAULT_INTERNAL_OPTIONS = InternalOptions(


 DEFAULT_LATITUDE_OPTIONS = LatitudeOptions(
-    telemetry=None,  #
+    telemetry=None,  # NOTE: Telemetry is opt-in
     internal=DEFAULT_INTERNAL_OPTIONS,
 )

@@ -48,15 +50,16 @@ class Latitude:
     _options: LatitudeOptions
     _client: Client

+    promptl: Promptl
     telemetry: Optional[Telemetry]

     prompts: Prompts
     logs: Logs
     evaluations: Evaluations

-    def __init__(self, api_key: str, options: LatitudeOptions):
+    def __init__(self, api_key: str, options: Optional[LatitudeOptions] = None):
+        options = LatitudeOptions(**{**dict(DEFAULT_LATITUDE_OPTIONS), **dict(options or {})})
         options.internal = InternalOptions(**{**dict(DEFAULT_INTERNAL_OPTIONS), **dict(options.internal or {})})
-        options = LatitudeOptions(**{**dict(DEFAULT_LATITUDE_OPTIONS), **dict(options)})
         self._options = options

         assert self._options.internal is not None
@@ -77,9 +80,10 @@ class Latitude:
             )
         )

+        self.promptl = Promptl(self._options.promptl)
         if self._options.telemetry:
             self.telemetry = Telemetry(api_key, self._options.telemetry)

-        self.prompts = Prompts(self._client, self._options)
+        self.prompts = Prompts(self._client, self.promptl, self._options)
         self.logs = Logs(self._client, self._options)
         self.evaluations = Evaluations(self._client, self._options)
src/latitude_sdk/sdk/logs.py

@@ -1,13 +1,11 @@
-from typing import
+from typing import Optional, Sequence
+
+from promptl_ai import MessageLike
+from promptl_ai.bindings.types import _Message

 from latitude_sdk.client import Client, CreateLogRequestBody, CreateLogRequestParams, RequestHandler
 from latitude_sdk.sdk.errors import ApiError, ApiErrorCodes
-from latitude_sdk.sdk.types import (
-    Log,
-    Message,
-    SdkOptions,
-    _Message,
-)
+from latitude_sdk.sdk.types import Log, SdkOptions
 from latitude_sdk.util import Model


@@ -32,9 +30,8 @@ class Logs:
         self._options = options
         self._client = client

-    def
-
-        if not project_id:
+    def _ensure_log_options(self, options: LogOptions):
+        if not options.project_id:
             raise ApiError(
                 status=404,
                 code=ApiErrorCodes.NotFoundError,
@@ -42,16 +39,11 @@ class Logs:
                 response="Project ID is required",
             )

-        version_uuid = options.version_uuid or self._options.version_uuid
-
-        return LogOptions(project_id=project_id, version_uuid=version_uuid)
-
     async def create(
-        self, path: str, messages: Sequence[
+        self, path: str, messages: Sequence[MessageLike], options: Optional[CreateLogOptions] = None
     ) -> CreateLogResult:
-
-
-
+        options = CreateLogOptions(**{**dict(self._options), **dict(options or {})})
+        self._ensure_log_options(options)
         assert options.project_id is not None

         messages = [_Message.validate_python(message) for message in messages]
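`create` now takes promptl-ai `MessageLike` values and validates them with `_Message.validate_python`, so plain role/content dicts should be accepted alongside promptl-ai message models (an assumption based on the pydantic validation step; the path and identifiers below are placeholders):

```python
import asyncio

from latitude_sdk import Latitude, LatitudeOptions  # assumed re-exports


async def main():
    sdk = Latitude("my-api-key", LatitudeOptions(project_id=123))

    # "prompts/welcome" is a placeholder path; project_id/version_uuid fall
    # back to the SDK-level options merged in by logs.create.
    await sdk.logs.create(
        "prompts/welcome",
        [{"role": "user", "content": "Hello!"}],  # dict validated into a promptl-ai message
    )


asyncio.run(main())
```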
src/latitude_sdk/sdk/prompts.py (new file)

@@ -0,0 +1,453 @@
+import asyncio
+from typing import Any, AsyncGenerator, Dict, List, Optional, Sequence, Union
+
+from promptl_ai import Adapter, Message, MessageLike, Promptl, ToolMessage, ToolResultContent
+from promptl_ai.bindings.types import _Message
+
+from latitude_sdk.client import (
+    ChatPromptRequestBody,
+    ChatPromptRequestParams,
+    Client,
+    ClientEvent,
+    GetOrCreatePromptRequestBody,
+    GetOrCreatePromptRequestParams,
+    GetPromptRequestParams,
+    RequestHandler,
+    RunPromptRequestBody,
+    RunPromptRequestParams,
+)
+from latitude_sdk.sdk.errors import ApiError, ApiErrorCodes
+from latitude_sdk.sdk.types import (
+    ChainEventCompleted,
+    ChainEventError,
+    ChainEvents,
+    ChainEventStep,
+    ChainEventStepCompleted,
+    FinishedEvent,
+    OnStep,
+    OnToolCall,
+    OnToolCallDetails,
+    Prompt,
+    Providers,
+    SdkOptions,
+    StreamCallbacks,
+    StreamEvents,
+    StreamTypes,
+    ToolResult,
+)
+from latitude_sdk.util import Model
+
+_PROVIDER_TO_ADAPTER = {
+    Providers.OpenAI: Adapter.OpenAI,
+    Providers.Anthropic: Adapter.Anthropic,
+}
+
+_PROMPT_ATTR_TO_ADAPTER_ATTR = {
+    "maxTokens": ("max_tokens", [Adapter.OpenAI, Adapter.Anthropic]),
+    "topP": ("top_p", [Adapter.OpenAI, Adapter.Anthropic]),
+    "topK": ("top_k", [Adapter.OpenAI, Adapter.Anthropic]),
+    "presencePenalty": ("presence_penalty", [Adapter.OpenAI, Adapter.Anthropic]),
+    "stopSequences": ("stop_sequences", [Adapter.OpenAI, Adapter.Anthropic]),
+    "toolChoice": ("tool_choice", [Adapter.OpenAI, Adapter.Anthropic]),
+}
+
+
+class OnToolCallPaused(Exception):
+    pass
+
+
+class PromptOptions(Model):
+    project_id: Optional[int] = None
+    version_uuid: Optional[str] = None
+
+
+class GetPromptOptions(PromptOptions, Model):
+    pass
+
+
+class GetPromptResult(Prompt, Model):
+    pass
+
+
+class GetOrCreatePromptOptions(PromptOptions, Model):
+    prompt: Optional[str] = None
+
+
+class GetOrCreatePromptResult(Prompt, Model):
+    pass
+
+
+class RunPromptOptions(StreamCallbacks, PromptOptions, Model):
+    custom_identifier: Optional[str] = None
+    parameters: Optional[Dict[str, Any]] = None
+    tools: Optional[Dict[str, OnToolCall]] = None
+    stream: Optional[bool] = None
+
+
+class RunPromptResult(FinishedEvent, Model):
+    pass
+
+
+class ChatPromptOptions(StreamCallbacks, Model):
+    tools: Optional[Dict[str, OnToolCall]] = None
+    stream: Optional[bool] = None
+
+
+class ChatPromptResult(FinishedEvent, Model):
+    pass
+
+
+class RenderPromptOptions(Model):
+    parameters: Optional[Dict[str, Any]] = None
+    adapter: Optional[Adapter] = None
+
+
+class RenderPromptResult(Model):
+    messages: List[MessageLike]
+    config: Dict[str, Any]
+
+
+class RenderChainOptions(Model):
+    parameters: Optional[Dict[str, Any]] = None
+    adapter: Optional[Adapter] = None
+
+
+class RenderChainResult(RenderPromptResult, Model):
+    pass
+
+
+class Prompts:
+    _options: SdkOptions
+    _client: Client
+    _promptl: Promptl
+
+    def __init__(self, client: Client, promptl: Promptl, options: SdkOptions):
+        self._options = options
+        self._client = client
+        self._promptl = promptl
+
+    def _ensure_prompt_options(self, options: PromptOptions):
+        if not options.project_id:
+            raise ApiError(
+                status=404,
+                code=ApiErrorCodes.NotFoundError,
+                message="Project ID is required",
+                response="Project ID is required",
+            )
+
+    async def _handle_stream(
+        self, stream: AsyncGenerator[ClientEvent, Any], on_event: Optional[StreamCallbacks.OnEvent]
+    ) -> FinishedEvent:
+        uuid = None
+        conversation: List[Message] = []
+        response = None
+
+        async for stream_event in stream:
+            event = None
+
+            if stream_event.event == str(StreamEvents.Latitude):
+                type = stream_event.json().get("type")
+
+                if type == str(ChainEvents.Step):
+                    event = ChainEventStep.model_validate_json(stream_event.data)
+                    conversation.extend(event.messages)
+
+                elif type == str(ChainEvents.StepCompleted):
+                    event = ChainEventStepCompleted.model_validate_json(stream_event.data)
+
+                elif type == str(ChainEvents.Completed):
+                    event = ChainEventCompleted.model_validate_json(stream_event.data)
+                    uuid = event.uuid
+                    conversation.extend(event.messages or [])
+                    response = event.response
+
+                elif type == str(ChainEvents.Error):
+                    event = ChainEventError.model_validate_json(stream_event.data)
+                    raise ApiError(
+                        status=400,
+                        code=ApiErrorCodes.AIRunError,
+                        message=event.error.message,
+                        response=stream_event.data,
+                    )
+
+                else:
+                    raise ApiError(
+                        status=500,
+                        code=ApiErrorCodes.InternalServerError,
+                        message=f"Unknown latitude event: {type}",
+                        response=stream_event.data,
+                    )
+
+            elif stream_event.event == str(StreamEvents.Provider):
+                event = stream_event.json()
+                event["event"] = StreamEvents.Provider
+
+            else:
+                raise ApiError(
+                    status=500,
+                    code=ApiErrorCodes.InternalServerError,
+                    message=f"Unknown stream event: {stream_event.event}",
+                    response=stream_event.data,
+                )
+
+            if on_event:
+                on_event(event)
+
+        if not uuid or not response:
+            raise ApiError(
+                status=500,
+                code=ApiErrorCodes.InternalServerError,
+                message="Stream ended without a chain-complete event. Missing uuid or response.",
+                response="Stream ended without a chain-complete event. Missing uuid or response.",
+            )
+
+        # NOTE: FinishedEvent not in on_event
+        return FinishedEvent(uuid=uuid, conversation=conversation, response=response)
+
+    def _pause_tool_execution(self) -> ToolResult:
+        raise OnToolCallPaused()
+
+    async def _handle_tool_calls(
+        self, result: FinishedEvent, options: Union[RunPromptOptions, ChatPromptOptions]
+    ) -> Optional[FinishedEvent]:
+        # Seems Python cannot infer the type
+        assert result.response.type == StreamTypes.Text and result.response.tool_calls is not None
+
+        if not options.tools:
+            raise ApiError(
+                status=400,
+                code=ApiErrorCodes.AIRunError,
+                message="Tools not supplied",
+                response="Tools not supplied",
+            )
+
+        for tool_call in result.response.tool_calls:
+            if tool_call.name not in options.tools:
+                raise ApiError(
+                    status=400,
+                    code=ApiErrorCodes.AIRunError,
+                    message=f"Tool {tool_call.name} not supplied",
+                    response=f"Tool {tool_call.name} not supplied",
+                )
+
+        details = OnToolCallDetails(
+            conversation_uuid=result.uuid,
+            messages=result.conversation,
+            pause_execution=self._pause_tool_execution,
+            requested_tool_calls=result.response.tool_calls,
+        )
+
+        tool_results = await asyncio.gather(
+            *[options.tools[tool_call.name](tool_call, details) for tool_call in result.response.tool_calls],
+            return_exceptions=False,
+        )
+
+        tool_messages = [
+            ToolMessage(
+                content=[
+                    ToolResultContent(
+                        id=tool_result.id,
+                        name=tool_result.name,
+                        result=tool_result.result,
+                        is_error=tool_result.is_error,
+                    )
+                ]
+            )
+            for tool_result in tool_results
+        ]
+
+        next_result = await self.chat(result.uuid, tool_messages, ChatPromptOptions(**dict(options)))
+
+        return FinishedEvent(**dict(next_result)) if next_result else None
+
+    async def get(self, path: str, options: Optional[GetPromptOptions] = None) -> GetPromptResult:
+        options = GetPromptOptions(**{**dict(self._options), **dict(options or {})})
+        self._ensure_prompt_options(options)
+        assert options.project_id is not None
+
+        async with self._client.request(
+            handler=RequestHandler.GetPrompt,
+            params=GetPromptRequestParams(
+                project_id=options.project_id,
+                version_uuid=options.version_uuid,
+                path=path,
+            ),
+        ) as response:
+            return GetPromptResult.model_validate_json(response.content)
+
+    async def get_or_create(
+        self, path: str, options: Optional[GetOrCreatePromptOptions] = None
+    ) -> GetOrCreatePromptResult:
+        options = GetOrCreatePromptOptions(**{**dict(self._options), **dict(options or {})})
+        self._ensure_prompt_options(options)
+        assert options.project_id is not None
+
+        async with self._client.request(
+            handler=RequestHandler.GetOrCreatePrompt,
+            params=GetOrCreatePromptRequestParams(
+                project_id=options.project_id,
+                version_uuid=options.version_uuid,
+            ),
+            body=GetOrCreatePromptRequestBody(
+                path=path,
+                prompt=options.prompt,
+            ),
+        ) as response:
+            return GetOrCreatePromptResult.model_validate_json(response.content)
+
+    async def run(self, path: str, options: Optional[RunPromptOptions] = None) -> Optional[RunPromptResult]:
+        options = RunPromptOptions(**{**dict(self._options), **dict(options or {})})
+        self._ensure_prompt_options(options)
+        assert options.project_id is not None
+
+        try:
+            async with self._client.request(
+                handler=RequestHandler.RunPrompt,
+                params=RunPromptRequestParams(
+                    project_id=options.project_id,
+                    version_uuid=options.version_uuid,
+                ),
+                body=RunPromptRequestBody(
+                    path=path,
+                    parameters=options.parameters,
+                    custom_identifier=options.custom_identifier,
+                    stream=options.stream,
+                ),
+            ) as response:
+                if options.stream:
+                    result = await self._handle_stream(response.sse(), options.on_event)
+                else:
+                    result = RunPromptResult.model_validate_json(response.content)
+
+                if options.tools and result.response.type == StreamTypes.Text and result.response.tool_calls:
+                    try:
+                        # NOTE: The last sdk.chat called will already call on_finished
+                        final_result = await self._handle_tool_calls(result, options)
+                        return RunPromptResult(**dict(final_result)) if final_result else None
+                    except OnToolCallPaused:
+                        pass
+
+                if options.on_finished:
+                    options.on_finished(FinishedEvent(**dict(result)))
+
+                return RunPromptResult(**dict(result))
+
+        except Exception as exception:
+            if not isinstance(exception, ApiError):
+                exception = ApiError(
+                    status=500,
+                    code=ApiErrorCodes.InternalServerError,
+                    message=str(exception),
+                    response=str(exception),
+                )
+
+            if not options.on_error:
+                raise exception
+
+            options.on_error(exception)
+
+            return None
+
+    async def chat(
+        self, uuid: str, messages: Sequence[MessageLike], options: Optional[ChatPromptOptions] = None
+    ) -> Optional[ChatPromptResult]:
+        options = ChatPromptOptions(**{**dict(self._options), **dict(options or {})})
+
+        messages = [_Message.validate_python(message) for message in messages]
+
+        try:
+            async with self._client.request(
+                handler=RequestHandler.ChatPrompt,
+                params=ChatPromptRequestParams(
+                    conversation_uuid=uuid,
+                ),
+                body=ChatPromptRequestBody(
+                    messages=messages,
+                    stream=options.stream,
+                ),
+            ) as response:
+                if options.stream:
+                    result = await self._handle_stream(response.sse(), options.on_event)
+                else:
+                    result = ChatPromptResult.model_validate_json(response.content)
+
+                if options.tools and result.response.type == StreamTypes.Text and result.response.tool_calls:
+                    try:
+                        # NOTE: The last sdk.chat called will already call on_finished
+                        final_result = await self._handle_tool_calls(result, options)
+                        return ChatPromptResult(**dict(final_result)) if final_result else None
+                    except OnToolCallPaused:
+                        pass
+
+                if options.on_finished:
+                    options.on_finished(FinishedEvent(**dict(result)))
+
+                return ChatPromptResult(**dict(result))
+
+        except Exception as exception:
+            if not isinstance(exception, ApiError):
+                exception = ApiError(
+                    status=500,
+                    code=ApiErrorCodes.InternalServerError,
+                    message=str(exception),
+                    response=str(exception),
+                )
+
+            if not options.on_error:
+                raise exception
+
+            options.on_error(exception)
+
+            return None
+
+    def _adapt_prompt_config(self, config: Dict[str, Any], adapter: Adapter) -> Dict[str, Any]:
+        adapted_config: Dict[str, Any] = {}
+
+        for attr, value in config.items():
+            if attr in _PROMPT_ATTR_TO_ADAPTER_ATTR and adapter in _PROMPT_ATTR_TO_ADAPTER_ATTR[attr][1]:
+                adapted_config[_PROMPT_ATTR_TO_ADAPTER_ATTR[attr][0]] = value
+            else:
+                adapted_config[attr] = value
+
+        return adapted_config
+
+    async def render(self, prompt: str, options: Optional[RenderPromptOptions] = None) -> RenderPromptResult:
+        options = RenderPromptOptions(**{**dict(self._options), **dict(options or {})})
+        adapter = options.adapter or Adapter.OpenAI
+
+        result = self._promptl.prompts.render(
+            prompt=prompt,
+            parameters=options.parameters,
+            adapter=adapter,
+        )
+
+        return RenderPromptResult(
+            messages=result.messages,
+            config=self._adapt_prompt_config(result.config, adapter),
+        )
+
+    async def render_chain(
+        self, prompt: Prompt, on_step: OnStep, options: Optional[RenderChainOptions] = None
+    ) -> RenderChainResult:
+        options = RenderChainOptions(**{**dict(self._options), **dict(options or {})})
+        adapter = options.adapter or _PROVIDER_TO_ADAPTER.get(prompt.provider or Providers.OpenAI, Adapter.OpenAI)
+
+        chain = self._promptl.chains.create(
+            prompt=prompt.content,
+            parameters=options.parameters,
+            adapter=adapter,
+        )
+
+        step = None
+        response = None
+        while not chain.completed:
+            step = chain.step(response)
+            if not step.completed:
+                response = await on_step(step.messages, self._adapt_prompt_config(step.config, adapter))
+
+        assert step is not None
+
+        return RenderChainResult(
+            messages=step.messages,
+            config=self._adapt_prompt_config(step.config, adapter),
+        )
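The rewritten prompts module centralizes streaming, tool execution, and local rendering via promptl-ai. A usage sketch of `run` with a tool callback, under assumptions: the prompt path and tool are hypothetical, and `Latitude`, `LatitudeOptions`, and `RunPromptOptions` are importable as shown:

```python
import asyncio

from latitude_sdk import Latitude, LatitudeOptions, RunPromptOptions  # assumed re-exports
from latitude_sdk.sdk.types import ToolResult


async def get_weather(tool_call, details):
    # OnToolCall: return a ToolResult; call details.pause_execution() instead
    # to defer the result out of band (raises OnToolCallPaused internally).
    return ToolResult(id=tool_call.id, name=tool_call.name, result="22C", is_error=False)


async def main():
    sdk = Latitude("my-api-key", LatitudeOptions(project_id=123))  # placeholders

    result = await sdk.prompts.run(
        "prompts/weather",  # hypothetical prompt path
        RunPromptOptions(
            parameters={"city": "Barcelona"},
            tools={"get_weather": get_weather},
            stream=True,
        ),
    )
    if result:  # None when paused or when on_error swallowed an exception
        print(result.uuid, result.response)


asyncio.run(main())
```

When a response requests tools, the SDK awaits every registered callback with `asyncio.gather`, wraps the results in `ToolMessage`s, and recurses through `chat` until no tool calls remain. `render` and `render_chain` instead execute promptl templates locally, mapping camelCase prompt config keys (e.g. `maxTokens`) to the provider adapter's snake_case equivalents.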