latitude-sdk 0.1.0b9__tar.gz → 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/PKG-INFO +3 -2
  2. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/pyproject.toml +3 -2
  3. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/client/payloads.py +3 -1
  4. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/evaluations.py +3 -6
  5. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/latitude.py +7 -3
  6. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/logs.py +7 -9
  7. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/prompts.py +106 -20
  8. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/types.py +11 -83
  9. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/util/utils.py +8 -6
  10. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/evaluations/trigger_test.py +1 -2
  11. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/prompts/chat_test.py +28 -28
  12. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/prompts/get_test.py +3 -6
  13. latitude_sdk-1.0.0/tests/prompts/render_chain_test.py +156 -0
  14. latitude_sdk-1.0.0/tests/prompts/render_test.py +83 -0
  15. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/prompts/run_test.py +28 -28
  16. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/utils/fixtures.py +56 -13
  17. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/utils/utils.py +9 -6
  18. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/uv.lock +50 -5
  19. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/.gitignore +0 -0
  20. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/.python-version +0 -0
  21. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/README.md +0 -0
  22. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/scripts/format.py +0 -0
  23. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/scripts/lint.py +0 -0
  24. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/scripts/test.py +0 -0
  25. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/__init__.py +0 -0
  26. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/client/__init__.py +0 -0
  27. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/client/client.py +0 -0
  28. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/client/router.py +0 -0
  29. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/env/__init__.py +0 -0
  30. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/env/env.py +0 -0
  31. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/py.typed +0 -0
  32. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/__init__.py +0 -0
  33. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/sdk/errors.py +0 -0
  34. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/src/latitude_sdk/util/__init__.py +0 -0
  35. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/__init__.py +0 -0
  36. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/evaluations/__init__.py +0 -0
  37. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/evaluations/create_result_test.py +0 -0
  38. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/logs/__init__.py +0 -0
  39. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/logs/create_test.py +0 -0
  40. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/prompts/__init__.py +0 -0
  41. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/prompts/get_or_create_test.py +0 -0
  42. {latitude_sdk-0.1.0b9 → latitude_sdk-1.0.0}/tests/utils/__init__.py +0 -0
--- latitude_sdk-0.1.0b9/PKG-INFO
+++ latitude_sdk-1.0.0/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: latitude-sdk
-Version: 0.1.0b9
+Version: 1.0.0
 Summary: Latitude SDK for Python
 Project-URL: repository, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python
 Project-URL: homepage, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python#readme
@@ -11,7 +11,8 @@ License-Expression: LGPL-3.0
 Requires-Python: <3.13,>=3.9
 Requires-Dist: httpx-sse>=0.4.0
 Requires-Dist: httpx>=0.28.1
-Requires-Dist: latitude-telemetry>=0.1.0b6
+Requires-Dist: latitude-telemetry>=1.0.0
+Requires-Dist: promptl-ai>=1.0.1
 Requires-Dist: pydantic>=2.10.3
 Requires-Dist: typing-extensions>=4.12.2
 Description-Content-Type: text/markdown
--- latitude_sdk-0.1.0b9/pyproject.toml
+++ latitude_sdk-1.0.0/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "latitude-sdk"
-version = "0.1.0-beta.9"
+version = "1.0.0"
 description = "Latitude SDK for Python"
 authors = [{ name = "Latitude Data SL", email = "hello@latitude.so" }]
 maintainers = [{ name = "Latitude Data SL", email = "hello@latitude.so" }]
@@ -15,7 +15,8 @@ dependencies = [
     "httpx-sse>=0.4.0",
     "pydantic>=2.10.3",
     "typing-extensions>=4.12.2",
-    "latitude-telemetry>=0.1.0b6",
+    "latitude-telemetry>=1.0.0",
+    "promptl-ai>=1.0.1",
 ]

 [dependency-groups]
--- latitude_sdk-0.1.0b9/src/latitude_sdk/client/payloads.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/client/payloads.py
@@ -1,6 +1,8 @@
 from typing import Any, Dict, List, Optional, Union

-from latitude_sdk.sdk.types import DbErrorRef, Message
+from promptl_ai import Message
+
+from latitude_sdk.sdk.types import DbErrorRef
 from latitude_sdk.util import Field, Model, StrEnum

--- latitude_sdk-0.1.0b9/src/latitude_sdk/sdk/evaluations.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/sdk/evaluations.py
@@ -8,10 +8,7 @@ from latitude_sdk.client import (
     TriggerEvaluationRequestBody,
     TriggerEvaluationRequestParams,
 )
-from latitude_sdk.sdk.types import (
-    EvaluationResult,
-    SdkOptions,
-)
+from latitude_sdk.sdk.types import EvaluationResult, SdkOptions
 from latitude_sdk.util import Model


@@ -40,8 +37,8 @@ class Evaluations:
         self._options = options
         self._client = client

-    async def trigger(self, uuid: str, options: TriggerEvaluationOptions) -> TriggerEvaluationResult:
-        options = TriggerEvaluationOptions(**{**dict(self._options), **dict(options)})
+    async def trigger(self, uuid: str, options: Optional[TriggerEvaluationOptions] = None) -> TriggerEvaluationResult:
+        options = TriggerEvaluationOptions(**{**dict(self._options), **dict(options or {})})

         async with self._client.request(
             handler=RequestHandler.TriggerEvaluation,
--- latitude_sdk-0.1.0b9/src/latitude_sdk/sdk/latitude.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/sdk/latitude.py
@@ -1,6 +1,7 @@
 from typing import Optional

 from latitude_telemetry import Telemetry, TelemetryOptions
+from promptl_ai import Promptl, PromptlOptions

 from latitude_sdk.client import Client, ClientOptions, RouterOptions
 from latitude_sdk.env import env
@@ -20,6 +21,7 @@ class InternalOptions(Model):


 class LatitudeOptions(SdkOptions, Model):
+    promptl: Optional[PromptlOptions] = None
     telemetry: Optional[TelemetryOptions] = None
     internal: Optional[InternalOptions] = None

@@ -48,15 +50,16 @@ class Latitude:
     _options: LatitudeOptions
     _client: Client

+    promptl: Promptl
     telemetry: Optional[Telemetry]

     prompts: Prompts
     logs: Logs
     evaluations: Evaluations

-    def __init__(self, api_key: str, options: LatitudeOptions):
+    def __init__(self, api_key: str, options: Optional[LatitudeOptions] = None):
+        options = LatitudeOptions(**{**dict(DEFAULT_LATITUDE_OPTIONS), **dict(options or {})})
         options.internal = InternalOptions(**{**dict(DEFAULT_INTERNAL_OPTIONS), **dict(options.internal or {})})
-        options = LatitudeOptions(**{**dict(DEFAULT_LATITUDE_OPTIONS), **dict(options)})
         self._options = options

         assert self._options.internal is not None
@@ -77,9 +80,10 @@ class Latitude:
             )
         )

+        self.promptl = Promptl(self._options.promptl)
         if self._options.telemetry:
             self.telemetry = Telemetry(api_key, self._options.telemetry)

-        self.prompts = Prompts(self._client, self._options)
+        self.prompts = Prompts(self._client, self.promptl, self._options)
         self.logs = Logs(self._client, self._options)
         self.evaluations = Evaluations(self._client, self._options)
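
Note on the latitude.py changes above: the constructor's options argument is now optional, defaults are merged before the internal options, and a Promptl instance is created and shared with Prompts. A minimal usage sketch, assuming Latitude and LatitudeOptions are re-exported from the package root and using placeholder credentials:

    from latitude_sdk import Latitude, LatitudeOptions

    # Options are now optional: Latitude("my-api-key") alone is also valid in 1.0.0.
    sdk = Latitude("my-api-key", LatitudeOptions(project_id=1, version_uuid="live"))
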
--- latitude_sdk-0.1.0b9/src/latitude_sdk/sdk/logs.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/sdk/logs.py
@@ -1,13 +1,11 @@
-from typing import Any, Dict, Optional, Sequence, Union
+from typing import Optional, Sequence
+
+from promptl_ai import MessageLike
+from promptl_ai.bindings.types import _Message

 from latitude_sdk.client import Client, CreateLogRequestBody, CreateLogRequestParams, RequestHandler
 from latitude_sdk.sdk.errors import ApiError, ApiErrorCodes
-from latitude_sdk.sdk.types import (
-    Log,
-    Message,
-    SdkOptions,
-    _Message,
-)
+from latitude_sdk.sdk.types import Log, SdkOptions
 from latitude_sdk.util import Model

@@ -42,9 +40,9 @@ class Logs:
            )

     async def create(
-        self, path: str, messages: Sequence[Union[Message, Dict[str, Any]]], options: CreateLogOptions
+        self, path: str, messages: Sequence[MessageLike], options: Optional[CreateLogOptions] = None
     ) -> CreateLogResult:
-        options = CreateLogOptions(**{**dict(self._options), **dict(options)})
+        options = CreateLogOptions(**{**dict(self._options), **dict(options or {})})
         self._ensure_log_options(options)
         assert options.project_id is not None

--- latitude_sdk-0.1.0b9/src/latitude_sdk/sdk/prompts.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/sdk/prompts.py
@@ -1,6 +1,9 @@
 import asyncio
 from typing import Any, AsyncGenerator, Dict, List, Optional, Sequence, Union

+from promptl_ai import Adapter, Message, MessageLike, Promptl, ToolMessage, ToolResultContent
+from promptl_ai.bindings.types import _Message
+
 from latitude_sdk.client import (
     ChatPromptRequestBody,
     ChatPromptRequestParams,
@@ -21,21 +24,33 @@ from latitude_sdk.sdk.types import (
     ChainEventStep,
     ChainEventStepCompleted,
     FinishedEvent,
-    Message,
+    OnStep,
     OnToolCall,
     OnToolCallDetails,
     Prompt,
+    Providers,
     SdkOptions,
     StreamCallbacks,
     StreamEvents,
     StreamTypes,
-    ToolMessage,
     ToolResult,
-    ToolResultContent,
-    _Message,
 )
 from latitude_sdk.util import Model

+_PROVIDER_TO_ADAPTER = {
+    Providers.OpenAI: Adapter.OpenAI,
+    Providers.Anthropic: Adapter.Anthropic,
+}
+
+_PROMPT_ATTR_TO_ADAPTER_ATTR = {
+    "maxTokens": ("max_tokens", [Adapter.OpenAI, Adapter.Anthropic]),
+    "topP": ("top_p", [Adapter.OpenAI, Adapter.Anthropic]),
+    "topK": ("top_k", [Adapter.OpenAI, Adapter.Anthropic]),
+    "presencePenalty": ("presence_penalty", [Adapter.OpenAI, Adapter.Anthropic]),
+    "stopSequences": ("stop_sequences", [Adapter.OpenAI, Adapter.Anthropic]),
+    "toolChoice": ("tool_choice", [Adapter.OpenAI, Adapter.Anthropic]),
+}
+

 class OnToolCallPaused(Exception):
     pass
@@ -82,13 +97,34 @@ class ChatPromptResult(FinishedEvent, Model):
     pass


+class RenderPromptOptions(Model):
+    parameters: Optional[Dict[str, Any]] = None
+    adapter: Optional[Adapter] = None
+
+
+class RenderPromptResult(Model):
+    messages: List[MessageLike]
+    config: Dict[str, Any]
+
+
+class RenderChainOptions(Model):
+    parameters: Optional[Dict[str, Any]] = None
+    adapter: Optional[Adapter] = None
+
+
+class RenderChainResult(RenderPromptResult, Model):
+    pass
+
+
 class Prompts:
     _options: SdkOptions
     _client: Client
+    _promptl: Promptl

-    def __init__(self, client: Client, options: SdkOptions):
+    def __init__(self, client: Client, promptl: Promptl, options: SdkOptions):
         self._options = options
         self._client = client
+        self._promptl = promptl

     def _ensure_prompt_options(self, options: PromptOptions):
         if not options.project_id:
@@ -224,8 +260,8 @@

         return FinishedEvent(**dict(next_result)) if next_result else None

-    async def get(self, path: str, options: GetPromptOptions) -> GetPromptResult:
-        options = GetPromptOptions(**{**dict(self._options), **dict(options)})
+    async def get(self, path: str, options: Optional[GetPromptOptions] = None) -> GetPromptResult:
+        options = GetPromptOptions(**{**dict(self._options), **dict(options or {})})
         self._ensure_prompt_options(options)
         assert options.project_id is not None

@@ -239,8 +275,10 @@
         ) as response:
             return GetPromptResult.model_validate_json(response.content)

-    async def get_or_create(self, path: str, options: GetOrCreatePromptOptions) -> GetOrCreatePromptResult:
-        options = GetOrCreatePromptOptions(**{**dict(self._options), **dict(options)})
+    async def get_or_create(
+        self, path: str, options: Optional[GetOrCreatePromptOptions] = None
+    ) -> GetOrCreatePromptResult:
+        options = GetOrCreatePromptOptions(**{**dict(self._options), **dict(options or {})})
         self._ensure_prompt_options(options)
         assert options.project_id is not None

@@ -257,12 +295,12 @@
         ) as response:
             return GetOrCreatePromptResult.model_validate_json(response.content)

-    async def run(self, path: str, options: RunPromptOptions) -> Optional[RunPromptResult]:
-        try:
-            options = RunPromptOptions(**{**dict(self._options), **dict(options)})
-            self._ensure_prompt_options(options)
-            assert options.project_id is not None
+    async def run(self, path: str, options: Optional[RunPromptOptions] = None) -> Optional[RunPromptResult]:
+        options = RunPromptOptions(**{**dict(self._options), **dict(options or {})})
+        self._ensure_prompt_options(options)
+        assert options.project_id is not None

+        try:
             async with self._client.request(
                 handler=RequestHandler.RunPrompt,
                 params=RunPromptRequestParams(
@@ -311,13 +349,13 @@
         return None

     async def chat(
-        self, uuid: str, messages: Sequence[Union[Message, Dict[str, Any]]], options: ChatPromptOptions
+        self, uuid: str, messages: Sequence[MessageLike], options: Optional[ChatPromptOptions] = None
     ) -> Optional[ChatPromptResult]:
-        try:
-            options = ChatPromptOptions(**{**dict(self._options), **dict(options)})
+        options = ChatPromptOptions(**{**dict(self._options), **dict(options or {})})

-            messages = [_Message.validate_python(message) for message in messages]
+        messages = [_Message.validate_python(message) for message in messages]

+        try:
             async with self._client.request(
                 handler=RequestHandler.ChatPrompt,
                 params=ChatPromptRequestParams(
@@ -362,6 +400,54 @@

         return None

-    # TODO: render - needs PromptL in Python
+    def _adapt_prompt_config(self, config: Dict[str, Any], adapter: Adapter) -> Dict[str, Any]:
+        adapted_config: Dict[str, Any] = {}
+
+        for attr, value in config.items():
+            if attr in _PROMPT_ATTR_TO_ADAPTER_ATTR and adapter in _PROMPT_ATTR_TO_ADAPTER_ATTR[attr][1]:
+                adapted_config[_PROMPT_ATTR_TO_ADAPTER_ATTR[attr][0]] = value
+            else:
+                adapted_config[attr] = value
+
+        return adapted_config

-    # TODO: render_chain - needs PromptL in Python
+    async def render(self, prompt: str, options: Optional[RenderPromptOptions] = None) -> RenderPromptResult:
+        options = RenderPromptOptions(**{**dict(self._options), **dict(options or {})})
+        adapter = options.adapter or Adapter.OpenAI
+
+        result = self._promptl.prompts.render(
+            prompt=prompt,
+            parameters=options.parameters,
+            adapter=adapter,
+        )
+
+        return RenderPromptResult(
+            messages=result.messages,
+            config=self._adapt_prompt_config(result.config, adapter),
+        )
+
+    async def render_chain(
+        self, prompt: Prompt, on_step: OnStep, options: Optional[RenderChainOptions] = None
+    ) -> RenderChainResult:
+        options = RenderChainOptions(**{**dict(self._options), **dict(options or {})})
+        adapter = options.adapter or _PROVIDER_TO_ADAPTER.get(prompt.provider or Providers.OpenAI, Adapter.OpenAI)
+
+        chain = self._promptl.chains.create(
+            prompt=prompt.content,
+            parameters=options.parameters,
+            adapter=adapter,
+        )
+
+        step = None
+        response = None
+        while not chain.completed:
+            step = chain.step(response)
+            if not step.completed:
+                response = await on_step(step.messages, self._adapt_prompt_config(step.config, adapter))
+
+        assert step is not None
+
+        return RenderChainResult(
+            messages=step.messages,
+            config=self._adapt_prompt_config(step.config, adapter),
+        )
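
The prompts.py hunk above replaces the two render TODOs with implementations backed by promptl-ai. A hedged sketch of how the new methods might be called; the prompt source, parameters, and on_step body are illustrative placeholders, and it assumes the result of prompts.get can be passed where a Prompt is expected:

    from promptl_ai import Adapter

    # render: compile PromptL source locally into provider-shaped messages.
    rendered = await sdk.prompts.render(
        "Answer briefly: {{ question }}",
        RenderPromptOptions(parameters={"question": "What is PromptL?"}, adapter=Adapter.OpenAI),
    )

    # render_chain: step through a multi-step prompt; on_step returns each
    # step's model response (stubbed here instead of calling a real LLM).
    async def on_step(messages, config):
        return "stubbed model response"

    prompt = await sdk.prompts.get("prompt-path")
    chained = await sdk.prompts.render_chain(prompt, on_step, RenderChainOptions(parameters={}))
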
--- latitude_sdk-0.1.0b9/src/latitude_sdk/sdk/types.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/sdk/types.py
@@ -1,8 +1,10 @@
 from datetime import datetime
-from typing import Any, Callable, Dict, List, Literal, Optional, Protocol, Union, runtime_checkable
+from typing import Any, Callable, Dict, List, Literal, Optional, Protocol, Sequence, Union, runtime_checkable
+
+from promptl_ai import Message, MessageLike

 from latitude_sdk.sdk.errors import ApiError
-from latitude_sdk.util import Adapter, Field, Model, StrEnum
+from latitude_sdk.util import Field, Model, StrEnum


 class DbErrorRef(Model):
@@ -28,87 +30,6 @@ class Prompt(Model):
     provider: Optional[Providers] = None


-class ContentType(StrEnum):
-    Text = "text"
-    Image = "image"
-    File = "file"
-    ToolCall = "tool-call"
-    ToolResult = "tool-result"
-
-
-class TextContent(Model):
-    type: Literal[ContentType.Text] = ContentType.Text
-    text: str
-
-
-class ImageContent(Model):
-    type: Literal[ContentType.Image] = ContentType.Image
-    image: str
-
-
-class FileContent(Model):
-    type: Literal[ContentType.File] = ContentType.File
-    file: str
-    mime_type: str = Field(alias=str("mimeType"))
-
-
-class ToolCallContent(Model):
-    type: Literal[ContentType.ToolCall] = ContentType.ToolCall
-    id: str = Field(alias=str("toolCallId"))
-    name: str = Field(alias=str("toolName"))
-    arguments: Dict[str, Any] = Field(alias=str("args"))
-
-
-class ToolResultContent(Model):
-    type: Literal[ContentType.ToolResult] = ContentType.ToolResult
-    id: str = Field(alias=str("toolCallId"))
-    name: str = Field(alias=str("toolName"))
-    result: Any
-    is_error: Optional[bool] = Field(default=None, alias=str("isError"))
-
-
-MessageContent = Union[
-    str,
-    List[TextContent],
-    List[ImageContent],
-    List[FileContent],
-    List[ToolCallContent],
-    List[ToolResultContent],
-]
-
-
-class MessageRole(StrEnum):
-    System = "system"
-    User = "user"
-    Assistant = "assistant"
-    Tool = "tool"
-
-
-class SystemMessage(Model):
-    role: Literal[MessageRole.System] = MessageRole.System
-    content: Union[str, List[TextContent]]
-
-
-class UserMessage(Model):
-    role: Literal[MessageRole.User] = MessageRole.User
-    content: Union[str, List[Union[TextContent, ImageContent, FileContent]]]
-    name: Optional[str] = None
-
-
-class AssistantMessage(Model):
-    role: Literal[MessageRole.Assistant] = MessageRole.Assistant
-    content: Union[str, List[Union[TextContent, ToolCallContent]]]
-
-
-class ToolMessage(Model):
-    role: Literal[MessageRole.Tool] = MessageRole.Tool
-    content: List[ToolResultContent]
-
-
-Message = Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]
-_Message = Adapter(Message)
-
-
 class ModelUsage(Model):
     prompt_tokens: int = Field(alias=str("promptTokens"))
     completion_tokens: int = Field(alias=str("completionTokens"))
@@ -306,6 +227,13 @@ class OnToolCall(Protocol):
     async def __call__(self, call: ToolCall, details: OnToolCallDetails) -> ToolResult: ...


+@runtime_checkable
+class OnStep(Protocol):
+    async def __call__(
+        self, messages: List[MessageLike], config: Dict[str, Any]
+    ) -> Union[str, MessageLike, Sequence[MessageLike]]: ...
+
+
 class SdkOptions(Model):
     project_id: Optional[int] = None
     version_uuid: Optional[str] = None
--- latitude_sdk-0.1.0b9/src/latitude_sdk/util/utils.py
+++ latitude_sdk-1.0.0/src/latitude_sdk/util/utils.py
@@ -64,6 +64,10 @@ class StrEnum(str, Enum):
 Field = pydantic.Field
 Config = pydantic.ConfigDict
 Adapter = pydantic.TypeAdapter
+Aliases = pydantic.AliasChoices
+Validator = pydantic.WrapValidator
+ValidatorInfo = pydantic.ValidationInfo
+ValidatorHandler = pydantic.ValidatorFunctionWrapHandler


 class Model(pydantic.BaseModel):
@@ -76,12 +80,12 @@ class Model(pydantic.BaseModel):
     @is_like(pydantic.BaseModel.model_dump)
     def model_dump(self, *args: Any, **kwargs: Any) -> Any:
         exclude_none = kwargs.pop("exclude_none", True)
-        return super().model_dump(*args, exclude_none=exclude_none, **kwargs)
+        by_alias = kwargs.pop("by_alias", True)
+        return super().model_dump(*args, exclude_none=exclude_none, by_alias=by_alias, **kwargs)

     @is_like(pydantic.BaseModel.dict)  # pyright: ignore [reportDeprecated]
     def dict(self, *args: Any, **kwargs: Any) -> Any:
-        exclude_none = kwargs.pop("exclude_none", True)
-        return super().dict(*args, exclude_none=exclude_none, **kwargs)  # pyright: ignore [reportDeprecated]
+        raise NotImplementedError("deprecated")

     @is_like(pydantic.BaseModel.model_dump_json)
     def model_dump_json(self, *args: Any, **kwargs: Any) -> Any:
@@ -91,6 +95,4 @@ class Model(pydantic.BaseModel):

     @is_like(pydantic.BaseModel.json)  # pyright: ignore [reportDeprecated]
     def json(self, *args: Any, **kwargs: Any) -> Any:
-        exclude_none = kwargs.pop("exclude_none", True)
-        by_alias = kwargs.pop("by_alias", True)
-        return super().json(*args, exclude_none=exclude_none, by_alias=by_alias, **kwargs)  # pyright: ignore [reportDeprecated]
+        raise NotImplementedError("deprecated")
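
The utils.py hunks above flip Model.model_dump to alias-keyed output by default (alongside the existing exclude_none default) and turn the deprecated pydantic v1-style dict()/json() helpers into hard errors. A small sketch of the new behavior, using a hypothetical Example model:

    from latitude_sdk.util import Field, Model

    class Example(Model):
        some_field: int = Field(alias=str("someField"))

    Example(someField=1).model_dump()                # {"someField": 1}
    Example(someField=1).model_dump(by_alias=False)  # {"some_field": 1}
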
--- latitude_sdk-0.1.0b9/tests/evaluations/trigger_test.py
+++ latitude_sdk-1.0.0/tests/evaluations/trigger_test.py
@@ -31,14 +31,13 @@ class TestTriggerEvaluation(TestCase):

     async def test_fails(self):
         conversation_uuid = "conversation-uuid"
-        options = TriggerEvaluationOptions()
         endpoint = f"/conversations/{conversation_uuid}/evaluate"
         endpoint_mock = self.gateway_mock.post(endpoint).mock(
             return_value=httpx.Response(500, json=fixtures.ERROR_RESPONSE)
         )

         with self.assertRaisesRegex(type(fixtures.ERROR), fixtures.ERROR.message):
-            await self.sdk.evaluations.trigger(conversation_uuid, options)
+            await self.sdk.evaluations.trigger(conversation_uuid)
         requests = cast(List[httpx.Request], [request for request, _ in endpoint_mock.calls])  # type: ignore

         [
--- latitude_sdk-0.1.0b9/tests/prompts/chat_test.py
+++ latitude_sdk-1.0.0/tests/prompts/chat_test.py
@@ -50,15 +50,15 @@ class TestChatPromptSync(TestCase):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool = AsyncMock(side_effect=fixtures.CONVERSATION_TOOL_RESULTS)
-        other_tool = AsyncMock()
+        actual_tool_mock = AsyncMock(side_effect=fixtures.CONVERSATION_TOOL_RESULTS)
+        other_tool_mock = AsyncMock()
         conversation_uuid = "conversation-uuid"
         messages = self.create_conversation(4)
         options = ChatPromptOptions(
             on_event=on_event_mock,
             on_finished=on_finished_mock,
             on_error=on_error_mock,
-            tools={"calculator": actual_tool, "other_tool": other_tool},
+            tools={"calculator": actual_tool_mock, "other_tool": other_tool_mock},
             stream=False,
         )
         endpoint = re.compile(r"/conversations/(?P<uuid>[a-zA-Z0-9-]+)/chat")
@@ -99,7 +99,7 @@ class TestChatPromptSync(TestCase):
         on_error_mock.assert_not_called()
         [
             self.assertEqual(
-                actual_tool.call_args_list[index][0],
+                actual_tool_mock.call_args_list[index][0],
                 (
                     fixtures.CONVERSATION_TOOL_CALLS[index],
                     OnToolCallDetails.model_construct(
@@ -110,24 +110,24 @@ class TestChatPromptSync(TestCase):
                     ),
                 ),
             )
-            for index, _ in enumerate(actual_tool.call_args_list)
+            for index, _ in enumerate(actual_tool_mock.call_args_list)
         ]
-        self.assertEqual(actual_tool.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
-        other_tool.assert_not_awaited()
+        self.assertEqual(actual_tool_mock.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
+        other_tool_mock.assert_not_awaited()

     async def test_success_with_paused_tools(self):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool = AsyncMock(side_effect=OnToolCallPaused)
-        other_tool = AsyncMock()
+        actual_tool_mock = AsyncMock(side_effect=OnToolCallPaused)
+        other_tool_mock = AsyncMock()
         conversation_uuid = "conversation-uuid"
         messages = self.create_conversation(4)
         options = ChatPromptOptions(
             on_event=on_event_mock,
             on_finished=on_finished_mock,
             on_error=on_error_mock,
-            tools={"calculator": actual_tool, "other_tool": other_tool},
+            tools={"calculator": actual_tool_mock, "other_tool": other_tool_mock},
             stream=False,
         )
         endpoint = re.compile(r"/conversations/(?P<uuid>[a-zA-Z0-9-]+)/chat")
@@ -157,7 +157,7 @@ class TestChatPromptSync(TestCase):
         on_error_mock.assert_not_called()
         [
             self.assertEqual(
-                actual_tool.call_args_list[index][0],
+                actual_tool_mock.call_args_list[index][0],
                 (
                     fixtures.CONVERSATION_TOOL_CALLS[index],
                     OnToolCallDetails.model_construct(
@@ -168,10 +168,10 @@ class TestChatPromptSync(TestCase):
                     ),
                 ),
             )
-            for index, _ in enumerate(actual_tool.call_args_list)
+            for index, _ in enumerate(actual_tool_mock.call_args_list)
         ]
-        self.assertEqual(actual_tool.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
-        other_tool.assert_not_awaited()
+        self.assertEqual(actual_tool_mock.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
+        other_tool_mock.assert_not_awaited()

     async def test_fails_and_retries(self):
         on_event_mock = Mock()
@@ -321,15 +321,15 @@ class TestChatPromptStream(TestCase):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool = AsyncMock(side_effect=fixtures.CONVERSATION_TOOL_RESULTS)
-        other_tool = AsyncMock()
+        actual_tool_mock = AsyncMock(side_effect=fixtures.CONVERSATION_TOOL_RESULTS)
+        other_tool_mock = AsyncMock()
         conversation_uuid = "conversation-uuid"
         messages = self.create_conversation(4)
         options = ChatPromptOptions(
             on_event=on_event_mock,
             on_finished=on_finished_mock,
             on_error=on_error_mock,
-            tools={"calculator": actual_tool, "other_tool": other_tool},
+            tools={"calculator": actual_tool_mock, "other_tool": other_tool_mock},
             stream=True,
         )
         endpoint = re.compile(r"/conversations/(?P<uuid>[a-zA-Z0-9-]+)/chat")
@@ -377,7 +377,7 @@ class TestChatPromptStream(TestCase):
         on_error_mock.assert_not_called()
         [
             self.assertEqual(
-                actual_tool.call_args_list[index][0],
+                actual_tool_mock.call_args_list[index][0],
                 (
                     fixtures.CONVERSATION_TOOL_CALLS[index],
                     OnToolCallDetails.model_construct(
@@ -388,24 +388,24 @@ class TestChatPromptStream(TestCase):
                     ),
                 ),
             )
-            for index, _ in enumerate(actual_tool.call_args_list)
+            for index, _ in enumerate(actual_tool_mock.call_args_list)
         ]
-        self.assertEqual(actual_tool.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
-        other_tool.assert_not_awaited()
+        self.assertEqual(actual_tool_mock.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
+        other_tool_mock.assert_not_awaited()

     async def test_success_with_paused_tools(self):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool = AsyncMock(side_effect=OnToolCallPaused)
-        other_tool = AsyncMock()
+        actual_tool_mock = AsyncMock(side_effect=OnToolCallPaused)
+        other_tool_mock = AsyncMock()
         conversation_uuid = "conversation-uuid"
         messages = self.create_conversation(4)
         options = ChatPromptOptions(
             on_event=on_event_mock,
             on_finished=on_finished_mock,
             on_error=on_error_mock,
-            tools={"calculator": actual_tool, "other_tool": other_tool},
+            tools={"calculator": actual_tool_mock, "other_tool": other_tool_mock},
             stream=True,
         )
         endpoint = re.compile(r"/conversations/(?P<uuid>[a-zA-Z0-9-]+)/chat")
@@ -437,7 +437,7 @@ class TestChatPromptStream(TestCase):
         on_error_mock.assert_not_called()
         [
             self.assertEqual(
-                actual_tool.call_args_list[index][0],
+                actual_tool_mock.call_args_list[index][0],
                 (
                     fixtures.CONVERSATION_TOOL_CALLS[index],
                     OnToolCallDetails.model_construct(
@@ -448,10 +448,10 @@ class TestChatPromptStream(TestCase):
                     ),
                 ),
             )
-            for index, _ in enumerate(actual_tool.call_args_list)
+            for index, _ in enumerate(actual_tool_mock.call_args_list)
         ]
-        self.assertEqual(actual_tool.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
-        other_tool.assert_not_awaited()
+        self.assertEqual(actual_tool_mock.await_count, len(fixtures.CONVERSATION_TOOL_CALLS))
+        other_tool_mock.assert_not_awaited()

     async def test_fails_and_retries(self):
         on_event_mock = Mock()