latitude-sdk 1.0.0__tar.gz → 1.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/PKG-INFO +1 -1
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/pyproject.toml +1 -1
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/prompts.py +34 -9
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/types.py +3 -1
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/chat_test.py +22 -6
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/run_test.py +22 -6
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/utils/fixtures.py +4 -6
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/.gitignore +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/.python-version +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/README.md +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/scripts/format.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/scripts/lint.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/scripts/test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/client/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/client/client.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/client/payloads.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/client/router.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/env/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/env/env.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/py.typed +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/errors.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/evaluations.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/latitude.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/logs.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/util/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/util/utils.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/evaluations/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/evaluations/create_result_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/evaluations/trigger_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/logs/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/logs/create_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/get_or_create_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/get_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/render_chain_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/render_test.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/utils/__init__.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/utils/utils.py +0 -0
- {latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/uv.lock +0 -0

{latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: latitude-sdk
-Version: 1.0.0
+Version: 1.0.1
 Summary: Latitude SDK for Python
 Project-URL: repository, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python
 Project-URL: homepage, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python#readme
{latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/prompts.py

@@ -204,9 +204,26 @@ class Prompts:
         # NOTE: FinishedEvent not in on_event
         return FinishedEvent(uuid=uuid, conversation=conversation, response=response)
 
-    def _pause_tool_execution(self) -> Any:
+    @staticmethod
+    def _pause_tool_execution() -> Any:
         raise OnToolCallPaused()
 
+    @staticmethod
+    async def _wrap_tool_handler(
+        handler: OnToolCall, arguments: Dict[str, Any], details: OnToolCallDetails
+    ) -> ToolResult:
+        tool_result: Dict[str, Any] = {"id": details.id, "name": details.name}
+
+        try:
+            result = await handler(arguments, details)
+
+            return ToolResult(**tool_result, result=result)
+        except Exception as exception:
+            if isinstance(exception, OnToolCallPaused):
+                raise exception
+
+            return ToolResult(**tool_result, result=str(exception), is_error=True)
+
     async def _handle_tool_calls(
         self, result: FinishedEvent, options: Union[RunPromptOptions, ChatPromptOptions]
     ) -> Optional[FinishedEvent]:
@@ -230,15 +247,22 @@ class Prompts:
                     response=f"Tool {tool_call.name} not supplied",
                 )
 
-        details = OnToolCallDetails(
-            conversation_uuid=result.uuid,
-            messages=result.conversation,
-            pause_execution=self._pause_tool_execution,
-            requested_tool_calls=result.response.tool_calls,
-        )
-
         tool_results = await asyncio.gather(
-            *[
+            *[
+                self._wrap_tool_handler(
+                    options.tools[tool_call.name],
+                    tool_call.arguments,
+                    OnToolCallDetails(
+                        id=tool_call.id,
+                        name=tool_call.name,
+                        conversation_uuid=result.uuid,
+                        messages=result.conversation,
+                        pause_execution=self._pause_tool_execution,
+                        requested_tool_calls=result.response.tool_calls,
+                    ),
+                )
+                for tool_call in result.response.tool_calls
+            ],
             return_exceptions=False,
         )
 
@@ -403,6 +427,7 @@ class Prompts:
     def _adapt_prompt_config(self, config: Dict[str, Any], adapter: Adapter) -> Dict[str, Any]:
         adapted_config: Dict[str, Any] = {}
 
+        # NOTE: Should we delete attributes not supported by the provider?
         for attr, value in config.items():
             if attr in _PROMPT_ATTR_TO_ADAPTER_ATTR and adapter in _PROMPT_ATTR_TO_ADAPTER_ATTR[attr][1]:
                 adapted_config[_PROMPT_ATTR_TO_ADAPTER_ATTR[attr][0]] = value
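
Taken together, the prompts.py changes route every tool call through the new _wrap_tool_handler helper: a handler's return value becomes a ToolResult, any unexpected exception becomes an is_error result, and only OnToolCallPaused is re-raised. The snippet below is a standalone sketch of that behavior for illustration only; ToolResult and OnToolCallPaused here are simplified stand-ins, not the SDK's own models.

```python
# Standalone sketch of the 1.0.1 error-capture behavior. ToolResult and
# OnToolCallPaused are simplified stand-ins for the SDK's models.
import asyncio
from dataclasses import dataclass
from typing import Any, Awaitable, Callable, Dict


@dataclass
class ToolResult:
    id: str
    name: str
    result: Any = None
    is_error: bool = False


class OnToolCallPaused(Exception):
    pass


async def wrap_tool_handler(
    handler: Callable[[Dict[str, Any]], Awaitable[Any]],
    arguments: Dict[str, Any],
    call_id: str,
    name: str,
) -> ToolResult:
    try:
        # A successful handler return becomes a plain ToolResult.
        return ToolResult(id=call_id, name=name, result=await handler(arguments))
    except OnToolCallPaused:
        raise  # pause requests still propagate to the caller
    except Exception as exception:
        # Any other exception is captured as an error result instead of
        # aborting the whole tool-call batch.
        return ToolResult(id=call_id, name=name, result=str(exception), is_error=True)


async def demo() -> None:
    async def calculator(arguments: Dict[str, Any]) -> Any:
        if "expression" not in arguments:
            raise ValueError("expression is required")
        return 42  # pretend the expression was evaluated

    print(await wrap_tool_handler(calculator, {"expression": "6 * 7"}, "call-1", "calculator"))
    print(await wrap_tool_handler(calculator, {}, "call-2", "calculator"))


asyncio.run(demo())
```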
{latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/src/latitude_sdk/sdk/types.py

@@ -216,6 +216,8 @@ class StreamCallbacks(Model):
 
 
 class OnToolCallDetails(Model):
+    id: str
+    name: str
     conversation_uuid: str
     messages: List[Message]
     pause_execution: Callable[[], ToolResult]
@@ -224,7 +226,7 @@ class OnToolCallDetails(Model):
 
 @runtime_checkable
 class OnToolCall(Protocol):
-    async def __call__(self,
+    async def __call__(self, arguments: Dict[str, Any], details: OnToolCallDetails) -> Any: ...
 
 
 @runtime_checkable
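
Because of the OnToolCall protocol change above, a tool handler now receives the parsed arguments dict first and an OnToolCallDetails second (which now also carries the originating call's id and name), instead of the ToolCall object itself. Below is a hedged sketch of a conforming handler; the import path follows this package's file layout, and the RunPromptOptions(tools=...) registration mentioned in the comment mirrors how prompts.py looks handlers up by name.

```python
# Sketch of a handler matching the updated OnToolCall protocol.
from typing import Any, Dict

from latitude_sdk.sdk.types import OnToolCallDetails  # defined in src/latitude_sdk/sdk/types.py


async def calculator(arguments: Dict[str, Any], details: OnToolCallDetails) -> Any:
    # New in 1.0.1: details carries the tool call's id and name alongside the
    # existing conversation_uuid, messages, pause_execution and
    # requested_tool_calls fields.
    print(f"handling {details.name} ({details.id}) in {details.conversation_uuid}")
    return arguments["a"] + arguments["b"]


# Registration stays a name -> handler mapping, e.g.
#   RunPromptOptions(tools={"calculator": calculator}, ...)
# Raising from the handler now yields a ToolResult with is_error=True instead
# of failing the run (see _wrap_tool_handler in prompts.py above).
```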
{latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/chat_test.py

@@ -50,7 +50,11 @@ class TestChatPromptSync(TestCase):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool_mock = AsyncMock(
+        actual_tool_mock = AsyncMock(
+            side_effect=[
+                r.result if not r.is_error else Exception(r.result) for r in fixtures.CONVERSATION_TOOL_RESULTS
+            ]
+        )
         other_tool_mock = AsyncMock()
         conversation_uuid = "conversation-uuid"
         messages = self.create_conversation(4)

@@ -101,8 +105,10 @@ class TestChatPromptSync(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,

@@ -159,8 +165,10 @@ class TestChatPromptSync(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,

@@ -321,7 +329,11 @@ class TestChatPromptStream(TestCase):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool_mock = AsyncMock(
+        actual_tool_mock = AsyncMock(
+            side_effect=[
+                r.result if not r.is_error else Exception(r.result) for r in fixtures.CONVERSATION_TOOL_RESULTS
+            ]
+        )
         other_tool_mock = AsyncMock()
         conversation_uuid = "conversation-uuid"
         messages = self.create_conversation(4)

@@ -379,8 +391,10 @@ class TestChatPromptStream(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,

@@ -439,8 +453,10 @@ class TestChatPromptStream(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,
{latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/prompts/run_test.py

@@ -131,7 +131,11 @@ class TestRunPromptSync(TestCase):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool_mock = AsyncMock(
+        actual_tool_mock = AsyncMock(
+            side_effect=[
+                r.result if not r.is_error else Exception(r.result) for r in fixtures.CONVERSATION_TOOL_RESULTS
+            ]
+        )
         other_tool_mock = AsyncMock()
         path = "prompt-path"
         options = RunPromptOptions(

@@ -188,8 +192,10 @@ class TestRunPromptSync(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,

@@ -251,8 +257,10 @@ class TestRunPromptSync(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,

@@ -508,7 +516,11 @@ class TestRunPromptStream(TestCase):
         on_event_mock = Mock()
         on_finished_mock = Mock()
         on_error_mock = Mock()
-        actual_tool_mock = AsyncMock(
+        actual_tool_mock = AsyncMock(
+            side_effect=[
+                r.result if not r.is_error else Exception(r.result) for r in fixtures.CONVERSATION_TOOL_RESULTS
+            ]
+        )
         other_tool_mock = AsyncMock()
         path = "prompt-path"
         options = RunPromptOptions(

@@ -572,8 +584,10 @@ class TestRunPromptStream(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,

@@ -637,8 +651,10 @@ class TestRunPromptStream(TestCase):
             self.assertEqual(
                 actual_tool_mock.call_args_list[index][0],
                 (
-                    fixtures.CONVERSATION_TOOL_CALLS[index],
+                    fixtures.CONVERSATION_TOOL_CALLS[index].arguments,
                     OnToolCallDetails.model_construct(
+                        id=fixtures.CONVERSATION_TOOL_CALLS[index].id,
+                        name=fixtures.CONVERSATION_TOOL_CALLS[index].name,
                         conversation_uuid=fixtures.CONVERSATION_FINISHED_EVENT.uuid,
                         messages=fixtures.CONVERSATION_FINISHED_EVENT.conversation,
                         pause_execution=mock.ANY,
{latitude_sdk-1.0.0 → latitude_sdk-1.0.1}/tests/utils/fixtures.py

@@ -769,13 +769,12 @@ CONVERSATION_TOOL_RESULTS = [
     ToolResult(
         id="toolu_01ARatRfRidTDshkg1UuQhW2",
         name="calculator",
-        result=
-        is_error=False,
+        result=True,
     ),
     ToolResult(
         id="toolu_B0398l23AOdTDshkg1UuQhZ3",
         name="calculator",
-        result=
+        result="Expression is invalid",
         is_error=True,
     ),
 ]

@@ -786,8 +785,7 @@ CONVERSATION_TOOL_RESULTS_MESSAGES: List[Message] = [
             ToolResultContent(
                 id="toolu_01ARatRfRidTDshkg1UuQhW2",
                 name="calculator",
-                result=
-                is_error=False,
+                result=True,
             ),
         ],
     ),

@@ -796,7 +794,7 @@ CONVERSATION_TOOL_RESULTS_MESSAGES: List[Message] = [
             ToolResultContent(
                 id="toolu_B0398l23AOdTDshkg1UuQhZ3",
                 name="calculator",
-                result=
+                result="Expression is invalid",
                 is_error=True,
             ),
         ],