latitude-sdk 0.1.0b1__tar.gz → 0.1.0b3__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/PKG-INFO +2 -2
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/README.md +1 -1
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/pyproject.toml +1 -1
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/client/payloads.py +3 -3
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/latitude.py +1 -1
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/types.py +10 -10
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/uv.lock +1 -1
- latitude_sdk-0.1.0b1/examples/all.py +0 -120
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/.gitignore +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/.python-version +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/scripts/format.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/scripts/lint.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/scripts/test.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/__init__.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/client/__init__.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/client/client.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/client/router.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/env/__init__.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/env/env.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/py.typed +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/__init__.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/errors.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/evaluations.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/logs.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/sdk/prompts.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/util/__init__.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/src/latitude_sdk/util/utils.py +0 -0
- {latitude_sdk-0.1.0b1 → latitude_sdk-0.1.0b3}/tests/prompts/get_test.py +0 -0
````diff
--- latitude_sdk-0.1.0b1/PKG-INFO
+++ latitude_sdk-0.1.0b3/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: latitude-sdk
-Version: 0.1.0b1
+Version: 0.1.0b3
 Summary: Latitude SDK for Python
 Project-URL: repository, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python
 Project-URL: homepage, https://github.com/latitude-dev/latitude-llm/tree/main/packages/sdks/python#readme
@@ -44,7 +44,7 @@ await sdk.prompts.run("joke-teller", RunPromptOptions(
 ))
 ```
 
-Find more examples
+Find more [examples](https://github.com/latitude-dev/latitude-llm/tree/main/examples/sdks/python).
 
 ## Development
 
````
````diff
--- latitude_sdk-0.1.0b1/README.md
+++ latitude_sdk-0.1.0b3/README.md
@@ -27,7 +27,7 @@ await sdk.prompts.run("joke-teller", RunPromptOptions(
 ))
 ```
 
-Find more examples
+Find more [examples](https://github.com/latitude-dev/latitude-llm/tree/main/examples/sdks/python).
 
 ## Development
 
````
```diff
--- latitude_sdk-0.1.0b1/pyproject.toml
+++ latitude_sdk-0.1.0b3/pyproject.toml
@@ -1,6 +1,6 @@
 [project]
 name = "latitude-sdk"
-version = "0.1.0-beta.1"
+version = "0.1.0-beta.3"
 description = "Latitude SDK for Python"
 authors = [{ name = "Latitude Data SL", email = "hello@latitude.so" }]
 maintainers = [{ name = "Latitude Data SL", email = "hello@latitude.so" }]
```
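The version string in pyproject.toml ("0.1.0-beta.3") and the one in PKG-INFO ("0.1.0b3") are the same version in two spellings: build tooling normalizes pre-release segments to the canonical PEP 440 form. A quick illustration using the third-party `packaging` library (not a dependency of this package, just a way to see the normalization):

```python
from packaging.version import Version

# PEP 440 normalization: the "-beta." spelling collapses to the canonical "b" form.
print(Version("0.1.0-beta.3"))                          # -> 0.1.0b3
print(Version("0.1.0-beta.3") == Version("0.1.0b3"))    # -> True
```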
```diff
--- latitude_sdk-0.1.0b1/src/latitude_sdk/client/payloads.py
+++ latitude_sdk-0.1.0b3/src/latitude_sdk/client/payloads.py
@@ -9,7 +9,7 @@ class ErrorResponse(Model):
     code: str = Field(alias=str("errorCode"))
     message: str
     details: Dict[str, Any]
-    db_ref: Optional[DbErrorRef] = Field(None, alias=str("dbErrorRef"))
+    db_ref: Optional[DbErrorRef] = Field(default=None, alias=str("dbErrorRef"))
 
 
 class PromptRequestParams(Model):
@@ -37,7 +37,7 @@ class RunPromptRequestParams(PromptRequestParams, Model):
 class RunPromptRequestBody(Model):
     path: str
     parameters: Optional[Dict[str, Any]] = None
-    custom_identifier: Optional[str] = Field(None, alias=str("customIdentifier"))
+    custom_identifier: Optional[str] = Field(default=None, alias=str("customIdentifier"))
     stream: Optional[bool] = None
 
 
@@ -74,7 +74,7 @@ class TriggerEvaluationRequestParams(EvaluationRequestParams, Model):
 
 
 class TriggerEvaluationRequestBody(Model):
-    evaluation_uuids: Optional[List[str]] = Field(None, alias=str("evaluationUuids"))
+    evaluation_uuids: Optional[List[str]] = Field(default=None, alias=str("evaluationUuids"))
 
 
 class CreateEvaluationResultRequestParams(EvaluationRequestParams, Model):
```
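All three payload changes are the same mechanical edit: the default that 0.1.0b1 passed to pydantic's `Field` positionally is now passed as an explicit `default=` keyword. A minimal sketch of the pattern, using plain `pydantic.BaseModel` rather than the SDK's `Model` base class, with a hypothetical `DbErrorRef` stand-in:

```python
from typing import Optional
from pydantic import BaseModel, Field


class DbErrorRef(BaseModel):  # hypothetical stand-in, not the SDK's actual model
    entity_uuid: str
    entity_type: str


class ErrorResponse(BaseModel):
    code: str = Field(alias="errorCode")
    message: str
    # Explicit keyword default; at runtime this is equivalent to Field(None, alias=...),
    # but it is unambiguous and friendlier to strict type checkers.
    db_ref: Optional[DbErrorRef] = Field(default=None, alias="dbErrorRef")


# Aliased keys in the API payload populate the snake_case fields; db_ref falls back to None.
resp = ErrorResponse.model_validate({"errorCode": "not_found", "message": "Prompt not found"})
print(resp.db_ref)  # -> None
```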
```diff
--- latitude_sdk-0.1.0b1/src/latitude_sdk/sdk/latitude.py
+++ latitude_sdk-0.1.0b3/src/latitude_sdk/sdk/latitude.py
@@ -46,7 +46,7 @@ class Latitude:
     logs: Logs
     evaluations: Evaluations
 
-    def __init__(self, api_key: str, options: LatitudeOptions
+    def __init__(self, api_key: str, options: LatitudeOptions):
         options.internal = options.internal or DEFAULT_INTERNAL_OPTIONS
         options.internal = InternalOptions(**{**dict(DEFAULT_INTERNAL_OPTIONS), **dict(options.internal)})
         options = LatitudeOptions(**{**dict(DEFAULT_LATITUDE_OPTIONS), **dict(options)})
```
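The `__init__` shown above fills in missing options by converting both the defaults and the caller's options to dicts and re-validating the merge. A self-contained sketch of that merge pattern with a toy options model (field names here are illustrative, not the SDK's actual defaults):

```python
from typing import Optional
from pydantic import BaseModel


class InternalOptions(BaseModel):  # toy stand-in
    retries: int = 3
    timeout: Optional[float] = None


DEFAULT_INTERNAL_OPTIONS = InternalOptions(retries=3, timeout=30.0)

# dict(model) iterates a pydantic model's (field, value) pairs, so in the merged
# dict the caller's values are applied last and win over the defaults.
user = InternalOptions(retries=5)
merged = InternalOptions(**{**dict(DEFAULT_INTERNAL_OPTIONS), **dict(user)})
print(merged)  # -> retries=5 timeout=None
```

Because `dict(model)` includes every field, an optional field the caller left unset still overrides the default with `None`, which mirrors what the merge in the diff above would do.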
```diff
--- latitude_sdk-0.1.0b1/src/latitude_sdk/sdk/types.py
+++ latitude_sdk-0.1.0b3/src/latitude_sdk/sdk/types.py
@@ -64,7 +64,7 @@ class ToolResultContent(Model):
     tool_call_id: str = Field(alias=str("toolCallId"))
     tool_name: str = Field(alias=str("toolName"))
     result: str
-    is_error: Optional[bool] = Field(None, alias=str("isError"))
+    is_error: Optional[bool] = Field(default=None, alias=str("isError"))
 
 
 MessageContent = Union[
@@ -91,7 +91,7 @@ class SystemMessage(Model):
 
 class UserMessage(Model):
     role: MessageRole = MessageRole.User
-    content: Union[str, List[TextContent
+    content: Union[str, List[Union[TextContent, ImageContent, FileContent]]]
     name: Optional[str] = None
 
 
@@ -103,8 +103,8 @@ class ToolCall(Model):
 
 class AssistantMessage(Model):
     role: MessageRole = MessageRole.Assistant
-    content: Union[str, List[TextContent
-    tool_calls: Optional[List[ToolCall]] = Field(None, alias=str("toolCalls"))
+    content: Union[str, List[Union[TextContent, ToolCallContent]]]
+    tool_calls: Optional[List[ToolCall]] = Field(default=None, alias=str("toolCalls"))
 
 
 class ToolMessage(Model):
@@ -129,7 +129,7 @@ class StreamTypes(StrEnum):
 class ChainTextResponse(Model):
     type: StreamTypes = StreamTypes.Text
     text: str
-    tool_calls: Optional[List[ToolCall]] = Field(None, alias=str("toolCalls"))
+    tool_calls: Optional[List[ToolCall]] = Field(default=None, alias=str("toolCalls"))
     usage: ModelUsage
 
 
@@ -228,7 +228,7 @@ class Log(Model):
     resolved_content: str = Field(alias=str("resolvedContent"))
     content_hash: str = Field(alias=str("contentHash"))
     parameters: Dict[str, Any]
-    custom_identifier: Optional[str] = Field(None, alias=str("customIdentifier"))
+    custom_identifier: Optional[str] = Field(default=None, alias=str("customIdentifier"))
     duration: Optional[int] = None
     created_at: datetime = Field(alias=str("createdAt"))
     updated_at: datetime = Field(alias=str("updatedAt"))
@@ -245,10 +245,10 @@ class EvaluationResult(Model):
     uuid: str
     evaluation_id: int = Field(alias=str("evaluationId"))
     document_log_id: int = Field(alias=str("documentLogId"))
-    evaluated_provider_log_id: Optional[int] = Field(None, alias=str("evaluatedProviderLogId"))
-    evaluation_provider_log_id: Optional[int] = Field(None, alias=str("evaluationProviderLogId"))
-    resultable_type: Optional[EvaluationResultType] = Field(None, alias=str("resultableType"))
-    resultable_id: Optional[int] = Field(None, alias=str("resultableId"))
+    evaluated_provider_log_id: Optional[int] = Field(default=None, alias=str("evaluatedProviderLogId"))
+    evaluation_provider_log_id: Optional[int] = Field(default=None, alias=str("evaluationProviderLogId"))
+    resultable_type: Optional[EvaluationResultType] = Field(default=None, alias=str("resultableType"))
+    resultable_id: Optional[int] = Field(default=None, alias=str("resultableId"))
     result: Optional[Union[str, bool, int]] = None
     source: Optional[LogSources] = None
     reason: Optional[str] = None
```
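Besides the `default=` keyword edits, this file widens the message `content` unions so a single message can carry a list of mixed content parts rather than text parts only. A toy pydantic sketch of the widened shape (the content classes below are hypothetical stand-ins, not the SDK's real `TextContent`/`ImageContent`/`FileContent` models):

```python
from typing import List, Union
from pydantic import BaseModel


class TextContent(BaseModel):   # hypothetical stand-in
    type: str = "text"
    text: str


class ImageContent(BaseModel):  # hypothetical stand-in
    type: str = "image"
    image: str  # e.g. a URL or base64 payload


class UserMessage(BaseModel):
    role: str = "user"
    # Either a plain string, or a list whose items may be any allowed part type.
    content: Union[str, List[Union[TextContent, ImageContent]]]


# Both forms validate against the widened union:
UserMessage(content="hello")
UserMessage(content=[TextContent(text="hello"), ImageContent(image="https://example.com/cat.png")])
```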
```diff
--- latitude_sdk-0.1.0b1/examples/all.py
+++ /dev/null
@@ -1,120 +0,0 @@
-import asyncio
-from pprint import pp # type: ignore
-
-from latitude_sdk import (
-    ApiError,
-    ChatPromptOptions,
-    CreateEvaluationResultOptions,
-    CreateLogOptions,
-    GatewayOptions,
-    GetOrCreatePromptOptions,
-    GetPromptOptions,
-    InternalOptions,
-    Latitude,
-    LatitudeOptions,
-    RunPromptOptions,
-    TextContent,
-    TriggerEvaluationOptions,
-    UserMessage,
-)
-
-
-# TODO Move to root/examples when latitude-sdk is published
-async def main():
-    sdk = Latitude(
-        api_key="6f67407c-da6c-4a4d-9615-a3eb59e51d29",
-        options=LatitudeOptions(
-            project_id=3,
-            version_uuid="57502e00-20c2-4411-8b4b-44bc9008079e",
-            internal=InternalOptions(gateway=GatewayOptions(host="localhost", port=8787, ssl=False, api_version="v2")),
-        ),
-    )
-
-    try:
-        print("Getting prompt...")
-        get_prompt_result = await sdk.prompts.get("prompt", GetPromptOptions())
-        pp(get_prompt_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-        print("Getting or creating prompt...")
-        get_or_create_prompt_result = await sdk.prompts.get_or_create("prompt3", GetOrCreatePromptOptions())
-        pp(get_or_create_prompt_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-        print("Running prompt...")
-        run_prompt_result = await sdk.prompts.run(
-            "prompt",
-            RunPromptOptions(
-                on_event=lambda event: print(event, "\n" * 2),
-                on_finished=lambda event: print(event, "\n" * 2),
-                on_error=lambda error: print(error, "\n" * 2),
-                custom_identifier="custom!",
-                parameters={"topic": "Python"},
-                stream=True,
-            ),
-        )
-        assert run_prompt_result is not None
-        pp(run_prompt_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-        print("Chat prompt...")
-        chat_prompt_result = await sdk.prompts.chat(
-            run_prompt_result.uuid,
-            [
-                UserMessage(content=[TextContent(text="Hello, how are you?")]),
-                UserMessage(content="I'm fine btw"),
-            ],
-            ChatPromptOptions(
-                on_event=lambda event: print(event, "\n" * 2),
-                on_finished=lambda event: print(event, "\n" * 2),
-                on_error=lambda error: print(error, "\n" * 2),
-                stream=True,
-            ),
-        )
-        assert chat_prompt_result is not None
-        pp(chat_prompt_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-        print("Create log...")
-        create_log_result = await sdk.logs.create(
-            "prompt",
-            [
-                UserMessage(content=[TextContent(text="Hello, how are you?")]),
-                UserMessage(content=[TextContent(text="I'm fine btw")]),
-            ],
-            CreateLogOptions(),
-        )
-        pp(create_log_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-        print("Trigger evaluation...")
-        trigger_evaluation_result = await sdk.evaluations.trigger(
-            chat_prompt_result.uuid,
-            TriggerEvaluationOptions(evaluation_uuids=["46d29f2d-7086-44b8-9220-af1dea1e3692"]),
-        )
-        pp(trigger_evaluation_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-        print("Create evaluation result...")
-        create_evaluation_result_result = await sdk.evaluations.create_result(
-            chat_prompt_result.uuid,
-            "d7a04129-9df8-4047-ba93-6349029a1000",
-            CreateEvaluationResultOptions(result="I like it!", reason="Because I like it!"),
-        )
-        pp(create_evaluation_result_result.model_dump())
-        print("\n" * 2)
-        print("-" * 100)
-
-    except ApiError as error:
-        pp(error.__dict__)
-    except Exception as e:
-        raise e
-
-
-asyncio.run(main())
```