lmnr 0.3.6__py3-none-any.whl → 0.4.0__py3-none-any.whl
- lmnr/__init__.py +2 -6
- lmnr/sdk/decorators.py +55 -264
- lmnr/sdk/laminar.py +380 -0
- lmnr/sdk/log.py +39 -0
- lmnr/sdk/utils.py +10 -11
- lmnr-0.4.0.dist-info/METADATA +151 -0
- lmnr-0.4.0.dist-info/RECORD +12 -0
- lmnr/sdk/client.py +0 -161
- lmnr/sdk/collector.py +0 -177
- lmnr/sdk/constants.py +0 -1
- lmnr/sdk/context.py +0 -464
- lmnr/sdk/interface.py +0 -308
- lmnr/sdk/providers/__init__.py +0 -2
- lmnr/sdk/providers/base.py +0 -28
- lmnr/sdk/providers/fallback.py +0 -154
- lmnr/sdk/providers/openai.py +0 -159
- lmnr/sdk/providers/utils.py +0 -33
- lmnr/sdk/tracing_types.py +0 -194
- lmnr/semantic_conventions/__init__.py +0 -0
- lmnr/semantic_conventions/gen_ai_spans.py +0 -48
- lmnr-0.3.6.dist-info/METADATA +0 -257
- lmnr-0.3.6.dist-info/RECORD +0 -23
- {lmnr-0.3.6.dist-info → lmnr-0.4.0.dist-info}/LICENSE +0 -0
- {lmnr-0.3.6.dist-info → lmnr-0.4.0.dist-info}/WHEEL +0 -0
- {lmnr-0.3.6.dist-info → lmnr-0.4.0.dist-info}/entry_points.txt +0 -0
lmnr/sdk/tracing_types.py
DELETED
@@ -1,194 +0,0 @@
```python
from typing import Any, Literal, Optional, Union
import datetime
import pydantic
import uuid

from .constants import CURRENT_TRACING_VERSION
from .utils import to_dict


class EvaluateEvent(pydantic.BaseModel):
    name: str
    data: str
    timestamp: Optional[datetime.datetime] = None


class Span(pydantic.BaseModel):
    version: str = CURRENT_TRACING_VERSION
    spanType: Literal["DEFAULT", "LLM"] = "DEFAULT"
    id: uuid.UUID
    traceId: uuid.UUID
    parentSpanId: Optional[uuid.UUID] = None
    name: str
    # generated at start of span, so required
    startTime: datetime.datetime
    # generated at end of span, optional when span is still running
    endTime: Optional[datetime.datetime] = None
    attributes: dict[str, Any] = {}
    input: Optional[Any] = None
    output: Optional[Any] = None
    metadata: Optional[dict[str, Any]] = None
    evaluateEvents: list[EvaluateEvent] = []
    events: list["Event"] = None

    def __init__(
        self,
        name: str,
        trace_id: uuid.UUID,
        start_time: Optional[datetime.datetime] = None,
        version: str = CURRENT_TRACING_VERSION,
        span_type: Literal["DEFAULT", "LLM"] = "DEFAULT",
        id: Optional[uuid.UUID] = None,
        parent_span_id: Optional[uuid.UUID] = None,
        input: Optional[Any] = None,
        metadata: Optional[dict[str, Any]] = {},
        attributes: Optional[dict[str, Any]] = {},
        evaluate_events: list[EvaluateEvent] = [],
    ):
        super().__init__(
            version=version,
            spanType=span_type,
            id=id or uuid.uuid4(),
            traceId=trace_id,
            parentSpanId=parent_span_id,
            name=name,
            startTime=start_time or datetime.datetime.now(datetime.timezone.utc),
            input=input,
            metadata=metadata or {},
            attributes=attributes or {},
            evaluateEvents=evaluate_events,
            events=[],
        )

    def update(
        self,
        end_time: Optional[datetime.datetime],
        input: Optional[Any] = None,
        output: Optional[Any] = None,
        metadata: Optional[dict[str, Any]] = None,
        attributes: Optional[dict[str, Any]] = None,
        evaluate_events: Optional[list[EvaluateEvent]] = None,
        override: bool = False,
    ):
        self.endTime = end_time or datetime.datetime.now(datetime.timezone.utc)
        self.input = input
        self.output = output
        new_metadata = (
            metadata if override else {**(self.metadata or {}), **(metadata or {})}
        )
        new_attributes = (
            attributes or {}
            if override
            else {**(self.attributes or {}), **(attributes or {})}
        )
        new_evaluate_events = (
            evaluate_events or []
            if override
            else self.evaluateEvents + (evaluate_events or [])
        )
        self.metadata = new_metadata
        self.attributes = new_attributes
        self.evaluateEvents = new_evaluate_events

    def add_event(self, event: "Event"):
        self.events.append(event)

    def to_dict(self) -> dict[str, Any]:
        try:
            obj = self.model_dump()
        except TypeError:
            # if inner values are pydantic models, we need to call model_dump on them
            # see: https://github.com/pydantic/pydantic/issues/7713
            obj = {}
            for key, value in self.__dict__.items():
                obj[key] = (
                    value.model_dump()
                    if isinstance(value, pydantic.BaseModel)
                    else value
                )
        obj = to_dict(obj)
        return obj


class Trace(pydantic.BaseModel):
    id: uuid.UUID
    version: str = CURRENT_TRACING_VERSION
    success: bool = True
    userId: Optional[str] = None  # provided by user or null
    sessionId: Optional[str] = None  # provided by user or uuid()
    release: Optional[str] = None
    metadata: Optional[dict[str, Any]] = None

    def __init__(
        self,
        success: bool = True,
        id: Optional[uuid.UUID] = None,
        user_id: Optional[str] = None,
        session_id: Optional[str] = None,
        release: Optional[str] = None,
        metadata: Optional[dict[str, Any]] = None,
    ):
        id_ = id or uuid.uuid4()
        super().__init__(
            id=id_,
            success=success,
            userId=user_id,
            sessionId=session_id,
            release=release,
            metadata=metadata or {},
        )

    def to_dict(self) -> dict[str, Any]:
        try:
            obj = self.model_dump()
        except TypeError:
            # if inner values are pydantic models, we need to call model_dump on them
            # see: https://github.com/pydantic/pydantic/issues/7713
            obj = {}
            for key, value in self.__dict__.items():
                obj[key] = (
                    value.model_dump()
                    if isinstance(value, pydantic.BaseModel)
                    else value
                )
        obj = to_dict(obj)
        return obj


class Event(pydantic.BaseModel):
    id: uuid.UUID
    templateName: str
    timestamp: datetime.datetime
    spanId: uuid.UUID
    value: Optional[Union[int, str, float, bool]] = None

    def __init__(
        self,
        name: str,
        span_id: uuid.UUID,
        timestamp: Optional[datetime.datetime] = None,
        value: Optional[Union[int, str, float, bool]] = None,
    ):
        super().__init__(
            id=uuid.uuid4(),
            templateName=name,
            spanId=span_id,
            timestamp=timestamp or datetime.datetime.now(datetime.timezone.utc),
            value=value,
        )

    def to_dict(self) -> dict[str, Any]:
        try:
            obj = self.model_dump()
        except TypeError:
            # if inner values are pydantic models, we need to call model_dump on them
            # see: https://github.com/pydantic/pydantic/issues/7713
            obj = {}
            for key, value in self.__dict__.items():
                obj[key] = (
                    value.model_dump()
                    if isinstance(value, pydantic.BaseModel)
                    else value
                )
        obj = to_dict(obj)
        return obj
```
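For reference, a minimal sketch of how these (now removed) models fit together; the names and values below are illustrative only, and the upload path is an assumption:

```python
import uuid

# assumes the 0.3.6 layout: from lmnr.sdk.tracing_types import Span, Event
span = Span(name="poem_writer", trace_id=uuid.uuid4(), input={"topic": "turbulence"})
span.add_event(Event(name="topic_alignment", span_id=span.id, value="good"))

# update() overwrites input/output with exactly what is passed, and merges
# metadata/attributes unless override=True; end_time=None stamps "now" in UTC
span.update(end_time=None, input={"topic": "turbulence"}, output="a poem...")

payload = span.to_dict()  # JSON-friendly dict, presumably what the old client uploaded
```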
lmnr/semantic_conventions/__init__.py
File without changes

lmnr/semantic_conventions/gen_ai_spans.py
DELETED
@@ -1,48 +0,0 @@
```python
# source: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/gen-ai/gen-ai-spans.md
# last updated: 2024-08-26

REQUEST_MODEL: str = "gen_ai.request.model"
RESPONSE_MODEL: str = "gen_ai.response.model"
PROVIDER: str = "gen_ai.system"
INPUT_TOKEN_COUNT: str = "gen_ai.usage.input_tokens"
OUTPUT_TOKEN_COUNT: str = "gen_ai.usage.output_tokens"
TOTAL_TOKEN_COUNT: str = "gen_ai.usage.total_tokens"  # custom, not in the spec
# https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/semcov/__init__.py
COST: str = "gen_ai.usage.cost"

OPERATION: str = "gen_ai.operation.name"

FREQUENCY_PENALTY: str = "gen_ai.request.frequency_penalty"
TEMPERATURE: str = "gen_ai.request.temperature"
MAX_TOKENS: str = "gen_ai.request.max_tokens"
PRESENCE_PENALTY: str = "gen_ai.request.presence_penalty"
STOP_SEQUENCES: str = "gen_ai.request.stop_sequences"
TEMPERATURE: str = "gen_ai.request.temperature"  # duplicate in the published file
TOP_P: str = "gen_ai.request.top_p"
TOP_K: str = "gen_ai.request.top_k"

# https://github.com/openlit/openlit/blob/main/sdk/python/src/openlit/semcov/__init__.py
STREAM: str = "gen_ai.request.is_stream"

FINISH_REASONS = "gen_ai.response.finish_reasons"

__all__ = [
    "REQUEST_MODEL",
    "RESPONSE_MODEL",
    "PROVIDER",
    "INPUT_TOKEN_COUNT",
    "OUTPUT_TOKEN_COUNT",
    "TOTAL_TOKEN_COUNT",
    "COST",
    "OPERATION",
    "FREQUENCY_PENALTY",
    "TEMPERATURE",
    "MAX_TOKENS",
    "PRESENCE_PENALTY",
    "STOP_SEQUENCES",
    "TEMPERATURE",  # duplicate in the published file
    "TOP_P",
    "TOP_K",
    "STREAM",
    "FINISH_REASONS",
]
```
lmnr-0.3.6.dist-info/METADATA
DELETED
@@ -1,257 +0,0 @@
Metadata-Version: 2.1
Name: lmnr
Version: 0.3.6
Summary: Python SDK for Laminar AI
License: Apache-2.0
Author: lmnr.ai
Requires-Python: >=3.9,<4.0
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Dist: backoff (>=2.2.1,<3.0.0)
Requires-Dist: openai (>=1.41.1,<2.0.0)
Requires-Dist: pydantic (>=2.7.4,<3.0.0)
Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
Requires-Dist: requests (>=2.32.3,<3.0.0)
Description-Content-Type: text/markdown

# Laminar AI

This repo provides the core for code generation, the Laminar CLI, and the Laminar SDK.

## Quickstart

```sh
python3 -m venv .myenv
source .myenv/bin/activate  # or use your favorite env management tool

pip install lmnr
```

Create a `.env` file at the root and add the `LMNR_PROJECT_API_KEY` value to it.

Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `LMNR_PROJECT_API_KEY`.

## Sending events

You can send events in two ways:
- `.event(name, value)` – for a pre-defined event with one of its possible values.
- `.evaluate_event(name, data)` – for an event that our agent checks for and assigns a value from the possible values.

There are 3 types of events:
- Number – a numeric value.
- String – an arbitrary string.
- Boolean – convenient for classifying whether or not something has taken place.

Important notes:
- If the event name does not match anything pre-defined in the UI, the event won't be saved.
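For illustration, a minimal sketch of both calls. It assumes you are inside a function instrumented with `@observe()` (see the decorator example below); the `line_count` name is hypothetical, and every name must match an event pre-defined in the UI:

```python
from lmnr import lmnr_context

# String event: a pre-defined name with one of its possible values
lmnr_context.event("topic_alignment", "good")

# Number event (hypothetical name)
lmnr_context.event("line_count", 14)

# hand the data to Laminar's agent, which checks for the event
# and assigns a value from the possible values automatically
lmnr_context.evaluate_event("excessive_wordiness", "the model output to check...")
```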
## Instrumentation

We provide two ways to instrument your Python code:
- With `@observe()` decorators and `wrap_llm_call` helpers
- Manually

It is important not to mix the two styles of instrumentation; doing so can lead to unpredictable results.

## Decorator instrumentation example

For easy automatic instrumentation, we provide two simple primitives:

- `observe` - a multi-purpose automatic decorator that starts traces and spans when functions are entered, and finishes them when functions return
- `wrap_llm_call` - a function that takes in your LLM call and returns a "decorated" version of it. This does all the same things as `observe`, plus a few utilities around LLM-specific things, such as counting tokens and recording model params.

You can also import `lmnr_context` to interact with, and take more control over, the context of the current span.

```python
import os
from openai import OpenAI

from lmnr import observe, wrap_llm_call, lmnr_context
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

@observe()  # annotate all functions you want to trace
def poem_writer(topic="turbulence"):
    prompt = f"write a poem about {topic}"

    # wrap the actual final call to LLM with `wrap_llm_call`
    response = wrap_llm_call(client.chat.completions.create)(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a helpful assistant."},
            {"role": "user", "content": prompt},
        ],
    )

    poem = response.choices[0].message.content

    if topic in poem:
        # send an event with a pre-defined name
        lmnr_context.event("topic_alignment", "good")

    # to trigger an automatic check for a possible event do:
    lmnr_context.evaluate_event("excessive_wordiness", poem)

    return poem

if __name__ == "__main__":
    print(poem_writer(topic="laminar flow"))
```

This gives the advantage of quick instrumentation, but it is somewhat limited in flexibility and doesn't work as expected with threading. This is because we use `contextvars.ContextVar` under the hood, and Python does not automatically carry context variables across threads.

If you want to instrument your code manually, read on to the next section.

## Manual instrumentation example

For manual instrumentation you will need to import the following:
- `trace` - a function to start a trace. It returns a `TraceContext`.
- `TraceContext` - a pointer to the current trace that you can pass around functions as you want.
- `SpanContext` - a pointer to the current span that you can pass around functions as you want.

Both `TraceContext` and `SpanContext` expose the following interfaces:
- `span(name: str, **kwargs)` - create a child span within the current context. Returns `SpanContext`.
- `update(**kwargs)` - update the current trace or span and return it. Returns `TraceContext` or `SpanContext`. Useful when some metadata becomes known later during program execution.

In addition, `SpanContext` allows you to:
- `event(name: str, value: str | int)` - emit a custom event at any point.
- `evaluate_event(name: str, data: str)` - register a possible event for automatic checking by Laminar.
- `end(**kwargs)` – update the current span, and terminate it.

Example:

```python
import os
from openai import OpenAI

from lmnr import trace, TraceContext, SpanContext, EvaluateEvent
from lmnr.semantic_conventions.gen_ai_spans import INPUT_TOKEN_COUNT, OUTPUT_TOKEN_COUNT, RESPONSE_MODEL, PROVIDER, STREAM
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def poem_writer(t: TraceContext, topic="turbulence"):
    span: SpanContext = t.span(name="poem_writer", input=topic)

    prompt = f"write a poem about {topic}"
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": prompt},
    ]
    # create a child span within the current `poem_writer` span.
    llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")

    response = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
    )
    poem = response.choices[0].message.content
    if topic in poem:
        llm_span.event("topic_alignment", "good")  # send an event with a pre-defined name

    # note that you can register possible events here as well,
    # not only `llm_span.evaluate_event()`
    llm_span.end(
        output=poem,
        evaluate_events=[EvaluateEvent(name="excessive_wordiness", data=poem)],
        attributes={
            INPUT_TOKEN_COUNT: response.usage.prompt_tokens,
            OUTPUT_TOKEN_COUNT: response.usage.completion_tokens,
            RESPONSE_MODEL: response.model,
            PROVIDER: 'openai',
            STREAM: False
        }
    )
    span.end(output=poem)
    return poem


t: TraceContext = trace(user_id="user123", session_id="session123", release="release")
poem_writer(t, topic="laminar flow")
```

## Manual attributes

You can specify span attributes when creating/updating/ending spans.

If you use [decorator instrumentation](#decorator-instrumentation-example), `wrap_llm_call` handles all of this for you.

Example usage:

```python
from lmnr.semantic_conventions.gen_ai_spans import REQUEST_MODEL

# span_type = LLM is important for correct attribute semantics
llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")
llm_span.update(
    attributes={REQUEST_MODEL: "gpt-4o-mini"}
)
response = client.chat.completions.create(
    model="gpt-4o-mini",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Hello. What is the capital of France?"},
    ],
)
```

Semantics:

Check for available semantic conventions in `lmnr.semantic_conventions.gen_ai_spans`.

You can specify the cost with `COST`. Otherwise, the cost will be calculated on the Laminar servers, given the following are specified:

- span_type is `"LLM"`
- Model provider: `PROVIDER`, e.g. 'openai', 'anthropic'
- Output tokens: `OUTPUT_TOKEN_COUNT`
- Input tokens: `INPUT_TOKEN_COUNT`*
- Model. We look at `RESPONSE_MODEL` first, and then, if it is not present, we take the value of `REQUEST_MODEL`.

\* When `PROVIDER` is `"openai"`, `STREAM` is set to `True`, and `INPUT_TOKEN_COUNT` is not set, we calculate the number of input tokens on the server using [tiktoken](https://github.com/zurawiki/tiktoken-rs) and use it in the cost calculation. This is done because OpenAI does not stream the usage back when streaming is enabled. The output token count is (approximately) equal to the number of streaming events sent by OpenAI, but there is no way to determine the input token count other than by re-tokenizing.
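If you would rather skip the server-side calculation, a minimal sketch of setting the cost yourself, continuing the `llm_span` from the example above (the figure is illustrative):

```python
from lmnr.semantic_conventions.gen_ai_spans import COST

# hypothetical pre-computed cost for this call; when COST is set,
# Laminar uses it instead of computing the cost on its servers
llm_span.update(attributes={COST: 0.00042})
```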
## Making Laminar pipeline calls

When you are ready to use your pipeline in your code, deploy it in Laminar by selecting the target version for the pipeline.

Once your pipeline target is set, you can call it from Python in just a few lines.

Example use:

```python
from lmnr import Laminar

# for decorator instrumentation, do: `from lmnr import lmnr_context`

l = Laminar('<YOUR_PROJECT_API_KEY>')
result = l.run(  # lmnr_context.run( for decorator instrumentation
    pipeline = 'my_pipeline_name',
    inputs = {'input_node_name': 'some_value'},
    # all environment variables
    env = {'OPENAI_API_KEY': 'sk-some-key'},
    # any metadata to attach to this run's trace
    metadata = {'session_id': 'your_custom_session_id'}
)
```

Resulting in:

```python
>>> result
PipelineRunResponse(
    outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
    # useful to locate your trace
    run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
)
```
lmnr-0.3.6.dist-info/RECORD
DELETED
@@ -1,23 +0,0 @@
```
lmnr/__init__.py,sha256=vUiBEqNVi-dWlFKqnyxt0387t1kzVXrwFp7ShhDJyXY,324
lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
lmnr/sdk/client.py,sha256=e6cIvJq38a6XAU8FGWYtNXVAPlEoZEyKS7hC3M_6EkU,5749
lmnr/sdk/collector.py,sha256=6LRmPhOcmGplUDWm_sJh0dVrLTHknd_kmq7POGuAvoQ,5338
lmnr/sdk/constants.py,sha256=USCfwuUqRx6_0xC8WUxqGj766dInqQkWJcf8U5vPK7s,34
lmnr/sdk/context.py,sha256=4ngtUYhix91hUW4Kpo-WplDiNs-SCqz6LmHMl84StoA,15398
lmnr/sdk/decorators.py,sha256=vEGXYJUKGNVAVEt3sRKAUTtGAdhfQbyA474kfHsZHTk,12024
lmnr/sdk/interface.py,sha256=ugtNIcHZhHC_qSyu_-1Fm62_phOb4vW-Lo5VZ1WaMhc,12255
lmnr/sdk/providers/__init__.py,sha256=wNCgQnt9-bnTNXLQWdPgyKhqA1ajiaEd1Rr2KPOpazM,54
lmnr/sdk/providers/base.py,sha256=xc6iA8yY_VK6tbzswt-3naZ53aAXtOLur9j8eimC_ZA,1054
lmnr/sdk/providers/fallback.py,sha256=9-srLJgDK5CWD8DIdnxo6jbSsnwDvdHC-vA06BfEkqQ,5431
lmnr/sdk/providers/openai.py,sha256=9X2VWz4_EqQBEA1UEww2EKdqods6IzOEmaO6dnhY-Lw,5725
lmnr/sdk/providers/utils.py,sha256=ROt82VrvezExYOxionAynD3dp6oX5JoPW6F1ayTm7q8,946
lmnr/sdk/tracing_types.py,sha256=O6sfTBRKxz8WtRy4uEr-_wkSqu58mmEa1SXXJXDGwBI,6267
lmnr/sdk/types.py,sha256=5-Ft-l35wtmn2xxE8BTqsM3nx1zD799tRv4qiOkED50,2121
lmnr/sdk/utils.py,sha256=9ScSKalwZbW5wAhGN_Mn26_LZewVBkTz_OG5uQi6Og4,3401
lmnr/semantic_conventions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
lmnr/semantic_conventions/gen_ai_spans.py,sha256=3s-2J5v3t5LcMKwK2DefPn56XpxN5oMEYtb9Mf9D_gA,1541
lmnr-0.3.6.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
lmnr-0.3.6.dist-info/METADATA,sha256=OcWavyUecfR_YwlTJF003WghB4mDqbvUg0ril4arC0k,9485
lmnr-0.3.6.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
lmnr-0.3.6.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
lmnr-0.3.6.dist-info/RECORD,,
```
{lmnr-0.3.6.dist-info → lmnr-0.4.0.dist-info}/LICENSE
File without changes

{lmnr-0.3.6.dist-info → lmnr-0.4.0.dist-info}/WHEEL
File without changes

{lmnr-0.3.6.dist-info → lmnr-0.4.0.dist-info}/entry_points.txt
File without changes