lmnr 0.4.9__py3-none-any.whl → 0.4.11__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/__init__.py CHANGED
@@ -2,3 +2,4 @@ from .sdk.evaluations import Evaluation
2
2
  from .sdk.laminar import Laminar
3
3
  from .sdk.types import ChatMessage, PipelineRunError, PipelineRunResponse, NodeInput
4
4
  from .sdk.decorators import observe
5
+ from .traceloop_sdk import Instruments
lmnr/sdk/decorators.py CHANGED
@@ -4,22 +4,19 @@ from lmnr.traceloop_sdk.decorators.base import (
4
4
  )
5
5
  from opentelemetry.trace import INVALID_SPAN, get_current_span
6
6
 
7
- from typing import Callable, Optional, ParamSpec, TypeVar, cast
7
+ from typing import Callable, Optional, cast
8
8
 
9
9
  from lmnr.traceloop_sdk.tracing.tracing import update_association_properties
10
10
 
11
11
  from .utils import is_async
12
12
 
13
- P = ParamSpec("P")
14
- R = TypeVar("R")
15
-
16
13
 
17
14
  def observe(
18
15
  *,
19
16
  name: Optional[str] = None,
20
17
  user_id: Optional[str] = None,
21
18
  session_id: Optional[str] = None,
22
- ) -> Callable[[Callable[P, R]], Callable[P, R]]:
19
+ ) -> Callable[[Callable], Callable]:
23
20
  """The main decorator entrypoint for Laminar. This is used to wrap
24
21
  functions and methods to create spans.
25
22
 
@@ -41,7 +38,7 @@ def observe(
41
38
  R: Returns the result of the wrapped function
42
39
  """
43
40
 
44
- def decorator(func: Callable[P, R]) -> Callable[P, R]:
41
+ def decorator(func: Callable) -> Callable:
45
42
  current_span = get_current_span()
46
43
  if current_span != INVALID_SPAN:
47
44
  if session_id is not None:
@@ -64,4 +61,4 @@ def observe(
64
61
  else entity_method(name=name)(func)
65
62
  )
66
63
 
67
- return cast(Callable[P, R], decorator)
64
+ return cast(Callable, decorator)
lmnr/sdk/types.py CHANGED
@@ -2,7 +2,7 @@ import datetime
2
2
  import requests
3
3
  import pydantic
4
4
  import uuid
5
- from typing import Any, Literal, Optional, TypeAlias, Union
5
+ from typing import Any, Awaitable, Callable, Literal, Optional, Union
6
6
 
7
7
  from .utils import to_dict
8
8
 
@@ -17,9 +17,9 @@ class ConditionedValue(pydantic.BaseModel):
17
17
  value: "NodeInput"
18
18
 
19
19
 
20
- Numeric: TypeAlias = Union[int, float]
21
- NodeInput: TypeAlias = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
22
- PipelineOutput: TypeAlias = Union[NodeInput]
20
+ Numeric = Union[int, float]
21
+ NodeInput = Union[str, list[ChatMessage], ConditionedValue, Numeric, bool]
22
+ PipelineOutput = Union[NodeInput]
23
23
 
24
24
 
25
25
  class PipelineRunRequest(pydantic.BaseModel):
@@ -74,8 +74,8 @@ class PipelineRunError(Exception):
74
74
  return super().__str__()
75
75
 
76
76
 
77
- EvaluationDatapointData: TypeAlias = dict[str, Any]
78
- EvaluationDatapointTarget: TypeAlias = dict[str, Any]
77
+ EvaluationDatapointData = dict[str, Any]
78
+ EvaluationDatapointTarget = dict[str, Any]
79
79
 
80
80
 
81
81
  # EvaluationDatapoint is a single data point in the evaluation
@@ -87,24 +87,24 @@ class EvaluationDatapoint(pydantic.BaseModel):
87
87
  target: EvaluationDatapointTarget
88
88
 
89
89
 
90
- ExecutorFunctionReturnType: TypeAlias = Any
91
- EvaluatorFunctionReturnType: TypeAlias = Union[Numeric, dict[str, Numeric]]
90
+ ExecutorFunctionReturnType = Any
91
+ EvaluatorFunctionReturnType = Union[Numeric, dict[str, Numeric]]
92
92
 
93
- # ExecutorFunction: TypeAlias = Callable[
94
- # [EvaluationDatapointData, *tuple[Any, ...], dict[str, Any]],
95
- # Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
96
- # ]
93
+ ExecutorFunction = Callable[
94
+ [EvaluationDatapointData, Any, dict[str, Any]],
95
+ Union[ExecutorFunctionReturnType, Awaitable[ExecutorFunctionReturnType]],
96
+ ]
97
97
 
98
98
  # EvaluatorFunction is a function that takes the output of the executor and the
99
99
  # target data, and returns a score. The score can be a single number or a
100
100
  # record of string keys and number values. The latter is useful for evaluating
101
101
  # multiple criteria in one go instead of running multiple evaluators.
102
- # EvaluatorFunction: TypeAlias = Callable[
103
- # [ExecutorFunctionReturnType, *tuple[Any, ...], dict[str, Any]],
104
- # Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
105
- # ]
102
+ EvaluatorFunction = Callable[
103
+ [ExecutorFunctionReturnType, Any, dict[str, Any]],
104
+ Union[EvaluatorFunctionReturnType, Awaitable[EvaluatorFunctionReturnType]],
105
+ ]
106
106
 
107
- EvaluationStatus: TypeAlias = Literal["Started", "Finished", "Error"]
107
+ EvaluationStatus = Literal["Started", "Finished", "Error"]
108
108
 
109
109
 
110
110
  class CreateEvaluationResponse(pydantic.BaseModel):
@@ -2,6 +2,8 @@ from enum import Enum
2
2
 
3
3
 
4
4
  class Instruments(Enum):
5
+ # The list of libraries which will be autoinstrumented
6
+ # if no specific instruments are provided to initialize()
5
7
  OPENAI = "openai"
6
8
  ANTHROPIC = "anthropic"
7
9
  COHERE = "cohere"
@@ -15,10 +17,6 @@ class Instruments(Enum):
15
17
  MILVUS = "milvus"
16
18
  TRANSFORMERS = "transformers"
17
19
  TOGETHER = "together"
18
- REDIS = "redis"
19
- REQUESTS = "requests"
20
- URLLIB3 = "urllib3"
21
- PYMYSQL = "pymysql"
22
20
  BEDROCK = "bedrock"
23
21
  REPLICATE = "replicate"
24
22
  VERTEXAI = "vertexai"
@@ -27,3 +25,10 @@ class Instruments(Enum):
27
25
  ALEPHALPHA = "alephalpha"
28
26
  MARQO = "marqo"
29
27
  LANCEDB = "lancedb"
28
+
29
+ # The following libraries will not be autoinstrumented unless
30
+ # specified explicitly in the initialize() call.
31
+ REDIS = "redis"
32
+ REQUESTS = "requests"
33
+ URLLIB3 = "urllib3"
34
+ PYMYSQL = "pymysql"
@@ -531,10 +531,6 @@ def init_instrumentations(should_enrich_metrics: bool):
531
531
  init_milvus_instrumentor()
532
532
  init_transformers_instrumentor()
533
533
  init_together_instrumentor()
534
- init_redis_instrumentor()
535
- init_requests_instrumentor()
536
- init_urllib3_instrumentor()
537
- init_pymysql_instrumentor()
538
534
  init_bedrock_instrumentor(should_enrich_metrics)
539
535
  init_replicate_instrumentor()
540
536
  init_vertexai_instrumentor()
@@ -545,6 +541,12 @@ def init_instrumentations(should_enrich_metrics: bool):
545
541
  init_lancedb_instrumentor()
546
542
  init_groq_instrumentor()
547
543
 
544
+ # These libraries are not instrumented by default, but users can manually specify them if needed
545
+ # init_redis_instrumentor()
546
+ # init_requests_instrumentor()
547
+ # init_urllib3_instrumentor()
548
+ # init_pymysql_instrumentor()
549
+
548
550
 
549
551
  def init_openai_instrumentor(should_enrich_metrics: bool):
550
552
  try:
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lmnr
3
- Version: 0.4.9
3
+ Version: 0.4.11
4
4
  Summary: Python SDK for Laminar AI
5
5
  License: Apache-2.0
6
6
  Author: lmnr.ai
@@ -67,6 +67,9 @@ OpenTelemetry log sender for [Laminar](https://github.com/lmnr-ai/lmnr) for Pyth
67
67
 
68
68
 
69
69
  ## Quickstart
70
+
71
+ First, install the package:
72
+
70
73
  ```sh
71
74
  python3 -m venv .myenv
72
75
  source .myenv/bin/activate # or use your favorite env management tool
@@ -74,21 +77,39 @@ source .myenv/bin/activate # or use your favorite env management tool
74
77
  pip install lmnr
75
78
  ```
76
79
 
77
- And the in your main Python file
80
+ Then, you can initialize Laminar in your main file and instrument your code.
78
81
 
79
82
  ```python
83
+ import os
84
+ from openai import OpenAI
80
85
  from lmnr import Laminar as L
81
86
 
82
- L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments=set())
83
- ```
87
+ L.initialize(
88
+ project_api_key=os.environ["LMNR_PROJECT_API_KEY"],
89
+ )
84
90
 
85
- If you want to automatically instrument particular LLM, Vector DB, and related
86
- calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
91
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
87
92
 
88
- Also if you want to automatically instrument all supported libraries, then pass `instruments=None` or don't pass `instruments` at all.
93
+ def poem_writer(topic: str):
94
+ prompt = f"write a poem about {topic}"
89
95
 
90
- We rely on the amazing [OpenLLMetry](https://github.com/traceloop/openllmetry), open-source package
91
- by TraceLoop, to achieve that.
96
+ # OpenAI calls are automatically instrumented
97
+ response = client.chat.completions.create(
98
+ model="gpt-4o",
99
+ messages=[
100
+ {"role": "system", "content": "You are a helpful assistant."},
101
+ {"role": "user", "content": prompt},
102
+ ],
103
+ )
104
+ poem = response.choices[0].message.content
105
+ return poem
106
+
107
+ if __name__ == "__main__":
108
+ print(poem_writer("laminar flow"))
109
+
110
+ ```
111
+
112
+ Note that you only need to initialize Laminar once in your application.
92
113
 
93
114
  ### Project API key
94
115
 
@@ -97,75 +118,84 @@ You can either pass it to `.initialize()` or set it to `.env` at the root of you
97
118
 
98
119
  ## Instrumentation
99
120
 
100
- In addition to automatic instrumentation, we provide a simple `@observe()` decorator, if you want more fine-grained tracing
101
- or to trace other functions.
121
+ ### Manual instrumentation
102
122
 
103
- ### Example
123
+ To instrument any function in your code, we provide a simple `@observe()` decorator.
124
+ This can be useful if you want to trace a request handler or a function which combines multiple LLM calls.
104
125
 
105
126
  ```python
106
127
  import os
107
128
  from openai import OpenAI
129
+ from lmnr import Laminar as L, Instruments
108
130
 
109
-
110
- from lmnr import observe, Laminar as L
111
- L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments=set())
131
+ L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"])
112
132
 
113
133
  client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
114
134
 
115
- @observe() # annotate all functions you want to trace
116
- def poem_writer(topic="turbulence"):
135
+ def poem_writer(topic: str):
117
136
  prompt = f"write a poem about {topic}"
137
+ messages = [
138
+ {"role": "system", "content": "You are a helpful assistant."},
139
+ {"role": "user", "content": prompt},
140
+ ]
141
+
142
+ # OpenAI calls are still automatically instrumented
118
143
  response = client.chat.completions.create(
119
144
  model="gpt-4o",
120
- messages=[
121
- {"role": "system", "content": "You are a helpful assistant."},
122
- {"role": "user", "content": prompt},
123
- ],
145
+ messages=messages,
124
146
  )
125
147
  poem = response.choices[0].message.content
148
+
126
149
  return poem
127
150
 
128
- print(poem_writer(topic="laminar flow"))
151
+ @observe()
152
+ def generate_poems():
153
+ poem1 = poem_writer(topic="laminar flow")
154
+ L.event("is_poem_generated", True)
155
+ poem2 = poem_writer(topic="turbulence")
156
+ L.event("is_poem_generated", True)
157
+ poems = f"{poem1}\n\n---\n\n{poem2}"
158
+ return poems
129
159
  ```
130
160
 
131
- ### Manual instrumentation
132
-
133
- Our manual instrumentation is a very thin wrapper around OpenTelemetry's
134
- `trace.start_span`. Our wrapper sets the span into the active context.
135
- You don't have to explicitly pass the spans around, it is enough to
136
- just call `L.start_span`, and OpenTelemetry will handle the context management
161
+ Also, you can use `Laminar.start_as_current_span` if you want to record a chunk of your code using `with` statement.
137
162
 
138
163
  ```python
139
- from lmnr import observe, Laminar as L
140
- L.initialize(project_api_key="<LMNR_PROJECT_API_KEY>", instruments=set())
164
+ def handle_user_request(topic: str):
165
+ with L.start_as_current_span(name="poem_writer", input=topic):
166
+ ...
167
+
168
+ poem = poem_writer(topic=topic)
169
+
170
+ ...
171
+
172
+ # while within the span, you can attach laminar events to it
173
+ L.event("is_poem_generated", True)
174
+
175
+ # Use set_span_output to record the output of the span
176
+ L.set_span_output(poem)
177
+ ```
141
178
 
142
- def poem_writer(topic="turbulence"):
143
-
144
- span = L.start_span("poem_writer", topic) # start a span
179
+ ### Automatic instrumentation
145
180
 
146
- prompt = f"write a poem about {topic}"
181
+ Laminar allows you to automatically instrument the majority of the most popular LLM, Vector DB, database, requests, and other libraries.
147
182
 
148
- # OpenAI calls are still automatically instrumented with OpenLLMetry
149
- response = client.chat.completions.create(
150
- model="gpt-4o",
151
- messages=[
152
- {"role": "system", "content": "You are a helpful assistant."},
153
- {"role": "user", "content": prompt},
154
- ],
155
- )
156
- poem = response.choices[0].message.content
157
- # while within the span, you can attach laminar events to it
158
- L.event("event_name", "event_value")
183
+ If you want to automatically instrument a default set of libraries, then simply do NOT pass `instruments` argument to `.initialize()`.
184
+ See the full list of available instrumentations in the [enum](/src/lmnr/traceloop_sdk/instruments.py).
159
185
 
160
- L.set_span_output(poem) # set an output
161
-
162
- # IMPORTANT: don't forget to end all the spans (usually in `finally` blocks)
163
- # Otherwise, the trace may not be sent/displayed correctly
164
- span.end()
186
+ If you want to automatically instrument only specific LLM, Vector DB, or other
187
+ calls with OpenTelemetry-compatible instrumentation, then pass the appropriate instruments to `.initialize()`.
188
+ For example, if you want to only instrument OpenAI and Anthropic, then do the following:
165
189
 
166
- return poem
190
+ ```python
191
+ from lmnr import Laminar as L, Instruments
192
+
193
+ L.initialize(project_api_key=os.environ["LMNR_PROJECT_API_KEY"], instruments={Instruments.OPENAI, Instruments.ANTHROPIC})
167
194
  ```
168
195
 
196
+ If you want to fully disable any kind of autoinstrumentation, pass an empty set as `instruments=set()` to `.initialize()`.
197
+
198
+ The majority of the autoinstrumentations are provided by Traceloop's [OpenLLMetry](https://github.com/traceloop/openllmetry).
169
199
 
170
200
  ## Sending events
171
201
 
@@ -1,10 +1,10 @@
1
- lmnr/__init__.py,sha256=wQwnHl662Xcz7GdSofFsEjmAK0nxioYA2Yq6Q78m4ps,194
1
+ lmnr/__init__.py,sha256=bA1f7JsEdSdU93HTz3SQLSanq-UgZGvb5I2OE0CWGR8,233
2
2
  lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
3
- lmnr/sdk/decorators.py,sha256=W46diLcINe0HAhxktrjbfQnaIfklSb0AydBHHxiko9U,2314
3
+ lmnr/sdk/decorators.py,sha256=O8S4PI6LUfdWPkbroigl5khtnkyhp24J8qzSdlvCs44,2227
4
4
  lmnr/sdk/evaluations.py,sha256=EaRcwbdXxj4w2yzak1xFv-YhDuxRVentQcJ-CypBoH0,6307
5
5
  lmnr/sdk/laminar.py,sha256=M8HdP6ZYJHdngUVrGj4GMZxz_EZyx3woHm-UpfWmIvs,18439
6
6
  lmnr/sdk/log.py,sha256=EgAMY77Zn1bv1imCqrmflD3imoAJ2yveOkIcrIP3e98,1170
7
- lmnr/sdk/types.py,sha256=w7BJsoEPHiNps62cQt3Hd6tEZ7ZFCKRTPzcwdD6rNak,4050
7
+ lmnr/sdk/types.py,sha256=zDOLdtKqjJtaXH0ea1BpCtKhyqHdbogiiFQ0Tqgy2T8,3908
8
8
  lmnr/sdk/utils.py,sha256=ZsGJ86tq8lIbvOhSb1gJWH5K3GylO_lgX68FN6rG2nM,3358
9
9
  lmnr/traceloop_sdk/.flake8,sha256=bCxuDlGx3YQ55QHKPiGJkncHanh9qGjQJUujcFa3lAU,150
10
10
  lmnr/traceloop_sdk/.python-version,sha256=9OLQBQVbD4zE4cJsPePhnAfV_snrPSoqEQw-PXgPMOs,6
@@ -12,7 +12,7 @@ lmnr/traceloop_sdk/__init__.py,sha256=J-zVw6j0DmceVvJVZXAFcCzN_scz9hB3X17NQgPMgO
12
12
  lmnr/traceloop_sdk/config/__init__.py,sha256=EGN3ixOt_ORbMxqaQdLaC14kmO-gyG4mnGJ2GfN-R-E,364
13
13
  lmnr/traceloop_sdk/decorators/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
14
14
  lmnr/traceloop_sdk/decorators/base.py,sha256=wcqXF0iVQgRXMyWTcJ5QvL_6q2y_gttwsX8dllmAtWM,4891
15
- lmnr/traceloop_sdk/instruments.py,sha256=G5EFAbpc20WD3M6xK6rlbj-Yy_r_f1m3gidY6UXzSRQ,701
15
+ lmnr/traceloop_sdk/instruments.py,sha256=oMvIASueW3GeChpjIdH-DD9aFBVB8OtHZ0HawppTrlI,942
16
16
  lmnr/traceloop_sdk/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
17
17
  lmnr/traceloop_sdk/metrics/metrics.py,sha256=AlQ2a2os1WcZbfBd155u_UzBbPrbuPia6O_HbojV9Wc,5055
18
18
  lmnr/traceloop_sdk/tests/__init__.py,sha256=RYnG0-8zbXL0-2Ste1mEBf5sN4d_rQjGTCgPBuaZC74,20
@@ -39,14 +39,14 @@ lmnr/traceloop_sdk/tracing/__init__.py,sha256=Ckq7zCM26VdJVB5tIZv0GTPyMZKyfso_KW
39
39
  lmnr/traceloop_sdk/tracing/content_allow_list.py,sha256=3feztm6PBWNelc8pAZUcQyEGyeSpNiVKjOaDk65l2ps,846
40
40
  lmnr/traceloop_sdk/tracing/context_manager.py,sha256=csVlB6kDmbgSPsROHwnddvGGblx55v6lJMRj0wsSMQM,304
41
41
  lmnr/traceloop_sdk/tracing/manual.py,sha256=RPwEreHHdzmw7g15u4G21GqhHOvRp7d72ylQNLG1jRM,1841
42
- lmnr/traceloop_sdk/tracing/tracing.py,sha256=VFrf5D6CC3DquLy_19_5I_L_w1kO2X61KvPW0XD26-k,42347
42
+ lmnr/traceloop_sdk/tracing/tracing.py,sha256=fDtdZ7mrZIGHDiAAFHIu9x-FRQjHE9b1-KO6yFfGqB0,42463
43
43
  lmnr/traceloop_sdk/utils/__init__.py,sha256=pNhf0G3vTd5ccoc03i1MXDbricSaiqCbi1DLWhSekK8,604
44
44
  lmnr/traceloop_sdk/utils/in_memory_span_exporter.py,sha256=H_4TRaThMO1H6vUQ0OpQvzJk_fZH0OOsRAM1iZQXsR8,2112
45
45
  lmnr/traceloop_sdk/utils/json_encoder.py,sha256=dK6b_axr70IYL7Vv-bu4wntvDDuyntoqsHaddqX7P58,463
46
46
  lmnr/traceloop_sdk/utils/package_check.py,sha256=TZSngzJOpFhfUZLXIs38cpMxQiZSmp0D-sCrIyhz7BA,251
47
47
  lmnr/traceloop_sdk/version.py,sha256=OlatFEFA4ttqSSIiV8jdE-sq3KG5zu2hnC4B4mzWF3s,23
48
- lmnr-0.4.9.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
49
- lmnr-0.4.9.dist-info/METADATA,sha256=jFA2U8-94qC2U_bK3kSaDG-ATSnlIRbgpiqTtUN6RsY,11023
50
- lmnr-0.4.9.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
51
- lmnr-0.4.9.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
52
- lmnr-0.4.9.dist-info/RECORD,,
48
+ lmnr-0.4.11.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
49
+ lmnr-0.4.11.dist-info/METADATA,sha256=xrSEMxVEEsrG-29yIWGiIPD6TlHQ4x6zS2HwBtr8Qmw,11920
50
+ lmnr-0.4.11.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
51
+ lmnr-0.4.11.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
52
+ lmnr-0.4.11.dist-info/RECORD,,
File without changes
File without changes