lmnr 0.6.10__tar.gz → 0.6.12__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lmnr-0.6.10 → lmnr-0.6.12}/PKG-INFO +55 -55
- lmnr-0.6.12/pyproject.toml +143 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/__init__.py +2 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/__init__.py +1 -1
- lmnr-0.6.12/src/lmnr/opentelemetry_lib/litellm/__init__.py +371 -0
- lmnr-0.6.12/src/lmnr/opentelemetry_lib/litellm/utils.py +18 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/asynchronous/async_client.py +4 -2
- lmnr-0.6.12/src/lmnr/sdk/client/asynchronous/resources/evals.py +158 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/resources/evals.py +87 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/sync_client.py +3 -1
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/datasets.py +1 -1
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/evaluations.py +61 -54
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/types.py +4 -12
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/version.py +1 -1
- lmnr-0.6.10/pyproject.toml +0 -142
- lmnr-0.6.10/src/lmnr/sdk/client/asynchronous/resources/evals.py +0 -68
- {lmnr-0.6.10 → lmnr-0.6.12}/LICENSE +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/README.md +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/cli.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/.flake8 +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/decorators/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/config.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/google_genai/utils.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/opentelemetry/instrumentation/langgraph/utils.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/_instrument_initializers.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/attributes.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/context_properties.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/exporter.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/instruments.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/processor.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/tracing/tracer.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/utils/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/utils/json_encoder.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/utils/package_check.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/py.typed +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/browser_use_otel.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/patchright_otel.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/playwright_otel.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/pw_utils.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/rrweb/rrweb.umd.min.cjs +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/browser/utils.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/asynchronous/resources/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/asynchronous/resources/agent.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/asynchronous/resources/base.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/asynchronous/resources/browser_events.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/asynchronous/resources/tags.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/resources/__init__.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/resources/agent.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/resources/base.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/resources/browser_events.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/client/synchronous/resources/tags.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/decorators.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/eval_control.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/laminar.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/log.py +0 -0
- {lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/sdk/utils.py +0 -0
{lmnr-0.6.10 → lmnr-0.6.12}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: lmnr
-Version: 0.6.10
+Version: 0.6.12
 Summary: Python SDK for Laminar
 License: Apache-2.0
 Author: lmnr.ai
@@ -46,61 +46,61 @@ Requires-Dist: httpx (>=0.25.0)
 Requires-Dist: opentelemetry-api (>=1.33.0)
 Requires-Dist: opentelemetry-exporter-otlp-proto-grpc (>=1.33.0)
 Requires-Dist: opentelemetry-exporter-otlp-proto-http (>=1.33.0)
-Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.
+Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.12) ; extra == "alephalpha"
+Requires-Dist: opentelemetry-instrumentation-alephalpha (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-anthropic (>=0.40.12) ; extra == "anthropic"
+Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-bedrock (>=0.40.12) ; extra == "bedrock"
+Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-chromadb (>=0.40.12) ; extra == "chromadb"
+Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-cohere (>=0.40.12) ; extra == "cohere"
+Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-crewai (>=0.40.12) ; extra == "crewai"
+Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-google-generativeai (>=0.40.12) ; extra == "google-generativeai"
+Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-groq (>=0.40.12) ; extra == "groq"
+Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-haystack (>=0.40.12) ; extra == "haystack"
+Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-lancedb (>=0.40.12) ; extra == "lancedb"
+Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-langchain (>=0.40.12) ; extra == "langchain"
+Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-llamaindex (>=0.40.12) ; extra == "llamaindex"
+Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-marqo (>=0.40.12) ; extra == "marqo"
+Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-mcp (>=0.40.12) ; extra == "mcp"
+Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-milvus (>=0.40.12) ; extra == "milvus"
+Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-mistralai (>=0.40.12) ; extra == "mistralai"
+Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-ollama (>=0.40.12) ; extra == "ollama"
+Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-openai (>=0.40.12) ; extra == "openai"
+Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-pinecone (>=0.40.12) ; extra == "pinecone"
+Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-qdrant (>=0.40.12) ; extra == "qdrant"
+Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-replicate (>=0.40.12) ; extra == "replicate"
+Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-sagemaker (>=0.40.12) ; extra == "sagemaker"
 Requires-Dist: opentelemetry-instrumentation-threading (>=0.54b0)
-Requires-Dist: opentelemetry-instrumentation-together (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-together (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.
-Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.
+Requires-Dist: opentelemetry-instrumentation-together (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-together (>=0.40.12) ; extra == "together"
+Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-transformers (>=0.40.12) ; extra == "transformers"
+Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-vertexai (>=0.40.12) ; extra == "vertexai"
+Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-watsonx (>=0.40.12) ; extra == "watsonx"
+Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.12) ; extra == "all"
+Requires-Dist: opentelemetry-instrumentation-weaviate (>=0.40.12) ; extra == "weaviate"
 Requires-Dist: opentelemetry-sdk (>=1.33.0)
 Requires-Dist: opentelemetry-semantic-conventions (>=0.54b0)
 Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.8)
lmnr-0.6.12/pyproject.toml

@@ -0,0 +1,143 @@
+# Laminar Python
+
+# If you are looking for information about possible extras installations,
+# i.e. what you can pass into `pip install 'lmnr[extra1,extra2]'`, please see the
+# `[project.optional-dependencies]` section below.
+
+[project]
+name = "lmnr"
+version = "0.6.12"
+description = "Python SDK for Laminar"
+authors = [
+    { name = "lmnr.ai", email = "founders@lmnr.ai" }
+]
+readme = "README.md"
+requires-python = ">=3.10,<4"
+license = "Apache-2.0"
+dependencies = [
+    "pydantic (>=2.0.3,<3.0.0)",
+    "python-dotenv (>=1.0)",
+    "opentelemetry-api (>=1.33.0)",
+    "opentelemetry-sdk (>=1.33.0)",
+    "opentelemetry-exporter-otlp-proto-http (>=1.33.0)",
+    "opentelemetry-exporter-otlp-proto-grpc (>=1.33.0)",
+    "opentelemetry-semantic-conventions (>=0.54b0)",
+    "opentelemetry-semantic-conventions-ai (>=0.4.8)",
+    "tqdm (>=4.0)",
+    "argparse (>=1.0)",
+    "tenacity (>=8.0)",
+    # Since 1.68.0, grpcio writes a warning message
+    # that looks scary, but is harmless.
+    # WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
+    # E0000 00:00:1737439981.199902 9456033 init.cc:229] grpc_wait_for_shutdown_with_timeout() timed out.
+    #
+    # Remove this comment when we make sure that grpcio has resolved this.
+    # Related issue:
+    # https://discuss.ai.google.dev/t/warning-all-log-messages-before-absl-initializelog-is-called-are-written-to-stderr-e0000-001731955515-629532-17124-init-cc-229-grpc-wait-for-shutdown-with-timeout-timed-out/50020
+    # https://github.com/grpc/grpc/issues/38490
+    "grpcio>=1",
+    "httpx>=0.25.0",
+    "opentelemetry-instrumentation-threading>=0.54b0",
+]
+
+[project.scripts]
+lmnr = "lmnr.cli:cli"
+
+[project.optional-dependencies]
+# List of all possible extras. You can specify one or more of these extras
+# when installing the package, using any of the following examples:
+# `pip install 'lmnr[anthropic,openai]'`
+# `uv pip install 'lmnr[anthropic,openai]'`
+# `uv add lmnr --extra anthropic --extra openai`
+# `poetry add 'lmnr[anthropic,openai]'`
+
+alephalpha=["opentelemetry-instrumentation-alephalpha>=0.40.12"]
+anthropic=["opentelemetry-instrumentation-anthropic>=0.40.12"]
+bedrock=["opentelemetry-instrumentation-bedrock>=0.40.12"]
+chromadb=["opentelemetry-instrumentation-chromadb>=0.40.12"]
+cohere=["opentelemetry-instrumentation-cohere>=0.40.12"]
+crewai=["opentelemetry-instrumentation-crewai>=0.40.12"]
+google-generativeai=["opentelemetry-instrumentation-google-generativeai>=0.40.12"]
+groq=["opentelemetry-instrumentation-groq>=0.40.12"]
+haystack=["opentelemetry-instrumentation-haystack>=0.40.12"]
+lancedb=["opentelemetry-instrumentation-lancedb>=0.40.12"]
+langchain=["opentelemetry-instrumentation-langchain>=0.40.12"]
+llamaindex=["opentelemetry-instrumentation-llamaindex>=0.40.12"]
+marqo=["opentelemetry-instrumentation-marqo>=0.40.12"]
+mcp=["opentelemetry-instrumentation-mcp>=0.40.12"]
+milvus=["opentelemetry-instrumentation-milvus>=0.40.12"]
+mistralai=["opentelemetry-instrumentation-mistralai>=0.40.12"]
+ollama=["opentelemetry-instrumentation-ollama>=0.40.12"]
+openai=["opentelemetry-instrumentation-openai>=0.40.12"]
+pinecone=["opentelemetry-instrumentation-pinecone>=0.40.12"]
+qdrant=["opentelemetry-instrumentation-qdrant>=0.40.12"]
+replicate=["opentelemetry-instrumentation-replicate>=0.40.12"]
+sagemaker=["opentelemetry-instrumentation-sagemaker>=0.40.12"]
+together=["opentelemetry-instrumentation-together>=0.40.12"]
+transformers=["opentelemetry-instrumentation-transformers>=0.40.12"]
+vertexai=["opentelemetry-instrumentation-vertexai>=0.40.12"]
+watsonx=["opentelemetry-instrumentation-watsonx>=0.40.12"]
+weaviate=["opentelemetry-instrumentation-weaviate>=0.40.12"]
+# `all` is the group added for convenience, if you want to install all
+# the instrumentations.
+# we suggest using package-manager-specific commands instead,
+# like `uv add lmnr --all-extras`
+all = [
+    "opentelemetry-instrumentation-alephalpha>=0.40.12",
+    "opentelemetry-instrumentation-anthropic>=0.40.12",
+    "opentelemetry-instrumentation-bedrock>=0.40.12",
+    "opentelemetry-instrumentation-chromadb>=0.40.12",
+    "opentelemetry-instrumentation-cohere>=0.40.12",
+    "opentelemetry-instrumentation-crewai>=0.40.12",
+    "opentelemetry-instrumentation-google-generativeai>=0.40.12",
+    "opentelemetry-instrumentation-groq>=0.40.12",
+    "opentelemetry-instrumentation-haystack>=0.40.12",
+    "opentelemetry-instrumentation-lancedb>=0.40.12",
+    "opentelemetry-instrumentation-langchain>=0.40.12",
+    "opentelemetry-instrumentation-llamaindex>=0.40.12",
+    "opentelemetry-instrumentation-marqo>=0.40.12",
+    "opentelemetry-instrumentation-mcp>=0.40.12",
+    "opentelemetry-instrumentation-milvus>=0.40.12",
+    "opentelemetry-instrumentation-mistralai>=0.40.12",
+    "opentelemetry-instrumentation-ollama>=0.40.12",
+    "opentelemetry-instrumentation-openai>=0.40.12",
+    "opentelemetry-instrumentation-pinecone>=0.40.12",
+    "opentelemetry-instrumentation-qdrant>=0.40.12",
+    "opentelemetry-instrumentation-replicate>=0.40.12",
+    "opentelemetry-instrumentation-sagemaker>=0.40.12",
+    "opentelemetry-instrumentation-together>=0.40.12",
+    "opentelemetry-instrumentation-transformers>=0.40.12",
+    "opentelemetry-instrumentation-vertexai>=0.40.12",
+    "opentelemetry-instrumentation-watsonx>=0.40.12",
+    "opentelemetry-instrumentation-weaviate>=0.40.12"
+]
+
+[dependency-groups]
+dev = [
+    "autopep8>=2.3.2",
+    "flake8>=7.2.0",
+    "pytest>=8.3.5",
+    "pytest-sugar>=1.0.0",
+    "pytest-asyncio>=0.26.0",
+    "playwright>=1.52.0",
+    "vcrpy>=7.0.0",
+    "openai>=1.77.0",
+    "pytest-recording>=0.13.4",
+    "patchright>=1.52.3",
+    "google-genai>=1.19.0",
+    "langgraph>=0.4.8",
+    "langchain-core>=0.3.64",
+    "langchain>=0.3.25",
+    "litellm>=1.72.6",
+]
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
+
+[tool.uv.workspace]
+members = ["examples/fastapi-app"]
+# we can move to uv_build, once it's more stable
+# https://github.com/astral-sh/uv/issues/3957
+# requires = ["uv_build>=0.6.16,<0.7"]
+# build-backend = "uv_build"
{lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/__init__.py

@@ -13,6 +13,7 @@ from .sdk.types import (
 )
 from .sdk.decorators import observe
 from .sdk.types import LaminarSpanContext
+from .opentelemetry_lib.litellm import LaminarLiteLLMCallback
 from .opentelemetry_lib.tracing.attributes import Attributes
 from .opentelemetry_lib.tracing.instruments import Instruments
 from .opentelemetry_lib.tracing.processor import LaminarSpanProcessor
@@ -30,6 +31,7 @@ __all__ = [
     "Laminar",
     "LaminarClient",
     "LaminarDataset",
+    "LaminarLiteLLMCallback",
     "LaminarSpanContext",
     "LaminarSpanProcessor",
     "RunAgentResponseChunk",
{lmnr-0.6.10 → lmnr-0.6.12}/src/lmnr/opentelemetry_lib/__init__.py

@@ -7,7 +7,7 @@ from opentelemetry.sdk.resources import SERVICE_NAME
 from lmnr.opentelemetry_lib.tracing.instruments import Instruments
 from lmnr.opentelemetry_lib.tracing import TracerWrapper
 
-MAX_MANUAL_SPAN_PAYLOAD_SIZE = 1024 * 1024  # 1MB
+MAX_MANUAL_SPAN_PAYLOAD_SIZE = 1024 * 1024 * 10  # 10MB
 
 
 class TracerManager:
lmnr-0.6.12/src/lmnr/opentelemetry_lib/litellm/__init__.py

@@ -0,0 +1,371 @@
+"""LiteLLM callback logger for Laminar"""
+
+import json
+from datetime import datetime
+
+from opentelemetry.trace import SpanKind, Status, StatusCode, Tracer
+from lmnr.opentelemetry_lib.litellm.utils import model_as_dict, set_span_attribute
+from lmnr.opentelemetry_lib.tracing import TracerWrapper
+
+from lmnr.opentelemetry_lib.utils.package_check import is_package_installed
+from lmnr.sdk.log import get_default_logger
+
+logger = get_default_logger(__name__)
+
+SUPPORTED_CALL_TYPES = ["completion", "acompletion"]
+
+# Try to import the necessary LiteLLM components and gracefully handle ImportError
+try:
+    if not is_package_installed("litellm"):
+        raise ImportError("LiteLLM is not installed")
+
+    from litellm.integrations.custom_batch_logger import CustomBatchLogger
+
+    class LaminarLiteLLMCallback(CustomBatchLogger):
+        """Custom LiteLLM logger that sends logs to Laminar via OpenTelemetry spans
+
+        Usage:
+            import litellm
+            from lmnr import Laminar, LaminarLiteLLMCallback
+
+            # make sure this comes first
+            Laminar.initialize()
+
+            # Add the logger to LiteLLM callbacks
+            litellm.callbacks = [LaminarLiteLLMCallback()]
+        """
+
+        def __init__(self, **kwargs):
+            super().__init__(**kwargs)
+            if not hasattr(TracerWrapper, "instance") or TracerWrapper.instance is None:
+                raise ValueError("Laminar must be initialized before LiteLLM callback")
+
+        def _get_tracer(self) -> Tracer:
+            if not hasattr(TracerWrapper, "instance") or TracerWrapper.instance is None:
+                raise ValueError("Laminar must be initialized before LiteLLM callback")
+            return TracerWrapper().get_tracer()
+
+        def log_success_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            if kwargs.get("call_type") not in SUPPORTED_CALL_TYPES:
+                return
+            try:
+                self._create_span(
+                    kwargs, response_obj, start_time, end_time, is_success=True
+                )
+            except Exception as e:
+                logger.error(f"Error in log_success_event: {e}")
+
+        def log_failure_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            if kwargs.get("call_type") not in SUPPORTED_CALL_TYPES:
+                return
+            try:
+                self._create_span(
+                    kwargs, response_obj, start_time, end_time, is_success=False
+                )
+            except Exception as e:
+                logger.error(f"Error in log_failure_event: {e}")
+
+        async def async_log_success_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            self.log_success_event(kwargs, response_obj, start_time, end_time)
+
+        async def async_log_failure_event(
+            self, kwargs, response_obj, start_time: datetime, end_time: datetime
+        ):
+            self.log_failure_event(kwargs, response_obj, start_time, end_time)
+
+        def _create_span(
+            self,
+            kwargs,
+            response_obj,
+            start_time: datetime,
+            end_time: datetime,
+            is_success: bool,
+        ):
+            """Create an OpenTelemetry span for the LiteLLM call"""
+            span_name = "litellm.completion"
+            try:
+                tracer = self._get_tracer()
+            except Exception as e:
+                logger.error(f"Error getting tracer: {e}")
+                return
+            span = tracer.start_span(
+                span_name,
+                kind=SpanKind.CLIENT,
+                start_time=int(start_time.timestamp() * 1e9),
+                attributes={
+                    "lmnr.internal.provider": "litellm",
+                },
+            )
+            try:
+                model = kwargs.get("model", "unknown")
+                if kwargs.get("custom_llm_provider"):
+                    set_span_attribute(
+                        span, "gen_ai.system", kwargs["custom_llm_provider"]
+                    )
+
+                messages = kwargs.get("messages", [])
+                self._process_input_messages(span, messages)
+
+                tools = kwargs.get("tools", [])
+                self._process_request_tool_definitions(span, tools)
+
+                set_span_attribute(span, "gen_ai.request.model", model)
+
+                # Add more attributes from kwargs
+                if "temperature" in kwargs:
+                    set_span_attribute(
+                        span, "gen_ai.request.temperature", kwargs["temperature"]
+                    )
+                if "max_tokens" in kwargs:
+                    set_span_attribute(
+                        span, "gen_ai.request.max_tokens", kwargs["max_tokens"]
+                    )
+                if "top_p" in kwargs:
+                    set_span_attribute(span, "gen_ai.request.top_p", kwargs["top_p"])
+
+                if is_success:
+                    span.set_status(Status(StatusCode.OK))
+                    if kwargs.get("complete_streaming_response"):
+                        self._process_success_response(
+                            span,
+                            kwargs.get("complete_streaming_response"),
+                        )
+                    else:
+                        self._process_success_response(span, response_obj)
+                else:
+                    span.set_status(Status(StatusCode.ERROR))
+                    if isinstance(response_obj, Exception):
+                        span.record_exception(response_obj)
+
+            except Exception as e:
+                span.record_exception(e)
+                logger.error(f"Error in Laminar LiteLLM instrumentation: {e}")
+            finally:
+                span.end(int(end_time.timestamp() * 1e9))
+
+        def _process_input_messages(self, span, messages):
+            """Process and set message attributes on the span"""
+            if not isinstance(messages, list):
+                return
+
+            for i, message in enumerate(messages):
+                message_dict = model_as_dict(message)
+                role = message_dict.get("role", "unknown")
+                set_span_attribute(span, f"gen_ai.prompt.{i}.role", role)
+
+                tool_calls = message_dict.get("tool_calls", [])
+                self._process_tool_calls(span, tool_calls, i, is_response=False)
+
+                content = message_dict.get("content", "")
+                if content is None:
+                    continue
+                if isinstance(content, str):
+                    set_span_attribute(span, f"gen_ai.prompt.{i}.content", content)
+                elif isinstance(content, list):
+                    set_span_attribute(
+                        span, f"gen_ai.prompt.{i}.content", json.dumps(content)
+                    )
+                else:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.prompt.{i}.content",
+                        json.dumps(model_as_dict(content)),
+                    )
+                if role == "tool":
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.prompt.{i}.tool_call_id",
+                        message_dict.get("tool_call_id"),
+                    )
+
+        def _process_request_tool_definitions(self, span, tools):
+            """Process and set tool definitions attributes on the span"""
+            if not isinstance(tools, list):
+                return
+
+            for i, tool in enumerate(tools):
+                tool_dict = model_as_dict(tool)
+                if tool_dict.get("type") != "function":
+                    # TODO: parse other tool types
+                    continue
+
+                function_dict = tool_dict.get("function", {})
+                function_name = function_dict.get("name", "")
+                function_description = function_dict.get("description", "")
+                function_parameters = function_dict.get("parameters", {})
+                set_span_attribute(
+                    span,
+                    f"llm.request.functions.{i}.name",
+                    function_name,
+                )
+                set_span_attribute(
+                    span,
+                    f"llm.request.functions.{i}.description",
+                    function_description,
+                )
+                set_span_attribute(
+                    span,
+                    f"llm.request.functions.{i}.parameters",
+                    json.dumps(function_parameters),
+                )
+
+        def _process_response_usage(self, span, usage):
+            """Process and set usage attributes on the span"""
+            usage_dict = model_as_dict(usage)
+            if (
+                not usage_dict.get("prompt_tokens")
+                and not usage_dict.get("completion_tokens")
+                and not usage_dict.get("total_tokens")
+            ):
+                return
+
+            set_span_attribute(
+                span, "gen_ai.usage.input_tokens", usage_dict.get("prompt_tokens")
+            )
+            set_span_attribute(
+                span, "gen_ai.usage.output_tokens", usage_dict.get("completion_tokens")
+            )
+            set_span_attribute(
+                span, "llm.usage.total_tokens", usage_dict.get("total_tokens")
+            )
+
+            if usage_dict.get("prompt_tokens_details"):
+                details = usage_dict.get("prompt_tokens_details", {})
+                details = model_as_dict(details)
+                if details.get("cached_tokens"):
+                    set_span_attribute(
+                        span,
+                        "gen_ai.usage.cache_read_input_tokens",
+                        details.get("cached_tokens"),
+                    )
+            # TODO: add audio/image/text token details
+            # TODO: add completion tokens details (reasoning tokens)
+
+        def _process_tool_calls(self, span, tool_calls, choice_index, is_response=True):
+            """Process and set tool call attributes on the span"""
+            attr_prefix = "completion" if is_response else "prompt"
+            if not isinstance(tool_calls, list):
+                return
+
+            for j, tool_call in enumerate(tool_calls):
+                tool_call_dict = model_as_dict(tool_call)
+
+                tool_name = tool_call_dict.get(
+                    "name", tool_call_dict.get("function", {}).get("name", "")
+                )
+                set_span_attribute(
+                    span,
+                    f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.name",
+                    tool_name,
+                )
+
+                call_id = tool_call_dict.get("id", "")
+                set_span_attribute(
+                    span,
+                    f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.id",
+                    call_id,
+                )
+
+                tool_arguments = tool_call_dict.get(
+                    "arguments", tool_call_dict.get("function", {}).get("arguments", "")
+                )
+                if isinstance(tool_arguments, str):
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.arguments",
+                        tool_arguments,
+                    )
+                else:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.{attr_prefix}.{choice_index}.tool_calls.{j}.arguments",
+                        json.dumps(model_as_dict(tool_arguments)),
+                    )
+
+        def _process_response_choices(self, span, choices):
+            """Process and set choice attributes on the span"""
+            if not isinstance(choices, list):
+                return
+
+            for i, choice in enumerate(choices):
+                choice_dict = model_as_dict(choice)
+                message = choice_dict.get("message", choice_dict)
+
+                role = message.get("role", "unknown")
+                set_span_attribute(span, f"gen_ai.completion.{i}.role", role)
+
+                tool_calls = message.get("tool_calls", [])
+                self._process_tool_calls(span, tool_calls, i, is_response=True)
+
+                content = message.get("content", "")
+                if content is None:
+                    continue
+                if isinstance(content, str):
+                    set_span_attribute(span, f"gen_ai.completion.{i}.content", content)
+                elif isinstance(content, list):
+                    set_span_attribute(
+                        span, f"gen_ai.completion.{i}.content", json.dumps(content)
+                    )
+                else:
+                    set_span_attribute(
+                        span,
+                        f"gen_ai.completion.{i}.content",
+                        json.dumps(model_as_dict(content)),
+                    )
+
+        def _process_success_response(self, span, response_obj):
+            """Process successful response attributes"""
+            response_dict = model_as_dict(response_obj)
+            set_span_attribute(span, "gen_ai.response.id", response_dict.get("id"))
+            set_span_attribute(
+                span, "gen_ai.response.model", response_dict.get("model")
+            )
+
+            if response_dict.get("usage"):
+                self._process_response_usage(span, response_dict.get("usage"))
+
+            if response_dict.get("cache_creation_input_tokens"):
+                set_span_attribute(
+                    span,
+                    "gen_ai.usage.cache_creation_input_tokens",
+                    response_dict.get("cache_creation_input_tokens"),
+                )
+            if response_dict.get("cache_read_input_tokens"):
+                set_span_attribute(
+                    span,
+                    "gen_ai.usage.cache_read_input_tokens",
+                    response_dict.get("cache_read_input_tokens"),
+                )
+
+            if response_dict.get("choices"):
+                self._process_response_choices(span, response_dict.get("choices"))
+
+except ImportError as e:
+    logger.warning(f"LiteLLM callback unavailable: {e}")
+
+    # Create a no-op logger when LiteLLM is not available
+    class LaminarLiteLLMCallback:
+        """No-op logger when LiteLLM is not available"""
+
+        def __init__(self, **kwargs):
+            logger.warning(
+                "LiteLLM is not installed. Install with: pip install litellm"
+            )
+
+        def log_success_event(self, *args, **kwargs):
+            pass
+
+        def log_failure_event(self, *args, **kwargs):
+            pass
+
+        async def async_log_success_event(self, *args, **kwargs):
+            pass
+
+        async def async_log_failure_event(self, *args, **kwargs):
+            pass