langtrace-python-sdk 2.3.21__py3-none-any.whl → 2.3.27__py3-none-any.whl
This diff shows the changes between two publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- examples/dspy_example/optimizers/bootstrap_fewshot.py +89 -0
- examples/openai_example/chat_completion.py +19 -16
- langtrace_python_sdk/constants/instrumentation/common.py +1 -0
- langtrace_python_sdk/constants/instrumentation/litellm.py +18 -0
- langtrace_python_sdk/instrumentation/__init__.py +2 -0
- langtrace_python_sdk/instrumentation/dspy/patch.py +18 -8
- langtrace_python_sdk/instrumentation/litellm/__init__.py +5 -0
- langtrace_python_sdk/instrumentation/litellm/instrumentation.py +87 -0
- langtrace_python_sdk/instrumentation/litellm/patch.py +651 -0
- langtrace_python_sdk/instrumentation/litellm/types.py +170 -0
- langtrace_python_sdk/langtrace.py +10 -2
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/METADATA +15 -3
- {langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/RECORD +17 -11
- {langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/WHEEL +0 -0
- {langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/entry_points.txt +0 -0
- {langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/licenses/LICENSE +0 -0
examples/dspy_example/optimizers/bootstrap_fewshot.py
ADDED
@@ -0,0 +1,89 @@
+import dspy
+from dotenv import find_dotenv, load_dotenv
+from dspy.datasets import HotPotQA
+from dspy.teleprompt import BootstrapFewShot
+
+from langtrace_python_sdk import inject_additional_attributes, langtrace
+
+_ = load_dotenv(find_dotenv())
+
+langtrace.init()
+
+turbo = dspy.LM('openai/gpt-4o-mini')
+colbertv2_wiki17_abstracts = dspy.ColBERTv2(url='http://20.102.90.50:2017/wiki17_abstracts')
+
+dspy.settings.configure(lm=turbo, rm=colbertv2_wiki17_abstracts)
+
+
+# Load the dataset.
+dataset = HotPotQA(train_seed=1, train_size=20, eval_seed=2023, dev_size=50, test_size=0)
+
+# Tell DSPy that the 'question' field is the input. Any other fields are labels and/or metadata.
+trainset = [x.with_inputs('question') for x in dataset.train]
+devset = [x.with_inputs('question') for x in dataset.dev]
+
+
+class GenerateAnswer(dspy.Signature):
+    """Answer questions with short factoid answers."""
+
+    context = dspy.InputField(desc="may contain relevant facts")
+    question = dspy.InputField()
+    answer = dspy.OutputField(desc="often between 1 and 5 words")
+
+
+class RAG(dspy.Module):
+    def __init__(self, num_passages=3):
+        super().__init__()
+
+        self.retrieve = dspy.Retrieve(k=num_passages)
+        self.generate_answer = dspy.ChainOfThought(GenerateAnswer)
+
+    def forward(self, question):
+        context = self.retrieve(question).passages
+        prediction = self.generate_answer(context=context, question=question)
+        return dspy.Prediction(context=context, answer=prediction.answer)
+
+
+# Validation logic: check that the predicted answer is correct.
+# Also check that the retrieved context does actually contain that answer.
+def validate_context_and_answer(example, prediction, trace=None):
+    answer_em = dspy.evaluate.answer_exact_match(example, prediction)
+    answer_pm = dspy.evaluate.answer_passage_match(example, prediction)
+    return answer_em and answer_pm
+
+
+# Set up a basic optimizer, which will compile our RAG program.
+optimizer = BootstrapFewShot(metric=validate_context_and_answer)
+
+# Compile!
+compiled_rag = optimizer.compile(RAG(), trainset=trainset)
+
+# Ask any question you like to this simple RAG program.
+my_question = "Who was the hero of the movie peraanmai?"
+
+# Get the prediction. This contains `pred.context` and `pred.answer`.
+# pred = compiled_rag(my_question)
+pred = inject_additional_attributes(lambda: compiled_rag(my_question), {'experiment': 'experiment 6', 'description': 'trying additional stuff', 'run_id': 'run_1'})
+# compiled_rag.save('compiled_rag_v1.json')
+
+# Print the contexts and the answer.
+print(f"Question: {my_question}")
+print(f"Predicted Answer: {pred.answer}")
+print(f"Retrieved Contexts (truncated): {[c[:200] + '...' for c in pred.context]}")
+
+# print("Inspecting the history of the optimizer:")
+# turbo.inspect_history(n=1)
+
+from dspy.evaluate import Evaluate
+
+
+def validate_answer(example, pred, trace=None):
+    return True
+
+
+# Set up the evaluator, which can be used multiple times.
+evaluate = Evaluate(devset=devset, metric=validate_answer, num_threads=4, display_progress=True, display_table=0)
+
+
+# Evaluate our `optimized_cot` program.
+evaluate(compiled_rag)
examples/openai_example/chat_completion.py
CHANGED
@@ -9,19 +9,19 @@ from langtrace_python_sdk.utils.with_root_span import (

 _ = load_dotenv(find_dotenv())

-langtrace.init(
+langtrace.init()
 client = OpenAI()


 def api():
     response = client.chat.completions.create(
-        model="
+        model="o1-mini",
         messages=[
-            {"role": "system", "content": "Talk like a pirate"},
-            {"role": "user", "content": "
+            # {"role": "system", "content": "Talk like a pirate"},
+            {"role": "user", "content": "How many r's are in strawberry?"},
         ],
-        stream=True,
-
+        # stream=True,
+        stream=False,
     )
     return response

@@ -31,14 +31,17 @@ def chat_completion():
     response = api()
     # print(response)
     # Uncomment this for streaming
-    result = []
-    for chunk in response:
-
-
-
-
-
-
-
-    # print("".join(result))
+    # result = []
+    # for chunk in response:
+    #     if chunk.choices[0].delta.content is not None:
+    #         content = [
+    #             choice.delta.content if choice.delta and choice.delta.content else ""
+    #             for choice in chunk.choices
+    #         ]
+    #         result.append(content[0] if len(content) > 0 else "")
+
+    # # print("".join(result))
+    print(response)
     return response
+
+chat_completion()
langtrace_python_sdk/constants/instrumentation/litellm.py
ADDED
@@ -0,0 +1,18 @@
+APIS = {
+    "CHAT_COMPLETION": {
+        "METHOD": "chat.completions.create",
+        "ENDPOINT": "/chat/completions",
+    },
+    "IMAGES_GENERATION": {
+        "METHOD": "images.generate",
+        "ENDPOINT": "/images/generations",
+    },
+    "IMAGES_EDIT": {
+        "METHOD": "images.edit",
+        "ENDPOINT": "/images/edits",
+    },
+    "EMBEDDINGS_CREATE": {
+        "METHOD": "embeddings.create",
+        "ENDPOINT": "/embeddings",
+    },
+}
langtrace_python_sdk/instrumentation/__init__.py
CHANGED
@@ -19,6 +19,7 @@ from .vertexai import VertexAIInstrumentation
 from .gemini import GeminiInstrumentation
 from .mistral import MistralInstrumentation
 from .embedchain import EmbedchainInstrumentation
+from .litellm import LiteLLMInstrumentation

 __all__ = [
     "AnthropicInstrumentation",
@@ -31,6 +32,7 @@ __all__ = [
     "LangchainCommunityInstrumentation",
     "LangchainCoreInstrumentation",
     "LanggraphInstrumentation",
+    "LiteLLMInstrumentation",
     "LlamaindexInstrumentation",
     "OpenAIInstrumentation",
     "PineconeInstrumentation",
langtrace_python_sdk/instrumentation/dspy/patch.py
CHANGED
@@ -1,6 +1,19 @@
 import json
+import os
+
+import ujson
+from colorama import Fore
 from importlib_metadata import version as v
+from langtrace.trace_attributes import FrameworkSpanAttributes
+from opentelemetry import baggage
+from opentelemetry.trace import SpanKind
+from opentelemetry.trace.status import Status, StatusCode
+
 from langtrace_python_sdk.constants import LANGTRACE_SDK_NAME
+from langtrace_python_sdk.constants.instrumentation.common import (
+    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
+    SERVICE_PROVIDERS,
+)
 from langtrace_python_sdk.utils import set_span_attribute
 from langtrace_python_sdk.utils.llm import (
     get_extra_attributes,
@@ -9,14 +22,6 @@ from langtrace_python_sdk.utils.llm import (
     set_span_attributes,
 )
 from langtrace_python_sdk.utils.silently_fail import silently_fail
-from langtrace_python_sdk.constants.instrumentation.common import (
-    LANGTRACE_ADDITIONAL_SPAN_ATTRIBUTES_KEY,
-    SERVICE_PROVIDERS,
-)
-from opentelemetry import baggage
-from langtrace.trace_attributes import FrameworkSpanAttributes
-from opentelemetry.trace import SpanKind
-from opentelemetry.trace.status import Status, StatusCode


 def patch_bootstrapfewshot_optimizer(operation_name, version, tracer):
@@ -115,6 +120,8 @@ def patch_signature(operation_name, version, tracer):
             **get_extra_attributes(),
         }

+        trace_checkpoint = os.environ.get("TRACE_DSPY_CHECKPOINT", "true").lower()
+
         if instance.__class__.__name__:
             span_attributes["dspy.signature.name"] = instance.__class__.__name__
             span_attributes["dspy.signature"] = str(instance.signature)
@@ -136,6 +143,9 @@ def patch_signature(operation_name, version, tracer):
                    "dspy.signature.result",
                    json.dumps(result.toDict()),
                )
+            if trace_checkpoint == "true":
+                print(Fore.RED + "Note: DSPy checkpoint tracing is enabled in Langtrace. To disable it, set the env var, TRACE_DSPY_CHECKPOINT to false" + Fore.RESET)
+                set_span_attribute(span, "dspy.checkpoint", ujson.dumps(instance.dump_state(False), indent=2))
            span.set_status(Status(StatusCode.OK))

        span.end()
langtrace_python_sdk/instrumentation/litellm/instrumentation.py
ADDED
@@ -0,0 +1,87 @@
+"""
+Copyright (c) 2024 Scale3 Labs
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from typing import Collection, Optional, Any
+import importlib.metadata
+import logging
+
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.trace import get_tracer, TracerProvider
+from wrapt import wrap_function_wrapper
+
+from langtrace_python_sdk.instrumentation.litellm.patch import (
+    async_chat_completions_create,
+    async_embeddings_create,
+    async_images_generate,
+    chat_completions_create,
+    embeddings_create,
+    images_generate,
+)
+
+logging.basicConfig(level=logging.FATAL)
+
+
+class LiteLLMInstrumentation(BaseInstrumentor):  # type: ignore
+
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return ["litellm >= 1.48.0", "trace-attributes >= 4.0.5"]
+
+    def _instrument(self, **kwargs: Any) -> None:
+        tracer_provider: Optional[TracerProvider] = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, "", tracer_provider)
+        version: str = importlib.metadata.version("openai")
+
+        wrap_function_wrapper(
+            "litellm",
+            "completion",
+            chat_completions_create(version, tracer),
+        )
+
+        wrap_function_wrapper(
+            "litellm",
+            "text_completion",
+            chat_completions_create(version, tracer),
+        )
+
+        wrap_function_wrapper(
+            "litellm.main",
+            "acompletion",
+            async_chat_completions_create(version, tracer),
+        )
+
+        wrap_function_wrapper(
+            "litellm.main",
+            "image_generation",
+            images_generate(version, tracer),
+        )
+
+        wrap_function_wrapper(
+            "litellm.main",
+            "aimage_generation",
+            async_images_generate(version, tracer),
+        )
+
+        wrap_function_wrapper(
+            "litellm.main",
+            "embedding",
+            embeddings_create(version, tracer),
+        )
+
+        wrap_function_wrapper(
+            "litellm.main",
+            "aembedding",
+            async_embeddings_create(version, tracer),
+        )
+
+    def _uninstrument(self, **kwargs: Any) -> None:
+        pass
langtrace_python_sdk/instrumentation/litellm/patch.py
ADDED
@@ -0,0 +1,651 @@
+import json
+from typing import Any, Dict, List, Optional, Callable, Awaitable, Union
+from langtrace.trace_attributes import (
+    LLMSpanAttributes,
+    SpanAttributes,
+)
+from langtrace_python_sdk.utils import set_span_attribute
+from langtrace_python_sdk.utils.silently_fail import silently_fail
+from opentelemetry import trace
+from opentelemetry.trace import SpanKind, Tracer, Span
+from opentelemetry.trace.status import Status, StatusCode
+from opentelemetry.trace.propagation import set_span_in_context
+from langtrace_python_sdk.constants.instrumentation.common import (
+    SERVICE_PROVIDERS,
+)
+from langtrace_python_sdk.constants.instrumentation.litellm import APIS
+from langtrace_python_sdk.utils.llm import (
+    calculate_prompt_tokens,
+    get_base_url,
+    get_extra_attributes,
+    get_langtrace_attributes,
+    get_llm_request_attributes,
+    get_span_name,
+    get_tool_calls,
+    is_streaming,
+    set_event_completion,
+    StreamWrapper,
+    set_span_attributes,
+)
+from langtrace_python_sdk.types import NOT_GIVEN
+
+from langtrace_python_sdk.instrumentation.openai.types import (
+    ImagesGenerateKwargs,
+    ChatCompletionsCreateKwargs,
+    EmbeddingsCreateKwargs,
+    ImagesEditKwargs,
+    ResultType,
+    ContentItem,
+)
+
+
+def filter_valid_attributes(attributes):
+    """Filter attributes where value is not None, not an empty string."""
+    return {
+        key: value
+        for key, value in attributes.items()
+        if value is not None and value != ""
+    }
+
+
+def images_generate(version: str, tracer: Tracer) -> Callable:
+    """
+    Wrap the `generate` method of the `Images` class to trace it.
+    """
+
+    def traced_method(
+        wrapped: Callable, instance: Any, args: List[Any], kwargs: ImagesGenerateKwargs
+    ) -> Any:
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, operation_name="images_generate"),
+            SpanAttributes.LLM_URL: "not available",
+            SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        with tracer.start_as_current_span(
+            name=get_span_name(APIS["IMAGES_GENERATION"]["METHOD"]),
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        ) as span:
+            set_span_attributes(span, attributes)
+            try:
+                # Attempt to call the original method
+                result = wrapped(*args, **kwargs)
+                if not is_streaming(kwargs):
+                    data: Optional[ContentItem] = (
+                        result.data[0]
+                        if hasattr(result, "data") and len(result.data) > 0
+                        else None
+                    )
+                    response = [
+                        {
+                            "role": "assistant",
+                            "content": {
+                                "url": getattr(data, "url", ""),
+                                "revised_prompt": getattr(data, "revised_prompt", ""),
+                            },
+                        }
+                    ]
+                    set_event_completion(span, response)
+
+                span.set_status(StatusCode.OK)
+                return result
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def async_images_generate(version: str, tracer: Tracer) -> Callable:
+    """
+    Wrap the `generate` method of the `Images` class to trace it.
+    """
+
+    async def traced_method(
+        wrapped: Callable, instance: Any, args: List[Any], kwargs: ImagesGenerateKwargs
+    ) -> Awaitable[Any]:
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, operation_name="images_generate"),
+            SpanAttributes.LLM_URL: "not available",
+            SpanAttributes.LLM_PATH: APIS["IMAGES_GENERATION"]["ENDPOINT"],
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        with tracer.start_as_current_span(
+            name=get_span_name(APIS["IMAGES_GENERATION"]["METHOD"]),
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        ) as span:
+            set_span_attributes(span, attributes)
+            try:
+                # Attempt to call the original method
+                result = await wrapped(*args, **kwargs)
+                if not is_streaming(kwargs):
+                    data: Optional[ContentItem] = (
+                        result.data[0]
+                        if hasattr(result, "data") and len(result.data) > 0
+                        else None
+                    )
+                    response = [
+                        {
+                            "role": "assistant",
+                            "content": {
+                                "url": getattr(data, "url", ""),
+                                "revised_prompt": getattr(data, "revised_prompt", ""),
+                            },
+                        }
+                    ]
+                    set_event_completion(span, response)
+
+                span.set_status(StatusCode.OK)
+                return result
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def images_edit(version: str, tracer: Tracer) -> Callable:
+    """
+    Wrap the `edit` method of the `Images` class to trace it.
+    """
+
+    def traced_method(
+        wrapped: Callable, instance: Any, args: List[Any], kwargs: ImagesEditKwargs
+    ) -> Any:
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, operation_name="images_edit"),
+            SpanAttributes.LLM_URL: "not available",
+            SpanAttributes.LLM_PATH: APIS["IMAGES_EDIT"]["ENDPOINT"],
+            SpanAttributes.LLM_RESPONSE_FORMAT: kwargs.get("response_format"),
+            SpanAttributes.LLM_IMAGE_SIZE: kwargs.get("size"),
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        with tracer.start_as_current_span(
+            name=APIS["IMAGES_EDIT"]["METHOD"],
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        ) as span:
+            set_span_attributes(span, attributes)
+            try:
+                # Attempt to call the original method
+                result = wrapped(*args, **kwargs)
+
+                response = []
+                # Parse each image object
+                for each_data in result.data:
+                    response.append(
+                        {
+                            "role": "assistant",
+                            "content": {
+                                "url": each_data.url,
+                                "revised_prompt": each_data.revised_prompt,
+                                "base64": each_data.b64_json,
+                            },
+                        }
+                    )
+
+                set_event_completion(span, response)
+
+                span.set_status(StatusCode.OK)
+                return result
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def chat_completions_create(version: str, tracer: Tracer) -> Callable:
+    """Wrap the `create` method of the `ChatCompletion` class to trace it."""
+
+    def traced_method(
+        wrapped: Callable,
+        instance: Any,
+        args: List[Any],
+        kwargs: ChatCompletionsCreateKwargs,
+    ) -> Any:
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+        if "perplexity" in get_base_url(instance):
+            service_provider = SERVICE_PROVIDERS["PPLX"]
+        elif "azure" in get_base_url(instance):
+            service_provider = SERVICE_PROVIDERS["AZURE"]
+        elif "groq" in get_base_url(instance):
+            service_provider = SERVICE_PROVIDERS["GROQ"]
+        llm_prompts = []
+        for item in kwargs.get("messages", []):
+            tools = get_tool_calls(item)
+            if tools is not None:
+                tool_calls = []
+                for tool_call in tools:
+                    tool_call_dict = {
+                        "id": getattr(tool_call, "id", ""),
+                        "type": getattr(tool_call, "type", ""),
+                    }
+                    if hasattr(tool_call, "function"):
+                        tool_call_dict["function"] = {
+                            "name": getattr(tool_call.function, "name", ""),
+                            "arguments": getattr(tool_call.function, "arguments", ""),
+                        }
+                    tool_calls.append(tool_call_dict)
+                llm_prompts.append(tool_calls)
+            else:
+                llm_prompts.append(item)
+
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, prompts=llm_prompts),
+            SpanAttributes.LLM_URL: "not available",
+            SpanAttributes.LLM_PATH: APIS["CHAT_COMPLETION"]["ENDPOINT"],
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        span = tracer.start_span(
+            name=get_span_name(APIS["CHAT_COMPLETION"]["METHOD"]),
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        )
+        _set_input_attributes(span, kwargs, attributes)
+
+        try:
+            result = wrapped(*args, **kwargs)
+            if is_streaming(kwargs):
+                prompt_tokens = 0
+                for message in kwargs.get("messages", {}):
+                    prompt_tokens += calculate_prompt_tokens(
+                        json.dumps(str(message)), kwargs.get("model")
+                    )
+                functions = kwargs.get("functions")
+                if functions is not None and functions != NOT_GIVEN:
+                    for function in functions:
+                        prompt_tokens += calculate_prompt_tokens(
+                            json.dumps(function), kwargs.get("model")
+                        )
+
+                return StreamWrapper(
+                    result,
+                    span,
+                    prompt_tokens,
+                    function_call=kwargs.get("functions") is not None,
+                    tool_calls=kwargs.get("tools") is not None,
+                )
+            else:
+                _set_response_attributes(span, result)
+                span.set_status(StatusCode.OK)
+                span.end()
+                return result
+
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
+
+
+def async_chat_completions_create(version: str, tracer: Tracer) -> Callable:
+    """Wrap the `create` method of the `ChatCompletion` class to trace it."""
+
+    async def traced_method(
+        wrapped: Callable,
+        instance: Any,
+        args: List[Any],
+        kwargs: ChatCompletionsCreateKwargs,
+    ) -> Awaitable[Any]:
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+        if "perplexity" in get_base_url(instance):
+            service_provider = SERVICE_PROVIDERS["PPLX"]
+        elif "azure" in get_base_url(instance):
+            service_provider = SERVICE_PROVIDERS["AZURE"]
+        llm_prompts = []
+        for item in kwargs.get("messages", []):
+            tools = get_tool_calls(item)
+            if tools is not None:
+                tool_calls = []
+                for tool_call in tools:
+                    tool_call_dict = {
+                        "id": getattr(tool_call, "id", ""),
+                        "type": getattr(tool_call, "type", ""),
+                    }
+                    if hasattr(tool_call, "function"):
+                        tool_call_dict["function"] = {
+                            "name": getattr(tool_call.function, "name", ""),
+                            "arguments": getattr(tool_call.function, "arguments", ""),
+                        }
+                    tool_calls.append(json.dumps(tool_call_dict))
+                llm_prompts.append(tool_calls)
+            else:
+                llm_prompts.append(item)
+
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, prompts=llm_prompts),
+            SpanAttributes.LLM_URL: "not available",
+            SpanAttributes.LLM_PATH: APIS["CHAT_COMPLETION"]["ENDPOINT"],
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        span = tracer.start_span(
+            name=get_span_name(APIS["CHAT_COMPLETION"]["METHOD"]),
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        )
+        _set_input_attributes(span, kwargs, attributes)
+
+        try:
+            result = await wrapped(*args, **kwargs)
+            if is_streaming(kwargs):
+                prompt_tokens = 0
+                for message in kwargs.get("messages", {}):
+                    prompt_tokens += calculate_prompt_tokens(
+                        json.dumps((str(message))), kwargs.get("model")
+                    )
+
+                functions = kwargs.get("functions")
+                if functions is not None and functions != NOT_GIVEN:
+                    for function in functions:
+                        prompt_tokens += calculate_prompt_tokens(
+                            json.dumps(function), kwargs.get("model")
+                        )
+
+                return StreamWrapper(
+                    result,
+                    span,
+                    prompt_tokens,
+                    function_call=kwargs.get("functions") is not None,
+                    tool_calls=kwargs.get("tools") is not None,
+                )  # type: ignore
+            else:
+                _set_response_attributes(span, result)
+                span.set_status(StatusCode.OK)
+                span.end()
+                return result
+
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
+
+
+def embeddings_create(version: str, tracer: Tracer) -> Callable:
+    """
+    Wrap the `create` method of the `Embeddings` class to trace it.
+    """
+
+    def traced_method(
+        wrapped: Callable,
+        instance: Any,
+        args: List[Any],
+        kwargs: EmbeddingsCreateKwargs,
+    ) -> Any:
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, operation_name="embed"),
+            SpanAttributes.LLM_URL: "not available",
+            SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
+            SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        encoding_format = kwargs.get("encoding_format")
+        if encoding_format is not None:
+            if not isinstance(encoding_format, list):
+                encoding_format = [encoding_format]
+            span_attributes[SpanAttributes.LLM_REQUEST_ENCODING_FORMATS] = (
+                encoding_format
+            )
+
+        if kwargs.get("input") is not None:
+            span_attributes[SpanAttributes.LLM_REQUEST_EMBEDDING_INPUTS] = json.dumps(
+                [kwargs.get("input", "")]
+            )
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        with tracer.start_as_current_span(
+            name=get_span_name(APIS["EMBEDDINGS_CREATE"]["METHOD"]),
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        ) as span:
+
+            set_span_attributes(span, attributes)
+            try:
+                # Attempt to call the original method
+                result = wrapped(*args, **kwargs)
+                span.set_status(StatusCode.OK)
+                return result
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def async_embeddings_create(version: str, tracer: Tracer) -> Callable:
+    """
+    Wrap the `create` method of the `Embeddings` class to trace it.
+    """
+
+    async def traced_method(
+        wrapped: Callable,
+        instance: Any,
+        args: List[Any],
+        kwargs: EmbeddingsCreateKwargs,
+    ) -> Awaitable[Any]:
+
+        service_provider = SERVICE_PROVIDERS["LITELLM"]
+
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
+            **get_llm_request_attributes(kwargs, operation_name="embed"),
+            SpanAttributes.LLM_PATH: APIS["EMBEDDINGS_CREATE"]["ENDPOINT"],
+            SpanAttributes.LLM_REQUEST_DIMENSIONS: kwargs.get("dimensions"),
+            **get_extra_attributes(),  # type: ignore
+        }
+
+        attributes = LLMSpanAttributes(**filter_valid_attributes(span_attributes))
+
+        encoding_format = kwargs.get("encoding_format")
+        if encoding_format is not None:
+            if not isinstance(encoding_format, list):
+                encoding_format = [encoding_format]
+            span_attributes[SpanAttributes.LLM_REQUEST_ENCODING_FORMATS] = (
+                encoding_format
+            )
+
+        if kwargs.get("input") is not None:
+            span_attributes[SpanAttributes.LLM_REQUEST_EMBEDDING_INPUTS] = json.dumps(
+                [kwargs.get("input", "")]
+            )
+
+        with tracer.start_as_current_span(
+            name=get_span_name(APIS["EMBEDDINGS_CREATE"]["METHOD"]),
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        ) as span:
+
+            set_span_attributes(span, attributes)
+            try:
+                # Attempt to call the original method
+                result = await wrapped(*args, **kwargs)
+                span.set_status(StatusCode.OK)
+                return result
+            except Exception as err:
+                # Record the exception in the span
+                span.record_exception(err)
+
+                # Set the span status to indicate an error
+                span.set_status(Status(StatusCode.ERROR, str(err)))
+
+                # Reraise the exception to ensure it's not swallowed
+                raise
+
+    return traced_method
+
+
+def extract_content(choice: Any) -> Union[str, List[Dict[str, Any]], Dict[str, Any]]:
+    # Check if choice.message exists and has a content attribute
+    if (
+        hasattr(choice, "message")
+        and hasattr(choice.message, "content")
+        and choice.message.content is not None
+    ):
+        return choice.message.content
+
+    # Check if choice.message has tool_calls and extract information accordingly
+    elif (
+        hasattr(choice, "message")
+        and hasattr(choice.message, "tool_calls")
+        and choice.message.tool_calls is not None
+    ):
+        result = [
+            {
+                "id": tool_call.id,
+                "type": tool_call.type,
+                "function": {
+                    "name": tool_call.function.name,
+                    "arguments": tool_call.function.arguments,
+                },
+            }
+            for tool_call in choice.message.tool_calls
+        ]
+        return result
+
+    # Check if choice.message has a function_call and extract information accordingly
+    elif (
+        hasattr(choice, "message")
+        and hasattr(choice.message, "function_call")
+        and choice.message.function_call is not None
+    ):
+        return {
+            "name": choice.message.function_call.name,
+            "arguments": choice.message.function_call.arguments,
+        }
+
+    # Return an empty string if none of the above conditions are met
+    else:
+        return ""
+
+
+@silently_fail
+def _set_input_attributes(
+    span: Span, kwargs: ChatCompletionsCreateKwargs, attributes: LLMSpanAttributes
+) -> None:
+    tools = []
+    for field, value in attributes.model_dump(by_alias=True).items():
+        set_span_attribute(span, field, value)
+    functions = kwargs.get("functions")
+    if functions is not None and functions != NOT_GIVEN:
+        for function in functions:
+            tools.append(json.dumps({"type": "function", "function": function}))
+
+    if kwargs.get("tools") is not None and kwargs.get("tools") != NOT_GIVEN:
+        tools.append(json.dumps(kwargs.get("tools")))
+
+    if tools:
+        set_span_attribute(span, SpanAttributes.LLM_TOOLS, json.dumps(tools))
+
+
+@silently_fail
+def _set_response_attributes(span: Span, result: ResultType) -> None:
+    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, result.model)
+    if hasattr(result, "choices") and result.choices is not None:
+        responses = [
+            {
+                "role": (
+                    choice.message.role
+                    if choice.message and choice.message.role
+                    else "assistant"
+                ),
+                "content": extract_content(choice),
+                **(
+                    {"content_filter_results": choice.content_filter_results}
+                    if hasattr(choice, "content_filter_results")
+                    else {}
+                ),
+            }
+            for choice in result.choices
+        ]
+        set_event_completion(span, responses)
+
+    if (
+        hasattr(result, "system_fingerprint")
+        and result.system_fingerprint is not None
+        and result.system_fingerprint != NOT_GIVEN
+    ):
+        set_span_attribute(
+            span,
+            SpanAttributes.LLM_SYSTEM_FINGERPRINT,
+            result.system_fingerprint,
+        )
+    # Get the usage
+    if hasattr(result, "usage") and result.usage is not None:
+        usage = result.usage
+        if usage is not None:
+            set_span_attribute(
+                span,
+                SpanAttributes.LLM_USAGE_PROMPT_TOKENS,
+                result.usage.prompt_tokens,
+            )
+            set_span_attribute(
+                span,
+                SpanAttributes.LLM_USAGE_COMPLETION_TOKENS,
+                result.usage.completion_tokens,
+            )
+            set_span_attribute(
+                span,
+                SpanAttributes.LLM_USAGE_TOTAL_TOKENS,
+                result.usage.total_tokens,
+            )
langtrace_python_sdk/instrumentation/litellm/types.py
ADDED
@@ -0,0 +1,170 @@
+"""
+Copyright (c) 2024 Scale3 Labs
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from typing import Any, Dict, List, Union, Optional, TypedDict
+
+
+class ContentItem:
+    url: str
+    revised_prompt: str
+    base64: Optional[str]
+
+    def __init__(
+        self,
+        url: str,
+        revised_prompt: str,
+        base64: Optional[str],
+    ):
+        self.url = url
+        self.revised_prompt = revised_prompt
+        self.base64 = base64
+
+
+class ToolFunction:
+    name: str
+    arguments: str
+
+    def __init__(
+        self,
+        name: str,
+        arguments: str,
+    ):
+        self.name = name
+        self.arguments = arguments
+
+
+class ToolCall:
+    id: str
+    type: str
+    function: ToolFunction
+
+    def __init__(
+        self,
+        id: str,
+        type: str,
+        function: ToolFunction,
+    ):
+        self.id = id
+        self.type = type
+        self.function = function
+
+
+class Message:
+    role: str
+    content: Union[str, List[ContentItem], Dict[str, Any]]
+    tool_calls: Optional[List[ToolCall]]
+
+    def __init__(
+        self,
+        role: str,
+        content: Union[str, List[ContentItem], Dict[str, Any]],
+        content_filter_results: Optional[Any],
+    ):
+        self.role = role
+        self.content = content
+        self.content_filter_results = content_filter_results
+
+
+class Usage:
+    prompt_tokens: int
+    completion_tokens: int
+    total_tokens: int
+
+    def __init__(
+        self,
+        prompt_tokens: int,
+        completion_tokens: int,
+        total_tokens: int,
+    ):
+        self.prompt_tokens = prompt_tokens
+        self.completion_tokens = completion_tokens
+        self.total_tokens = total_tokens
+
+
+class Choice:
+    message: Message
+    content_filter_results: Optional[Any]
+
+    def __init__(
+        self,
+        message: Message,
+        content_filter_results: Optional[Any],
+    ):
+        self.message = message
+        self.content_filter_results = content_filter_results
+
+
+class ResultType:
+    model: Optional[str]
+    content: List[ContentItem]
+    system_fingerprint: Optional[str]
+    usage: Optional[Usage]
+    choices: Optional[List[Choice]]
+    response_format: Optional[str]
+    size: Optional[str]
+    encoding_format: Optional[str]
+
+    def __init__(
+        self,
+        model: Optional[str],
+        role: Optional[str],
+        content: List[ContentItem],
+        system_fingerprint: Optional[str],
+        usage: Optional[Usage],
+        functions: Optional[List[ToolCall]],
+        tools: Optional[List[ToolCall]],
+        choices: Optional[List[Choice]],
+        response_format: Optional[str],
+        size: Optional[str],
+        encoding_format: Optional[str],
+    ):
+        self.model = model
+        self.role = role
+        self.content = content
+        self.system_fingerprint = system_fingerprint
+        self.usage = usage
+        self.functions = functions
+        self.tools = tools
+        self.choices = choices
+        self.response_format = response_format
+        self.size = size
+        self.encoding_format = encoding_format
+
+
+class ImagesGenerateKwargs(TypedDict, total=False):
+    operation_name: str
+    model: Optional[str]
+    messages: Optional[List[Message]]
+    functions: Optional[List[ToolCall]]
+    tools: Optional[List[ToolCall]]
+    response_format: Optional[str]
+    size: Optional[str]
+    encoding_format: Optional[str]
+
+
+class ImagesEditKwargs(TypedDict, total=False):
+    response_format: Optional[str]
+    size: Optional[str]
+
+
+class ChatCompletionsCreateKwargs(TypedDict, total=False):
+    model: Optional[str]
+    messages: List[Message]
+    functions: Optional[List[ToolCall]]
+    tools: Optional[List[ToolCall]]
+
+
+class EmbeddingsCreateKwargs(TypedDict, total=False):
+    dimensions: Optional[str]
+    input: Union[str, List[str], None]
+    encoding_format: Optional[Union[List[str], str]]
langtrace_python_sdk/langtrace.py
CHANGED
@@ -46,6 +46,7 @@ from langtrace_python_sdk.instrumentation import (
     LangchainCoreInstrumentation,
     LangchainInstrumentation,
     LanggraphInstrumentation,
+    LiteLLMInstrumentation,
     LlamaindexInstrumentation,
     MistralInstrumentation,
     OllamaInstrumentor,
@@ -137,6 +138,7 @@ def init(
         "langchain-core": LangchainCoreInstrumentation(),
         "langchain-community": LangchainCommunityInstrumentation(),
         "langgraph": LanggraphInstrumentation(),
+        "litellm": LiteLLMInstrumentation(),
         "anthropic": AnthropicInstrumentation(),
         "cohere": CohereInstrumentation(),
         "weaviate-client": WeaviateInstrumentation(),
@@ -220,7 +222,10 @@ def init_instrumentations(
     if disable_instrumentations is None:
         for name, v in all_instrumentations.items():
             if is_package_installed(name):
-
+                try:
+                    v.instrument()
+                except Exception as e:
+                    print(f"Skipping {name} due to error while instrumenting: {e}")

     else:

@@ -242,4 +247,7 @@ def init_instrumentations(

     for name, v in filtered_dict.items():
         if is_package_installed(name):
-
+            try:
+                v.instrument()
+            except Exception as e:
+                print(f"Skipping {name} due to error while instrumenting: {e}")
langtrace_python_sdk/version.py
CHANGED
@@ -1 +1 @@
-__version__ = "2.3.21"
+__version__ = "2.3.27"
{langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/METADATA
RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: langtrace-python-sdk
-Version: 2.3.21
+Version: 2.3.27
 Summary: Python SDK for LangTrace
 Project-URL: Homepage, https://github.com/Scale3-Labs/langtrace-python-sdk
 Author-email: Scale3 Labs <engineering@scale3labs.com>
@@ -23,6 +23,7 @@ Requires-Dist: sqlalchemy
 Requires-Dist: tiktoken>=0.1.1
 Requires-Dist: trace-attributes==7.0.4
 Requires-Dist: transformers>=4.11.3
+Requires-Dist: ujson>=5.10.0
 Provides-Extra: dev
 Requires-Dist: anthropic; extra == 'dev'
 Requires-Dist: chromadb; extra == 'dev'
@@ -34,12 +35,14 @@ Requires-Dist: groq; extra == 'dev'
 Requires-Dist: langchain; extra == 'dev'
 Requires-Dist: langchain-community; extra == 'dev'
 Requires-Dist: langchain-openai; extra == 'dev'
+Requires-Dist: litellm==1.48.7; extra == 'dev'
 Requires-Dist: mistralai; extra == 'dev'
 Requires-Dist: ollama; extra == 'dev'
-Requires-Dist: openai==1.
+Requires-Dist: openai==1.45.0; extra == 'dev'
 Requires-Dist: pinecone-client; extra == 'dev'
 Requires-Dist: python-dotenv; extra == 'dev'
 Requires-Dist: qdrant-client; extra == 'dev'
+Requires-Dist: typing-extensions; extra == 'dev'
 Requires-Dist: weaviate-client; extra == 'dev'
 Provides-Extra: test
 Requires-Dist: pytest; extra == 'test'
@@ -287,6 +290,14 @@ By default, prompt and completion data are captured. If you would like to opt ou

 `TRACE_PROMPT_COMPLETION_DATA=false`

+### Enable/Disable checkpoint tracing for DSPy
+
+By default, checkpoints are traced for DSPy pipelines. If you would like to disable it, set the following env var,
+
+`TRACE_DSPY_CHECKPOINT=false`
+
+Note: Checkpoint tracing will increase the latency of executions as the state is serialized. Please disable it in production.
+
 ## Supported integrations

 Langtrace automatically captures traces from the following vendors:
@@ -302,8 +313,9 @@ Langtrace automatically captures traces from the following vendors:
 | Gemini | LLM | :x: | :white_check_mark: |
 | Mistral | LLM | :x: | :white_check_mark: |
 | Langchain | Framework | :x: | :white_check_mark: |
-| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
 | Langgraph | Framework | :x: | :white_check_mark: |
+| LlamaIndex | Framework | :white_check_mark: | :white_check_mark: |
+| LiteLLM | Framework | :x: | :white_check_mark: |
 | DSPy | Framework | :x: | :white_check_mark: |
 | CrewAI | Framework | :x: | :white_check_mark: |
 | Ollama | Framework | :x: | :white_check_mark: |
{langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/RECORD
RENAMED
@@ -40,6 +40,7 @@ examples/dspy_example/math_problems_cot_parallel.py,sha256=5clw-IIVA0mWm0N0xWNDM
 examples/dspy_example/program_of_thought_basic.py,sha256=oEbtJdeKENMUbex25-zyStWwurRWW6OdP0KDs-jUkko,984
 examples/dspy_example/quiz_gen.py,sha256=OyGhepeX8meKOtLdmlYUjMD2ECk-ZQuQXUZif1hFQY4,3371
 examples/dspy_example/react.py,sha256=APAnHqgy9w-qY5jnPD_WbBx6bwo9C-DhPnUuhL-t7sg,1376
+examples/dspy_example/optimizers/bootstrap_fewshot.py,sha256=IxJJIaPKowP0-iZSuviKQnhc0bLj0_46cO13O9vLAlc,3135
 examples/embedchain_example/simple.py,sha256=1lwnsh5wVjGjQ18OinID6aJ_itR-x0TOngtNU1E-Emc,373
 examples/fastapi_example/__init__.py,sha256=INIfvJP7zC_KkJCtulS1qbh61-MJTPAHnzAgzeKi0yU,87
 examples/fastapi_example/basic_route.py,sha256=_IRXjkOtJQ-bTIGa1WbvUF_2LF4bjghjyXt4YrHaRvw,1170
@@ -70,7 +71,7 @@ examples/ollama_example/basic.py,sha256=EPbsigOF4xBDBgLgAD0EzPo737ycVm7aXZr7F5Xt
 examples/openai_example/__init__.py,sha256=6faH7wTegSozKmS89sd1Tgv8AcEH0GfKkC7YaBWA8tg,849
 examples/openai_example/async_tool_calling_nonstreaming.py,sha256=H1-CrNfNDfqAkB5wEipITXlW2OsYL7XD5uQb6k3C6ps,3865
 examples/openai_example/async_tool_calling_streaming.py,sha256=LaSKmn_Unv55eTHXYdEmKjo39eNuB3ASOBV-m8U1HfU,7136
-examples/openai_example/chat_completion.py,sha256=
+examples/openai_example/chat_completion.py,sha256=lOp5BK-LXj0g1ErP3Ry6moUVMDkpbF-f6S4uqwNbhCo,1217
 examples/openai_example/chat_completion_tool_choice.py,sha256=rkOjbFnIJ5hWWHWg-aTSek41UN2PBfufGpdaFhkWYj8,2356
 examples/openai_example/embeddings_create.py,sha256=kcOZpl5nhHo_NC-3n2yKX5W8mAzNfut43mSy1BmQJUI,555
 examples/openai_example/function_calling.py,sha256=zz-JdCcpP7uCXG21EYXF1Y39IKj6gYt2fOP5N_ywpnc,2338
@@ -95,18 +96,19 @@ examples/vertexai_example/main.py,sha256=gndId5X5ksD-ycxnAWMdEqIDbLc3kz5Vt8vm4YP
 examples/weaviate_example/__init__.py,sha256=8JMDBsRSEV10HfTd-YC7xb4txBjD3la56snk-Bbg2Kw,618
 examples/weaviate_example/query_text.py,sha256=wPHQTc_58kPoKTZMygVjTj-2ZcdrIuaausJfMxNQnQc,127162
 langtrace_python_sdk/__init__.py,sha256=VZM6i71NR7pBQK6XvJWRelknuTYUhqwqE7PlicKa5Wg,1166
-langtrace_python_sdk/langtrace.py,sha256=
-langtrace_python_sdk/version.py,sha256=
+langtrace_python_sdk/langtrace.py,sha256=z4rE-TotlUVv-4o9y9xdv9oIkl1DuWRtsgWN8Arr6LI,9232
+langtrace_python_sdk/version.py,sha256=YVUif1pItm5OFXaUCTRs90Q3yab7wNGL3Bo8WTr03vk,23
 langtrace_python_sdk/constants/__init__.py,sha256=3CNYkWMdd1DrkGqzLUgNZXjdAlM6UFMlf_F-odAToyc,146
 langtrace_python_sdk/constants/exporter/langtrace_exporter.py,sha256=5MNjnAOg-4am78J3gVMH6FSwq5N8TOj72ugkhsw4vi0,46
 langtrace_python_sdk/constants/instrumentation/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/constants/instrumentation/anthropic.py,sha256=YX3llt3zwDY6XrYk3CB8WEVqgrzRXEw_ffyk56JoF3k,126
 langtrace_python_sdk/constants/instrumentation/chroma.py,sha256=hiPGYdHS0Yj4Kh3eaYBbuCAl_swqIygu80yFqkOgdak,955
 langtrace_python_sdk/constants/instrumentation/cohere.py,sha256=tf9sDfb5K3qOAHChEE5o8eYWPZ1io58VsOjZDCZPxfw,577
-langtrace_python_sdk/constants/instrumentation/common.py,sha256=
+langtrace_python_sdk/constants/instrumentation/common.py,sha256=yqSheP9Yx_otzrau3KgdMSNHMvBpWzt2ahifoDTbLCg,1045
 langtrace_python_sdk/constants/instrumentation/embedchain.py,sha256=HodCJvaFjILoOG50OwFObxfVxt_8VUaIAIqvgoN3tzo,278
 langtrace_python_sdk/constants/instrumentation/gemini.py,sha256=UAmfgg9FM7uNeOCdPfWlir6OIH-8BoxFGPRpdBd9ZZs,358
 langtrace_python_sdk/constants/instrumentation/groq.py,sha256=VFXmIl4aqGY_fS0PAmjPj_Qm7Tibxbx7Ur_e7rQpqXc,134
+langtrace_python_sdk/constants/instrumentation/litellm.py,sha256=bMAlpY2scFe6Lql0Nl7euGNSO9QEV5Uzne12hnw3mSE,449
 langtrace_python_sdk/constants/instrumentation/mistral.py,sha256=9PlmcC5P5_BHJ-zsX1xekht6rSm7arTin58HAfdYvLk,730
 langtrace_python_sdk/constants/instrumentation/ollama.py,sha256=H_-S0xjqRsi5qSp7mAlK7Y9NlQ3BqOkG6ASogqqgdJY,212
 langtrace_python_sdk/constants/instrumentation/openai.py,sha256=uEOH5UXapU2DSf2AdgXTRhhJEHGWXUNFkUGD5QafflM,1164
@@ -117,7 +119,7 @@ langtrace_python_sdk/constants/instrumentation/weaviate.py,sha256=gtv-JBxvNGClEM
 langtrace_python_sdk/extensions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 langtrace_python_sdk/extensions/langtrace_exporter.py,sha256=UFupNL03zklVd5penpsfXjbWSb5qB39mEv2BY2wczSs,6307
 langtrace_python_sdk/extensions/langtrace_filesystem.py,sha256=34fZutG28EJ66l67OvTGsydAH3ZpXgikdE7hVLqBpG4,7863
-langtrace_python_sdk/instrumentation/__init__.py,sha256=
+langtrace_python_sdk/instrumentation/__init__.py,sha256=U2uQxrczJzPxZUFaRniN2iEK5ujRk7QadG7iM0sLDEc,1696
 langtrace_python_sdk/instrumentation/anthropic/__init__.py,sha256=donrurJAGYlxrSRA3BIf76jGeUcAx9Tq8CVpah68S0Y,101
 langtrace_python_sdk/instrumentation/anthropic/instrumentation.py,sha256=ndXdruI0BG7n75rsuEpKjfzePxrZxg40gZ39ONmD_v4,1845
 langtrace_python_sdk/instrumentation/anthropic/patch.py,sha256=ztPN4VZujoxYOKhTbFnup7Ibms9NAzYCPAJY43NUgKw,4935
@@ -136,7 +138,7 @@ langtrace_python_sdk/instrumentation/crewai/instrumentation.py,sha256=5Umzq8zjEn
 langtrace_python_sdk/instrumentation/crewai/patch.py,sha256=C2OKKPC-pzfzZWxPc74kHdYsKTX9yRhOgVY47WY9KN8,9109
 langtrace_python_sdk/instrumentation/dspy/__init__.py,sha256=tM1srfi_QgyCzrde4izojMrRq2Wm7Dj5QUvVQXIJzkk,84
 langtrace_python_sdk/instrumentation/dspy/instrumentation.py,sha256=o8URiDvCbZ8LL0I-4xKHkn_Ms2sETBRpn-gOliv3xzQ,2929
-langtrace_python_sdk/instrumentation/dspy/patch.py,sha256=
+langtrace_python_sdk/instrumentation/dspy/patch.py,sha256=H7zF4PVdtepOSpzJuEcckKUjnZQYKlY7yhn3dk6xbpY,10458
 langtrace_python_sdk/instrumentation/embedchain/__init__.py,sha256=5L6n8-brMnRWZ0CMmHEuN1mrhIxrYLNtxRy0Ujc-hOY,103
 langtrace_python_sdk/instrumentation/embedchain/instrumentation.py,sha256=dShwm0duy25IvL7g9I_v-2oYuyh2fadeiJqXtXBay-8,1987
 langtrace_python_sdk/instrumentation/embedchain/patch.py,sha256=ovvBrtqUDwGSmSgK_S3pOOrDa4gkPSFG-HvmsxqmJE8,3627
@@ -158,6 +160,10 @@ langtrace_python_sdk/instrumentation/langchain_core/patch.py,sha256=CXEfbq6E88X_
 langtrace_python_sdk/instrumentation/langgraph/__init__.py,sha256=eitlHloY-aZ4ZuIEJx61AadEA3G7siyecP-V-lziAr8,101
 langtrace_python_sdk/instrumentation/langgraph/instrumentation.py,sha256=SUZZhWSIbcfsF1S5NtEqW8QzkRM_pKAuXB7pwk5tsOU,2526
 langtrace_python_sdk/instrumentation/langgraph/patch.py,sha256=PGe1ZywXctB_yYqnp8AtD8Xqj7EZ087-S5_2vLRYhEQ,4987
+langtrace_python_sdk/instrumentation/litellm/__init__.py,sha256=8uziCc56rFSRiPkYcrcBRbtppOANkZ7uZssCKAl2MKk,97
+langtrace_python_sdk/instrumentation/litellm/instrumentation.py,sha256=Km2q_yfZU6nSqPEXG2xbtTSjqv7xSS92Kxqzw-GtQno,2655
+langtrace_python_sdk/instrumentation/litellm/patch.py,sha256=6ed50KrSC-2Upoh12BlcqfRVzZ1iXcTr8U9cVh9LhvU,24263
+langtrace_python_sdk/instrumentation/litellm/types.py,sha256=aVkoa7tmAbYfyOhnyMrDaVjQuwhmRNLMthlNtKMtWX8,4311
 langtrace_python_sdk/instrumentation/llamaindex/__init__.py,sha256=rHvuqpuQKLj57Ow7vuKRqxAN5jT0b5NBeHwhXbbnRa4,103
 langtrace_python_sdk/instrumentation/llamaindex/instrumentation.py,sha256=8iAg-Oxwf2W4S60qRfO5mvzORYxublgq7FdGWqUB4q8,2965
 langtrace_python_sdk/instrumentation/llamaindex/patch.py,sha256=548hzPyT_k-2wmt9AArv4JzTT4j4AGKJq5Ar2bWv7o8,4615
@@ -235,8 +241,8 @@ tests/pinecone/cassettes/test_query.yaml,sha256=b5v9G3ssUy00oG63PlFUR3JErF2Js-5A
 tests/pinecone/cassettes/test_upsert.yaml,sha256=neWmQ1v3d03V8WoLl8FoFeeCYImb8pxlJBWnFd_lITU,38607
 tests/qdrant/conftest.py,sha256=9n0uHxxIjWk9fbYc4bx-uP8lSAgLBVx-cV9UjnsyCHM,381
 tests/qdrant/test_qdrant.py,sha256=pzjAjVY2kmsmGfrI2Gs2xrolfuaNHz7l1fqGQCjp5_o,3353
-langtrace_python_sdk-2.3.
-langtrace_python_sdk-2.3.
-langtrace_python_sdk-2.3.
-langtrace_python_sdk-2.3.
-langtrace_python_sdk-2.3.
+langtrace_python_sdk-2.3.27.dist-info/METADATA,sha256=UDYZg7l4J6JldIBnNtCPiuMoYmDcIQowMhXTR-ME-fQ,15918
+langtrace_python_sdk-2.3.27.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87
+langtrace_python_sdk-2.3.27.dist-info/entry_points.txt,sha256=1_b9-qvf2fE7uQNZcbUei9vLpFZBbbh9LrtGw95ssAo,70
+langtrace_python_sdk-2.3.27.dist-info/licenses/LICENSE,sha256=QwcOLU5TJoTeUhuIXzhdCEEDDvorGiC6-3YTOl4TecE,11356
+langtrace_python_sdk-2.3.27.dist-info/RECORD,,
{langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/WHEEL
RENAMED
File without changes
{langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/entry_points.txt
RENAMED
File without changes
{langtrace_python_sdk-2.3.21.dist-info → langtrace_python_sdk-2.3.27.dist-info}/licenses/LICENSE
RENAMED
File without changes