langtrace-python-sdk 2.1.29__py3-none-any.whl → 2.2.2__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in the registry.
- examples/cohere_example/chat.py +1 -0
- examples/cohere_example/chat_stream.py +3 -0
- examples/dspy_example/math_problems_cot_parallel.py +59 -0
- examples/gemini_example/__init__.py +6 -0
- examples/gemini_example/function_tools.py +62 -0
- examples/gemini_example/main.py +91 -0
- examples/langchain_example/__init__.py +8 -0
- examples/langchain_example/groq_example.py +28 -15
- examples/ollama_example/basic.py +1 -0
- examples/openai_example/__init__.py +1 -0
- examples/openai_example/async_tool_calling_nonstreaming.py +1 -1
- examples/openai_example/chat_completion.py +1 -1
- examples/openai_example/embeddings_create.py +1 -0
- examples/openai_example/images_edit.py +2 -2
- examples/vertexai_example/__init__.py +6 -0
- examples/vertexai_example/main.py +214 -0
- langtrace_python_sdk/constants/instrumentation/common.py +2 -0
- langtrace_python_sdk/constants/instrumentation/gemini.py +12 -0
- langtrace_python_sdk/constants/instrumentation/vertexai.py +42 -0
- langtrace_python_sdk/instrumentation/__init__.py +4 -0
- langtrace_python_sdk/instrumentation/anthropic/patch.py +68 -96
- langtrace_python_sdk/instrumentation/chroma/patch.py +29 -29
- langtrace_python_sdk/instrumentation/cohere/patch.py +143 -242
- langtrace_python_sdk/instrumentation/dspy/instrumentation.py +2 -2
- langtrace_python_sdk/instrumentation/dspy/patch.py +36 -36
- langtrace_python_sdk/instrumentation/gemini/__init__.py +3 -0
- langtrace_python_sdk/instrumentation/gemini/instrumentation.py +36 -0
- langtrace_python_sdk/instrumentation/gemini/patch.py +186 -0
- langtrace_python_sdk/instrumentation/groq/patch.py +82 -125
- langtrace_python_sdk/instrumentation/ollama/patch.py +62 -65
- langtrace_python_sdk/instrumentation/openai/patch.py +190 -494
- langtrace_python_sdk/instrumentation/qdrant/patch.py +6 -6
- langtrace_python_sdk/instrumentation/vertexai/__init__.py +3 -0
- langtrace_python_sdk/instrumentation/vertexai/instrumentation.py +33 -0
- langtrace_python_sdk/instrumentation/vertexai/patch.py +131 -0
- langtrace_python_sdk/langtrace.py +5 -0
- langtrace_python_sdk/utils/__init__.py +14 -3
- langtrace_python_sdk/utils/llm.py +311 -6
- langtrace_python_sdk/version.py +1 -1
- {langtrace_python_sdk-2.1.29.dist-info → langtrace_python_sdk-2.2.2.dist-info}/METADATA +26 -19
- {langtrace_python_sdk-2.1.29.dist-info → langtrace_python_sdk-2.2.2.dist-info}/RECORD +58 -38
- tests/anthropic/test_anthropic.py +28 -27
- tests/cohere/test_cohere_chat.py +36 -36
- tests/cohere/test_cohere_embed.py +12 -9
- tests/cohere/test_cohere_rerank.py +18 -11
- tests/groq/cassettes/test_async_chat_completion.yaml +113 -0
- tests/groq/cassettes/test_async_chat_completion_streaming.yaml +2232 -0
- tests/groq/cassettes/test_chat_completion.yaml +114 -0
- tests/groq/cassettes/test_chat_completion_streaming.yaml +2512 -0
- tests/groq/conftest.py +33 -0
- tests/groq/test_groq.py +142 -0
- tests/openai/cassettes/test_async_chat_completion_streaming.yaml +28 -28
- tests/openai/test_chat_completion.py +53 -67
- tests/openai/test_image_generation.py +47 -24
- tests/utils.py +40 -5
- {langtrace_python_sdk-2.1.29.dist-info → langtrace_python_sdk-2.2.2.dist-info}/WHEEL +0 -0
- {langtrace_python_sdk-2.1.29.dist-info → langtrace_python_sdk-2.2.2.dist-info}/entry_points.txt +0 -0
- {langtrace_python_sdk-2.1.29.dist-info → langtrace_python_sdk-2.2.2.dist-info}/licenses/LICENSE +0 -0
--- a/langtrace_python_sdk/instrumentation/dspy/patch.py
+++ b/langtrace_python_sdk/instrumentation/dspy/patch.py
@@ -39,25 +39,25 @@ def patch_bootstrapfewshot_optimizer(operation_name, version, tracer):
             ),
         }
         span_attributes["dspy.optimizer.module.prog"] = json.dumps(prog)
-        if instance.metric:
-            span_attributes["dspy.optimizer.metric"] = instance.metric.__name__
+        if hasattr(instance, 'metric'):
+            span_attributes["dspy.optimizer.metric"] = getattr(instance, 'metric').__name__
         if kwargs.get("trainset") and len(kwargs.get("trainset")) > 0:
             span_attributes["dspy.optimizer.trainset"] = str(kwargs.get("trainset"))
         config = {}
-        if instance.metric_threshold:
-            config["metric_threshold"] = instance.metric_threshold
-        if instance.teacher_settings:
-            config["teacher_settings"] = instance.teacher_settings
-        if instance.max_bootstrapped_demos:
-            config["max_bootstrapped_demos"] = instance.max_bootstrapped_demos
-        if instance.max_labeled_demos:
-            config["max_labeled_demos"] = instance.max_labeled_demos
-        if instance.max_rounds:
-            config["max_rounds"] = instance.max_rounds
-        if instance.max_errors:
-            config["max_errors"] = instance.max_errors
-        if instance.error_count:
-            config["error_count"] = instance.error_count
+        if hasattr(instance, 'metric_threshold'):
+            config["metric_threshold"] = getattr(instance, 'metric_threshold')
+        if hasattr(instance, 'teacher_settings'):
+            config["teacher_settings"] = getattr(instance, 'teacher_settings')
+        if hasattr(instance, 'max_bootstrapped_demos'):
+            config["max_bootstrapped_demos"] = getattr(instance, 'max_bootstrapped_demos')
+        if hasattr(instance, 'max_labeled_demos'):
+            config["max_labeled_demos"] = getattr(instance, 'max_labeled_demos')
+        if hasattr(instance, 'max_rounds'):
+            config["max_rounds"] = getattr(instance, 'max_rounds')
+        if hasattr(instance, 'max_steps'):
+            config["max_errors"] = getattr(instance, 'max_errors')
+        if hasattr(instance, 'error_count'):
+            config["error_count"] = getattr(instance, 'error_count')
         if config and len(config) > 0:
             span_attributes["dspy.optimizer.config"] = json.dumps(config)
 
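The hunk above replaces direct attribute reads on the optimizer instance with hasattr/getattr guards, so an optimizer constructed without an optional field no longer raises AttributeError while the span attributes are being collected. A minimal sketch of the difference, using a hypothetical stand-in class rather than the real DSPy optimizer:

```python
# Hypothetical stand-in for a DSPy optimizer with an optional field unset.
class FakeOptimizer:
    def __init__(self):
        self.max_rounds = 1  # note: no 'metric_threshold' attribute

opt = FakeOptimizer()
config = {}

# Direct access would raise AttributeError for the missing field:
#   config["metric_threshold"] = opt.metric_threshold
# The guarded pattern from the diff simply skips absent attributes:
if hasattr(opt, "metric_threshold"):
    config["metric_threshold"] = getattr(opt, "metric_threshold")
if hasattr(opt, "max_rounds"):
    config["max_rounds"] = getattr(opt, "max_rounds")

assert config == {"max_rounds": 1}
```

One quirk worth noting in the new code: the `max_errors` read is guarded by `hasattr(instance, 'max_steps')`, so that key is recorded only when an unrelated attribute is present.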
--- a/langtrace_python_sdk/instrumentation/dspy/patch.py
+++ b/langtrace_python_sdk/instrumentation/dspy/patch.py
@@ -147,30 +147,30 @@ def patch_evaluate(operation_name, version, tracer):
             **(extra_attributes if extra_attributes is not None else {}),
         }
 
-        if "devset" in instance.__dict__:
-            span_attributes["dspy.evaluate.devset"] = str(instance.__dict__["devset"])
-        if "display" in instance.__dict__:
-            span_attributes["dspy.evaluate.display"] = str(instance.__dict__["display"])
-        if "num_threads" in instance.__dict__:
-            span_attributes["dspy.evaluate.num_threads"] = str(instance.__dict__["num_threads"])
-        if "return_outputs" in instance.__dict__:
+        if hasattr(instance, "devset"):
+            span_attributes["dspy.evaluate.devset"] = str(getattr(instance, "devset"))
+        if hasattr(instance, "trainset"):
+            span_attributes["dspy.evaluate.display"] = str(getattr(instance, "trainset"))
+        if hasattr(instance, "num_threads"):
+            span_attributes["dspy.evaluate.num_threads"] = str(getattr(instance, "num_threads"))
+        if hasattr(instance, "return_outputs"):
             span_attributes["dspy.evaluate.return_outputs"] = str(
-                instance.__dict__["return_outputs"]
+                getattr(instance, "return_outputs")
             )
-        if "display_table" in instance.__dict__:
-            span_attributes["dspy.evaluate.display_table"] = str(instance.__dict__["display_table"])
-        if "display_progress" in instance.__dict__:
+        if hasattr(instance, "display_table"):
+            span_attributes["dspy.evaluate.display_table"] = str(getattr(instance, "display_table"))
+        if hasattr(instance, "display_progress"):
             span_attributes["dspy.evaluate.display_progress"] = str(
-                instance.__dict__["display_progress"]
+                getattr(instance, "display_progress")
             )
-        if "metric" in instance.__dict__:
-            span_attributes["dspy.evaluate.metric"] = instance.__dict__["metric"].__name__
-        if "error_count" in instance.__dict__:
-            span_attributes["dspy.evaluate.error_count"] = str(instance.__dict__["error_count"])
-        if "error_lock" in instance.__dict__:
-            span_attributes["dspy.evaluate.error_lock"] = str(instance.__dict__["error_lock"])
-        if "max_errors" in instance.__dict__:
-            span_attributes["dspy.evaluate.max_errors"] = str(instance.__dict__["max_errors"])
+        if hasattr(instance, "metric"):
+            span_attributes["dspy.evaluate.metric"] = getattr(instance, "metric").__name__
+        if hasattr(instance, "error_count"):
+            span_attributes["dspy.evaluate.error_count"] = str(getattr(instance, "error_count"))
+        if hasattr(instance, "error_lock"):
+            span_attributes["dspy.evaluate.error_lock"] = str(getattr(instance, "error_lock"))
+        if hasattr(instance, "max_errors"):
+            span_attributes["dspy.evaluate.max_errors"] = str(getattr(instance, "max_errors"))
         if args and len(args) > 0:
            span_attributes["dspy.evaluate.args"] = str(args)
 
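patch_evaluate gets the same treatment, and the switch also changes what gets captured: probing `instance.__dict__` only sees attributes set on the instance itself, while hasattr/getattr follows the full attribute lookup chain and therefore also resolves class-level defaults and properties. A small illustration with a hypothetical stand-in:

```python
# Hypothetical stand-in: a class-level default that never lands in __dict__.
class FakeEvaluate:
    num_threads = 8  # class attribute, not set per-instance

ev = FakeEvaluate()

# The old probe misses class-level defaults...
assert "num_threads" not in ev.__dict__
# ...while the new probe resolves them through normal attribute lookup.
assert hasattr(ev, "num_threads")
assert getattr(ev, "num_threads") == 8
```

Note also that the new code populates the `dspy.evaluate.display` attribute from the instance's `trainset`, not its `display` field.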
--- /dev/null
+++ b/langtrace_python_sdk/instrumentation/gemini/instrumentation.py
@@ -0,0 +1,36 @@
+from typing import Collection
+from importlib_metadata import version as v
+from langtrace_python_sdk.constants.instrumentation.gemini import APIS
+from wrapt import wrap_function_wrapper as _W
+from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
+from opentelemetry.trace import get_tracer
+from .patch import patch_gemini, apatch_gemini
+
+
+class GeminiInstrumentation(BaseInstrumentor):
+    def instrumentation_dependencies(self) -> Collection[str]:
+        return ["google-generativeai >= 0.5.0"]
+
+    def _instrument(self, **kwargs):
+        trace_provider = kwargs.get("tracer_provider")
+        tracer = get_tracer(__name__, "", trace_provider)
+        version = v("google-cloud-aiplatform")
+
+        for _, api_config in APIS.items():
+            module = api_config.get("module")
+            operation = api_config.get("operation")
+            method = api_config.get("method")
+            name = f"{method}.{operation}"
+
+            _W(
+                module=module,
+                name=name,
+                wrapper=(
+                    apatch_gemini(name, version, tracer)
+                    if operation == "generate_content_async"
+                    else patch_gemini(name, version, tracer)
+                ),
+            )
+
+    def _uninstrument(self, **kwargs):
+        pass
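The new GeminiInstrumentation walks an APIS registry and wraps each entry's target with either the sync or async tracer. The real registry ships in the new constants/instrumentation/gemini.py (+12 above); the sketch below only illustrates the shape `_instrument` expects, and the literal module/method strings in it are assumptions:

```python
# Assumed shape of one APIS entry; the actual values live in
# langtrace_python_sdk/constants/instrumentation/gemini.py.
api_config = {
    "module": "google.generativeai.generative_models",  # assumption
    "method": "GenerativeModel",                         # assumption
    "operation": "generate_content",
}

# _instrument derives the dotted target inside the module from the method
# and operation fields, then hands it to wrap_function_wrapper:
name = f"{api_config['method']}.{api_config['operation']}"
assert name == "GenerativeModel.generate_content"
```

Entries whose operation is `generate_content_async` get `apatch_gemini`; everything else gets the synchronous `patch_gemini`. Note that `instrumentation_dependencies` declares google-generativeai while the recorded version is read from google-cloud-aiplatform.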
--- /dev/null
+++ b/langtrace_python_sdk/instrumentation/gemini/patch.py
@@ -0,0 +1,186 @@
+from langtrace.trace_attributes import LLMSpanAttributes, SpanAttributes
+from opentelemetry import trace
+from opentelemetry.trace import Span, SpanKind, Tracer
+from opentelemetry.trace.propagation import set_span_in_context
+from opentelemetry.trace.status import Status, StatusCode
+
+from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
+from langtrace_python_sdk.utils.llm import (
+    get_extra_attributes,
+    get_langtrace_attributes,
+    get_llm_request_attributes,
+    get_llm_url,
+    is_streaming,
+    set_event_completion,
+    set_event_completion_chunk,
+    set_span_attributes,
+    set_usage_attributes,
+)
+
+
+def patch_gemini(name, version, tracer: Tracer):
+    def traced_method(wrapped, instance, args, kwargs):
+        service_provider = SERVICE_PROVIDERS["GEMINI"]
+        prompts = serialize_prompts(args, kwargs, instance)
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider),
+            **get_llm_request_attributes(
+                kwargs,
+                prompts=prompts,
+                model=get_llm_model(instance),
+            ),
+            **get_llm_url(instance),
+            SpanAttributes.LLM_PATH: "",
+            **get_extra_attributes(),
+        }
+        attributes = LLMSpanAttributes(**span_attributes)
+        span = tracer.start_span(
+            name=name,
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        )
+
+        try:
+            set_span_attributes(span, attributes)
+            result = wrapped(*args, **kwargs)
+            if is_streaming(kwargs):
+                return build_streaming_response(span, result)
+
+            else:
+                set_response_attributes(span, result)
+                span.end()
+                return result
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
+
+
+def apatch_gemini(name, version, tracer: Tracer):
+    async def traced_method(wrapped, instance, args, kwargs):
+        service_provider = SERVICE_PROVIDERS["GEMINI"]
+        prompts = serialize_prompts(args, kwargs, instance)
+        span_attributes = {
+            **get_langtrace_attributes(version, service_provider),
+            **get_llm_request_attributes(
+                kwargs,
+                prompts=prompts,
+                model=get_llm_model(instance),
+            ),
+            **get_llm_url(instance),
+            SpanAttributes.LLM_PATH: "",
+            **get_extra_attributes(),
+        }
+        attributes = LLMSpanAttributes(**span_attributes)
+        span = tracer.start_span(
+            name=name,
+            kind=SpanKind.CLIENT,
+            context=set_span_in_context(trace.get_current_span()),
+        )
+
+        try:
+            set_span_attributes(span, attributes)
+            result = await wrapped(*args, **kwargs)
+            if is_streaming(kwargs):
+                return abuild_streaming_response(span, result)
+            else:
+                set_response_attributes(span, result)
+                span.end()
+                return result
+        except Exception as error:
+            span.record_exception(error)
+            span.set_status(Status(StatusCode.ERROR, str(error)))
+            span.end()
+            raise
+
+    return traced_method
+
+
+def get_llm_model(instance):
+    llm_model = "unknown"
+    if hasattr(instance, "_model_id"):
+        llm_model = instance._model_id
+    if hasattr(instance, "_model_name"):
+        llm_model = instance._model_name.replace("models/", "")
+    return llm_model
+
+
+def serialize_prompts(args, kwargs, instance):
+    prompts = []
+    if hasattr(instance, "_system_instruction") and instance._system_instruction is not None:
+        system_prompt = {
+            "role": "system",
+            "content": instance._system_instruction.__dict__["_pb"].parts[0].text,
+        }
+        prompts.append(system_prompt)
+
+    if args is not None and len(args) > 0:
+        content = ""
+        for arg in args:
+            if isinstance(arg, str):
+                content = f"{content}{arg}\n"
+            elif isinstance(arg, list):
+                for subarg in arg:
+                    content = f"{content}{subarg}\n"
+        prompts.append({"role": "user", "content": content})
+    return prompts
+
+
+def set_response_attributes(
+    span: Span,
+    result,
+):
+    span.set_status(Status(StatusCode.OK))
+    if hasattr(result, "text"):
+        set_event_completion(span, [{"role": "assistant", "content": result.text}])
+
+    if hasattr(result, "usage_metadata"):
+        usage = result.usage_metadata
+        input_tokens = usage.prompt_token_count
+        output_tokens = usage.candidates_token_count
+        set_usage_attributes(
+            span, {"input_tokens": input_tokens, "output_tokens": output_tokens}
+        )
+
+
+def build_streaming_response(span, response):
+    complete_response = ""
+    for item in response:
+        item_to_yield = item
+        complete_response += str(item.text)
+        yield item_to_yield
+        set_event_completion_chunk(span, item.text)
+        if hasattr(item, "usage_metadata"):
+            usage = item.usage_metadata
+            input_tokens = usage.prompt_token_count
+            output_tokens = usage.candidates_token_count
+            set_usage_attributes(
+                span, {"input_tokens": input_tokens, "output_tokens": output_tokens}
+            )
+
+    set_response_attributes(span, response)
+    span.set_status(Status(StatusCode.OK))
+    span.end()
+
+
+async def abuild_streaming_response(span, response):
+    complete_response = ""
+    async for item in response:
+        item_to_yield = item
+        complete_response += str(item.text)
+        yield item_to_yield
+        set_event_completion_chunk(span, item.text)
+        if hasattr(item, "usage_metadata"):
+            usage = item.usage_metadata
+            input_tokens = usage.prompt_token_count
+            output_tokens = usage.candidates_token_count
+            set_usage_attributes(
+                span, {"input_tokens": input_tokens, "output_tokens": output_tokens}
+            )
+
+    set_response_attributes(span, response)
+    span.set_status(Status(StatusCode.OK))
+    span.end()
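patch_gemini and apatch_gemini follow the wrapt wrapper convention: the wrapper receives the original callable plus the bound instance, which is how traced_method can read model metadata (get_llm_model inspects `instance._model_id` / `_model_name`) before delegating. A self-contained sketch of that convention using a toy class, not the real Gemini client:

```python
import wrapt

def make_wrapper():
    # Same (wrapped, instance, args, kwargs) signature wrapt hands to
    # patch_gemini's traced_method.
    def traced_method(wrapped, instance, args, kwargs):
        print(f"tracing {type(instance).__name__}.{wrapped.__name__}")
        return wrapped(*args, **kwargs)
    return traced_method

class FakeModel:
    _model_name = "models/gemini-pro"  # metadata a wrapper could inspect

    def generate_content(self, prompt):
        return prompt.upper()

# Rebind FakeModel.generate_content so every call flows through the wrapper.
wrapt.wrap_function_wrapper(FakeModel, "generate_content", make_wrapper())
assert FakeModel().generate_content("hi") == "HI"
```

For the streaming paths, note that build_streaming_response and abuild_streaming_response are generators: the span stays open while the caller iterates and is only ended once the stream is exhausted.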