openlit 1.34.10__tar.gz → 1.34.11__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {openlit-1.34.10 → openlit-1.34.11}/PKG-INFO +1 -1
- {openlit-1.34.10 → openlit-1.34.11}/pyproject.toml +1 -1
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/__helpers.py +23 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/groq/__init__.py +7 -9
- openlit-1.34.11/src/openlit/instrumentation/groq/async_groq.py +143 -0
- openlit-1.34.11/src/openlit/instrumentation/groq/groq.py +143 -0
- openlit-1.34.11/src/openlit/instrumentation/groq/utils.py +199 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ollama/async_ollama.py +3 -2
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ollama/ollama.py +3 -2
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ollama/utils.py +10 -6
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/premai/utils.py +3 -73
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/reka/utils.py +3 -51
- openlit-1.34.10/src/openlit/instrumentation/groq/async_groq.py +0 -467
- openlit-1.34.10/src/openlit/instrumentation/groq/groq.py +0 -467
- {openlit-1.34.10 → openlit-1.34.11}/LICENSE +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/README.md +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/evals/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/evals/all.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/evals/bias_detection.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/evals/hallucination.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/evals/toxicity.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/evals/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/guard/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/guard/all.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/guard/prompt_injection.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/guard/restrict_topic.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/guard/sensitive_topic.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/guard/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ag2/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ag2/ag2.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ai21/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ai21/ai21.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ai21/async_ai21.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ai21/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/anthropic/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/anthropic/anthropic.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/anthropic/async_anthropic.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/anthropic/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/assemblyai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/assemblyai/assemblyai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/assemblyai/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/astra/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/astra/astra.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/astra/async_astra.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/astra/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/azure_ai_inference/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/azure_ai_inference/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/bedrock/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/bedrock/bedrock.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/bedrock/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/chroma/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/chroma/chroma.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/cohere/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/cohere/async_cohere.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/cohere/cohere.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/controlflow/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/controlflow/controlflow.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/crawl4ai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/crawl4ai/async_crawl4ai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/crawl4ai/crawl4ai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/crewai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/crewai/crewai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/dynamiq/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/dynamiq/dynamiq.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/elevenlabs/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/elevenlabs/async_elevenlabs.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/elevenlabs/elevenlabs.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/elevenlabs/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/embedchain/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/embedchain/embedchain.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/firecrawl/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/firecrawl/firecrawl.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/google_ai_studio/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/google_ai_studio/async_google_ai_studio.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/google_ai_studio/google_ai_studio.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/google_ai_studio/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/gpt4all/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/gpt4all/gpt4all.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/gpt4all/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/gpu/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/haystack/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/haystack/haystack.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/julep/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/julep/async_julep.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/julep/julep.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/langchain/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/langchain/async_langchain.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/langchain/langchain.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/letta/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/letta/letta.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/litellm/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/litellm/async_litellm.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/litellm/litellm.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/llamaindex/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/llamaindex/llamaindex.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/mem0/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/mem0/mem0.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/milvus/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/milvus/milvus.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/mistral/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/mistral/async_mistral.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/mistral/mistral.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/multion/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/multion/async_multion.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/multion/multion.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/ollama/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/openai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/openai/async_openai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/openai/openai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/openai_agents/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/openai_agents/openai_agents.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/phidata/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/phidata/phidata.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/pinecone/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/pinecone/pinecone.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/premai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/premai/premai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/pydantic_ai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/pydantic_ai/pydantic_ai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/pydantic_ai/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/qdrant/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/qdrant/async_qdrant.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/qdrant/qdrant.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/reka/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/reka/async_reka.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/reka/reka.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/together/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/together/async_together.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/together/together.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/together/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/transformers/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/transformers/transformers.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/transformers/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/vertexai/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/vertexai/async_vertexai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/vertexai/vertexai.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/vllm/__init__.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/vllm/utils.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/instrumentation/vllm/vllm.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/otel/events.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/otel/metrics.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/otel/tracing.py +0 -0
- {openlit-1.34.10 → openlit-1.34.11}/src/openlit/semcov/__init__.py +0 -0
--- openlit-1.34.10/PKG-INFO
+++ openlit-1.34.11/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: openlit
-Version: 1.34.10
+Version: 1.34.11
 Summary: OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects
 License: Apache-2.0
 Keywords: OpenTelemetry,otel,otlp,llm,tracing,openai,anthropic,claude,cohere,llm monitoring,observability,monitoring,gpt,Generative AI,chatGPT,gpu
--- openlit-1.34.10/pyproject.toml
+++ openlit-1.34.11/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "openlit"
-version = "1.34.10"
+version = "1.34.11"
 description = "OpenTelemetry-native Auto instrumentation library for monitoring LLM Applications and GPUs, facilitating the integration of observability into your GenAI-driven projects"
 authors = ["OpenLIT"]
 license = "Apache-2.0"
--- openlit-1.34.10/src/openlit/__helpers.py
+++ openlit-1.34.11/src/openlit/__helpers.py
@@ -379,3 +379,26 @@ def record_completion_metrics(metrics, gen_ai_operation, gen_ai_system, server_a
     metrics["genai_completion_tokens"].add(output_tokens, attributes)
     metrics["genai_prompt_tokens"].add(input_tokens, attributes)
     metrics["genai_cost"].record(cost, attributes)
+
+def record_embedding_metrics(metrics, gen_ai_operation, gen_ai_system, server_address, server_port,
+    request_model, response_model, environment, application_name, start_time, end_time,
+    input_tokens, cost):
+    """
+    Record embedding-specific metrics for the operation.
+    """
+
+    attributes = create_metrics_attributes(
+        operation=gen_ai_operation,
+        system=gen_ai_system,
+        server_address=server_address,
+        server_port=server_port,
+        request_model=request_model,
+        response_model=response_model,
+        service_name=application_name,
+        deployment_environment=environment,
+    )
+    metrics["genai_client_usage_tokens"].record(input_tokens, attributes)
+    metrics["genai_client_operation_duration"].record(end_time - start_time, attributes)
+    metrics["genai_requests"].add(1, attributes)
+    metrics["genai_prompt_tokens"].add(input_tokens, attributes)
+    metrics["genai_cost"].record(cost, attributes)
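Note on the new helper above: record_embedding_metrics mirrors record_completion_metrics but drops the output-token, TBT, and TTFT arguments. A minimal sketch of driving it directly follows (not part of the diff); the dictionary keys are the ones the helper reads, while the meter name, instrument names, and call values are illustrative assumptions rather than openlit's internal wiring.

import time

from opentelemetry import metrics as otel_metrics
from openlit.__helpers import record_embedding_metrics  # added in 1.34.11

meter = otel_metrics.get_meter("openlit.example")  # assumes a configured MeterProvider
metrics = {
    # Keys must match what record_embedding_metrics reads; instrument names are arbitrary here.
    "genai_client_usage_tokens": meter.create_histogram("gen_ai.client.token.usage"),
    "genai_client_operation_duration": meter.create_histogram("gen_ai.client.operation.duration"),
    "genai_requests": meter.create_counter("gen_ai.total.requests"),
    "genai_prompt_tokens": meter.create_counter("gen_ai.usage.input_tokens"),
    "genai_cost": meter.create_histogram("gen_ai.usage.cost"),
}

start = time.time()
# ... perform an embeddings call here ...
record_embedding_metrics(
    metrics, "embedding", "ollama", "127.0.0.1", 11434,
    "nomic-embed-text", "nomic-embed-text",  # request / response model, hypothetical values
    "production", "my-app", start, time.time(),
    input_tokens=512, cost=0.0001,
)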
--- openlit-1.34.10/src/openlit/instrumentation/groq/__init__.py
+++ openlit-1.34.11/src/openlit/instrumentation/groq/__init__.py
@@ -1,4 +1,3 @@
-# pylint: disable=useless-return, bad-staticmethod-argument, disable=duplicate-code
 """Initializer of Auto Instrumentation of Groq Functions"""

 from typing import Collection
@@ -13,15 +12,15 @@ _instruments = ("groq >= 0.5.0",)

 class GroqInstrumentor(BaseInstrumentor):
     """
-    An instrumentor for Groq
+    An instrumentor for Groq client library.
     """

     def instrumentation_dependencies(self) -> Collection[str]:
         return _instruments

     def _instrument(self, **kwargs):
-        application_name = kwargs.get("application_name", "
-        environment = kwargs.get("environment", "
+        application_name = kwargs.get("application_name", "default")
+        environment = kwargs.get("environment", "default")
         tracer = kwargs.get("tracer")
         metrics = kwargs.get("metrics_dict")
         pricing_info = kwargs.get("pricing_info", {})
@@ -29,22 +28,21 @@ class GroqInstrumentor(BaseInstrumentor):
         disable_metrics = kwargs.get("disable_metrics")
         version = importlib.metadata.version("groq")

-        #
+        # Chat completions
         wrap_function_wrapper(
             "groq.resources.chat.completions",
             "Completions.create",
             chat(version, environment, application_name,
-
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

-        #
+        # Chat completions
         wrap_function_wrapper(
             "groq.resources.chat.completions",
             "AsyncCompletions.create",
             async_chat(version, environment, application_name,
-
+                  tracer, pricing_info, capture_message_content, metrics, disable_metrics),
         )

     def _uninstrument(self, **kwargs):
-        # Proper uninstrumentation logic to revert patched methods
         pass
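For context (not part of the diff): the application_name and environment kwargs consumed in _instrument() above are normally forwarded by openlit.init(), so the new "default" fallbacks only apply when those values are missing. A hedged sketch of the typical setup follows; the init parameters shown are based on openlit's public README, and the Groq client call is the Completions.create path being wrapped.

import openlit
from groq import Groq  # requires groq >= 0.5.0, per _instruments above

# Enables the GroqInstrumentor (among others) and passes these values through kwargs.
openlit.init(application_name="demo-app", environment="staging")

client = Groq()  # GROQ_API_KEY is read from the environment
completion = client.chat.completions.create(  # wrapped via Completions.create above
    model="mixtral-8x7b-32768",
    messages=[{"role": "user", "content": "Say hello"}],
)
print(completion.choices[0].message.content)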
--- /dev/null
+++ openlit-1.34.11/src/openlit/instrumentation/groq/async_groq.py
@@ -0,0 +1,143 @@
+"""
+Module for monitoring Groq API calls (async version).
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port
+)
+from openlit.instrumentation.groq.utils import (
+    process_chunk,
+    process_streaming_chat_response,
+    process_chat_response
+)
+from openlit.semcov import SemanticConvention
+
+def async_chat(version, environment, application_name, tracer, pricing_info,
+    capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    class TracedAsyncStream:
+        """
+        Wrapper for async streaming responses to collect telemetry.
+        """
+
+        def __init__(
+                self,
+                wrapped,
+                span,
+                span_name,
+                kwargs,
+                server_address,
+                server_port,
+                **args,
+            ):
+            self.__wrapped__ = wrapped
+            self._span = span
+            self._span_name = span_name
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._finish_reason = ""
+            self._tools = None
+            self._system_fingerprint = ""
+            self._input_tokens = 0
+            self._output_tokens = 0
+            self._args = args
+            self._kwargs = kwargs
+            self._start_time = time.time()
+            self._end_time = None
+            self._timestamps = []
+            self._ttft = 0
+            self._tbt = 0
+            self._server_address = server_address
+            self._server_port = server_port
+
+        async def __aenter__(self):
+            await self.__wrapped__.__aenter__()
+            return self
+
+        async def __aexit__(self, exc_type, exc_value, traceback):
+            await self.__wrapped__.__aexit__(exc_type, exc_value, traceback)
+
+        def __aiter__(self):
+            return self
+
+        async def __getattr__(self, name):
+            """Delegate attribute access to the wrapped object."""
+            return getattr(await self.__wrapped__, name)
+
+        async def __anext__(self):
+            try:
+                chunk = await self.__wrapped__.__anext__()
+                process_chunk(self, chunk)
+                return chunk
+            except StopAsyncIteration:
+                try:
+                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
+                        )
+
+                except Exception as e:
+                    handle_exception(self._span, e)
+
+                raise
+
+    async def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI function call.
+        """
+        # Check if streaming is enabled for the API call
+        streaming = kwargs.get("stream", False)
+        server_address, server_port = set_server_address_and_port(instance, "api.groq.com", 443)
+        request_model = kwargs.get("model", "mixtral-8x7b-32768")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        if streaming:
+            # Special handling for streaming response
+            awaited_wrapped = await wrapped(*args, **kwargs)
+            span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+            return TracedAsyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+        else:
+            # Handling for non-streaming responses
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.time()
+                response = await wrapped(*args, **kwargs)
+
+                try:
+                    response = process_chat_response(
+                        response=response,
+                        request_model=request_model,
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper
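A hedged usage sketch (not part of the diff) of the path the TracedAsyncStream wrapper above instruments: an async, streaming chat completion. The AsyncGroq calls follow the public groq SDK's OpenAI-style interface; telemetry for the stream is emitted once StopAsyncIteration is raised.

import asyncio
from groq import AsyncGroq

async def main():
    client = AsyncGroq()  # GROQ_API_KEY is read from the environment
    stream = await client.chat.completions.create(  # wrapped via AsyncCompletions.create
        model="mixtral-8x7b-32768",
        messages=[{"role": "user", "content": "Stream a short reply"}],
        stream=True,  # takes the TracedAsyncStream branch in wrapper()
    )
    async for chunk in stream:  # each chunk also flows through process_chunk()
        print(chunk.choices[0].delta.content or "", end="")

asyncio.run(main())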
--- /dev/null
+++ openlit-1.34.11/src/openlit/instrumentation/groq/groq.py
@@ -0,0 +1,143 @@
+"""
+Module for monitoring Groq API calls.
+"""
+
+import time
+from opentelemetry.trace import SpanKind
+from openlit.__helpers import (
+    handle_exception,
+    set_server_address_and_port
+)
+from openlit.instrumentation.groq.utils import (
+    process_chunk,
+    process_streaming_chat_response,
+    process_chat_response
+)
+from openlit.semcov import SemanticConvention
+
+def chat(version, environment, application_name, tracer, pricing_info,
+    capture_message_content, metrics, disable_metrics):
+    """
+    Generates a telemetry wrapper for GenAI function call
+    """
+
+    class TracedSyncStream:
+        """
+        Wrapper for streaming responses to collect telemetry.
+        """
+
+        def __init__(
+                self,
+                wrapped,
+                span,
+                span_name,
+                kwargs,
+                server_address,
+                server_port,
+                **args,
+            ):
+            self.__wrapped__ = wrapped
+            self._span = span
+            self._span_name = span_name
+            self._llmresponse = ""
+            self._response_id = ""
+            self._response_model = ""
+            self._finish_reason = ""
+            self._tools = None
+            self._system_fingerprint = ""
+            self._input_tokens = 0
+            self._output_tokens = 0
+            self._args = args
+            self._kwargs = kwargs
+            self._start_time = time.time()
+            self._end_time = None
+            self._timestamps = []
+            self._ttft = 0
+            self._tbt = 0
+            self._server_address = server_address
+            self._server_port = server_port
+
+        def __enter__(self):
+            self.__wrapped__.__enter__()
+            return self
+
+        def __exit__(self, exc_type, exc_value, traceback):
+            self.__wrapped__.__exit__(exc_type, exc_value, traceback)
+
+        def __iter__(self):
+            return self
+
+        def __getattr__(self, name):
+            """Delegate attribute access to the wrapped object."""
+            return getattr(self.__wrapped__, name)
+
+        def __next__(self):
+            try:
+                chunk = self.__wrapped__.__next__()
+                process_chunk(self, chunk)
+                return chunk
+            except StopIteration:
+                try:
+                    with tracer.start_as_current_span(self._span_name, kind= SpanKind.CLIENT) as self._span:
+                        process_streaming_chat_response(
+                            self,
+                            pricing_info=pricing_info,
+                            environment=environment,
+                            application_name=application_name,
+                            metrics=metrics,
+                            capture_message_content=capture_message_content,
+                            disable_metrics=disable_metrics,
+                            version=version
+                        )
+
+                except Exception as e:
+                    handle_exception(self._span, e)
+
+                raise
+
+    def wrapper(wrapped, instance, args, kwargs):
+        """
+        Wraps the GenAI function call.
+        """
+        # Check if streaming is enabled for the API call
+        streaming = kwargs.get("stream", False)
+        server_address, server_port = set_server_address_and_port(instance, "api.groq.com", 443)
+        request_model = kwargs.get("model", "mixtral-8x7b-32768")
+
+        span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"
+
+        if streaming:
+            # Special handling for streaming response
+            awaited_wrapped = wrapped(*args, **kwargs)
+            span = tracer.start_span(span_name, kind=SpanKind.CLIENT)
+            return TracedSyncStream(awaited_wrapped, span, span_name, kwargs, server_address, server_port)
+        else:
+            # Handling for non-streaming responses
+            with tracer.start_as_current_span(span_name, kind=SpanKind.CLIENT) as span:
+                start_time = time.time()
+                response = wrapped(*args, **kwargs)
+
+                try:
+                    response = process_chat_response(
+                        response=response,
+                        request_model=request_model,
+                        pricing_info=pricing_info,
+                        server_port=server_port,
+                        server_address=server_address,
+                        environment=environment,
+                        application_name=application_name,
+                        metrics=metrics,
+                        start_time=start_time,
+                        span=span,
+                        capture_message_content=capture_message_content,
+                        disable_metrics=disable_metrics,
+                        version=version,
+                        **kwargs
+                    )
+
+                except Exception as e:
+                    handle_exception(span, e)
+
+                return response
+
+    return wrapper
--- /dev/null
+++ openlit-1.34.11/src/openlit/instrumentation/groq/utils.py
@@ -0,0 +1,199 @@
+"""
+Groq OpenTelemetry instrumentation utility functions
+"""
+import time
+
+from opentelemetry.trace import Status, StatusCode
+
+from openlit.__helpers import (
+    calculate_ttft,
+    response_as_dict,
+    calculate_tbt,
+    get_chat_model_cost,
+    common_span_attributes,
+    record_completion_metrics,
+)
+from openlit.semcov import SemanticConvention
+
+def format_content(messages):
+    """
+    Process a list of messages to extract content.
+    """
+
+    formatted_messages = []
+    for message in messages:
+        role = message["role"]
+        content = message["content"]
+
+        if isinstance(content, list):
+            content_str = ", ".join(
+                f'{item["type"]}: {item["text"] if "text" in item else item["image_url"]}'
+                if "type" in item else f'text: {item["text"]}'
+                for item in content
+            )
+            formatted_messages.append(f"{role}: {content_str}")
+        else:
+            formatted_messages.append(f"{role}: {content}")
+
+    return "\n".join(formatted_messages)
+
+def process_chunk(scope, chunk):
+    """
+    Process a chunk of response data and update state.
+    """
+
+    end_time = time.time()
+    # Record the timestamp for the current chunk
+    scope._timestamps.append(end_time)
+
+    if len(scope._timestamps) == 1:
+        # Calculate time to first chunk
+        scope._ttft = calculate_ttft(scope._timestamps, scope._start_time)
+
+    chunked = response_as_dict(chunk)
+
+    # Collect message IDs and aggregated response from events
+    if (len(chunked.get("choices", [])) > 0 and
+        "delta" in chunked.get("choices")[0] and
+        "content" in chunked.get("choices")[0].get("delta", {})):
+
+        content = chunked.get("choices")[0].get("delta").get("content")
+        if content:
+            scope._llmresponse += content
+
+    if chunked.get('x_groq') is not None:
+        if chunked.get('x_groq').get('usage') is not None:
+            scope._input_tokens = chunked.get('x_groq').get('usage').get('prompt_tokens')
+            scope._output_tokens = chunked.get('x_groq').get('usage').get('completion_tokens')
+            scope._response_id = chunked.get('x_groq').get('id')
+            scope._response_model = chunked.get('x_groq').get('model')
+            scope._finish_reason = chunked.get('choices', [{}])[0].get('finish_reason')
+            scope._system_fingerprint = chunked.get('x_groq').get('system_fingerprint')
+            scope._end_time = time.time()
+
+
+def common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content, disable_metrics, version, is_stream):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    if len(scope._timestamps) > 1:
+        scope._tbt = calculate_tbt(scope._timestamps)
+
+    prompt = format_content(scope._kwargs.get("messages", []))
+    request_model = scope._kwargs.get("model", "mixtral-8x7b-32768")
+
+    cost = get_chat_model_cost(request_model, pricing_info, scope._input_tokens, scope._output_tokens)
+
+    # Common Span Attributes
+    common_span_attributes(scope,
+        SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_GROQ,
+        scope._server_address, scope._server_port, request_model, scope._response_model,
+        environment, application_name, is_stream, scope._tbt, scope._ttft, version)
+
+    # Span Attributes for Request parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SEED, scope._kwargs.get("seed", ""))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_FREQUENCY_PENALTY, scope._kwargs.get("frequency_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_MAX_TOKENS, scope._kwargs.get("max_completion_tokens", -1))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_PRESENCE_PENALTY, scope._kwargs.get("presence_penalty", 0.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_STOP_SEQUENCES, scope._kwargs.get("stop", []))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TEMPERATURE, scope._kwargs.get("temperature", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_TOP_P, scope._kwargs.get("top_p", 1.0))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_USER, scope._kwargs.get("user", ""))
+    scope._span.set_attribute(SemanticConvention.GEN_AI_REQUEST_SERVICE_TIER, scope._kwargs.get("service_tier", "on_demand"))
+
+    # Span Attributes for Response parameters
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_ID, scope._response_id)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_FINISH_REASON, [scope._finish_reason])
+    scope._span.set_attribute(SemanticConvention.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT, scope._system_fingerprint)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_OUTPUT_TYPE, "text" if isinstance(scope._llmresponse, str) else "json")
+
+    # Span Attributes for Cost and Tokens
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_INPUT_TOKENS, scope._input_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_OUTPUT_TOKENS, scope._output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_CLIENT_TOKEN_USAGE, scope._input_tokens + scope._output_tokens)
+    scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)
+
+    # Span Attributes for Tools
+    if scope._tools:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, scope._tools.get("function", {}).get("name", ""))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(scope._tools.get("id", "")))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(scope._tools.get("function", {}).get("arguments", "")))
+
+    # Span Attributes for Content
+    if capture_message_content:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_PROMPT, prompt)
+        scope._span.set_attribute(SemanticConvention.GEN_AI_CONTENT_COMPLETION, scope._llmresponse)
+
+        # To be removed one the change to span_attributes (from span events) is complete
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_PROMPT_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_PROMPT: prompt,
+            },
+        )
+        scope._span.add_event(
+            name=SemanticConvention.GEN_AI_CONTENT_COMPLETION_EVENT,
+            attributes={
+                SemanticConvention.GEN_AI_CONTENT_COMPLETION: scope._llmresponse,
+            },
+        )
+
+    scope._span.set_status(Status(StatusCode.OK))
+
+    # Metrics
+    if not disable_metrics:
+        record_completion_metrics(metrics, SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT, SemanticConvention.GEN_AI_SYSTEM_GROQ,
+            scope._server_address, scope._server_port, request_model, scope._response_model, environment,
+            application_name, scope._start_time, scope._end_time, scope._input_tokens, scope._output_tokens,
+            cost, scope._tbt, scope._ttft)
+
+def process_streaming_chat_response(scope, pricing_info, environment, application_name, metrics,
+    capture_message_content=False, disable_metrics=False, version=""):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=True)
+
+def process_chat_response(response, request_model, pricing_info, server_port, server_address,
+    environment, application_name, metrics, start_time, span, capture_message_content=False,
+    disable_metrics=False, version="1.0.0", **kwargs):
+    """
+    Process chat request and generate Telemetry
+    """
+
+    # Create scope object
+    scope = type("GenericScope", (), {})()
+    response_dict = response_as_dict(response)
+
+    scope._start_time = start_time
+    scope._end_time = time.time()
+    scope._span = span
+    scope._llmresponse = " ".join(
+        (choice.get("message", {}).get("content") or "")
+        for choice in response_dict.get("choices", [])
+    )
+    scope._response_id = response_dict.get("id")
+    scope._response_model = response_dict.get("model")
+    scope._input_tokens = response_dict.get("usage", {}).get("prompt_tokens", 0)
+    scope._output_tokens = response_dict.get("usage", {}).get("completion_tokens", 0)
+    scope._timestamps = []
+    scope._ttft, scope._tbt = scope._end_time - scope._start_time, 0
+    scope._server_address, scope._server_port = server_address, server_port
+    scope._kwargs = kwargs
+    scope._system_fingerprint = response_dict.get("system_fingerprint")
+    scope._finish_reason = str(response_dict.get("choices", [])[0].get("finish_reason", ""))
+
+    # Handle tool calls
+    if scope._kwargs.get("tools"):
+        scope._tools = response_dict.get("choices", [{}])[0].get("message", {}).get("tool_calls")
+    else:
+        scope._tools = None
+
+    common_chat_logic(scope, pricing_info, environment, application_name, metrics,
+        capture_message_content, disable_metrics, version, is_stream=False)
+
+    return response
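To illustrate format_content() above, here is a small worked example (not part of the diff); the message shapes are assumptions about typical OpenAI-style payloads, with the image_url given as a plain string so the formatted output stays readable.

from openlit.instrumentation.groq.utils import format_content

messages = [
    {"role": "system", "content": "You are terse."},
    {"role": "user", "content": [
        {"type": "text", "text": "Describe this"},
        {"type": "image_url", "image_url": "https://example.com/cat.png"},
    ]},
]

print(format_content(messages))
# system: You are terse.
# user: text: Describe this, image_url: https://example.com/cat.png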
--- openlit-1.34.10/src/openlit/instrumentation/ollama/async_ollama.py
+++ openlit-1.34.11/src/openlit/instrumentation/ollama/async_ollama.py
@@ -42,7 +42,7 @@ def async_chat(version, environment, application_name,
             self._llmresponse = ""
             self._response_model = ""
             self._finish_reason = ""
-            self.
+            self._tools = []
             self._input_tokens = 0
             self._output_tokens = 0
             self._response_role = ""
@@ -102,7 +102,8 @@ def async_chat(version, environment, application_name,
         streaming = kwargs.get("stream", False)

         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
-
+        json_body = kwargs.get("json", {}) or {}
+        request_model = json_body.get("model") or kwargs.get("model")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"

--- openlit-1.34.10/src/openlit/instrumentation/ollama/ollama.py
+++ openlit-1.34.11/src/openlit/instrumentation/ollama/ollama.py
@@ -42,7 +42,7 @@ def chat(version, environment, application_name,
             self._llmresponse = ""
             self._response_model = ""
             self._finish_reason = ""
-            self.
+            self._tools = []
            self._input_tokens = 0
             self._output_tokens = 0
             self._response_role = ""
@@ -102,7 +102,8 @@ def chat(version, environment, application_name,
         streaming = kwargs.get("stream", False)

         server_address, server_port = set_server_address_and_port(instance, "127.0.0.1", 11434)
-
+        json_body = kwargs.get("json", {}) or {}
+        request_model = json_body.get("model") or kwargs.get("model")

         span_name = f"{SemanticConvention.GEN_AI_OPERATION_TYPE_CHAT} {request_model}"

--- openlit-1.34.10/src/openlit/instrumentation/ollama/utils.py
+++ openlit-1.34.11/src/openlit/instrumentation/ollama/utils.py
@@ -57,7 +57,7 @@ def process_chunk(self, chunk):
     self._llmresponse += chunked.get("message", {}).get("content", "")

     if chunked.get("message", {}).get("tool_calls"):
-        self.
+        self._tools = chunked["message"]["tool_calls"]

     if chunked.get("eval_count"):
         self._response_role = chunked.get("message", {}).get("role", "")
@@ -138,10 +138,10 @@ def common_chat_logic(scope, gen_ai_endpoint, pricing_info, environment, applica
     scope._span.set_attribute(SemanticConvention.GEN_AI_USAGE_COST, cost)

     # Span Attributes for Tools
-    if
-
-    scope._span.set_attribute(SemanticConvention.
-    scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(
+    if scope._tools is not None:
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_NAME, scope._tools.get("function","")).get("name","")
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_CALL_ID, str(scope._tools.get("id","")))
+        scope._span.set_attribute(SemanticConvention.GEN_AI_TOOL_ARGS, str(scope._tools.get("function","").get("arguments","")))

     # Span Attributes for Content
     if capture_message_content:
@@ -244,7 +244,11 @@ def process_chat_response(response, gen_ai_endpoint, pricing_info, server_port,
     scope._tbt = 0
     scope._server_address, scope._server_port = server_address, server_port
     scope._kwargs = kwargs
-
+
+    if scope._kwargs.get("tools"):
+        scope._tools = response_dict.get("choices")[0].get("message").get("tool_calls")
+    else:
+        scope._tools = None

     common_chat_logic(scope, gen_ai_endpoint, pricing_info, environment, application_name, metrics,
         capture_message_content, disable_metrics, version)
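A hedged sketch (not part of the diff) of a request that exercises the new tool-call branch in process_chat_response() above: a non-streaming Ollama chat call with tools supplied, after which the instrumentation reads the returned tool_calls. The client API shown is an assumption based on the public ollama-python package.

import ollama

tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",  # hypothetical tool
        "description": "Get the current weather for a city",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}]

response = ollama.chat(
    model="llama3.1",
    messages=[{"role": "user", "content": "What is the weather in Paris?"}],
    tools=tools,  # with tools present, scope._tools is populated from the response
)
print(response["message"])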