openlit 1.33.9__py3-none-any.whl → 1.33.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (77)
  1. openlit/__helpers.py +5 -0
  2. openlit/__init__.py +3 -2
  3. openlit/instrumentation/ag2/ag2.py +3 -3
  4. openlit/instrumentation/ai21/ai21.py +1 -1
  5. openlit/instrumentation/ai21/async_ai21.py +1 -1
  6. openlit/instrumentation/anthropic/anthropic.py +1 -1
  7. openlit/instrumentation/anthropic/async_anthropic.py +1 -1
  8. openlit/instrumentation/astra/astra.py +5 -5
  9. openlit/instrumentation/astra/async_astra.py +5 -5
  10. openlit/instrumentation/azure_ai_inference/async_azure_ai_inference.py +3 -3
  11. openlit/instrumentation/azure_ai_inference/azure_ai_inference.py +3 -3
  12. openlit/instrumentation/chroma/chroma.py +5 -5
  13. openlit/instrumentation/cohere/async_cohere.py +1 -1
  14. openlit/instrumentation/cohere/cohere.py +2 -2
  15. openlit/instrumentation/controlflow/controlflow.py +3 -3
  16. openlit/instrumentation/crawl4ai/async_crawl4ai.py +3 -3
  17. openlit/instrumentation/crawl4ai/crawl4ai.py +3 -3
  18. openlit/instrumentation/crewai/crewai.py +4 -2
  19. openlit/instrumentation/dynamiq/dynamiq.py +3 -3
  20. openlit/instrumentation/elevenlabs/async_elevenlabs.py +1 -2
  21. openlit/instrumentation/elevenlabs/elevenlabs.py +1 -2
  22. openlit/instrumentation/embedchain/embedchain.py +5 -5
  23. openlit/instrumentation/firecrawl/firecrawl.py +3 -3
  24. openlit/instrumentation/gpt4all/__init__.py +2 -2
  25. openlit/instrumentation/gpt4all/gpt4all.py +345 -220
  26. openlit/instrumentation/gpu/__init__.py +5 -5
  27. openlit/instrumentation/groq/__init__.py +2 -2
  28. openlit/instrumentation/groq/async_groq.py +356 -240
  29. openlit/instrumentation/groq/groq.py +356 -240
  30. openlit/instrumentation/haystack/haystack.py +3 -3
  31. openlit/instrumentation/julep/async_julep.py +3 -3
  32. openlit/instrumentation/julep/julep.py +3 -3
  33. openlit/instrumentation/langchain/__init__.py +13 -7
  34. openlit/instrumentation/langchain/async_langchain.py +384 -0
  35. openlit/instrumentation/langchain/langchain.py +98 -490
  36. openlit/instrumentation/letta/letta.py +5 -3
  37. openlit/instrumentation/litellm/__init__.py +4 -5
  38. openlit/instrumentation/litellm/async_litellm.py +316 -245
  39. openlit/instrumentation/litellm/litellm.py +312 -241
  40. openlit/instrumentation/llamaindex/llamaindex.py +3 -3
  41. openlit/instrumentation/mem0/mem0.py +3 -3
  42. openlit/instrumentation/milvus/milvus.py +5 -5
  43. openlit/instrumentation/mistral/__init__.py +6 -6
  44. openlit/instrumentation/mistral/async_mistral.py +421 -248
  45. openlit/instrumentation/mistral/mistral.py +418 -244
  46. openlit/instrumentation/multion/async_multion.py +4 -2
  47. openlit/instrumentation/multion/multion.py +4 -2
  48. openlit/instrumentation/ollama/__init__.py +8 -30
  49. openlit/instrumentation/ollama/async_ollama.py +385 -417
  50. openlit/instrumentation/ollama/ollama.py +384 -417
  51. openlit/instrumentation/openai/async_openai.py +7 -9
  52. openlit/instrumentation/openai/openai.py +7 -9
  53. openlit/instrumentation/phidata/phidata.py +4 -2
  54. openlit/instrumentation/pinecone/pinecone.py +5 -5
  55. openlit/instrumentation/premai/__init__.py +2 -2
  56. openlit/instrumentation/premai/premai.py +262 -213
  57. openlit/instrumentation/qdrant/async_qdrant.py +5 -5
  58. openlit/instrumentation/qdrant/qdrant.py +5 -5
  59. openlit/instrumentation/reka/__init__.py +2 -2
  60. openlit/instrumentation/reka/async_reka.py +90 -52
  61. openlit/instrumentation/reka/reka.py +90 -52
  62. openlit/instrumentation/together/__init__.py +4 -4
  63. openlit/instrumentation/together/async_together.py +278 -236
  64. openlit/instrumentation/together/together.py +278 -236
  65. openlit/instrumentation/transformers/__init__.py +1 -1
  66. openlit/instrumentation/transformers/transformers.py +75 -44
  67. openlit/instrumentation/vertexai/__init__.py +14 -64
  68. openlit/instrumentation/vertexai/async_vertexai.py +329 -986
  69. openlit/instrumentation/vertexai/vertexai.py +329 -986
  70. openlit/instrumentation/vllm/__init__.py +1 -1
  71. openlit/instrumentation/vllm/vllm.py +62 -32
  72. openlit/semcov/__init__.py +3 -3
  73. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/METADATA +1 -1
  74. openlit-1.33.10.dist-info/RECORD +122 -0
  75. openlit-1.33.9.dist-info/RECORD +0 -121
  76. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/LICENSE +0 -0
  77. {openlit-1.33.9.dist-info → openlit-1.33.10.dist-info}/WHEEL +0 -0
openlit/__helpers.py CHANGED
@@ -266,6 +266,11 @@ def set_server_address_and_port(client_instance: Any,
266
266
  config = getattr(client_instance, "_config", None)
267
267
  base_url = getattr(config, "endpoint", None)
268
268
 
269
+ if not base_url:
270
+ # Attempt to get server_url from instance.sdk_configuration.server_url
271
+ config = getattr(client_instance, "sdk_configuration", None)
272
+ base_url = getattr(config, "server_url", None)
273
+
269
274
  if base_url:
270
275
  if isinstance(base_url, str):
271
276
  url = urlparse(base_url)
openlit/__init__.py CHANGED
@@ -17,6 +17,7 @@ import requests
17
17
  # Import internal modules for setting up tracing and fetching pricing info.
18
18
  from opentelemetry import trace as t
19
19
  from opentelemetry.trace import SpanKind, Status, StatusCode, Span
20
+ from opentelemetry.sdk.resources import SERVICE_NAME, DEPLOYMENT_ENVIRONMENT
20
21
  from openlit.semcov import SemanticConvetion
21
22
  from openlit.otel.tracing import setup_tracing
22
23
  from openlit.otel.metrics import setup_meter
@@ -555,11 +556,11 @@ def trace(wrapped):
555
556
  span.set_attribute("function.args", str(args))
556
557
  span.set_attribute("function.kwargs", str(kwargs))
557
558
  span.set_attribute(
558
- SemanticConvetion.GEN_AI_APPLICATION_NAME,
559
+ SERVICE_NAME,
559
560
  OpenlitConfig.application_name,
560
561
  )
561
562
  span.set_attribute(
562
- SemanticConvetion.GEN_AI_ENVIRONMENT, OpenlitConfig.environment
563
+ DEPLOYMENT_ENVIRONMENT, OpenlitConfig.environment
563
564
  )
564
565
  except Exception as meta_exception:
565
566
  logging.error(
@@ -5,7 +5,7 @@ Module for monitoring AG2.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import handle_exception
10
10
  from openlit.semcov import SemanticConvetion
11
11
 
@@ -70,9 +70,9 @@ def wrap_ag2(gen_ai_endpoint, version, environment, application_name,
70
70
  gen_ai_endpoint)
71
71
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
72
72
  SemanticConvetion.GEN_AI_SYSTEM_AG2)
73
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
73
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
74
74
  environment)
75
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
75
+ span.set_attribute(SERVICE_NAME,
76
76
  application_name)
77
77
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
78
78
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
@@ -41,7 +41,7 @@ def chat(version, environment, application_name,
41
41
  class TracedSyncStream:
42
42
  """
43
43
  Wrapper for streaming responses to collect metrics and trace data.
44
- Wraps the 'ai21.AsyncStream' response to collect message IDs and aggregated response.
44
+ Wraps the response to collect message IDs and aggregated response.
45
45
 
46
46
  This class implements the '__aiter__' and '__anext__' methods that
47
47
  handle asynchronous streaming responses.
@@ -41,7 +41,7 @@ def async_chat(version, environment, application_name,
41
41
  class TracedAsyncStream:
42
42
  """
43
43
  Wrapper for streaming responses to collect metrics and trace data.
44
- Wraps the 'ai21.AsyncStream' response to collect message IDs and aggregated response.
44
+ Wraps the response to collect message IDs and aggregated response.
45
45
 
46
46
  This class implements the '__aiter__' and '__anext__' methods that
47
47
  handle asynchronous streaming responses.
@@ -40,7 +40,7 @@ def messages(version, environment, application_name, tracer,
40
40
  class TracedSyncStream:
41
41
  """
42
42
  Wrapper for streaming responses to collect metrics and trace data.
43
- Wraps the 'anthropic.AsyncStream' response to collect message IDs and aggregated response.
43
+ Wraps the response to collect message IDs and aggregated response.
44
44
 
45
45
  This class implements the '__aiter__' and '__anext__' methods that
46
46
  handle asynchronous streaming responses.
@@ -40,7 +40,7 @@ def async_messages(version, environment, application_name, tracer,
40
40
  class TracedASyncStream:
41
41
  """
42
42
  Wrapper for streaming responses to collect metrics and trace data.
43
- Wraps the 'anthropic.AsyncStream' response to collect message IDs and aggregated response.
43
+ Wraps the response to collect message IDs and aggregated response.
44
44
 
45
45
  This class implements the '__aiter__' and '__anext__' methods that
46
46
  handle asynchronous streaming responses.
@@ -5,7 +5,7 @@ Module for monitoring AstraDB.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import handle_exception
10
10
  from openlit.semcov import SemanticConvetion
11
11
 
@@ -69,9 +69,9 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
69
69
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
70
70
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
71
71
  gen_ai_endpoint)
72
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
72
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
73
73
  environment)
74
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
74
+ span.set_attribute(SERVICE_NAME,
75
75
  application_name)
76
76
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
77
77
  SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
@@ -199,11 +199,11 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
199
199
  attributes = {
200
200
  TELEMETRY_SDK_NAME:
201
201
  "openlit",
202
- SemanticConvetion.GEN_AI_APPLICATION_NAME:
202
+ SERVICE_NAME:
203
203
  application_name,
204
204
  SemanticConvetion.DB_SYSTEM:
205
205
  SemanticConvetion.DB_SYSTEM_ASTRA,
206
- SemanticConvetion.GEN_AI_ENVIRONMENT:
206
+ DEPLOYMENT_ENVIRONMENT:
207
207
  environment,
208
208
  SemanticConvetion.GEN_AI_OPERATION:
209
209
  SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
@@ -5,7 +5,7 @@ Module for monitoring AstraDB.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import handle_exception
10
10
  from openlit.semcov import SemanticConvetion
11
11
 
@@ -69,9 +69,9 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
69
69
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
70
70
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
71
71
  gen_ai_endpoint)
72
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
72
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
73
73
  environment)
74
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
74
+ span.set_attribute(SERVICE_NAME,
75
75
  application_name)
76
76
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
77
77
  SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
@@ -199,11 +199,11 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
199
199
  attributes = {
200
200
  TELEMETRY_SDK_NAME:
201
201
  "openlit",
202
- SemanticConvetion.GEN_AI_APPLICATION_NAME:
202
+ SERVICE_NAME:
203
203
  application_name,
204
204
  SemanticConvetion.DB_SYSTEM:
205
205
  SemanticConvetion.DB_SYSTEM_ASTRA,
206
- SemanticConvetion.GEN_AI_ENVIRONMENT:
206
+ DEPLOYMENT_ENVIRONMENT:
207
207
  environment,
208
208
  SemanticConvetion.GEN_AI_OPERATION:
209
209
  SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
@@ -42,7 +42,7 @@ def async_complete(version, environment, application_name,
42
42
  class TracedAsyncStream:
43
43
  """
44
44
  Wrapper for streaming responses to collect metrics and trace data.
45
- Wraps the 'az.ai.inference.AsyncStream' response to collect message IDs and aggregated response.
45
+ Wraps the response to collect message IDs and aggregated response.
46
46
 
47
47
  This class implements the '__aiter__' and '__anext__' methods that
48
48
  handle asynchronous streaming responses.
@@ -191,7 +191,7 @@ def async_complete(version, environment, application_name,
191
191
  output_tokens)
192
192
  self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
193
193
  self._server_address)
194
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
194
+ self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
195
195
  self._system_fingerprint)
196
196
  if isinstance(self._llmresponse, str):
197
197
  self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
@@ -370,7 +370,7 @@ def async_complete(version, environment, application_name,
370
370
  output_tokens)
371
371
  span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
372
372
  server_address)
373
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
373
+ span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
374
374
  response_dict.get('system_fingerprint'))
375
375
 
376
376
  # Set base span attribues (Extras)
@@ -42,7 +42,7 @@ def complete(version, environment, application_name,
42
42
  class TracedSyncStream:
43
43
  """
44
44
  Wrapper for streaming responses to collect metrics and trace data.
45
- Wraps the 'az.ai.inference.AsyncStream' response to collect message IDs and aggregated response.
45
+ Wraps the response to collect message IDs and aggregated response.
46
46
 
47
47
  This class implements the '__aiter__' and '__anext__' methods that
48
48
  handle asynchronous streaming responses.
@@ -191,7 +191,7 @@ def complete(version, environment, application_name,
191
191
  output_tokens)
192
192
  self._span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
193
193
  self._server_address)
194
- self._span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
194
+ self._span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
195
195
  self._system_fingerprint)
196
196
  if isinstance(self._llmresponse, str):
197
197
  self._span.set_attribute(SemanticConvetion.GEN_AI_OUTPUT_TYPE,
@@ -370,7 +370,7 @@ def complete(version, environment, application_name,
370
370
  output_tokens)
371
371
  span.set_attribute(SemanticConvetion.SERVER_ADDRESS,
372
372
  server_address)
373
- span.set_attribute(SemanticConvetion.GEN_AI_OPENAI_RESPONSE_SYSTEM_FINGERPRINT,
373
+ span.set_attribute(SemanticConvetion.GEN_AI_RESPONSE_SYSTEM_FINGERPRINT,
374
374
  response_dict.get('system_fingerprint'))
375
375
 
376
376
  # Set base span attribues (Extras)
@@ -5,7 +5,7 @@ Module for monitoring ChromaDB.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import handle_exception
10
10
  from openlit.semcov import SemanticConvetion
11
11
 
@@ -73,9 +73,9 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
73
73
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
74
74
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
75
75
  gen_ai_endpoint)
76
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
76
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
77
77
  environment)
78
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
78
+ span.set_attribute(SERVICE_NAME,
79
79
  application_name)
80
80
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
81
81
  SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB)
@@ -173,11 +173,11 @@ def general_wrap(gen_ai_endpoint, version, environment, application_name,
173
173
  attributes = {
174
174
  TELEMETRY_SDK_NAME:
175
175
  "openlit",
176
- SemanticConvetion.GEN_AI_APPLICATION_NAME:
176
+ SERVICE_NAME:
177
177
  application_name,
178
178
  SemanticConvetion.DB_SYSTEM:
179
179
  SemanticConvetion.DB_SYSTEM_CHROMA,
180
- SemanticConvetion.GEN_AI_ENVIRONMENT:
180
+ DEPLOYMENT_ENVIRONMENT:
181
181
  environment,
182
182
  SemanticConvetion.GEN_AI_OPERATION:
183
183
  SemanticConvetion.GEN_AI_OPERATION_TYPE_VECTORDB,
@@ -372,7 +372,7 @@ def async_chat_stream(version, environment, application_name,
372
372
  class TracedAsyncStream:
373
373
  """
374
374
  Wrapper for streaming responses to collect metrics and trace data.
375
- Wraps the 'cohere.AsyncStream' response to collect message IDs and aggregated response.
375
+ Wraps the response to collect message IDs and aggregated response.
376
376
 
377
377
  This class implements the '__aiter__' and '__anext__' methods that
378
378
  handle asynchronous streaming responses.
@@ -222,7 +222,7 @@ def chat(version, environment, application_name, tracer,
222
222
 
223
223
  llm_response = response_dict.get('message').get('content')[0].get('text')
224
224
 
225
- # Set base span attribues (OTel Semconv)
225
+ # Set base span attribues (OTel Semconv)
226
226
  span.set_attribute(TELEMETRY_SDK_NAME, "openlit")
227
227
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
228
228
  SemanticConvetion.GEN_AI_OPERATION_TYPE_CHAT)
@@ -372,7 +372,7 @@ def chat_stream(version, environment, application_name,
372
372
  class TracedSyncStream:
373
373
  """
374
374
  Wrapper for streaming responses to collect metrics and trace data.
375
- Wraps the 'cohere.AsyncStream' response to collect message IDs and aggregated response.
375
+ Wraps the response to collect message IDs and aggregated response.
376
376
 
377
377
  This class implements the '__aiter__' and '__anext__' methods that
378
378
  handle asynchronous streaming responses.
@@ -5,7 +5,7 @@ Module for monitoring controlflow.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import handle_exception
10
10
  from openlit.semcov import SemanticConvetion
11
11
 
@@ -64,9 +64,9 @@ def wrap_controlflow(gen_ai_endpoint, version, environment, application_name,
64
64
  gen_ai_endpoint)
65
65
  span.set_attribute(SemanticConvetion.GEN_AI_SYSTEM,
66
66
  SemanticConvetion.GEN_AI_SYSTEM_CONTROLFLOW)
67
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
67
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
68
68
  environment)
69
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
69
+ span.set_attribute(SERVICE_NAME,
70
70
  application_name)
71
71
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
72
72
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
@@ -5,7 +5,7 @@ Module for monitoring Crawl4AI calls.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import (
10
10
  handle_exception,
11
11
  )
@@ -62,9 +62,9 @@ def async_wrap_crawl(gen_ai_endpoint, version, environment, application_name,
62
62
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
63
63
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
64
64
  gen_ai_endpoint)
65
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
65
+ span.set_attribute(SERVICE_NAME,
66
66
  application_name)
67
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
67
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
68
68
  environment)
69
69
  span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
70
70
  SemanticConvetion.GEN_AI_AGENT_TYPE_BROWSER)
@@ -5,7 +5,7 @@ Module for monitoring Crawl4AI calls.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import (
10
10
  handle_exception,
11
11
  )
@@ -62,9 +62,9 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
62
62
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
63
63
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
64
64
  gen_ai_endpoint)
65
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
65
+ span.set_attribute(SERVICE_NAME,
66
66
  application_name)
67
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
67
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
68
68
  environment)
69
69
  span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
70
70
  SemanticConvetion.GEN_AI_AGENT_TYPE_BROWSER)
@@ -6,7 +6,7 @@ Module for monitoring LiteLLM calls.
6
6
  import logging
7
7
  import json
8
8
  from opentelemetry.trace import SpanKind, Status, StatusCode
9
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
9
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
10
10
  from openlit.__helpers import (
11
11
  handle_exception,
12
12
  )
@@ -75,8 +75,10 @@ def crew_wrap(gen_ai_endpoint, version, environment, application_name,
75
75
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
76
76
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
77
77
  gen_ai_endpoint)
78
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
78
+ span.set_attribute(SERVICE_NAME,
79
79
  application_name)
80
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
81
+ environment)
80
82
 
81
83
  instance_class = instance.__class__.__name__
82
84
 
@@ -5,7 +5,7 @@ Module for monitoring Dynamiq calls.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import (
10
10
  handle_exception,
11
11
  )
@@ -62,9 +62,9 @@ def dynamiq_wrap(gen_ai_endpoint, version, environment, application_name,
62
62
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
63
63
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
64
64
  gen_ai_endpoint)
65
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
65
+ span.set_attribute(SERVICE_NAME,
66
66
  application_name)
67
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
67
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
68
68
  environment)
69
69
 
70
70
  if gen_ai_endpoint == "dynamiq.agent_run":
@@ -60,8 +60,7 @@ def async_generate(gen_ai_endpoint, version, environment, application_name,
60
60
  start_time = time.time()
61
61
  response = await wrapped(*args, **kwargs)
62
62
  end_time = time.time()
63
- for key, value in instance.__dict__.items():
64
- print(f'{key}: {value}')
63
+
65
64
  try:
66
65
  # Calculate cost of the operation
67
66
  cost = get_audio_model_cost(request_model,
@@ -61,8 +61,7 @@ def generate(gen_ai_endpoint, version, environment, application_name,
61
61
  start_time = time.time()
62
62
  response = wrapped(*args, **kwargs)
63
63
  end_time = time.time()
64
- for key, value in instance.__dict__.items():
65
- print(f'{key}: {value}')
64
+
66
65
  try:
67
66
  # Calculate cost of the operation
68
67
  cost = get_audio_model_cost(request_model,
@@ -5,7 +5,7 @@ Module for monitoring EmbedChain applications.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import handle_exception
10
10
  from openlit.semcov import SemanticConvetion
11
11
 
@@ -63,11 +63,11 @@ def evaluate(gen_ai_endpoint, version, environment, application_name,
63
63
  SemanticConvetion.GEN_AI_SYSTEM_EMBEDCHAIN)
64
64
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
65
65
  gen_ai_endpoint)
66
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
66
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
67
67
  environment)
68
68
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
69
69
  SemanticConvetion.GEN_AI_OPERATION_TYPE_FRAMEWORK)
70
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
70
+ span.set_attribute(SERVICE_NAME,
71
71
  application_name)
72
72
  span.set_attribute(SemanticConvetion.GEN_AI_EVAL_CONTEXT_RELEVANCY,
73
73
  response["context_relevancy"])
@@ -141,11 +141,11 @@ def get_data_sources(gen_ai_endpoint, version, environment, application_name,
141
141
  SemanticConvetion.GEN_AI_SYSTEM_EMBEDCHAIN)
142
142
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
143
143
  gen_ai_endpoint)
144
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
144
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
145
145
  environment)
146
146
  span.set_attribute(SemanticConvetion.GEN_AI_OPERATION,
147
147
  SemanticConvetion.GEN_AI_OPERATION_TYPE_FRAMEWORK)
148
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
148
+ span.set_attribute(SERVICE_NAME,
149
149
  application_name)
150
150
  span.set_attribute(SemanticConvetion.GEN_AI_DATA_SOURCES,
151
151
  len(response))
@@ -5,7 +5,7 @@ Module for monitoring FireCrawl calls.
5
5
 
6
6
  import logging
7
7
  from opentelemetry.trace import SpanKind, Status, StatusCode
8
- from opentelemetry.sdk.resources import TELEMETRY_SDK_NAME
8
+ from opentelemetry.sdk.resources import SERVICE_NAME, TELEMETRY_SDK_NAME, DEPLOYMENT_ENVIRONMENT
9
9
  from openlit.__helpers import (
10
10
  handle_exception,
11
11
  )
@@ -62,9 +62,9 @@ def wrap_crawl(gen_ai_endpoint, version, environment, application_name,
62
62
  SemanticConvetion.GEN_AI_OPERATION_TYPE_AGENT)
63
63
  span.set_attribute(SemanticConvetion.GEN_AI_ENDPOINT,
64
64
  gen_ai_endpoint)
65
- span.set_attribute(SemanticConvetion.GEN_AI_APPLICATION_NAME,
65
+ span.set_attribute(SERVICE_NAME,
66
66
  application_name)
67
- span.set_attribute(SemanticConvetion.GEN_AI_ENVIRONMENT,
67
+ span.set_attribute(DEPLOYMENT_ENVIRONMENT,
68
68
  environment)
69
69
  span.set_attribute(SemanticConvetion.GEN_AI_AGENT_TYPE,
70
70
  SemanticConvetion.GEN_AI_AGENT_TYPE_BROWSER)
@@ -34,7 +34,7 @@ class GPT4AllInstrumentor(BaseInstrumentor):
34
34
  wrap_function_wrapper(
35
35
  "gpt4all",
36
36
  "GPT4All.generate",
37
- generate("gpt4all.generate", version, environment, application_name,
37
+ generate(version, environment, application_name,
38
38
  tracer, pricing_info, trace_content, metrics, disable_metrics),
39
39
  )
40
40
 
@@ -42,7 +42,7 @@ class GPT4AllInstrumentor(BaseInstrumentor):
42
42
  wrap_function_wrapper(
43
43
  "gpt4all",
44
44
  "Embed4All.embed",
45
- embed("gpt4all.embed", version, environment, application_name,
45
+ embed(version, environment, application_name,
46
46
  tracer, pricing_info, trace_content, metrics, disable_metrics),
47
47
  )
48
48