opentelemetry-instrumentation-llamaindex 0.40.13__tar.gz → 0.41.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release. This version of opentelemetry-instrumentation-llamaindex might be problematic.

Files changed (21)
  1. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/PKG-INFO +2 -2
  2. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/__init__.py +20 -12
  3. opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/config.py +9 -0
  4. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/dispatcher_wrapper.py +49 -121
  5. opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/event_emitter.py +152 -0
  6. opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/event_models.py +41 -0
  7. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/retriever_query_engine_instrumentor.py +3 -3
  8. opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/span_utils.py +143 -0
  9. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/utils.py +20 -2
  10. opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/version.py +1 -0
  11. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/pyproject.toml +5 -5
  12. opentelemetry_instrumentation_llamaindex-0.40.13/opentelemetry/instrumentation/llamaindex/config.py +0 -2
  13. opentelemetry_instrumentation_llamaindex-0.40.13/opentelemetry/instrumentation/llamaindex/version.py +0 -1
  14. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/README.md +0 -0
  15. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/base_agent_instrumentor.py +0 -0
  16. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/base_embedding_instrumentor.py +0 -0
  17. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/base_retriever_instrumentor.py +0 -0
  18. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/base_synthesizer_instrumentor.py +0 -0
  19. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/base_tool_instrumentor.py +0 -0
  20. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/custom_llm_instrumentor.py +0 -0
  21. {opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/query_pipeline_instrumentor.py +0 -0

{opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: opentelemetry-instrumentation-llamaindex
-Version: 0.40.13
+Version: 0.41.0
 Summary: OpenTelemetry LlamaIndex instrumentation
 License: Apache-2.0
 Author: Gal Kleinman
@@ -18,7 +18,7 @@ Requires-Dist: inflection (>=0.5.1,<0.6.0)
 Requires-Dist: opentelemetry-api (>=1.28.0,<2.0.0)
 Requires-Dist: opentelemetry-instrumentation (>=0.50b0)
 Requires-Dist: opentelemetry-semantic-conventions (>=0.50b0)
-Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.9)
+Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.10)
 Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-llamaindex
 Description-Content-Type: text/markdown

{opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/__init__.py
@@ -4,16 +4,13 @@ import logging
 from importlib.metadata import version as import_version
 from typing import Collection

-from opentelemetry.instrumentation.llamaindex.config import Config
-from opentelemetry.trace import get_tracer
-
+from opentelemetry._events import get_event_logger
 from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
-
 from opentelemetry.instrumentation.llamaindex.base_agent_instrumentor import (
     BaseAgentInstrumentor,
 )
-from opentelemetry.instrumentation.llamaindex.retriever_query_engine_instrumentor import (
-    RetrieverQueryEngineInstrumentor,
+from opentelemetry.instrumentation.llamaindex.base_embedding_instrumentor import (
+    BaseEmbeddingInstrumentor,
 )
 from opentelemetry.instrumentation.llamaindex.base_retriever_instrumentor import (
     BaseRetrieverInstrumentor,
@@ -24,30 +21,35 @@ from opentelemetry.instrumentation.llamaindex.base_synthesizer_instrumentor impo
 from opentelemetry.instrumentation.llamaindex.base_tool_instrumentor import (
     BaseToolInstrumentor,
 )
-from opentelemetry.instrumentation.llamaindex.base_embedding_instrumentor import (
-    BaseEmbeddingInstrumentor,
-)
+from opentelemetry.instrumentation.llamaindex.config import Config
 from opentelemetry.instrumentation.llamaindex.custom_llm_instrumentor import (
     CustomLLMInstrumentor,
 )
+from opentelemetry.instrumentation.llamaindex.dispatcher_wrapper import (
+    instrument_with_dispatcher,
+)
 from opentelemetry.instrumentation.llamaindex.query_pipeline_instrumentor import (
     QueryPipelineInstrumentor,
 )
+from opentelemetry.instrumentation.llamaindex.retriever_query_engine_instrumentor import (
+    RetrieverQueryEngineInstrumentor,
+)
 from opentelemetry.instrumentation.llamaindex.version import __version__
-from opentelemetry.instrumentation.llamaindex.dispatcher_wrapper import instrument_with_dispatcher
+from opentelemetry.trace import get_tracer

 logger = logging.getLogger(__name__)

-_core_instruments = ("llama-index-core >= 0.7.0", )
+_core_instruments = ("llama-index-core >= 0.7.0",)
 _full_instruments = ("llama-index >= 0.7.0",)


 class LlamaIndexInstrumentor(BaseInstrumentor):
     """An instrumentor for both: core and legacy LlamaIndex SDK."""

-    def __init__(self, exception_logger=None):
+    def __init__(self, exception_logger=None, use_legacy_attributes=True):
         self.legacy = LlamaIndexInstrumentorFull(exception_logger)
         self.core = LlamaIndexInstrumentorCore(exception_logger)
+        Config.use_legacy_attributes = use_legacy_attributes

     def instrumentation_dependencies(self) -> Collection[str]:
         return ()
@@ -70,6 +72,12 @@ class LlamaIndexInstrumentor(BaseInstrumentor):
         tracer_provider = kwargs.get("tracer_provider")
         tracer = get_tracer(__name__, __version__, tracer_provider)

+        if not Config.use_legacy_attributes:
+            event_logger_provider = kwargs.get("event_logger_provider")
+            Config.event_logger = get_event_logger(
+                __name__, __version__, event_logger_provider=event_logger_provider
+            )
+
         if import_version(name) >= "0.10.20":
             instrument_with_dispatcher(tracer)
         else:
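
The headline change in this file is the opt-in event mode: `__init__` grows a `use_legacy_attributes` flag, and `_instrument` resolves an `EventLogger` when the flag is off. A minimal sketch of how a consumer might opt in; the SDK provider wiring here is an assumption for illustration, not part of this diff:

```python
# Sketch: enabling the new event-based telemetry path. Assumes the
# OpenTelemetry SDK is installed; provider wiring is illustrative only.
from opentelemetry.instrumentation.llamaindex import LlamaIndexInstrumentor
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider
from opentelemetry.sdk.trace import TracerProvider

# With use_legacy_attributes=False, _instrument() calls get_event_logger()
# and stores the result on Config.event_logger for the emitters to use.
LlamaIndexInstrumentor(use_legacy_attributes=False).instrument(
    tracer_provider=TracerProvider(),
    event_logger_provider=EventLoggerProvider(LoggerProvider()),
)
```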

opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/config.py
@@ -0,0 +1,9 @@
+from typing import Optional
+
+from opentelemetry._events import EventLogger
+
+
+class Config:
+    exception_logger = None
+    use_legacy_attributes = True
+    event_logger: Optional[EventLogger] = None
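
`Config` is plain module-level state shared by the instrumentor and the emitters; nothing here is instance-scoped. A two-line illustration (normally the flag is set through the instrumentor constructor shown above rather than directly):

```python
from opentelemetry.instrumentation.llamaindex.config import Config

Config.use_legacy_attributes = False  # process-wide: flips every emitter to event mode
```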

{opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/dispatcher_wrapper.py
@@ -1,20 +1,22 @@
-from functools import singledispatchmethod
 import inspect
 import json
 import re
-from typing import Any, AsyncGenerator, Dict, Generator, List, Optional
 from dataclasses import dataclass, field
+from functools import singledispatchmethod
+from typing import Any, AsyncGenerator, Dict, Generator, List, Optional

-from llama_index.core.bridge.pydantic import PrivateAttr
-from llama_index.core.base.llms.types import MessageRole
 from llama_index.core.base.response.schema import StreamingResponse
+from llama_index.core.bridge.pydantic import PrivateAttr
 from llama_index.core.instrumentation import get_dispatcher
+from llama_index.core.instrumentation.event_handlers import BaseEventHandler
 from llama_index.core.instrumentation.events import BaseEvent
 from llama_index.core.instrumentation.events.agent import AgentToolCallEvent
-from llama_index.core.instrumentation.events.embedding import EmbeddingStartEvent
 from llama_index.core.instrumentation.events.chat_engine import (
     StreamChatEndEvent,
 )
+from llama_index.core.instrumentation.events.embedding import (
+    EmbeddingStartEvent,
+)
 from llama_index.core.instrumentation.events.llm import (
     LLMChatEndEvent,
     LLMChatStartEvent,
@@ -22,24 +24,36 @@ from llama_index.core.instrumentation.events.llm import (
     LLMPredictEndEvent,
 )
 from llama_index.core.instrumentation.events.rerank import ReRankStartEvent
-from llama_index.core.instrumentation.event_handlers import BaseEventHandler
 from llama_index.core.instrumentation.span_handlers import BaseSpanHandler
 from llama_index.core.workflow import Workflow
 from opentelemetry import context as context_api
+from opentelemetry.instrumentation.llamaindex.event_emitter import (
+    emit_chat_message_events,
+    emit_chat_response_events,
+    emit_rerank_message_event,
+)
+from opentelemetry.instrumentation.llamaindex.span_utils import (
+    set_embedding,
+    set_llm_chat_request,
+    set_llm_chat_request_model_attributes,
+    set_llm_chat_response,
+    set_llm_chat_response_model_attributes,
+    set_llm_predict_response,
+    set_rerank,
+    set_rerank_model_attributes,
+    set_tool,
+)
 from opentelemetry.instrumentation.llamaindex.utils import (
     JSONEncoder,
-    dont_throw,
+    should_emit_events,
     should_send_prompts,
 )
 from opentelemetry.semconv_ai import (
     SUPPRESS_LANGUAGE_MODEL_INSTRUMENTATION_KEY,
-    LLMRequestTypeValues,
     SpanAttributes,
     TraceloopSpanKindValues,
 )
-from opentelemetry.trace import set_span_in_context, Tracer
-from opentelemetry.trace.span import Span
-
+from opentelemetry.trace import Span, Tracer, set_span_in_context

 # For these spans, instead of creating a span using data from LlamaIndex,
 # we use the regular OpenLLMetry instrumentations
@@ -60,109 +74,6 @@ def instrument_with_dispatcher(tracer: Tracer):
     dispatcher.add_event_handler(OpenLLMetryEventHandler(openllmetry_span_handler))


-@dont_throw
-def _set_llm_chat_request(event, span) -> None:
-    model_dict = event.model_dict
-    span.set_attribute(SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.CHAT.value)
-    span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model_dict.get("model"))
-    span.set_attribute(
-        SpanAttributes.LLM_REQUEST_TEMPERATURE, model_dict.get("temperature")
-    )
-    if should_send_prompts():
-        for idx, message in enumerate(event.messages):
-            span.set_attribute(
-                f"{SpanAttributes.LLM_PROMPTS}.{idx}.role", message.role.value
-            )
-            span.set_attribute(
-                f"{SpanAttributes.LLM_PROMPTS}.{idx}.content", message.content
-            )
-
-
-@dont_throw
-def _set_llm_chat_response(event, span) -> None:
-    response = event.response
-    if should_send_prompts():
-        for idx, message in enumerate(event.messages):
-            span.set_attribute(
-                f"{SpanAttributes.LLM_PROMPTS}.{idx}.role", message.role.value
-            )
-            span.set_attribute(
-                f"{SpanAttributes.LLM_PROMPTS}.{idx}.content", message.content
-            )
-        span.set_attribute(
-            f"{SpanAttributes.LLM_COMPLETIONS}.0.role",
-            response.message.role.value,
-        )
-        span.set_attribute(
-            f"{SpanAttributes.LLM_COMPLETIONS}.0.content",
-            response.message.content,
-        )
-    if not (raw := response.raw):
-        return
-    span.set_attribute(
-        SpanAttributes.LLM_RESPONSE_MODEL,
-        (
-            raw.get("model") if "model" in raw else raw.model
-        ),  # raw can be Any, not just ChatCompletion
-    )
-    if usage := raw.get("usage") if "usage" in raw else raw.usage:
-        span.set_attribute(
-            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.completion_tokens
-        )
-        span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_tokens)
-        span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens)
-    if choices := raw.choices:
-        span.set_attribute(
-            SpanAttributes.LLM_RESPONSE_FINISH_REASON, choices[0].finish_reason
-        )
-
-
-@dont_throw
-def _set_llm_predict_response(event, span) -> None:
-    if should_send_prompts():
-        span.set_attribute(
-            f"{SpanAttributes.LLM_COMPLETIONS}.role",
-            MessageRole.ASSISTANT.value,
-        )
-        span.set_attribute(
-            f"{SpanAttributes.LLM_COMPLETIONS}.content",
-            event.output,
-        )
-
-
-@dont_throw
-def _set_embedding(event, span) -> None:
-    model_dict = event.model_dict
-    span.set_attribute(
-        f"{LLMRequestTypeValues.EMBEDDING.value}.model_name",
-        model_dict.get("model_name"),
-    )
-
-
-@dont_throw
-def _set_rerank(event, span) -> None:
-    span.set_attribute(
-        f"{LLMRequestTypeValues.RERANK.value}.model_name",
-        event.model_name,
-    )
-    span.set_attribute(
-        f"{LLMRequestTypeValues.RERANK.value}.top_n",
-        event.top_n,
-    )
-    if should_send_prompts():
-        span.set_attribute(
-            f"{LLMRequestTypeValues.RERANK.value}.query",
-            event.query.query_str,
-        )
-
-
-@dont_throw
-def _set_tool(event, span) -> None:
-    span.set_attribute("tool.name", event.tool.name)
-    span.set_attribute("tool.description", event.tool.description)
-    span.set_attribute("tool.arguments", event.arguments)
-
-
 @dataclass
 class SpanHolder:
     span_id: str
@@ -205,27 +116,40 @@

     @update_span_for_event.register
     def _(self, event: LLMChatStartEvent):
-        _set_llm_chat_request(event, self.otel_span)
+        set_llm_chat_request_model_attributes(event, self.otel_span)
+        if should_emit_events():
+            emit_chat_message_events(event)
+        else:
+            set_llm_chat_request(event, self.otel_span)

     @update_span_for_event.register
     def _(self, event: LLMChatEndEvent):
-        _set_llm_chat_response(event, self.otel_span)
+        set_llm_chat_response_model_attributes(event, self.otel_span)
+        if should_emit_events():
+            emit_chat_response_events(event)
+        else:
+            set_llm_chat_response(event, self.otel_span)  # noqa: F821

     @update_span_for_event.register
     def _(self, event: LLMPredictEndEvent):
-        _set_llm_predict_response(event, self.otel_span)
+        if not should_emit_events():
+            set_llm_predict_response(event, self.otel_span)

     @update_span_for_event.register
     def _(self, event: EmbeddingStartEvent):
-        _set_embedding(event, self.otel_span)
+        set_embedding(event, self.otel_span)

     @update_span_for_event.register
     def _(self, event: ReRankStartEvent):
-        _set_rerank(event, self.otel_span)
+        set_rerank_model_attributes(event, self.otel_span)
+        if should_emit_events():
+            emit_rerank_message_event(event)
+        else:
+            set_rerank(event, self.otel_span)

     @update_span_for_event.register
     def _(self, event: AgentToolCallEvent):
-        _set_tool(event, self.otel_span)
+        set_tool(event, self.otel_span)


 class OpenLLMetrySpanHandler(BaseSpanHandler[SpanHolder]):
@@ -269,7 +193,11 @@ class OpenLLMetrySpanHandler(BaseSpanHandler[SpanHolder]):
         )

         if isinstance(instance, Workflow):
-            span_name = f"{instance.__class__.__name__}.{kind}" if not parent_span_id else f"{method_name}.{kind}"
+            span_name = (
+                f"{instance.__class__.__name__}.{kind}"
+                if not parent_span_id
+                else f"{method_name}.{kind}"
+            )
         else:
             span_name = f"{class_name}.{kind}"

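
The `update_span_for_event` registrations above rely on `functools.singledispatchmethod`, which picks an overload by the runtime type of the event argument. A self-contained sketch of the pattern; the event classes here are stand-ins, not the real LlamaIndex types:

```python
# Sketch of singledispatchmethod-based event routing (stand-in event types).
from dataclasses import dataclass
from functools import singledispatchmethod


@dataclass
class ChatStart:
    model: str


@dataclass
class ChatEnd:
    text: str


class Handler:
    @singledispatchmethod
    def update_span_for_event(self, event) -> None:
        pass  # base case: unknown event types are silently ignored

    @update_span_for_event.register
    def _(self, event: ChatStart) -> None:
        print(f"chat started with {event.model}")

    @update_span_for_event.register
    def _(self, event: ChatEnd) -> None:
        print(f"chat ended: {event.text}")


Handler().update_span_for_event(ChatStart(model="gpt-4o"))  # chat started with gpt-4o
```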

opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/event_emitter.py
@@ -0,0 +1,152 @@
+from dataclasses import asdict
+from enum import Enum
+from typing import Union
+
+from llama_index.core.instrumentation.events.llm import (
+    LLMChatEndEvent,
+    LLMChatStartEvent,
+)
+from llama_index.core.instrumentation.events.rerank import ReRankStartEvent
+from opentelemetry._events import Event
+from opentelemetry.instrumentation.llamaindex.event_models import (
+    ChoiceEvent,
+    MessageEvent,
+)
+from opentelemetry.instrumentation.llamaindex.utils import (
+    should_emit_events,
+    should_send_prompts,
+)
+from opentelemetry.semconv._incubating.attributes import (
+    gen_ai_attributes as GenAIAttributes,
+)
+
+from .config import Config
+
+
+class Roles(Enum):
+    USER = "user"
+    ASSISTANT = "assistant"
+    SYSTEM = "system"
+    TOOL = "tool"
+
+
+VALID_MESSAGE_ROLES = {role.value for role in Roles}
+"""The valid roles for naming the message event."""
+
+EVENT_ATTRIBUTES = {GenAIAttributes.GEN_AI_SYSTEM: "llamaindex"}
+"""The attributes to be used for the event."""
+
+
+def emit_chat_message_events(event: LLMChatStartEvent):
+    for message in event.messages:
+        emit_event(MessageEvent(content=message.content, role=message.role.value))
+
+
+def emit_chat_response_events(event: LLMChatEndEvent):
+    if event.response:
+        try:
+            finish_reason = event.response.raw.get("choices", [{}])[0].get(
+                "finish_reason", "unknown"
+            )
+        except (AttributeError, ValueError):
+            finish_reason = "unknown"
+        emit_choice_event(
+            index=0,
+            content=event.response.message.content,
+            role=event.response.message.role.value,
+            finish_reason=finish_reason,
+        )
+
+
+def emit_rerank_message_event(event: ReRankStartEvent):
+    if event.query:
+        if isinstance(event.query, str):
+            emit_message_event(content=event.query, role="user")
+        else:
+            emit_message_event(content=event.query.query_str, role="user")
+
+
+def emit_message_event(*, content, role: str):
+    emit_event(MessageEvent(content=content, role=role))
+
+
+def emit_choice_event(
+    *,
+    index: int = 0,
+    content,
+    role: str,
+    finish_reason: str,
+):
+    emit_event(
+        ChoiceEvent(
+            index=index,
+            message={"content": content, "role": role},
+            finish_reason=finish_reason,
+        )
+    )
+
+
+def emit_event(event: Union[MessageEvent, ChoiceEvent]) -> None:
+    """
+    Emit an event to the OpenTelemetry SDK.
+
+    Args:
+        event: The event to emit.
+    """
+    if not should_emit_events():
+        return
+
+    if isinstance(event, MessageEvent):
+        _emit_message_event(event)
+    elif isinstance(event, ChoiceEvent):
+        _emit_choice_event(event)
+    else:
+        raise TypeError("Unsupported event type")
+
+
+def _emit_message_event(event: MessageEvent) -> None:
+    body = asdict(event)
+
+    if event.role in VALID_MESSAGE_ROLES:
+        name = "gen_ai.{}.message".format(event.role)
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to the "role" in the message name. So, remove the role from the body if
+        # it is the same as the in the event name.
+        body.pop("role", None)
+    else:
+        name = "gen_ai.user.message"
+
+    # According to the semantic conventions, only the assistant role has tool call
+    if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
+        del body["tool_calls"]
+    elif event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        del body["content"]
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    Config.event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
+
+
+def _emit_choice_event(event: ChoiceEvent) -> None:
+    body = asdict(event)
+    if event.message["role"] == Roles.ASSISTANT.value:
+        # According to the semantic conventions, the role is conditionally required if available
+        # and not equal to "assistant", so remove the role from the body if it is "assistant".
+        body["message"].pop("role", None)
+
+    if event.tool_calls is None:
+        del body["tool_calls"]
+
+    if not should_send_prompts():
+        body["message"].pop("content", None)
+        if body.get("tool_calls") is not None:
+            for tool_call in body["tool_calls"]:
+                tool_call["function"].pop("arguments", None)
+
+    Config.event_logger.emit(
+        Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
+    )
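
The emitters above name message events after the message role (`gen_ai.user.message`, `gen_ai.system.message`, and so on), drop the role from the body when it is redundant with the name, and attach `gen_ai.system = "llamaindex"` to every event. For reference, this is roughly the direct Events-API call that `_emit_message_event` boils down to for a plain user message; the logger setup is an assumed sketch:

```python
# Sketch: the event that _emit_message_event() produces for a user message
# with no tool calls, expressed as a direct Events API call.
from opentelemetry._events import Event, get_event_logger
from opentelemetry.sdk._events import EventLoggerProvider

event_logger = get_event_logger("demo", event_logger_provider=EventLoggerProvider())

event_logger.emit(
    Event(
        name="gen_ai.user.message",  # role "user" names the event...
        body={"content": "What is OpenTelemetry?"},  # ...so it is omitted here
        attributes={"gen_ai.system": "llamaindex"},
    )
)
```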

opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/event_models.py
@@ -0,0 +1,41 @@
+from dataclasses import dataclass
+from typing import Any, List, Literal, Optional, TypedDict
+
+
+class _FunctionToolCall(TypedDict):
+    function_name: str
+    arguments: Optional[dict[str, Any]]
+
+
+class ToolCall(TypedDict):
+    """Represents a tool call in the AI model."""
+
+    id: str
+    function: _FunctionToolCall
+    type: Literal["function"]
+
+
+class CompletionMessage(TypedDict):
+    """Represents a message in the AI model."""
+
+    content: Any
+    role: str = "assistant"
+
+
+@dataclass
+class MessageEvent:
+    """Represents an input event for the AI model."""
+
+    content: Any
+    role: str = "user"
+    tool_calls: Optional[List[ToolCall]] = None
+
+
+@dataclass
+class ChoiceEvent:
+    """Represents a completion event for the AI model."""
+
+    index: int
+    message: CompletionMessage
+    finish_reason: str = "unknown"
+    tool_calls: Optional[List[ToolCall]] = None
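
These dataclasses turn into event bodies via `dataclasses.asdict` before `event_emitter.py` prunes redundant fields. A quick round-trip sketch:

```python
# Sketch: serializing a ChoiceEvent the way the emitter does.
from dataclasses import asdict

from opentelemetry.instrumentation.llamaindex.event_models import ChoiceEvent

choice = ChoiceEvent(
    index=0,
    message={"content": "Hello!", "role": "assistant"},
    finish_reason="stop",
)
assert asdict(choice) == {
    "index": 0,
    "message": {"content": "Hello!", "role": "assistant"},
    "finish_reason": "stop",
    "tool_calls": None,  # pruned later by _emit_choice_event when None
}
```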

{opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/retriever_query_engine_instrumentor.py
@@ -1,8 +1,7 @@
-from importlib.metadata import version as package_version, PackageNotFoundError
+from importlib.metadata import PackageNotFoundError
+from importlib.metadata import version as package_version

-from wrapt import wrap_function_wrapper
 from opentelemetry.context import attach, set_value
-
 from opentelemetry.instrumentation.llamaindex.utils import (
     _with_tracer_wrapper,
     process_request,
@@ -10,6 +9,7 @@ from opentelemetry.instrumentation.llamaindex.utils import (
     start_as_current_span_async,
 )
 from opentelemetry.semconv_ai import SpanAttributes, TraceloopSpanKindValues
+from wrapt import wrap_function_wrapper

 V9_MODULE_NAME = "llama_index.query_engine.retriever_query_engine"
 V10_MODULE_NAME = "llama_index.core.query_engine.retriever_query_engine"

opentelemetry_instrumentation_llamaindex-0.41.0/opentelemetry/instrumentation/llamaindex/span_utils.py
@@ -0,0 +1,143 @@
+from llama_index.core.base.llms.types import MessageRole
+from opentelemetry.instrumentation.llamaindex.utils import (
+    dont_throw,
+    should_send_prompts,
+)
+from opentelemetry.semconv_ai import (
+    LLMRequestTypeValues,
+    SpanAttributes,
+)
+
+
+@dont_throw
+def set_llm_chat_request(event, span) -> None:
+    if not span.is_recording():
+        return
+
+    if should_send_prompts():
+        for idx, message in enumerate(event.messages):
+            span.set_attribute(
+                f"{SpanAttributes.LLM_PROMPTS}.{idx}.role", message.role.value
+            )
+            span.set_attribute(
+                f"{SpanAttributes.LLM_PROMPTS}.{idx}.content", message.content
+            )
+
+
+@dont_throw
+def set_llm_chat_request_model_attributes(event, span):
+    if span and not span.is_recording():
+        return
+
+    model_dict = event.model_dict
+    span.set_attribute(SpanAttributes.LLM_REQUEST_TYPE, LLMRequestTypeValues.CHAT.value)
+    span.set_attribute(SpanAttributes.LLM_REQUEST_MODEL, model_dict.get("model"))
+    span.set_attribute(
+        SpanAttributes.LLM_REQUEST_TEMPERATURE, model_dict.get("temperature")
+    )
+
+
+@dont_throw
+def set_llm_chat_response(event, span) -> None:
+    if not span.is_recording():
+        return
+
+    response = event.response
+    if should_send_prompts():
+        for idx, message in enumerate(event.messages):
+            span.set_attribute(
+                f"{SpanAttributes.LLM_PROMPTS}.{idx}.role", message.role.value
+            )
+            span.set_attribute(
+                f"{SpanAttributes.LLM_PROMPTS}.{idx}.content", message.content
+            )
+        span.set_attribute(
+            f"{SpanAttributes.LLM_COMPLETIONS}.0.role",
+            response.message.role.value,
+        )
+        span.set_attribute(
+            f"{SpanAttributes.LLM_COMPLETIONS}.0.content",
+            response.message.content,
+        )
+
+
+@dont_throw
+def set_llm_chat_response_model_attributes(event, span):
+    if not span.is_recording():
+        return
+
+    response = event.response
+
+    if not (raw := response.raw):
+        return
+
+    span.set_attribute(
+        SpanAttributes.LLM_RESPONSE_MODEL,
+        (
+            raw.get("model") if "model" in raw else raw.model
+        ),  # raw can be Any, not just ChatCompletion
+    )
+    if usage := raw.get("usage") if "usage" in raw else raw.usage:
+        span.set_attribute(
+            SpanAttributes.LLM_USAGE_COMPLETION_TOKENS, usage.completion_tokens
+        )
+        span.set_attribute(SpanAttributes.LLM_USAGE_PROMPT_TOKENS, usage.prompt_tokens)
+        span.set_attribute(SpanAttributes.LLM_USAGE_TOTAL_TOKENS, usage.total_tokens)
+    if choices := raw.choices:
+        span.set_attribute(
+            SpanAttributes.LLM_RESPONSE_FINISH_REASON, choices[0].finish_reason
+        )
+
+
+@dont_throw
+def set_llm_predict_response(event, span) -> None:
+    if should_send_prompts():
+        span.set_attribute(
+            f"{SpanAttributes.LLM_COMPLETIONS}.role",
+            MessageRole.ASSISTANT.value,
+        )
+        span.set_attribute(
+            f"{SpanAttributes.LLM_COMPLETIONS}.content",
+            event.output,
+        )
+
+
+@dont_throw
+def set_embedding(event, span) -> None:
+    model_dict = event.model_dict
+    span.set_attribute(
+        f"{LLMRequestTypeValues.EMBEDDING.value}.model_name",
+        model_dict.get("model_name"),
+    )
+
+
+@dont_throw
+def set_rerank(event, span) -> None:
+    if not span.is_recording():
+        return
+    if should_send_prompts():
+        span.set_attribute(
+            f"{LLMRequestTypeValues.RERANK.value}.query",
+            event.query.query_str,
+        )
+
+
+@dont_throw
+def set_rerank_model_attributes(event, span):
+    if not span.is_recording():
+        return
+    span.set_attribute(
+        f"{LLMRequestTypeValues.RERANK.value}.model_name",
+        event.model_name,
+    )
+    span.set_attribute(
+        f"{LLMRequestTypeValues.RERANK.value}.top_n",
+        event.top_n,
+    )
+
+
+@dont_throw
+def set_tool(event, span) -> None:
+    span.set_attribute("tool.name", event.tool.name)
+    span.set_attribute("tool.description", event.tool.description)
+    span.set_attribute("tool.arguments", event.arguments)
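
The legacy path keeps the flattened, index-based attribute scheme for prompts and completions. A sketch of what `set_llm_chat_request` writes for a two-message chat, assuming `SpanAttributes.LLM_PROMPTS` resolves to the literal `gen_ai.prompt`:

```python
# Sketch: the flattened span-attribute layout of the legacy path.
from opentelemetry.sdk.trace import TracerProvider

tracer = TracerProvider().get_tracer("demo")
with tracer.start_as_current_span("llm.chat") as span:
    # One role/content pair per message, indexed by prompt position.
    span.set_attribute("gen_ai.prompt.0.role", "system")
    span.set_attribute("gen_ai.prompt.0.content", "You are a helpful assistant.")
    span.set_attribute("gen_ai.prompt.1.role", "user")
    span.set_attribute("gen_ai.prompt.1.content", "Summarize this document.")
```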

{opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/opentelemetry/instrumentation/llamaindex/utils.py
@@ -1,14 +1,17 @@
 import dataclasses
 import json
-import os
 import logging
+import os
 import traceback
 from contextlib import asynccontextmanager

 from opentelemetry import context as context_api
+from opentelemetry._events import EventLogger
 from opentelemetry.instrumentation.llamaindex.config import Config
 from opentelemetry.semconv_ai import SpanAttributes

+TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
+

 def _with_tracer_wrapper(func):
     def _with_tracer(tracer):
@@ -28,7 +31,7 @@ async def start_as_current_span_async(tracer, *args, **kwargs):

 def should_send_prompts():
     return (
-        os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
+        os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
     ).lower() == "true" or context_api.get_value("override_enable_content_tracing")


@@ -84,3 +87,18 @@ def process_response(span, res):
         SpanAttributes.TRACELOOP_ENTITY_OUTPUT,
         json.dumps(res, cls=JSONEncoder),
     )
+
+
+def is_role_valid(role: str) -> bool:
+    return role in ["user", "assistant", "system", "tool"]
+
+
+def should_emit_events() -> bool:
+    """
+    Checks if the instrumentation isn't using the legacy attributes
+    and if the event logger is not None.
+    """
+
+    return not Config.use_legacy_attributes and isinstance(
+        Config.event_logger, EventLogger
+    )
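
Prompt and response content capture stays opt-out via `TRACELOOP_TRACE_CONTENT`, now read through a module constant. A simplified mirror of the gate, ignoring the `override_enable_content_tracing` context override:

```python
# Sketch: simplified mirror of should_send_prompts() (context override omitted).
import os


def content_tracing_enabled() -> bool:
    # Unset or "true" (case-insensitive) enables content capture.
    return (os.getenv("TRACELOOP_TRACE_CONTENT") or "true").lower() == "true"


assert content_tracing_enabled()  # default: capture content (assumes var is unset)
os.environ["TRACELOOP_TRACE_CONTENT"] = "FALSE"
assert not content_tracing_enabled()
```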

{opentelemetry_instrumentation_llamaindex-0.40.13 → opentelemetry_instrumentation_llamaindex-0.41.0}/pyproject.toml
@@ -8,7 +8,7 @@ show_missing = true

 [tool.poetry]
 name = "opentelemetry-instrumentation-llamaindex"
-version = "0.40.13"
+version = "0.41.0"
 description = "OpenTelemetry LlamaIndex instrumentation"
 authors = [
   "Gal Kleinman <gal@traceloop.com>",
@@ -27,7 +27,7 @@ python = ">=3.9,<4"
 opentelemetry-api = "^1.28.0"
 opentelemetry-instrumentation = ">=0.50b0"
 opentelemetry-semantic-conventions = ">=0.50b0"
-opentelemetry-semantic-conventions-ai = "0.4.9"
+opentelemetry-semantic-conventions-ai = "0.4.10"
 inflection = "^0.5.1"

 [tool.poetry.group.dev.dependencies]
@@ -43,9 +43,9 @@ openai = "^1.52.2"
 opentelemetry-sdk = "^1.27.0"
 llama-index = "^0.12.6"
 llama-index-postprocessor-cohere-rerank = "^0.3.0"
-opentelemetry-instrumentation-openai = "==0.40.13"
-opentelemetry-instrumentation-cohere = "==0.40.13"
-opentelemetry-instrumentation-chromadb = "==0.40.13"
+opentelemetry-instrumentation-openai = "==0.41.0"
+opentelemetry-instrumentation-cohere = "==0.41.0"
+opentelemetry-instrumentation-chromadb = "==0.41.0"
 sqlalchemy = "^2.0.31"
 llama-index-agent-openai = "^0.4.1"
 llama-index-vector-stores-chroma = "^0.4.1"

opentelemetry_instrumentation_llamaindex-0.40.13/opentelemetry/instrumentation/llamaindex/config.py
@@ -1,2 +0,0 @@
-class Config:
-    exception_logger = None