opentelemetry-instrumentation-replicate 0.40.14__py3-none-any.whl → 0.42.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of opentelemetry-instrumentation-replicate might be problematic; see the registry's advisory page for more details.

@@ -1,25 +1,33 @@
1
1
  """OpenTelemetry Replicate instrumentation"""
2
2
 
3
3
  import logging
4
- import os
5
4
  import types
6
5
  from typing import Collection
7
- from opentelemetry.instrumentation.replicate.config import Config
8
- from opentelemetry.instrumentation.replicate.utils import dont_throw
9
- from wrapt import wrap_function_wrapper
10
6
 
11
7
  from opentelemetry import context as context_api
12
- from opentelemetry.trace import get_tracer, SpanKind
13
- from opentelemetry.trace.status import Status, StatusCode
14
-
8
+ from opentelemetry._events import get_event_logger
15
9
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
10
+ from opentelemetry.instrumentation.replicate.config import Config
11
+ from opentelemetry.instrumentation.replicate.event_emitter import (
12
+ emit_choice_events,
13
+ emit_event,
14
+ )
15
+ from opentelemetry.instrumentation.replicate.event_models import MessageEvent
16
+ from opentelemetry.instrumentation.replicate.span_utils import (
17
+ set_input_attributes,
18
+ set_model_input_attributes,
19
+ set_response_attributes,
20
+ )
21
+ from opentelemetry.instrumentation.replicate.utils import dont_throw, should_emit_events
22
+ from opentelemetry.instrumentation.replicate.version import __version__
16
23
  from opentelemetry.instrumentation.utils import (
17
24
  _SUPPRESS_INSTRUMENTATION_KEY,
18
25
  unwrap,
19
26
  )
20
-
21
- from opentelemetry.semconv_ai import SpanAttributes, LLMRequestTypeValues
22
- from opentelemetry.instrumentation.replicate.version import __version__
27
+ from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
28
+ from opentelemetry.trace import SpanKind, get_tracer
29
+ from opentelemetry.trace.status import Status, StatusCode
30
+ from wrapt import wrap_function_wrapper
23
31
 
24
32
  logger = logging.getLogger(__name__)
25
33
 
@@ -44,68 +52,11 @@ WRAPPED_METHODS = [
44
52
  ]
45
53
 
46
54
 
47
- def should_send_prompts():
48
- return (
49
- os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
50
- ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
51
-
52
-
53
55
  def is_streaming_response(response):
54
56
  return isinstance(response, types.GeneratorType)
55
57
 
56
58
 
57
- def _set_span_attribute(span, name, value):
58
- if value is not None:
59
- if value != "":
60
- span.set_attribute(name, value)
61
- return
62
-
63
-
64
- input_attribute_map = {
65
- "prompt": f"{SpanAttributes.LLM_PROMPTS}.0.user",
66
- "temperature": SpanAttributes.LLM_REQUEST_TEMPERATURE,
67
- "top_p": SpanAttributes.LLM_REQUEST_TOP_P,
68
- }
69
-
70
-
71
- def _set_input_attributes(span, args, kwargs):
72
- if args is not None and len(args) > 0:
73
- _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, args[0])
74
- elif kwargs.get("version"):
75
- _set_span_attribute(
76
- span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("version").id
77
- )
78
- else:
79
- _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, "unknown")
80
-
81
- input_attribute = kwargs.get("input")
82
- for key in input_attribute:
83
- if key in input_attribute_map:
84
- if key == "prompt" and not should_send_prompts():
85
- continue
86
- _set_span_attribute(
87
- span,
88
- input_attribute_map.get(key, f"llm.request.{key}"),
89
- input_attribute.get(key),
90
- )
91
- return
92
-
93
-
94
- @dont_throw
95
- def _set_response_attributes(span, response):
96
- if should_send_prompts():
97
- if isinstance(response, list):
98
- for index, item in enumerate(response):
99
- prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
100
- _set_span_attribute(span, f"{prefix}.content", item)
101
- elif isinstance(response, str):
102
- _set_span_attribute(
103
- span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response
104
- )
105
- return
106
-
107
-
108
- def _build_from_streaming_response(span, response):
59
+ def _build_from_streaming_response(span, event_logger, response):
109
60
  complete_response = ""
110
61
  for item in response:
111
62
  item_to_yield = item
@@ -113,32 +64,40 @@ def _build_from_streaming_response(span, response):
113
64
 
114
65
  yield item_to_yield
115
66
 
116
- _set_response_attributes(span, complete_response)
67
+ _handle_response(span, event_logger, complete_response)
117
68
 
118
- span.set_status(Status(StatusCode.OK))
119
69
  span.end()
120
70
 
121
71
 
122
72
  @dont_throw
123
- def _handle_request(span, args, kwargs):
124
- if span.is_recording():
125
- _set_input_attributes(span, args, kwargs)
73
+ def _handle_request(span, event_logger, args, kwargs):
74
+ set_model_input_attributes(span, args, kwargs)
75
+
76
+ model_input = kwargs.get("input") or (args[1] if len(args) > 1 else None)
77
+
78
+ if should_emit_events() and event_logger:
79
+ emit_event(MessageEvent(content=model_input.get("prompt")), event_logger)
80
+ else:
81
+ set_input_attributes(span, args, kwargs)
126
82
 
127
83
 
128
84
  @dont_throw
129
- def _handle_response(span, response):
130
- if span.is_recording():
131
- _set_response_attributes(span, response)
85
+ def _handle_response(span, event_logger, response):
86
+ if should_emit_events() and event_logger:
87
+ emit_choice_events(response, event_logger)
88
+ else:
89
+ set_response_attributes(span, response)
132
90
 
91
+ if span.is_recording():
133
92
  span.set_status(Status(StatusCode.OK))
134
93
 
135
94
 
136
95
  def _with_tracer_wrapper(func):
137
96
  """Helper for providing tracer for wrapper functions."""
138
97
 
139
- def _with_tracer(tracer, to_wrap):
98
+ def _with_tracer(tracer, event_logger, to_wrap):
140
99
  def wrapper(wrapped, instance, args, kwargs):
141
- return func(tracer, to_wrap, wrapped, instance, args, kwargs)
100
+ return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs)
142
101
 
143
102
  return wrapper
144
103
 
@@ -146,7 +105,15 @@ def _with_tracer_wrapper(func):
146
105
 
147
106
 
148
107
  @_with_tracer_wrapper
149
- def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
108
+ def _wrap(
109
+ tracer,
110
+ event_logger,
111
+ to_wrap,
112
+ wrapped,
113
+ instance,
114
+ args,
115
+ kwargs,
116
+ ):
150
117
  """Instruments and calls every function defined in TO_WRAP."""
151
118
  if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
152
119
  return wrapped(*args, **kwargs)
@@ -161,15 +128,15 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
161
128
  },
162
129
  )
163
130
 
164
- _handle_request(span, args, kwargs)
131
+ _handle_request(span, event_logger, args, kwargs)
165
132
 
166
133
  response = wrapped(*args, **kwargs)
167
134
 
168
135
  if response:
169
136
  if is_streaming_response(response):
170
- return _build_from_streaming_response(span, response)
137
+ return _build_from_streaming_response(span, event_logger, response)
171
138
  else:
172
- _handle_response(span, response)
139
+ _handle_response(span, event_logger, response)
173
140
 
174
141
  span.end()
175
142
  return response
@@ -178,9 +145,10 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
178
145
  class ReplicateInstrumentor(BaseInstrumentor):
179
146
  """An instrumentor for Replicate's client library."""
180
147
 
181
- def __init__(self, exception_logger=None):
148
+ def __init__(self, exception_logger=None, use_legacy_attributes=True):
182
149
  super().__init__()
183
150
  Config.exception_logger = exception_logger
151
+ Config.use_legacy_attributes = use_legacy_attributes
184
152
 
185
153
  def instrumentation_dependencies(self) -> Collection[str]:
186
154
  return _instruments
@@ -188,13 +156,23 @@ class ReplicateInstrumentor(BaseInstrumentor):
188
156
  def _instrument(self, **kwargs):
189
157
  tracer_provider = kwargs.get("tracer_provider")
190
158
  tracer = get_tracer(__name__, __version__, tracer_provider)
159
+
160
+ event_logger = None
161
+ if not Config.use_legacy_attributes:
162
+ event_logger_provider = kwargs.get("event_logger_provider")
163
+ event_logger = get_event_logger(
164
+ __name__, __version__, event_logger_provider=event_logger_provider
165
+ )
166
+
191
167
  for wrapper_method in WRAPPED_METHODS:
192
168
  wrap_function_wrapper(
193
169
  wrapper_method.get("module"),
194
170
  wrapper_method.get("method"),
195
- _wrap(tracer, wrapper_method),
171
+ _wrap(tracer, event_logger, wrapper_method),
196
172
  )
197
173
 
198
174
  def _uninstrument(self, **kwargs):
175
+ import replicate
176
+
199
177
  for wrapper_method in WRAPPED_METHODS:
200
- unwrap(wrapper_method.get("module"), wrapper_method.get("method", ""))
178
+ unwrap(replicate, wrapper_method.get("method", ""))
@@ -1,2 +1,3 @@
1
1
  class Config:
2
2
  exception_logger = None
3
+ use_legacy_attributes = True
@@ -0,0 +1,134 @@
1
+ from dataclasses import asdict
2
+ from enum import Enum
3
+ from typing import Union
4
+
5
+ from opentelemetry._events import Event, EventLogger
6
+ from opentelemetry.instrumentation.replicate.event_models import (
7
+ ChoiceEvent,
8
+ MessageEvent,
9
+ )
10
+ from opentelemetry.instrumentation.replicate.utils import (
11
+ dont_throw,
12
+ should_emit_events,
13
+ should_send_prompts,
14
+ )
15
+ from opentelemetry.semconv._incubating.attributes import (
16
+ gen_ai_attributes as GenAIAttributes,
17
+ )
18
+
19
+ from replicate.prediction import Prediction
20
+
21
+
22
+ class Roles(Enum):
23
+ USER = "user"
24
+ ASSISTANT = "assistant"
25
+ SYSTEM = "system"
26
+ TOOL = "tool"
27
+
28
+
29
+ VALID_MESSAGE_ROLES = {role.value for role in Roles}
30
+ """The valid roles for naming the message event."""
31
+
32
+ EVENT_ATTRIBUTES = {GenAIAttributes.GEN_AI_SYSTEM: "replicate"}
33
+ """The attributes to be used for the event."""
34
+
35
+
36
+ @dont_throw
37
+ def emit_choice_events(
38
+ response: Union[str, list, Prediction], event_logger: Union[EventLogger, None]
39
+ ):
40
+ # Handle replicate.run responses
41
+ if isinstance(response, list):
42
+ for i, generation in enumerate(response):
43
+ emit_event(
44
+ ChoiceEvent(
45
+ index=i, message={"content": generation, "role": "assistant"}
46
+ ),
47
+ event_logger,
48
+ )
49
+ # Handle replicate.predictions.create responses
50
+ elif isinstance(response, Prediction):
51
+ emit_event(
52
+ ChoiceEvent(
53
+ index=0, message={"content": response.output, "role": "assistant"}
54
+ ),
55
+ event_logger,
56
+ )
57
+ # Handle replicate.stream responses built from _build_from_streaming_response
58
+ elif isinstance(response, str):
59
+ emit_event(
60
+ ChoiceEvent(index=0, message={"content": response, "role": "assistant"}),
61
+ event_logger,
62
+ )
63
+ else:
64
+ raise ValueError(
65
+ "It wasn't possible to emit the choice events due to an unsupported response type"
66
+ )
67
+
68
+
69
+ def emit_event(
70
+ event: Union[MessageEvent, ChoiceEvent], event_logger: Union[EventLogger, None]
71
+ ) -> None:
72
+ """
73
+ Emit an event to the OpenTelemetry SDK.
74
+
75
+ Args:
76
+ event: The event to emit.
77
+ """
78
+ if not should_emit_events() or event_logger is None:
79
+ return
80
+
81
+ if isinstance(event, MessageEvent):
82
+ _emit_message_event(event, event_logger)
83
+ elif isinstance(event, ChoiceEvent):
84
+ _emit_choice_event(event, event_logger)
85
+ else:
86
+ raise TypeError("Unsupported event type")
87
+
88
+
89
+ def _emit_message_event(event: MessageEvent, event_logger: EventLogger) -> None:
90
+ body = asdict(event)
91
+
92
+ if event.role in VALID_MESSAGE_ROLES:
93
+ name = "gen_ai.{}.message".format(event.role)
94
+ # According to the semantic conventions, the role is conditionally required if available
95
+ # and not equal to the "role" in the message name. So, remove the role from the body if
96
+ # it is the same as the one in the event name.
97
+ body.pop("role", None)
98
+ else:
99
+ name = "gen_ai.user.message"
100
+
101
+ # According to the semantic conventions, only the assistant role has tool call
102
+ if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
103
+ del body["tool_calls"]
104
+ elif event.tool_calls is None:
105
+ del body["tool_calls"]
106
+
107
+ if not should_send_prompts():
108
+ del body["content"]
109
+ if body.get("tool_calls") is not None:
110
+ for tool_call in body["tool_calls"]:
111
+ tool_call["function"].pop("arguments", None)
112
+
113
+ event_logger.emit(Event(name=name, body=body, attributes=EVENT_ATTRIBUTES))
114
+
115
+
116
+ def _emit_choice_event(event: ChoiceEvent, event_logger: EventLogger) -> None:
117
+ body = asdict(event)
118
+ if event.message["role"] == Roles.ASSISTANT.value:
119
+ # According to the semantic conventions, the role is conditionally required if available
120
+ # and not equal to "assistant", so remove the role from the body if it is "assistant".
121
+ body["message"].pop("role", None)
122
+
123
+ if event.tool_calls is None:
124
+ del body["tool_calls"]
125
+
126
+ if not should_send_prompts():
127
+ body["message"].pop("content", None)
128
+ if body.get("tool_calls") is not None:
129
+ for tool_call in body["tool_calls"]:
130
+ tool_call["function"].pop("arguments", None)
131
+
132
+ event_logger.emit(
133
+ Event(name="gen_ai.choice", body=body, attributes=EVENT_ATTRIBUTES)
134
+ )
@@ -0,0 +1,41 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, List, Literal, Optional, TypedDict
3
+
4
+
5
+ class _FunctionToolCall(TypedDict):
6
+ function_name: str
7
+ arguments: Optional[dict[str, Any]]
8
+
9
+
10
+ class ToolCall(TypedDict):
11
+ """Represents a tool call in the AI model."""
12
+
13
+ id: str
14
+ function: _FunctionToolCall
15
+ type: Literal["function"]
16
+
17
+
18
+ class CompletionMessage(TypedDict):
19
+ """Represents a message in the AI model."""
20
+
21
+ content: Any
22
+ role: str = "assistant"
23
+
24
+
25
+ @dataclass
26
+ class MessageEvent:
27
+ """Represents an input event for the AI model."""
28
+
29
+ content: Any
30
+ role: str = "user"
31
+ tool_calls: Optional[List[ToolCall]] = None
32
+
33
+
34
+ @dataclass
35
+ class ChoiceEvent:
36
+ """Represents a completion event for the AI model."""
37
+
38
+ index: int
39
+ message: CompletionMessage
40
+ finish_reason: str = "unknown"
41
+ tool_calls: Optional[List[ToolCall]] = None
@@ -0,0 +1,61 @@
1
+ from opentelemetry.instrumentation.replicate.utils import (
2
+ dont_throw,
3
+ should_send_prompts,
4
+ )
5
+ from opentelemetry.semconv_ai import SpanAttributes
6
+
7
+
8
+ def _set_span_attribute(span, name, value):
9
+ if value is not None:
10
+ if value != "":
11
+ span.set_attribute(name, value)
12
+ return
13
+
14
+
15
+ @dont_throw
16
+ def set_input_attributes(span, args, kwargs):
17
+ if not span.is_recording():
18
+ return
19
+
20
+ input_attribute = kwargs.get("input")
21
+ if should_send_prompts():
22
+ _set_span_attribute(
23
+ span, f"{SpanAttributes.LLM_PROMPTS}.0.user", input_attribute.get("prompt")
24
+ )
25
+
26
+
27
+ @dont_throw
28
+ def set_model_input_attributes(span, args, kwargs):
29
+ if not span.is_recording():
30
+ return
31
+
32
+ if args is not None and len(args) > 0:
33
+ _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, args[0])
34
+ elif kwargs.get("version"):
35
+ _set_span_attribute(
36
+ span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("version").id
37
+ )
38
+ else:
39
+ _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, "unknown")
40
+
41
+ input_attribute = kwargs.get("input")
42
+
43
+ _set_span_attribute(
44
+ span, SpanAttributes.LLM_REQUEST_TEMPERATURE, input_attribute.get("temperature")
45
+ )
46
+ _set_span_attribute(
47
+ span, SpanAttributes.LLM_REQUEST_TOP_P, input_attribute.get("top_p")
48
+ )
49
+
50
+
51
+ @dont_throw
52
+ def set_response_attributes(span, response):
53
+ if should_send_prompts():
54
+ if isinstance(response, list):
55
+ for index, item in enumerate(response):
56
+ prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
57
+ _set_span_attribute(span, f"{prefix}.content", item)
58
+ elif isinstance(response, str):
59
+ _set_span_attribute(
60
+ span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response
61
+ )
@@ -1,7 +1,18 @@
1
1
  import logging
2
+ import os
2
3
  import traceback
4
+
5
+ from opentelemetry import context as context_api
3
6
  from opentelemetry.instrumentation.replicate.config import Config
4
7
 
8
+ TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
9
+
10
+
11
+ def should_send_prompts():
12
+ return (
13
+ os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
14
+ ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
15
+
5
16
 
6
17
  def dont_throw(func):
7
18
  """
@@ -26,3 +37,12 @@ def dont_throw(func):
26
37
  Config.exception_logger(e)
27
38
 
28
39
  return wrapper
40
+
41
+
42
+ def should_emit_events() -> bool:
43
+ """
44
+ Checks if the instrumentation isn't using the legacy attributes
45
+ and if the event logger is not None.
46
+ """
47
+
48
+ return not Config.use_legacy_attributes
@@ -1 +1 @@
1
- __version__ = "0.40.14"
1
+ __version__ = "0.42.0"
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.3
2
2
  Name: opentelemetry-instrumentation-replicate
3
- Version: 0.40.14
3
+ Version: 0.42.0
4
4
  Summary: OpenTelemetry Replicate instrumentation
5
5
  License: Apache-2.0
6
6
  Author: Kartik Prajapati
@@ -17,7 +17,7 @@ Provides-Extra: instruments
17
17
  Requires-Dist: opentelemetry-api (>=1.28.0,<2.0.0)
18
18
  Requires-Dist: opentelemetry-instrumentation (>=0.50b0)
19
19
  Requires-Dist: opentelemetry-semantic-conventions (>=0.50b0)
20
- Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.9)
20
+ Requires-Dist: opentelemetry-semantic-conventions-ai (==0.4.11)
21
21
  Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-replicate
22
22
  Description-Content-Type: text/markdown
23
23
 
@@ -0,0 +1,11 @@
1
+ opentelemetry/instrumentation/replicate/__init__.py,sha256=efxMBZKhkhfCmAQDEFLeHQqE1JKZmtQW19h44FRj0JQ,5262
2
+ opentelemetry/instrumentation/replicate/config.py,sha256=aaFMaCpJS9eAxkHIP88wvlc8B1tKspysmXdULfhWGSA,75
3
+ opentelemetry/instrumentation/replicate/event_emitter.py,sha256=1TMYvK9IVPnb6IIDaD_Uumay2LCoGxPHqwkDFqJ35R4,4374
4
+ opentelemetry/instrumentation/replicate/event_models.py,sha256=PCfCGxrrArwZqR-4wFcXrhwQq0sBMAxmSrpC4PUMtaM,876
5
+ opentelemetry/instrumentation/replicate/span_utils.py,sha256=6--CZZvb7eAMUyGlKRRZ5B0tF4YgUgRwRDN9NMqKZT8,1839
6
+ opentelemetry/instrumentation/replicate/utils.py,sha256=wtv-bT3CojjX6ACbDtvehZDsINnDHM8s881JOAz0tAo,1307
7
+ opentelemetry/instrumentation/replicate/version.py,sha256=fXDp5M3ZbGNxa2CR646QIbRo8oBi15GhGQT_3rvssIE,23
8
+ opentelemetry_instrumentation_replicate-0.42.0.dist-info/METADATA,sha256=haOfO4RkLJwH4CMj_zvjAOkTpXTlkzmFz8jR_-bI0lA,2201
9
+ opentelemetry_instrumentation_replicate-0.42.0.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
10
+ opentelemetry_instrumentation_replicate-0.42.0.dist-info/entry_points.txt,sha256=YDOSAhxrkimcGYNOJLwqT1VoPS9YM-mXd1DT4hBk0Oc,102
11
+ opentelemetry_instrumentation_replicate-0.42.0.dist-info/RECORD,,
@@ -1,8 +0,0 @@
1
- opentelemetry/instrumentation/replicate/__init__.py,sha256=RA_x-p8zpGT0MqW26iOau4FTXcTJoRHyo6rX7WpwtpE,5808
2
- opentelemetry/instrumentation/replicate/config.py,sha256=CtypZov_ytI9nSrfN9lWnjcufbAR9sfkXRA0OstDEUw,42
3
- opentelemetry/instrumentation/replicate/utils.py,sha256=E4Ur_vkGqrfsNpFJ4oU72GQudgyH0-NFsbOYjLfEkIo,809
4
- opentelemetry/instrumentation/replicate/version.py,sha256=TzmqqRPz5JsMF0vCMChofQC_r_x0W9P-JB4K5rRCvtE,24
5
- opentelemetry_instrumentation_replicate-0.40.14.dist-info/METADATA,sha256=ut4Jwr1A5PpR89G8rorz9A3oQQFhEjuhBLutxsyzDnY,2201
6
- opentelemetry_instrumentation_replicate-0.40.14.dist-info/WHEEL,sha256=b4K_helf-jlQoXBBETfwnf4B04YC67LOev0jo4fX5m8,88
7
- opentelemetry_instrumentation_replicate-0.40.14.dist-info/entry_points.txt,sha256=YDOSAhxrkimcGYNOJLwqT1VoPS9YM-mXd1DT4hBk0Oc,102
8
- opentelemetry_instrumentation_replicate-0.40.14.dist-info/RECORD,,