opentelemetry-instrumentation-replicate 0.17.3__tar.gz → 0.49.1__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,8 +1,7 @@
1
- Metadata-Version: 2.1
1
+ Metadata-Version: 2.4
2
2
  Name: opentelemetry-instrumentation-replicate
3
- Version: 0.17.3
3
+ Version: 0.49.1
4
4
  Summary: OpenTelemetry Replicate instrumentation
5
- Home-page: https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-replicate
6
5
  License: Apache-2.0
7
6
  Author: Kartik Prajapati
8
7
  Author-email: kartik@ktklab.org
@@ -13,11 +12,13 @@ Classifier: Programming Language :: Python :: 3.9
13
12
  Classifier: Programming Language :: Python :: 3.10
14
13
  Classifier: Programming Language :: Python :: 3.11
15
14
  Classifier: Programming Language :: Python :: 3.12
15
+ Classifier: Programming Language :: Python :: 3.13
16
+ Classifier: Programming Language :: Python :: 3.14
16
17
  Provides-Extra: instruments
17
- Requires-Dist: opentelemetry-api (>=1.24.0,<2.0.0)
18
- Requires-Dist: opentelemetry-instrumentation (>=0.45b0,<0.46)
19
- Requires-Dist: opentelemetry-semantic-conventions (>=0.45b0,<0.46)
20
- Requires-Dist: opentelemetry-semantic-conventions-ai (==0.2.0)
18
+ Requires-Dist: opentelemetry-api (>=1.38.0,<2.0.0)
19
+ Requires-Dist: opentelemetry-instrumentation (>=0.59b0)
20
+ Requires-Dist: opentelemetry-semantic-conventions (>=0.59b0)
21
+ Requires-Dist: opentelemetry-semantic-conventions-ai (>=0.4.13,<0.5.0)
21
22
  Project-URL: Repository, https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-replicate
22
23
  Description-Content-Type: text/markdown
23
24
 
@@ -1,25 +1,36 @@
1
1
  """OpenTelemetry Replicate instrumentation"""
2
2
 
3
3
  import logging
4
- import os
5
4
  import types
6
5
  from typing import Collection
7
- from opentelemetry.instrumentation.replicate.config import Config
8
- from opentelemetry.instrumentation.replicate.utils import dont_throw
9
- from wrapt import wrap_function_wrapper
10
6
 
11
7
  from opentelemetry import context as context_api
12
- from opentelemetry.trace import get_tracer, SpanKind
13
- from opentelemetry.trace.status import Status, StatusCode
14
-
8
+ from opentelemetry._logs import get_logger
15
9
  from opentelemetry.instrumentation.instrumentor import BaseInstrumentor
10
+ from opentelemetry.instrumentation.replicate.config import Config
11
+ from opentelemetry.instrumentation.replicate.event_emitter import (
12
+ emit_choice_events,
13
+ emit_event,
14
+ )
15
+ from opentelemetry.instrumentation.replicate.event_models import MessageEvent
16
+ from opentelemetry.instrumentation.replicate.span_utils import (
17
+ set_input_attributes,
18
+ set_model_input_attributes,
19
+ set_response_attributes,
20
+ )
21
+ from opentelemetry.instrumentation.replicate.utils import dont_throw, should_emit_events
22
+ from opentelemetry.instrumentation.replicate.version import __version__
16
23
  from opentelemetry.instrumentation.utils import (
17
24
  _SUPPRESS_INSTRUMENTATION_KEY,
18
25
  unwrap,
19
26
  )
20
-
21
- from opentelemetry.semconv.ai import SpanAttributes, LLMRequestTypeValues
22
- from opentelemetry.instrumentation.replicate.version import __version__
27
+ from opentelemetry.semconv._incubating.attributes import (
28
+ gen_ai_attributes as GenAIAttributes,
29
+ )
30
+ from opentelemetry.semconv_ai import LLMRequestTypeValues, SpanAttributes
31
+ from opentelemetry.trace import SpanKind, get_tracer
32
+ from opentelemetry.trace.status import Status, StatusCode
33
+ from wrapt import wrap_function_wrapper
23
34
 
24
35
  logger = logging.getLogger(__name__)
25
36
 
@@ -44,68 +55,11 @@ WRAPPED_METHODS = [
44
55
  ]
45
56
 
46
57
 
47
- def should_send_prompts():
48
- return (
49
- os.getenv("TRACELOOP_TRACE_CONTENT") or "true"
50
- ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
51
-
52
-
53
58
  def is_streaming_response(response):
54
59
  return isinstance(response, types.GeneratorType)
55
60
 
56
61
 
57
- def _set_span_attribute(span, name, value):
58
- if value is not None:
59
- if value != "":
60
- span.set_attribute(name, value)
61
- return
62
-
63
-
64
- input_attribute_map = {
65
- "prompt": f"{SpanAttributes.LLM_PROMPTS}.0.user",
66
- "temperature": SpanAttributes.LLM_REQUEST_TEMPERATURE,
67
- "top_p": SpanAttributes.LLM_REQUEST_TOP_P,
68
- }
69
-
70
-
71
- def _set_input_attributes(span, args, kwargs):
72
- if args is not None and len(args) > 0:
73
- _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, args[0])
74
- elif kwargs.get("version"):
75
- _set_span_attribute(
76
- span, SpanAttributes.LLM_REQUEST_MODEL, kwargs.get("version").id
77
- )
78
- else:
79
- _set_span_attribute(span, SpanAttributes.LLM_REQUEST_MODEL, "unknown")
80
-
81
- input_attribute = kwargs.get("input")
82
- for key in input_attribute:
83
- if key in input_attribute_map:
84
- if key == "prompt" and not should_send_prompts():
85
- continue
86
- _set_span_attribute(
87
- span,
88
- input_attribute_map.get(key, f"llm.request.{key}"),
89
- input_attribute.get(key),
90
- )
91
- return
92
-
93
-
94
- @dont_throw
95
- def _set_response_attributes(span, response):
96
- if should_send_prompts():
97
- if isinstance(response, list):
98
- for index, item in enumerate(response):
99
- prefix = f"{SpanAttributes.LLM_COMPLETIONS}.{index}"
100
- _set_span_attribute(span, f"{prefix}.content", item)
101
- elif isinstance(response, str):
102
- _set_span_attribute(
103
- span, f"{SpanAttributes.LLM_COMPLETIONS}.0.content", response
104
- )
105
- return
106
-
107
-
108
- def _build_from_streaming_response(span, response):
62
+ def _build_from_streaming_response(span, event_logger, response):
109
63
  complete_response = ""
110
64
  for item in response:
111
65
  item_to_yield = item
@@ -113,32 +67,40 @@ def _build_from_streaming_response(span, response):
113
67
 
114
68
  yield item_to_yield
115
69
 
116
- _set_response_attributes(span, complete_response)
70
+ _handle_response(span, event_logger, complete_response)
117
71
 
118
- span.set_status(Status(StatusCode.OK))
119
72
  span.end()
120
73
 
121
74
 
122
75
  @dont_throw
123
- def _handle_request(span, args, kwargs):
124
- if span.is_recording():
125
- _set_input_attributes(span, args, kwargs)
76
+ def _handle_request(span, event_logger, args, kwargs):
77
+ set_model_input_attributes(span, args, kwargs)
78
+
79
+ model_input = kwargs.get("input") or (args[1] if len(args) > 1 else None)
80
+
81
+ if should_emit_events() and event_logger:
82
+ emit_event(MessageEvent(content=model_input.get("prompt")), event_logger)
83
+ else:
84
+ set_input_attributes(span, args, kwargs)
126
85
 
127
86
 
128
87
  @dont_throw
129
- def _handle_response(span, response):
130
- if span.is_recording():
131
- _set_response_attributes(span, response)
88
+ def _handle_response(span, event_logger, response):
89
+ if should_emit_events() and event_logger:
90
+ emit_choice_events(response, event_logger)
91
+ else:
92
+ set_response_attributes(span, response)
132
93
 
94
+ if span.is_recording():
133
95
  span.set_status(Status(StatusCode.OK))
134
96
 
135
97
 
136
98
  def _with_tracer_wrapper(func):
137
99
  """Helper for providing tracer for wrapper functions."""
138
100
 
139
- def _with_tracer(tracer, to_wrap):
101
+ def _with_tracer(tracer, event_logger, to_wrap):
140
102
  def wrapper(wrapped, instance, args, kwargs):
141
- return func(tracer, to_wrap, wrapped, instance, args, kwargs)
103
+ return func(tracer, event_logger, to_wrap, wrapped, instance, args, kwargs)
142
104
 
143
105
  return wrapper
144
106
 
@@ -146,7 +108,15 @@ def _with_tracer_wrapper(func):
146
108
 
147
109
 
148
110
  @_with_tracer_wrapper
149
- def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
111
+ def _wrap(
112
+ tracer,
113
+ event_logger,
114
+ to_wrap,
115
+ wrapped,
116
+ instance,
117
+ args,
118
+ kwargs,
119
+ ):
150
120
  """Instruments and calls every function defined in TO_WRAP."""
151
121
  if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY):
152
122
  return wrapped(*args, **kwargs)
@@ -156,20 +126,20 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
156
126
  name,
157
127
  kind=SpanKind.CLIENT,
158
128
  attributes={
159
- SpanAttributes.LLM_SYSTEM: "Replicate",
129
+ GenAIAttributes.GEN_AI_SYSTEM: "Replicate",
160
130
  SpanAttributes.LLM_REQUEST_TYPE: LLMRequestTypeValues.COMPLETION.value,
161
131
  },
162
132
  )
163
133
 
164
- _handle_request(span, args, kwargs)
134
+ _handle_request(span, event_logger, args, kwargs)
165
135
 
166
136
  response = wrapped(*args, **kwargs)
167
137
 
168
138
  if response:
169
139
  if is_streaming_response(response):
170
- return _build_from_streaming_response(span, response)
140
+ return _build_from_streaming_response(span, event_logger, response)
171
141
  else:
172
- _handle_response(span, response)
142
+ _handle_response(span, event_logger, response)
173
143
 
174
144
  span.end()
175
145
  return response
@@ -178,9 +148,10 @@ def _wrap(tracer, to_wrap, wrapped, instance, args, kwargs):
178
148
  class ReplicateInstrumentor(BaseInstrumentor):
179
149
  """An instrumentor for Replicate's client library."""
180
150
 
181
- def __init__(self, exception_logger=None):
151
+ def __init__(self, exception_logger=None, use_legacy_attributes=True):
182
152
  super().__init__()
183
153
  Config.exception_logger = exception_logger
154
+ Config.use_legacy_attributes = use_legacy_attributes
184
155
 
185
156
  def instrumentation_dependencies(self) -> Collection[str]:
186
157
  return _instruments
@@ -188,13 +159,23 @@ class ReplicateInstrumentor(BaseInstrumentor):
188
159
  def _instrument(self, **kwargs):
189
160
  tracer_provider = kwargs.get("tracer_provider")
190
161
  tracer = get_tracer(__name__, __version__, tracer_provider)
162
+
163
+ event_logger = None
164
+ if not Config.use_legacy_attributes:
165
+ logger_provider = kwargs.get("logger_provider")
166
+ event_logger = get_logger(
167
+ __name__, __version__, logger_provider=logger_provider
168
+ )
169
+
191
170
  for wrapper_method in WRAPPED_METHODS:
192
171
  wrap_function_wrapper(
193
172
  wrapper_method.get("module"),
194
173
  wrapper_method.get("method"),
195
- _wrap(tracer, wrapper_method),
174
+ _wrap(tracer, event_logger, wrapper_method),
196
175
  )
197
176
 
198
177
  def _uninstrument(self, **kwargs):
178
+ import replicate
179
+
199
180
  for wrapper_method in WRAPPED_METHODS:
200
- unwrap(wrapper_method.get("module"), wrapper_method.get("method", ""))
181
+ unwrap(replicate, wrapper_method.get("method", ""))
@@ -0,0 +1,143 @@
1
+ from dataclasses import asdict
2
+ from enum import Enum
3
+ from typing import Union
4
+
5
+ from opentelemetry._logs import Logger, LogRecord
6
+ from opentelemetry.instrumentation.replicate.event_models import (
7
+ ChoiceEvent,
8
+ MessageEvent,
9
+ )
10
+ from opentelemetry.instrumentation.replicate.utils import (
11
+ dont_throw,
12
+ should_emit_events,
13
+ should_send_prompts,
14
+ )
15
+ from opentelemetry.semconv._incubating.attributes import (
16
+ gen_ai_attributes as GenAIAttributes,
17
+ )
18
+
19
+ from replicate.prediction import Prediction
20
+
21
+
22
+ class Roles(Enum):
23
+ USER = "user"
24
+ ASSISTANT = "assistant"
25
+ SYSTEM = "system"
26
+ TOOL = "tool"
27
+
28
+
29
+ VALID_MESSAGE_ROLES = {role.value for role in Roles}
30
+ """The valid roles for naming the message event."""
31
+
32
+ EVENT_ATTRIBUTES = {GenAIAttributes.GEN_AI_SYSTEM: "replicate"}
33
+ """The attributes to be used for the event."""
34
+
35
+
36
+ @dont_throw
37
+ def emit_choice_events(
38
+ response: Union[str, list, Prediction], event_logger: Union[Logger, None]
39
+ ):
40
+ # Handle replicate.run responses
41
+ if isinstance(response, list):
42
+ for i, generation in enumerate(response):
43
+ emit_event(
44
+ ChoiceEvent(
45
+ index=i, message={"content": generation, "role": "assistant"}
46
+ ),
47
+ event_logger,
48
+ )
49
+ # Handle replicate.predictions.create responses
50
+ elif isinstance(response, Prediction):
51
+ emit_event(
52
+ ChoiceEvent(
53
+ index=0, message={"content": response.output, "role": "assistant"}
54
+ ),
55
+ event_logger,
56
+ )
57
+ # Handle replicate.stream responses built from _build_from_streaming_response
58
+ elif isinstance(response, str):
59
+ emit_event(
60
+ ChoiceEvent(index=0, message={"content": response, "role": "assistant"}),
61
+ event_logger,
62
+ )
63
+ else:
64
+ raise ValueError(
65
+ "It wasn't possible to emit the choice events due to an unsupported response type"
66
+ )
67
+
68
+
69
+ def emit_event(
70
+ event: Union[MessageEvent, ChoiceEvent], event_logger: Union[Logger, None]
71
+ ) -> None:
72
+ """
73
+ Emit an event to the OpenTelemetry SDK.
74
+
75
+ Args:
76
+ event: The event to emit.
77
+ """
78
+ if not should_emit_events() or event_logger is None:
79
+ return
80
+
81
+ if isinstance(event, MessageEvent):
82
+ _emit_message_event(event, event_logger)
83
+ elif isinstance(event, ChoiceEvent):
84
+ _emit_choice_event(event, event_logger)
85
+ else:
86
+ raise TypeError("Unsupported event type")
87
+
88
+
89
+ def _emit_message_event(event: MessageEvent, event_logger: Logger) -> None:
90
+ body = asdict(event)
91
+
92
+ if event.role in VALID_MESSAGE_ROLES:
93
+ name = "gen_ai.{}.message".format(event.role)
94
+ # According to the semantic conventions, the role is conditionally required if available
95
+ # and not equal to the "role" in the message name. So, remove the role from the body if
96
+ # it is the same as the one in the event name.
97
+ body.pop("role", None)
98
+ else:
99
+ name = "gen_ai.user.message"
100
+
101
+ # According to the semantic conventions, only the assistant role has tool call
102
+ if event.role != Roles.ASSISTANT.value and event.tool_calls is not None:
103
+ del body["tool_calls"]
104
+ elif event.tool_calls is None:
105
+ del body["tool_calls"]
106
+
107
+ if not should_send_prompts():
108
+ del body["content"]
109
+ if body.get("tool_calls") is not None:
110
+ for tool_call in body["tool_calls"]:
111
+ tool_call["function"].pop("arguments", None)
112
+
113
+ log_record = LogRecord(
114
+ body=body,
115
+ attributes=EVENT_ATTRIBUTES,
116
+ event_name=name
117
+ )
118
+ event_logger.emit(log_record)
119
+
120
+
121
+ def _emit_choice_event(event: ChoiceEvent, event_logger: Logger) -> None:
122
+ body = asdict(event)
123
+ if event.message["role"] == Roles.ASSISTANT.value:
124
+ # According to the semantic conventions, the role is conditionally required if available
125
+ # and not equal to "assistant", so remove the role from the body if it is "assistant".
126
+ body["message"].pop("role", None)
127
+
128
+ if event.tool_calls is None:
129
+ del body["tool_calls"]
130
+
131
+ if not should_send_prompts():
132
+ body["message"].pop("content", None)
133
+ if body.get("tool_calls") is not None:
134
+ for tool_call in body["tool_calls"]:
135
+ tool_call["function"].pop("arguments", None)
136
+
137
+ log_record = LogRecord(
138
+ body=body,
139
+ attributes=EVENT_ATTRIBUTES,
140
+ event_name="gen_ai.choice"
141
+
142
+ )
143
+ event_logger.emit(log_record)
@@ -0,0 +1,41 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, List, Literal, Optional, TypedDict
3
+
4
+
5
+ class _FunctionToolCall(TypedDict):
6
+ function_name: str
7
+ arguments: Optional[dict[str, Any]]
8
+
9
+
10
+ class ToolCall(TypedDict):
11
+ """Represents a tool call in the AI model."""
12
+
13
+ id: str
14
+ function: _FunctionToolCall
15
+ type: Literal["function"]
16
+
17
+
18
+ class CompletionMessage(TypedDict):
19
+ """Represents a message in the AI model."""
20
+
21
+ content: Any
22
+ role: str = "assistant"
23
+
24
+
25
+ @dataclass
26
+ class MessageEvent:
27
+ """Represents an input event for the AI model."""
28
+
29
+ content: Any
30
+ role: str = "user"
31
+ tool_calls: Optional[List[ToolCall]] = None
32
+
33
+
34
+ @dataclass
35
+ class ChoiceEvent:
36
+ """Represents a completion event for the AI model."""
37
+
38
+ index: int
39
+ message: CompletionMessage
40
+ finish_reason: str = "unknown"
41
+ tool_calls: Optional[List[ToolCall]] = None
@@ -0,0 +1,63 @@
1
+ from opentelemetry.instrumentation.replicate.utils import (
2
+ dont_throw,
3
+ should_send_prompts,
4
+ )
5
+ from opentelemetry.semconv._incubating.attributes import (
6
+ gen_ai_attributes as GenAIAttributes,
7
+ )
8
+
9
+
10
+ def _set_span_attribute(span, name, value):
11
+ if value is not None:
12
+ if value != "":
13
+ span.set_attribute(name, value)
14
+ return
15
+
16
+
17
+ @dont_throw
18
+ def set_input_attributes(span, args, kwargs):
19
+ if not span.is_recording():
20
+ return
21
+
22
+ input_attribute = kwargs.get("input")
23
+ if should_send_prompts():
24
+ _set_span_attribute(
25
+ span, f"{GenAIAttributes.GEN_AI_PROMPT}.0.user", input_attribute.get("prompt")
26
+ )
27
+
28
+
29
+ @dont_throw
30
+ def set_model_input_attributes(span, args, kwargs):
31
+ if not span.is_recording():
32
+ return
33
+
34
+ if args is not None and len(args) > 0:
35
+ _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, args[0])
36
+ elif kwargs.get("version"):
37
+ _set_span_attribute(
38
+ span, GenAIAttributes.GEN_AI_REQUEST_MODEL, kwargs.get("version").id
39
+ )
40
+ else:
41
+ _set_span_attribute(span, GenAIAttributes.GEN_AI_REQUEST_MODEL, "unknown")
42
+
43
+ input_attribute = kwargs.get("input")
44
+
45
+ _set_span_attribute(
46
+ span, GenAIAttributes.GEN_AI_REQUEST_TEMPERATURE, input_attribute.get("temperature")
47
+ )
48
+ _set_span_attribute(
49
+ span, GenAIAttributes.GEN_AI_REQUEST_TOP_P, input_attribute.get("top_p")
50
+ )
51
+
52
+
53
+ @dont_throw
54
+ def set_response_attributes(span, response):
55
+ if should_send_prompts():
56
+ if isinstance(response, list):
57
+ for index, item in enumerate(response):
58
+ prefix = f"{GenAIAttributes.GEN_AI_COMPLETION}.{index}"
59
+ _set_span_attribute(span, f"{prefix}.content", item)
60
+ elif isinstance(response, str):
61
+ _set_span_attribute(
62
+ span, f"{GenAIAttributes.GEN_AI_COMPLETION}.0.content", response
63
+ )
@@ -0,0 +1,48 @@
1
+ import logging
2
+ import os
3
+ import traceback
4
+
5
+ from opentelemetry import context as context_api
6
+ from opentelemetry.instrumentation.replicate.config import Config
7
+
8
+ TRACELOOP_TRACE_CONTENT = "TRACELOOP_TRACE_CONTENT"
9
+
10
+
11
+ def should_send_prompts():
12
+ return (
13
+ os.getenv(TRACELOOP_TRACE_CONTENT) or "true"
14
+ ).lower() == "true" or context_api.get_value("override_enable_content_tracing")
15
+
16
+
17
+ def dont_throw(func):
18
+ """
19
+ A decorator that wraps the passed in function and logs exceptions instead of throwing them.
20
+
21
+ @param func: The function to wrap
22
+ @return: The wrapper function
23
+ """
24
+ # Obtain a logger specific to the function's module
25
+ logger = logging.getLogger(func.__module__)
26
+
27
+ def wrapper(*args, **kwargs):
28
+ try:
29
+ return func(*args, **kwargs)
30
+ except Exception as e:
31
+ logger.debug(
32
+ "OpenLLMetry failed to trace in %s, error: %s",
33
+ func.__name__,
34
+ traceback.format_exc(),
35
+ )
36
+ if Config.exception_logger:
37
+ Config.exception_logger(e)
38
+
39
+ return wrapper
40
+
41
+
42
+ def should_emit_events() -> bool:
43
+ """
44
+ Checks if the instrumentation isn't using the legacy attributes
45
+ and if the event logger is not None.
46
+ """
47
+
48
+ return not Config.use_legacy_attributes
@@ -1,6 +1,6 @@
1
1
  [tool.coverage.run]
2
2
  branch = true
3
- source = [ "opentelemetry/instrumentation/replicate" ]
3
+ source = ["opentelemetry/instrumentation/replicate"]
4
4
 
5
5
  [tool.coverage.report]
6
6
  exclude_lines = ['if TYPE_CHECKING:']
@@ -8,9 +8,9 @@ show_missing = true
8
8
 
9
9
  [tool.poetry]
10
10
  name = "opentelemetry-instrumentation-replicate"
11
- version = "0.17.3"
11
+ version = "0.49.1"
12
12
  description = "OpenTelemetry Replicate instrumentation"
13
- authors = [ "Kartik Prajapati <kartik@ktklab.org>" ]
13
+ authors = ["Kartik Prajapati <kartik@ktklab.org>"]
14
14
  repository = "https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-replicate"
15
15
  license = 'Apache-2.0'
16
16
  readme = 'README.md'
@@ -20,24 +20,24 @@ include = "opentelemetry/instrumentation/replicate"
20
20
 
21
21
  [tool.poetry.dependencies]
22
22
  python = ">=3.9,<4"
23
- opentelemetry-api = "^1.24.0"
24
- opentelemetry-instrumentation = "^0.45b0"
25
- opentelemetry-semantic-conventions = "^0.45b0"
26
- opentelemetry-semantic-conventions-ai = "0.2.0"
23
+ opentelemetry-api = "^1.38.0"
24
+ opentelemetry-instrumentation = ">=0.59b0"
25
+ opentelemetry-semantic-conventions = ">=0.59b0"
26
+ opentelemetry-semantic-conventions-ai = "^0.4.13"
27
27
 
28
28
  [tool.poetry.group.dev.dependencies]
29
- autopep8 = "2.1.0"
29
+ autopep8 = "^2.2.0"
30
30
  flake8 = "7.0.0"
31
- pytest = "8.1.1"
31
+ pytest = "^8.2.2"
32
32
  pytest-sugar = "1.0.0"
33
33
 
34
34
  [tool.poetry.group.test.dependencies]
35
- pytest = "8.1.1"
35
+ pytest = "^8.2.2"
36
36
  pytest-sugar = "1.0.0"
37
37
  vcrpy = "^6.0.1"
38
38
  pytest-recording = "^0.13.1"
39
- opentelemetry-sdk = "^1.23.0"
40
- replicate = ">=0.23.1,<0.26.0"
39
+ opentelemetry-sdk = "^1.38.0"
40
+ replicate = ">=0.23.1,<0.27.0"
41
41
 
42
42
  [build-system]
43
43
  requires = ["poetry-core"]
@@ -1,23 +0,0 @@
1
- import logging
2
- from opentelemetry.instrumentation.replicate.config import Config
3
-
4
-
5
- def dont_throw(func):
6
- """
7
- A decorator that wraps the passed in function and logs exceptions instead of throwing them.
8
-
9
- @param func: The function to wrap
10
- @return: The wrapper function
11
- """
12
- # Obtain a logger specific to the function's module
13
- logger = logging.getLogger(func.__module__)
14
-
15
- def wrapper(*args, **kwargs):
16
- try:
17
- return func(*args, **kwargs)
18
- except Exception as e:
19
- logger.warning("Failed to execute %s, error: %s", func.__name__, str(e))
20
- if Config.exception_logger:
21
- Config.exception_logger(e)
22
-
23
- return wrapper