lmnr 0.3.7__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
lmnr/sdk/interface.py DELETED
@@ -1,316 +0,0 @@
- from .context import LaminarSingleton
- from .tracing_types import EvaluateEvent, Span, Trace, Event
-
- from typing import Any, Literal, Optional, Union
- import datetime
- import logging
- import uuid
-
-
- laminar = LaminarSingleton().get()
-
-
- class ObservationContext:
-     observation: Union[Span, Trace] = None
-     _parent: "ObservationContext" = None
-     _children: dict[uuid.UUID, "ObservationContext"] = {}
-     _log = logging.getLogger("laminar.observation_context")
-
-     def __init__(self, observation: Union[Span, Trace], parent: "ObservationContext"):
-         self.observation = observation
-         self._parent = parent
-         self._children = {}
-
-     def _get_parent(self) -> "ObservationContext":
-         raise NotImplementedError
-
-     def update(self, *args, **kwargs):
-         raise NotImplementedError
-
-     def span(
-         self,
-         name: str,
-         input: Optional[Any] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         attributes: Optional[dict[str, Any]] = None,
-         span_type: Literal["DEFAULT", "LLM"] = "DEFAULT",
-     ) -> "SpanContext":
-         """Create a span within the current (trace or span) context.
-
-         Args:
-             name (str): Span name
-             input (Optional[Any], optional): Inputs to the span. Defaults to None.
-             metadata (Optional[dict[str, Any]], optional): Any additional metadata. Defaults to None.
-             attributes (Optional[dict[str, Any]], optional): Any pre-defined attributes. Must comply with the semantic convention. Defaults to None.
-             span_type (Literal["DEFAULT", "LLM"], optional): Type of the span. Defaults to "DEFAULT".
-
-         Returns:
-             SpanContext: The new span context
-         """
-         parent = self
-         parent_span_id = (
-             parent.observation.id if isinstance(parent.observation, Span) else None
-         )
-         trace_id = (
-             parent.observation.traceId
-             if isinstance(parent.observation, Span)
-             else parent.observation.id
-         )
-         span = laminar.create_span(
-             name=name,
-             trace_id=trace_id,
-             input=input,
-             metadata=metadata,
-             attributes=attributes,
-             parent_span_id=parent_span_id,
-             span_type=span_type,
-         )
-         span_context = SpanContext(span, self)
-         self._children[span.id] = span_context
-         return span_context
-
-     def id(self) -> uuid.UUID:
-         """Get the UUID of the current observation.
-
-         Returns:
-             uuid.UUID: UUID of the observation
-         """
-         return self.observation.id
-
-
- class SpanContext(ObservationContext):
-     def _get_parent(self) -> ObservationContext:
-         return self._parent
-
-     def end(
-         self,
-         input: Optional[Any] = None,
-         output: Optional[Any] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         attributes: Optional[dict[str, Any]] = None,
-         evaluate_events: Optional[list[EvaluateEvent]] = None,
-         override: bool = False,
-     ) -> "SpanContext":
-         """End the span with the given output and optional metadata and evaluate events.
-
-         Args:
-             input (Optional[Any], optional): Inputs to the span. Defaults to None.
-             output (Optional[Any], optional): Output of the span. Defaults to None.
-             metadata (Optional[dict[str, Any]], optional): Any additional metadata to attach to the span. Defaults to None.
-             attributes (Optional[dict[str, Any]], optional): Pre-defined attributes (see semantic convention). Defaults to None.
-             evaluate_events (Optional[list[EvaluateEvent]], optional): Evaluate events to attach to the span. Defaults to None.
-             override (bool, optional): Fully override existing metadata. If False, metadata is merged. Defaults to False.
-
-         Returns:
-             SpanContext: the finished span context
-         """
-         if self._children:
-             self._log.warning(
-                 "Ending span %s, but it has children that have not been finalized. Children: %s",
-                 self.observation.name,
-                 [child.observation.name for child in self._children.values()],
-             )
-         self._get_parent()._children.pop(self.observation.id)
-         return self._update(
-             input=input,
-             output=output,
-             metadata=metadata,
-             evaluate_events=evaluate_events,
-             attributes=attributes,
-             override=override,
-             finalize=True,
-         )
-
-     def update(
-         self,
-         input: Optional[Any] = None,
-         output: Optional[Any] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         attributes: Optional[dict[str, Any]] = None,
-         evaluate_events: Optional[list[EvaluateEvent]] = None,
-         override: bool = False,
-     ) -> "SpanContext":
-         """Update the current span with the given output, metadata, and evaluate events, without ending it.
-
-         Args:
-             input (Optional[Any], optional): Inputs to the span. Defaults to None.
-             output (Optional[Any], optional): Output of the span. Defaults to None.
-             metadata (Optional[dict[str, Any]], optional): Any additional metadata to attach to the span. Defaults to None.
-             attributes (Optional[dict[str, Any]], optional): Pre-defined attributes (see semantic convention). Defaults to None.
-             evaluate_events (Optional[list[EvaluateEvent]], optional): Evaluate events to attach to the span. Defaults to None.
-             override (bool, optional): Fully override existing metadata. If False, metadata is merged. Defaults to False.
-
-         Returns:
-             SpanContext: the updated span context
-         """
-         return self._update(
-             input=input or self.observation.input,
-             output=output or self.observation.output,
-             metadata=metadata,
-             evaluate_events=evaluate_events,
-             attributes=attributes,
-             override=override,
-             finalize=False,
-         )
-
-     def event(
-         self,
-         name: str,
-         value: Optional[Union[str, int, float, bool]] = None,
-         timestamp: Optional[datetime.datetime] = None,
-     ) -> "SpanContext":
-         """Associate an event with the current span.
-
-         Args:
-             name (str): Name of the event. Must be predefined in the Laminar events page.
-             value (Optional[Union[str, int, float, bool]], optional): Value of the event. Must match the range definition in the Laminar events page. Defaults to None.
-             timestamp (Optional[datetime.datetime], optional): Custom timestamp. If not specified, the current time is used. Defaults to None.
-
-         Returns:
-             SpanContext: the updated span context
-         """
-         event = Event(
-             name=name,
-             span_id=self.observation.id,
-             timestamp=timestamp,
-             value=value,
-         )
-         self.observation.add_event(event)
-         return self
-
-     def evaluate_event(self, name: str, evaluator: str, data: dict) -> "SpanContext":
-         """Evaluate an event with the given name by the evaluator based on the given data.
-         The evaluator is the name of a Laminar pipeline. Data is passed as an input to the
-         evaluator pipeline, so you need to specify which data you want to evaluate. The prompt
-         of the evaluator is templated with the keys of the data dictionary.
-
-         Usually, you would pass the output of the LLM generation, users' messages, and other surrounding data to `data`.
-
-         Args:
-             name (str): Name of the event.
-             evaluator (str): Name of the evaluator pipeline.
-             data (dict): Data to be used when evaluating the event.
-
-         Returns:
-             SpanContext: the updated span context
-         """
-         self._update(
-             input=self.observation.input,
-             output=self.observation.output,
-             evaluate_events=[
-                 EvaluateEvent(
-                     name=name,
-                     evaluator=evaluator,
-                     data=data,
-                     timestamp=datetime.datetime.now(datetime.timezone.utc),
-                 )
-             ],
-             override=False,
-         )
-         return self
-
-     def _update(
-         self,
-         input: Optional[Any] = None,
-         output: Optional[Any] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         attributes: Optional[dict[str, Any]] = None,
-         evaluate_events: Optional[list[EvaluateEvent]] = None,
-         events: Optional[list[Event]] = None,
-         override: bool = False,
-         finalize: bool = False,
-     ) -> "SpanContext":
-         self.observation = laminar.update_span(
-             input=input,
-             output=output,
-             span=self.observation,
-             end_time=datetime.datetime.now(datetime.timezone.utc),
-             metadata=metadata,
-             attributes=attributes,
-             evaluate_events=evaluate_events,
-             finalize=finalize,
-             override=override,
-         )
-         return self
-
-
- class TraceContext(ObservationContext):
-     def _get_parent(self) -> "ObservationContext":
-         return self
-
-     def update(
-         self,
-         user_id: Optional[str] = None,
-         session_id: Optional[str] = None,
-         release: Optional[str] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         success: bool = True,
-     ) -> "TraceContext":
-         """Update the current trace with the given metadata and success status.
-
-         Args:
-             user_id (Optional[str], optional): Custom user_id of your user. Useful for grouping and further analytics. Defaults to None.
-             session_id (Optional[str], optional): Custom session_id for your session. A random UUID is generated on the Laminar side if not specified. Defaults to None.
-             release (Optional[str], optional): Release of your application. Useful for grouping and further analytics. Defaults to None.
-             metadata (Optional[dict[str, Any]], optional): Any additional metadata to attach to the trace. Defaults to None.
-             success (bool, optional): Whether this trace ran successfully. Defaults to True.
-
-         Returns:
-             TraceContext: the updated trace context
-         """
-         return self._update(
-             user_id=user_id or self.observation.userId,
-             session_id=session_id or self.observation.sessionId,
-             release=release or self.observation.release,
-             metadata=metadata or self.observation.metadata,
-             success=success if success is not None else self.observation.success,
-         )
-
-     def _update(
-         self,
-         user_id: Optional[str] = None,
-         session_id: Optional[str] = None,
-         release: Optional[str] = None,
-         metadata: Optional[dict[str, Any]] = None,
-         success: bool = True,
-         end_time: Optional[datetime.datetime] = None,
-     ) -> "TraceContext":
-         self.observation = laminar.update_trace(
-             id=self.observation.id,
-             user_id=user_id,
-             session_id=session_id,
-             release=release,
-             metadata=metadata,
-             success=success,
-         )
-         return self
-
-
- def trace(
-     user_id: Optional[str] = None,
-     session_id: Optional[str] = None,
-     release: Optional[str] = None,
- ) -> TraceContext:
-     """Create the initial trace context. All further spans will be created within this context.
-
-     Args:
-         user_id (Optional[str], optional): Custom user_id of your user. Useful for grouping and further analytics. Defaults to None.
-         session_id (Optional[str], optional): Custom session_id for your session. A random UUID is generated on the Laminar side if not specified. Defaults to None.
-         release (Optional[str], optional): Release of your application. Useful for grouping and further analytics. Defaults to None.
-
-     Returns:
-         TraceContext: the pointer to the trace context. Use `.span()` to create a new span within this context.
-     """
-     session_id = session_id or str(uuid.uuid4())
-     trace_id = uuid.uuid4()
-     trace = laminar.update_trace(
-         id=trace_id,
-         user_id=user_id,
-         session_id=session_id,
-         release=release,
-     )
-     return TraceContext(trace, None)
-
-
- def initialize(env: dict[str, str]) -> None:
-     laminar = LaminarSingleton().get()
-     laminar.set_env(env)
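For orientation, here is a minimal sketch of how this removed interface was driven, inferred only from the signatures above. The env key, event name, and evaluator pipeline name are hypothetical and would have to exist in your Laminar project:

    from lmnr.sdk.interface import initialize, trace

    initialize({"OPENAI_API_KEY": "..."})  # env for the Laminar singleton (key name assumed)
    t = trace(user_id="user-123")          # root TraceContext
    span = t.span(name="generate", input={"prompt": "Hi"}, span_type="LLM")
    span.event("user_feedback", value=1)   # event must be predefined in Laminar
    span.evaluate_event("relevance", "relevance-checker", {"output": "Hello!"})
    span.end(output={"completion": "Hello!"})  # finalizes and detaches from the parent
    t.update(success=True)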
@@ -1,2 +0,0 @@
- from .base import Provider
- from .openai import OpenAI
@@ -1,28 +0,0 @@
- import abc
- import pydantic
- import typing
-
-
- class Provider(abc.ABC):
-     def display_name(self) -> str:
-         raise NotImplementedError("display_name not implemented")
-
-     def stream_list_to_dict(self, response: list[typing.Any]) -> dict[str, typing.Any]:
-         raise NotImplementedError("stream_list_to_dict not implemented")
-
-     def extract_llm_attributes_from_response(
-         self, response: typing.Union[str, dict[str, typing.Any], pydantic.BaseModel]
-     ) -> dict[str, typing.Any]:
-         raise NotImplementedError(
-             "extract_llm_attributes_from_response not implemented"
-         )
-
-     def extract_llm_output(
-         self, response: typing.Union[str, dict[str, typing.Any], pydantic.BaseModel]
-     ) -> typing.Any:
-         raise NotImplementedError("extract_llm_output not implemented")
-
-     def extract_llm_attributes_from_args(
-         self, func_args: list[typing.Any], func_kwargs: dict[str, typing.Any]
-     ) -> dict[str, typing.Any]:
-         raise NotImplementedError("extract_llm_attributes_from_args not implemented")
@@ -1,154 +0,0 @@
- from ...semantic_conventions.gen_ai_spans import (
-     FINISH_REASONS,
-     FREQUENCY_PENALTY,
-     INPUT_TOKEN_COUNT,
-     MAX_TOKENS,
-     OUTPUT_TOKEN_COUNT,
-     PRESENCE_PENALTY,
-     REQUEST_MODEL,
-     RESPONSE_MODEL,
-     STOP_SEQUENCES,
-     STREAM,
-     TEMPERATURE,
-     TOP_K,
-     TOP_P,
-     TOTAL_TOKEN_COUNT,
- )
- from .base import Provider
- from .utils import parse_or_dump_to_dict
-
- from collections import defaultdict
- from typing import Any, Optional, Union
- import logging
- import pydantic
-
-
- class FallbackProvider(Provider):
-     logger = logging.getLogger("lmnr.sdk.tracing.providers.fallback")
-
-     def display_name(self) -> str:
-         return ""
-
-     def stream_list_to_dict(
-         self,
-         response: list[Union[dict[str, Any], str, pydantic.BaseModel]],
-     ) -> dict[str, Any]:
-         model = None
-         output_tokens = 0
-         outputs = defaultdict(lambda: defaultdict(str))
-         try:
-             for chunk in response:
-                 chunk = parse_or_dump_to_dict(chunk)
-                 if model is None:
-                     model = chunk.get("model")
-                 for i, choice in enumerate(chunk.get("choices", [])):
-                     # TODO: handle not only content, but also tool_calls and refusal;
-                     # also handle roles
-                     if choice["delta"] and isinstance(choice["delta"], dict):
-                         for key in choice["delta"]:
-                             if choice["delta"][key] is None:
-                                 if key not in outputs[i]:
-                                     outputs[i][key] = None
-                                 continue
-                             outputs[i][key] += choice["delta"][key]
-                 output_tokens += 1
-         except Exception as e:
-             self.logger.error(f"Error parsing streaming response: {e}")
-
-         output_key_values = [
-             self._message_to_key_and_output(dict(outputs[i]))
-             for i in range(len(outputs))
-         ]
-         return {
-             "model": model,
-             "prompt_tokens": 0,
-             "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": output_tokens,
-                 "total_tokens": output_tokens,
-             },
-             "choices": [
-                 (
-                     {
-                         "message": {
-                             output[0]: output[1],
-                             "role": "assistant",
-                         }
-                     }
-                     if output
-                     else None
-                 )
-                 for output in output_key_values
-             ],
-         }
-
-     def extract_llm_attributes_from_response(
-         self, response: Union[str, dict[str, Any], pydantic.BaseModel]
-     ) -> dict[str, Any]:
-         obj = parse_or_dump_to_dict(response)
-
-         choices = obj.get("choices", [])
-         decisions = []
-         for choice in choices:
-             if choice.get("content"):
-                 decisions.append("completion")
-             elif choice.get("refusal"):
-                 decisions.append("refusal")
-             elif choice.get("tool_calls"):
-                 decisions.append("tool_calls")
-             else:
-                 decisions.append(None)
-
-         return {
-             RESPONSE_MODEL: obj.get("model"),
-             INPUT_TOKEN_COUNT: obj.get("usage", {}).get("prompt_tokens"),
-             OUTPUT_TOKEN_COUNT: obj.get("usage", {}).get("completion_tokens"),
-             TOTAL_TOKEN_COUNT: obj.get("usage", {}).get("total_tokens"),
-             FINISH_REASONS: obj.get("finish_reason"),
-             # "decision": self._from_singleton_list(decisions),
-         }
-
-     def extract_llm_output(
-         self, result: Union[str, dict[str, Any], pydantic.BaseModel]
-     ) -> Any:
-         result = parse_or_dump_to_dict(result)
-         choices = result.get("choices")
-         if not choices:
-             return None
-         outputs = [choice.get("message") for choice in choices]
-
-         return self._from_singleton_list(outputs)
-
-     def extract_llm_attributes_from_args(
-         self, func_args: list[Any], func_kwargs: dict[str, Any]
-     ) -> dict[str, Any]:
-         return {
-             REQUEST_MODEL: func_kwargs.get("model"),
-             TEMPERATURE: func_kwargs.get("temperature"),
-             TOP_P: func_kwargs.get("top_p"),
-             TOP_K: func_kwargs.get("top_k"),
-             FREQUENCY_PENALTY: func_kwargs.get("frequency_penalty"),
-             PRESENCE_PENALTY: func_kwargs.get("presence_penalty"),
-             STOP_SEQUENCES: func_kwargs.get("stop"),
-             MAX_TOKENS: func_kwargs.get("max_tokens"),
-             STREAM: func_kwargs.get("stream", False),
-         }
-
-     def _message_to_key_and_output(
-         self, message: Union[dict[str, Any], pydantic.BaseModel]
-     ) -> Optional[tuple[str, str]]:
-         message = parse_or_dump_to_dict(message)
-
-         for key in ["content", "refusal", "tool_calls"]:
-             if message.get(key) is not None:
-                 return (key, message[key])
-         return None
-
-     def _from_singleton_list(self, obj: Any) -> Any:
-         # OpenAI returns a list of choices. This will have more than one item
-         # only if the request parameter `n` is specified and is greater than 1.
-         # That's a rare case, so we return the [contents of the] choice alone if there is just one.
-         if isinstance(obj, list) and len(obj) == 1:
-             return obj[0]
-         return obj
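The `_from_singleton_list` helper (duplicated verbatim in the OpenAI provider below) unwraps the common single-choice case. Its behavior in brief, with illustrative inputs:

    p = FallbackProvider()
    p._from_singleton_list([{"role": "assistant"}])  # -> {"role": "assistant"}
    p._from_singleton_list([1, 2])                   # -> [1, 2]  (n > 1: list kept as-is)
    p._from_singleton_list("text")                   # -> "text"  (non-lists pass through)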
@@ -1,159 +0,0 @@
- from .base import Provider
- from ...semantic_conventions.gen_ai_spans import (
-     FINISH_REASONS,
-     FREQUENCY_PENALTY,
-     INPUT_TOKEN_COUNT,
-     MAX_TOKENS,
-     OUTPUT_TOKEN_COUNT,
-     PRESENCE_PENALTY,
-     REQUEST_MODEL,
-     RESPONSE_MODEL,
-     STOP_SEQUENCES,
-     STREAM,
-     TEMPERATURE,
-     TOP_P,
-     TOTAL_TOKEN_COUNT,
- )
- from .utils import parse_or_dump_to_dict
-
- from collections import defaultdict
- from typing import Any, Optional, Union
- import logging
- import pydantic
-
- from openai.types.chat.chat_completion import ChatCompletion
- from openai.types.chat.chat_completion_message import ChatCompletionMessage
- from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
-
-
- class OpenAI(Provider):
-     logger = logging.getLogger("lmnr.sdk.tracing.providers.openai")
-
-     def display_name(self) -> str:
-         return "OpenAI"
-
-     def stream_list_to_dict(
-         self, response: list[Union[ChatCompletionChunk, str]]
-     ) -> dict[str, Any]:
-         model = None
-         finish_reasons = []
-         output_tokens = 0
-         outputs = defaultdict(lambda: defaultdict(str))
-         try:
-             for chunk in response:
-                 chunk = parse_or_dump_to_dict(chunk)
-                 if model is None:
-                     model = chunk["model"]
-                 finish_reasons = [
-                     choice.get("finish_reason") for choice in chunk.get("choices", [])
-                 ]
-                 for i, choice in enumerate(chunk.get("choices", [])):
-                     if choice["delta"] and isinstance(choice["delta"], dict):
-                         for key in choice["delta"]:
-                             if choice["delta"][key] is None:
-                                 if key not in outputs[i]:
-                                     outputs[i][key] = None
-                                 continue
-                             outputs[i][key] += choice["delta"][key]
-                 output_tokens += 1
-         except Exception as e:
-             self.logger.error(f"Error parsing streaming response: {e}")
-
-         output_key_values = [
-             self._message_to_key_and_output(dict(outputs[i]))
-             for i in range(len(outputs))
-         ]
-         return {
-             "model": model,
-             "prompt_tokens": 0,
-             "usage": {
-                 "prompt_tokens": 0,
-                 "completion_tokens": output_tokens,
-                 "total_tokens": output_tokens,
-                 "finish_reason": self._from_singleton_list(finish_reasons),
-             },
-             "choices": [
-                 (
-                     {
-                         "message": {
-                             output[0]: output[1],
-                             "role": "assistant",
-                         }
-                     }
-                     if output
-                     else None
-                 )
-                 for output in output_key_values
-             ],
-         }
-
-     def extract_llm_attributes_from_response(
-         self, response: Union[str, dict[str, Any], pydantic.BaseModel]
-     ) -> dict[str, Any]:
-         obj = parse_or_dump_to_dict(response)
-
-         choices = obj.get("choices", [])
-         decisions = []
-         for choice in choices:
-             # choice = parse_or_dump_to_dict(choice)
-             if choice.get("content"):
-                 decisions.append("completion")
-             elif choice.get("refusal"):
-                 decisions.append("refusal")
-             elif choice.get("tool_calls"):
-                 decisions.append("tool_calls")
-             else:
-                 decisions.append(None)
-
-         return {
-             RESPONSE_MODEL: obj.get("model"),
-             INPUT_TOKEN_COUNT: obj.get("usage", {}).get("prompt_tokens"),
-             OUTPUT_TOKEN_COUNT: obj.get("usage", {}).get("completion_tokens"),
-             TOTAL_TOKEN_COUNT: obj.get("usage", {}).get("total_tokens"),
-             FINISH_REASONS: obj.get("finish_reason"),
-             # "decision": self._from_singleton_list(decisions),
-         }
-
-     def extract_llm_output(
-         self, result: Union[str, dict[str, Any], ChatCompletion]
-     ) -> Any:
-         result = parse_or_dump_to_dict(result)
-         choices = result.get("choices")
-         if not choices:
-             return None
-         outputs = [choice["message"] for choice in choices]
-
-         return self._from_singleton_list(outputs)
-
-     def extract_llm_attributes_from_args(
-         self, func_args: list[Any], func_kwargs: dict[str, Any]
-     ) -> dict[str, Any]:
-         return {
-             REQUEST_MODEL: func_kwargs.get("model"),
-             TEMPERATURE: func_kwargs.get("temperature"),
-             TOP_P: func_kwargs.get("top_p"),
-             FREQUENCY_PENALTY: func_kwargs.get("frequency_penalty"),
-             PRESENCE_PENALTY: func_kwargs.get("presence_penalty"),
-             STOP_SEQUENCES: func_kwargs.get("stop"),
-             MAX_TOKENS: func_kwargs.get("max_tokens"),
-             STREAM: func_kwargs.get("stream", False),
-         }
-
-     def _message_to_key_and_output(
-         self, message: Union[dict[str, Any], ChatCompletionMessage]
-     ) -> Optional[tuple[str, str]]:
-         message = parse_or_dump_to_dict(message)
-
-         for key in ["content", "refusal", "tool_calls"]:
-             if message.get(key) is not None:
-                 return (key, message[key])
-         return None
-
-     def _from_singleton_list(self, obj: Any) -> Any:
-         # OpenAI returns a list of choices. This will have more than one item
-         # only if the request parameter `n` is specified and is greater than 1.
-         # That's a rare case, so we return the [contents of the] choice alone if there is just one.
-         if isinstance(obj, list) and len(obj) == 1:
-             return obj[0]
-         return obj
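To make the stream aggregation concrete: with the loop indented as reconstructed above, completion_tokens counts one per chunk, and per-choice delta fragments are concatenated by key. A small sketch with hand-built chunk dicts (shaped like a simplified ChatCompletionChunk.model_dump()):

    chunks = [
        {"model": "gpt-4o", "choices": [{"delta": {"content": "Hel"}, "finish_reason": None}]},
        {"model": "gpt-4o", "choices": [{"delta": {"content": "lo"}, "finish_reason": "stop"}]},
    ]
    merged = OpenAI().stream_list_to_dict(chunks)
    # merged["choices"][0]["message"] == {"content": "Hello", "role": "assistant"}
    # merged["usage"] == {"prompt_tokens": 0, "completion_tokens": 2,
    #                     "total_tokens": 2, "finish_reason": "stop"}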
@@ -1,33 +0,0 @@
- import logging
- import json
- import pydantic
- import typing
-
- logger = logging.getLogger("lmnr.sdk.tracing.providers.utils")
-
-
- def parse_or_dump_to_dict(
-     obj: typing.Union[pydantic.BaseModel, dict[str, typing.Any], str]
- ) -> dict[str, typing.Any]:
-     if isinstance(obj, pydantic.BaseModel):
-         return obj.model_dump()
-     if isinstance(obj, str):
-         try:
-             parsed = json.loads(obj)
-             if isinstance(parsed, dict):
-                 return parsed
-             else:
-                 logger.warning(
-                     f"Expected a dict, but got: {type(parsed)}. Returning empty dict."
-                 )
-                 return {}
-         except Exception as e:
-             logger.error(f"Error parsing string: {e}")
-             return {}
-
-     if isinstance(obj, dict):
-         return obj
-     logger.warning(
-         f"Expected a dict, BaseModel, or str, but got {type(obj)}. Returning empty dict."
-     )
-     return {}
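parse_or_dump_to_dict normalizes pydantic models, JSON strings, and dicts to a plain dict, and falls back to an empty dict (with a log line) on anything else. Its behavior at a glance, with illustrative inputs:

    import pydantic

    class Usage(pydantic.BaseModel):
        prompt_tokens: int

    parse_or_dump_to_dict(Usage(prompt_tokens=3))  # {'prompt_tokens': 3}
    parse_or_dump_to_dict('{"model": "gpt-4o"}')   # {'model': 'gpt-4o'}
    parse_or_dump_to_dict("[1, 2]")                # {}  (valid JSON, but not a dict)
    parse_or_dump_to_dict(42)                      # {}  (unsupported type)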