lmnr 0.2.15__py3-none-any.whl → 0.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. lmnr/__init__.py +4 -4
  2. lmnr/sdk/client.py +161 -0
  3. lmnr/sdk/collector.py +177 -0
  4. lmnr/sdk/constants.py +1 -0
  5. lmnr/sdk/context.py +456 -0
  6. lmnr/sdk/decorators.py +277 -0
  7. lmnr/sdk/interface.py +339 -0
  8. lmnr/sdk/providers/__init__.py +2 -0
  9. lmnr/sdk/providers/base.py +28 -0
  10. lmnr/sdk/providers/fallback.py +131 -0
  11. lmnr/sdk/providers/openai.py +140 -0
  12. lmnr/sdk/providers/utils.py +33 -0
  13. lmnr/sdk/tracing_types.py +197 -0
  14. lmnr/sdk/types.py +69 -0
  15. lmnr/sdk/utils.py +102 -0
  16. lmnr-0.3.0.dist-info/METADATA +185 -0
  17. lmnr-0.3.0.dist-info/RECORD +21 -0
  18. lmnr/cli/__init__.py +0 -0
  19. lmnr/cli/__main__.py +0 -4
  20. lmnr/cli/cli.py +0 -230
  21. lmnr/cli/parser/__init__.py +0 -0
  22. lmnr/cli/parser/nodes/__init__.py +0 -45
  23. lmnr/cli/parser/nodes/code.py +0 -36
  24. lmnr/cli/parser/nodes/condition.py +0 -30
  25. lmnr/cli/parser/nodes/input.py +0 -25
  26. lmnr/cli/parser/nodes/json_extractor.py +0 -29
  27. lmnr/cli/parser/nodes/llm.py +0 -56
  28. lmnr/cli/parser/nodes/output.py +0 -27
  29. lmnr/cli/parser/nodes/router.py +0 -37
  30. lmnr/cli/parser/nodes/semantic_search.py +0 -53
  31. lmnr/cli/parser/nodes/types.py +0 -153
  32. lmnr/cli/parser/parser.py +0 -62
  33. lmnr/cli/parser/utils.py +0 -49
  34. lmnr/cli/zip.py +0 -16
  35. lmnr/sdk/endpoint.py +0 -186
  36. lmnr/sdk/registry.py +0 -29
  37. lmnr/sdk/remote_debugger.py +0 -148
  38. lmnr/types.py +0 -101
  39. lmnr-0.2.15.dist-info/METADATA +0 -187
  40. lmnr-0.2.15.dist-info/RECORD +0 -28
  41. {lmnr-0.2.15.dist-info → lmnr-0.3.0.dist-info}/LICENSE +0 -0
  42. {lmnr-0.2.15.dist-info → lmnr-0.3.0.dist-info}/WHEEL +0 -0
  43. {lmnr-0.2.15.dist-info → lmnr-0.3.0.dist-info}/entry_points.txt +0 -0
lmnr/sdk/providers/fallback.py ADDED
@@ -0,0 +1,131 @@
+ from .base import Provider
+ from .utils import parse_or_dump_to_dict
+
+ from collections import defaultdict
+ from typing import Any, Optional, Union
+ import logging
+ import pydantic
+
+
+ class FallbackProvider(Provider):
+     logger = logging.getLogger("lmnr.sdk.tracing.providers.fallback")
+
+     def display_name(self) -> str:
+         return ""
+
+     def stream_list_to_dict(
+         self,
+         response: list[Union[dict[str, Any], str, pydantic.BaseModel]],
+     ) -> dict[str, Any]:
+         model = None
+         output_tokens = 0
+         outputs = defaultdict(lambda: defaultdict(str))
+         try:
+             for chunk in response:
+                 chunk = parse_or_dump_to_dict(chunk)
+                 if model is None:
+                     model = chunk.get("model")
+                 for i, choice in enumerate(chunk.get("choices", [])):
+                     # TODO: handle not only content, but also tool_calls and
+                     # refusal; also handle roles
+                     if choice["delta"] and isinstance(choice["delta"], dict):
+                         for key in choice["delta"]:
+                             if choice["delta"][key] is None:
+                                 if key not in outputs[i]:
+                                     outputs[i][key] = None
+                                 continue
+                             outputs[i][key] += choice["delta"][key]
+                 output_tokens += 1
+         except Exception as e:
+             self.logger.error(f"Error parsing streaming response: {e}")
+             pass
+
+         output_key_values = [
+             self._message_to_key_and_output(dict(outputs[i]))
+             for i in range(len(outputs))
+         ]
+         return {
+             "model": model,
+             "prompt_tokens": 0,
+             "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": output_tokens,
+                 "total_tokens": output_tokens,
+             },
+             "choices": [
+                 (
+                     {
+                         "message": {
+                             output[0]: output[1],
+                             "role": "assistant",
+                         }
+                     }
+                     if output
+                     else None
+                 )
+                 for output in output_key_values
+             ],
+         }
+
+     def extract_llm_attributes_from_response(
+         self, response: Union[str, dict[str, Any], pydantic.BaseModel]
+     ) -> dict[str, Any]:
+         obj = parse_or_dump_to_dict(response)
+
+         choices = obj.get("choices", [])
+         decisions = []
+         for choice in choices:
+             if choice.get("content"):
+                 decisions.append("completion")
+             elif choice.get("refusal"):
+                 decisions.append("refusal")
+             elif choice.get("tool_calls"):
+                 decisions.append("tool_calls")
+             else:
+                 decisions.append(None)
+
+         return {
+             "response_model": obj.get("model"),
+             "input_token_count": obj.get("usage", {}).get("prompt_tokens"),
+             "output_token_count": obj.get("usage", {}).get("completion_tokens"),
+             "total_token_count": obj.get("usage", {}).get("total_tokens"),
+             "decision": self._from_singleton_list(decisions),
+         }
+
+     def extract_llm_output(
+         self, result: Union[str, dict[str, Any], pydantic.BaseModel]
+     ) -> Any:
+         result = parse_or_dump_to_dict(result)
+         choices = result.get("choices")
+         if not choices:
+             return None
+         outputs = [choice.get("message") for choice in choices]
+
+         return self._from_singleton_list(outputs)
+
+     def extract_llm_attributes_from_args(
+         self, func_args: list[Any], func_kwargs: dict[str, Any]
+     ) -> dict[str, Any]:
+         return {
+             "request_model": func_kwargs.get("model"),
+             "temperature": func_kwargs.get("temperature"),
+             "stream": func_kwargs.get("stream", False),
+         }
+
+     def _message_to_key_and_output(
+         self, message: Union[dict[str, Any], pydantic.BaseModel]
+     ) -> Optional[tuple[str, str]]:
+         message = parse_or_dump_to_dict(message)
+
+         for key in ["content", "refusal", "tool_calls"]:
+             if message.get(key) is not None:
+                 return (key, message[key])
+         return None
+
+     def _from_singleton_list(self, obj: Any) -> Any:
+         # OpenAI returns a list of choices. This will have more than one item
+         # only if the request parameter `n` is specified and is greater than 1.
+         # That's a rare case, so we return the [contents of the] choice alone if there is just one.
+         if isinstance(obj, list) and len(obj) == 1:
+             return obj[0]
+         return obj
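
For context, a minimal sketch (not part of the diff) of how FallbackProvider.stream_list_to_dict folds a list of streamed delta chunks back into one completion-shaped dict. The chunk dicts and the import path below are assumptions, mirroring the OpenAI streaming shape the code expects:

# Hypothetical sketch: collapse streamed delta chunks into one response dict.
from lmnr.sdk.providers.fallback import FallbackProvider  # path assumed from the file layout

chunks = [
    {"model": "my-model", "choices": [{"delta": {"role": "assistant", "content": "Hel"}}]},
    {"model": "my-model", "choices": [{"delta": {"content": "lo"}}]},
]
result = FallbackProvider().stream_list_to_dict(chunks)
# result["choices"][0]["message"] == {"content": "Hello", "role": "assistant"}
# result["usage"]["completion_tokens"] == 2  (one "token" counted per chunk)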
lmnr/sdk/providers/openai.py ADDED
@@ -0,0 +1,140 @@
+ from .base import Provider
+ from .utils import parse_or_dump_to_dict
+
+ from collections import defaultdict
+ from typing import Any, Optional, Union
+ import logging
+ import pydantic
+
+ from openai.types.chat.chat_completion import ChatCompletion
+ from openai.types.chat.chat_completion_message import ChatCompletionMessage
+ from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
+
+
+ class OpenAI(Provider):
+     logger = logging.getLogger("lmnr.sdk.tracing.providers.openai")
+
+     def display_name(self) -> str:
+         return "OpenAI"
+
+     def stream_list_to_dict(
+         self, response: list[Union[ChatCompletionChunk, str]]
+     ) -> dict[str, Any]:
+         model = None
+         finish_reasons = []
+         output_tokens = 0
+         outputs = defaultdict(lambda: defaultdict(str))
+         try:
+             for chunk in response:
+                 chunk = parse_or_dump_to_dict(chunk)
+                 if model is None:
+                     model = chunk["model"]
+                 finish_reasons = [
+                     choice.get("finish_reason") for choice in chunk.get("choices", [])
+                 ]
+                 for i, choice in enumerate(chunk.get("choices", [])):
+                     if choice["delta"] and isinstance(choice["delta"], dict):
+                         for key in choice["delta"]:
+                             if choice["delta"][key] is None:
+                                 if key not in outputs[i]:
+                                     outputs[i][key] = None
+                                 continue
+                             outputs[i][key] += choice["delta"][key]
+                 output_tokens += 1
+         except Exception as e:
+             self.logger.error(f"Error parsing streaming response: {e}")
+             pass
+
+         output_key_values = [
+             self._message_to_key_and_output(dict(outputs[i]))
+             for i in range(len(outputs))
+         ]
+         return {
+             "model": model,
+             "prompt_tokens": 0,
+             "usage": {
+                 "prompt_tokens": 0,
+                 "completion_tokens": output_tokens,
+                 "total_tokens": output_tokens,
+                 "finish_reason": self._from_singleton_list(finish_reasons),
+             },
+             "choices": [
+                 (
+                     {
+                         "message": {
+                             output[0]: output[1],
+                             "role": "assistant",
+                         }
+                     }
+                     if output
+                     else None
+                 )
+                 for output in output_key_values
+             ],
+         }
+
+     def extract_llm_attributes_from_response(
+         self, response: Union[str, dict[str, Any], pydantic.BaseModel]
+     ) -> dict[str, Any]:
+         obj = parse_or_dump_to_dict(response)
+
+         choices = obj.get("choices", [])
+         decisions = []
+         for choice in choices:
+             # choice = parse_or_dump_to_dict(choice)
+             if choice.get("content"):
+                 decisions.append("completion")
+             elif choice.get("refusal"):
+                 decisions.append("refusal")
+             elif choice.get("tool_calls"):
+                 decisions.append("tool_calls")
+             else:
+                 decisions.append(None)
+
+         return {
+             "response_model": obj.get("model"),
+             "input_token_count": obj.get("usage", {}).get("prompt_tokens"),
+             "output_token_count": obj.get("usage", {}).get("completion_tokens"),
+             "total_token_count": obj.get("usage", {}).get("total_tokens"),
+             "finish_reason": obj.get("finish_reason"),
+             "decision": self._from_singleton_list(decisions),
+         }
+
+     def extract_llm_output(
+         self, result: Union[str, dict[str, Any], ChatCompletion]
+     ) -> Any:
+         result = parse_or_dump_to_dict(result)
+         choices = result.get("choices")
+         if not choices:
+             return None
+         outputs = [choice["message"] for choice in choices]
+
+         return self._from_singleton_list(outputs)
+
+     def extract_llm_attributes_from_args(
+         self, func_args: list[Any], func_kwargs: dict[str, Any]
+     ) -> dict[str, Any]:
+         return {
+             "request_model": func_kwargs.get("model"),
+             "temperature": func_kwargs.get("temperature"),
+             "top_p": func_kwargs.get("top_p"),
+             "stream": func_kwargs.get("stream", False),
+         }
+
+     def _message_to_key_and_output(
+         self, message: Union[dict[str, Any], ChatCompletionMessage]
+     ) -> Optional[tuple[str, str]]:
+         message = parse_or_dump_to_dict(message)
+
+         for key in ["content", "refusal", "tool_calls"]:
+             if message.get(key) is not None:
+                 return (key, message[key])
+         return None
+
+     def _from_singleton_list(self, obj: Any) -> Any:
+         # OpenAI returns a list of choices. This will have more than one item
+         # only if the request parameter `n` is specified and is greater than 1.
+         # That's a rare case, so we return the [contents of the] choice alone if there is just one.
+         if isinstance(obj, list) and len(obj) == 1:
+             return obj[0]
+         return obj
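
Likewise, a hedged sketch of the non-streaming path: extract_llm_attributes_from_response and extract_llm_output operate on a plain dict (a ChatCompletion would first be converted by parse_or_dump_to_dict). The import path and values below are illustrative, and the module needs the `openai` package installed:

# Hypothetical sketch: attribute and output extraction from a completed response.
from lmnr.sdk.providers.openai import OpenAI  # path assumed from the file layout

completion = {
    "model": "gpt-4o-mini",
    "usage": {"prompt_tokens": 12, "completion_tokens": 5, "total_tokens": 17},
    "choices": [{"message": {"role": "assistant", "content": "Hi!"}}],
}
provider = OpenAI()
attrs = provider.extract_llm_attributes_from_response(completion)
# attrs["response_model"] == "gpt-4o-mini"; attrs["input_token_count"] == 12
output = provider.extract_llm_output(completion)
# output == {"role": "assistant", "content": "Hi!"}  (singleton choice unwrapped)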
lmnr/sdk/providers/utils.py ADDED
@@ -0,0 +1,33 @@
+ import logging
+ import json
+ import pydantic
+ import typing
+
+ logger = logging.getLogger("lmnr.sdk.tracing.providers.utils")
+
+
+ def parse_or_dump_to_dict(
+     obj: typing.Union[pydantic.BaseModel, dict[str, typing.Any], str]
+ ) -> dict[str, typing.Any]:
+     if isinstance(obj, pydantic.BaseModel):
+         return obj.model_dump()
+     if isinstance(obj, str):
+         try:
+             parsed = json.loads(obj)
+             if isinstance(parsed, dict):
+                 return parsed
+             else:
+                 logger.warning(
+                     f"Expected a dict, but got: {type(parsed)}. Returning empty dict."
+                 )
+                 return {}
+         except Exception as e:
+             logger.error(f"Error parsing string: {e}")
+             return {}
+
+     if isinstance(obj, dict):
+         return obj
+     logger.warning(
+         f"Expected a dict, BaseModel, or str, but got {type(obj)}. Returning empty dict."
+     )
+     return {}
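
The helper normalizes the three input shapes the providers deal with; anything else degrades to an empty dict plus a log record. A quick illustration (the Point model is invented for the example):

# Hypothetical sketch: the three accepted shapes, plus the fallback behavior.
import pydantic
from lmnr.sdk.providers.utils import parse_or_dump_to_dict  # path assumed

class Point(pydantic.BaseModel):
    x: int
    y: int

parse_or_dump_to_dict(Point(x=1, y=2))  # {"x": 1, "y": 2} via model_dump()
parse_or_dump_to_dict('{"a": 1}')       # {"a": 1} via json.loads
parse_or_dump_to_dict({"b": 2})         # returned unchanged
parse_or_dump_to_dict("[1, 2]")         # {} (valid JSON but not a dict; warning logged)
parse_or_dump_to_dict(42)               # {} (unsupported type; warning logged)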
lmnr/sdk/tracing_types.py ADDED
@@ -0,0 +1,197 @@
+ from typing import Any, Literal, Optional, Union
+ import datetime
+ import pydantic
+ import uuid
+
+ from .constants import CURRENT_TRACING_VERSION
+ from .utils import to_dict
+
+
+ class EvaluateEvent(pydantic.BaseModel):
+     name: str
+     data: str
+
+
+ class Span(pydantic.BaseModel):
+     version: str = CURRENT_TRACING_VERSION
+     spanType: Literal["DEFAULT", "LLM"] = "DEFAULT"
+     id: uuid.UUID
+     traceId: uuid.UUID
+     parentSpanId: Optional[uuid.UUID] = None
+     name: str
+     # generated at start of span, so required
+     startTime: datetime.datetime
+     # generated at end of span, optional while the span is still running
+     endTime: Optional[datetime.datetime] = None
+     attributes: dict[str, Any] = {}
+     input: Optional[Any] = None
+     output: Optional[Any] = None
+     metadata: Optional[dict[str, Any]] = None
+     evaluateEvents: list[EvaluateEvent] = []
+     events: list["Event"] = None
+
+     def __init__(
+         self,
+         name: str,
+         trace_id: uuid.UUID,
+         start_time: Optional[datetime.datetime] = None,
+         version: str = CURRENT_TRACING_VERSION,
+         span_type: Literal["DEFAULT", "LLM"] = "DEFAULT",
+         id: Optional[uuid.UUID] = None,
+         parent_span_id: Optional[uuid.UUID] = None,
+         input: Optional[Any] = None,
+         metadata: Optional[dict[str, Any]] = {},
+         attributes: Optional[dict[str, Any]] = {},
+         evaluate_events: list[EvaluateEvent] = [],
+     ):
+         super().__init__(
+             version=version,
+             spanType=span_type,
+             id=id or uuid.uuid4(),
+             traceId=trace_id,
+             parentSpanId=parent_span_id,
+             name=name,
+             startTime=start_time or datetime.datetime.now(datetime.timezone.utc),
+             input=input,
+             metadata=metadata or {},
+             attributes=attributes or {},
+             evaluateEvents=evaluate_events,
+             events=[],
+         )
+
+     def update(
+         self,
+         end_time: Optional[datetime.datetime],
+         output: Optional[Any] = None,
+         metadata: Optional[dict[str, Any]] = None,
+         attributes: Optional[dict[str, Any]] = None,
+         evaluate_events: Optional[list[EvaluateEvent]] = None,
+         override: bool = False,
+     ):
+         self.endTime = end_time or datetime.datetime.now(datetime.timezone.utc)
+         self.output = output
+         new_metadata = (
+             metadata if override else {**(self.metadata or {}), **(metadata or {})}
+         )
+         new_attributes = (
+             attributes or {}
+             if override
+             else {**(self.attributes or {}), **(attributes or {})}
+         )
+         new_evaluate_events = (
+             evaluate_events or []
+             if override
+             else self.evaluateEvents + (evaluate_events or [])
+         )
+         self.metadata = new_metadata
+         self.attributes = new_attributes
+         self.evaluateEvents = new_evaluate_events
+
+     def add_event(self, event: "Event"):
+         self.events.append(event)
+
+     def to_dict(self) -> dict[str, Any]:
+         try:
+             obj = self.model_dump()
+         except TypeError:
+             # if inner values are pydantic models, we need to call model_dump on them
+             # see: https://github.com/pydantic/pydantic/issues/7713
+             obj = {}
+             for key, value in self.__dict__.items():
+                 obj[key] = (
+                     value.model_dump()
+                     if isinstance(value, pydantic.BaseModel)
+                     else value
+                 )
+         obj = to_dict(obj)
+         return obj
+
+
+ class Trace(pydantic.BaseModel):
+     id: uuid.UUID
+     version: str = CURRENT_TRACING_VERSION
+     success: bool = True
+     startTime: Optional[datetime.datetime] = None
+     endTime: Optional[datetime.datetime] = None
+     userId: Optional[str] = None  # provided by user or null
+     sessionId: Optional[str] = None  # provided by user or uuid()
+     release: Optional[str] = None
+     metadata: Optional[dict[str, Any]] = None
+
+     def __init__(
+         self,
+         success: bool = True,
+         start_time: Optional[datetime.datetime] = None,
+         end_time: Optional[datetime.datetime] = None,
+         id: Optional[uuid.UUID] = None,
+         user_id: Optional[str] = None,
+         session_id: Optional[str] = None,
+         release: Optional[str] = None,
+         metadata: Optional[dict[str, Any]] = None,
+     ):
+         id_ = id or uuid.uuid4()
+         super().__init__(
+             id=id_,
+             startTime=start_time,
+             success=success,
+             endTime=end_time,
+             userId=user_id,
+             sessionId=session_id,
+             release=release,
+             metadata=metadata or {},
+         )
+
+     def to_dict(self) -> dict[str, Any]:
+         try:
+             obj = self.model_dump()
+         except TypeError:
+             # if inner values are pydantic models, we need to call model_dump on them
+             # see: https://github.com/pydantic/pydantic/issues/7713
+             obj = {}
+             for key, value in self.__dict__.items():
+                 obj[key] = (
+                     value.model_dump()
+                     if isinstance(value, pydantic.BaseModel)
+                     else value
+                 )
+         obj = to_dict(obj)
+         return obj
+
+
+ class Event(pydantic.BaseModel):
+     id: uuid.UUID
+     templateName: str
+     timestamp: datetime.datetime
+     spanId: uuid.UUID
+     value: Optional[Union[int, str]] = None
+
+     def __init__(
+         self,
+         name: str,
+         span_id: uuid.UUID,
+         timestamp: Optional[datetime.datetime] = None,
+         value: Optional[Union[int, str]] = None,
+     ):
+         super().__init__(
+             id=uuid.uuid4(),
+             templateName=name,
+             spanId=span_id,
+             timestamp=timestamp or datetime.datetime.now(datetime.timezone.utc),
+             value=value,
+         )
+
+     def to_dict(self) -> dict[str, Any]:
+         try:
+             obj = self.model_dump()
+         except TypeError:
+             # if inner values are pydantic models, we need to call model_dump on them
+             # see: https://github.com/pydantic/pydantic/issues/7713
+             obj = {}
+             for key, value in self.__dict__.items():
+                 obj[key] = (
+                     value.model_dump()
+                     if isinstance(value, pydantic.BaseModel)
+                     else value
+                 )
+         obj = to_dict(obj)
+         return obj
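
To see how these models fit together, a hedged sketch (names and values are illustrative): a trace owns spans via traceId, spans collect events, and to_dict produces the JSON-safe camelCase payload.

# Hypothetical sketch: one trace, one span, one event.
from lmnr.sdk.tracing_types import Event, Span, Trace  # path assumed

trace = Trace(user_id="user-123", session_id="session-456")
span = Span(name="generate_reply", trace_id=trace.id, input={"prompt": "Hi"})
span.add_event(Event(name="guardrail_check", span_id=span.id, value="passed"))
span.update(end_time=None, output="Hello!", attributes={"model": "gpt-4o"})  # None -> now()
payload = span.to_dict()
# payload["traceId"], payload["startTime"], ... are plain strings, ready to send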
lmnr/sdk/types.py ADDED
@@ -0,0 +1,69 @@
+ import requests
+ import pydantic
+ import uuid
+ from typing import Optional, Union
+
+
+ class ChatMessage(pydantic.BaseModel):
+     role: str
+     content: str
+
+
+ class ConditionedValue(pydantic.BaseModel):
+     condition: str
+     value: "NodeInput"
+
+
+ NodeInput = Union[str, list[ChatMessage], ConditionedValue]  # TypeAlias
+
+
+ class PipelineRunRequest(pydantic.BaseModel):
+     inputs: dict[str, NodeInput]
+     pipeline: str
+     env: dict[str, str] = pydantic.Field(default_factory=dict)
+     metadata: dict[str, str] = pydantic.Field(default_factory=dict)
+     stream: bool = pydantic.Field(default=False)
+     parent_span_id: Optional[uuid.UUID] = pydantic.Field(default=None)
+     trace_id: Optional[uuid.UUID] = pydantic.Field(default=None)
+
+     # uuid is not serializable by default, so we need to convert it to a string
+     def to_dict(self):
+         return {
+             "inputs": {
+                 k: v.model_dump() if isinstance(v, pydantic.BaseModel) else v
+                 for k, v in self.inputs.items()
+             },
+             "pipeline": self.pipeline,
+             "env": self.env,
+             "metadata": self.metadata,
+             "stream": self.stream,
+             "parentSpanId": str(self.parent_span_id) if self.parent_span_id else None,
+             "traceId": str(self.trace_id) if self.trace_id else None,
+         }
+
+
+ class PipelineRunResponse(pydantic.BaseModel):
+     outputs: dict[str, dict[str, NodeInput]]
+     run_id: str
+
+
+ class PipelineRunError(Exception):
+     error_code: str
+     error_message: str
+
+     def __init__(self, response: requests.Response):
+         try:
+             resp_json = response.json()
+             self.error_code = resp_json["error_code"]
+             self.error_message = resp_json["error_message"]
+             super().__init__(self.error_message)
+         except Exception:
+             super().__init__(response.text)
+
+     def __str__(self) -> str:
+         try:
+             return str(
+                 {"error_code": self.error_code, "error_message": self.error_message}
+             )
+         except Exception:
+             return super().__str__()
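
A short sketch of the serialization contract (illustrative values): Python snake_case fields go out under the camelCase keys the backend expects, with UUIDs stringified.

# Hypothetical sketch: building and serializing a pipeline run request.
import uuid
from lmnr.sdk.types import PipelineRunRequest  # path assumed

req = PipelineRunRequest(
    inputs={"question": "What does this pipeline do?"},
    pipeline="my-pipeline",
    env={"OPENAI_API_KEY": "sk-..."},
    trace_id=uuid.uuid4(),
)
body = req.to_dict()
# body["traceId"] is a str; body["parentSpanId"] is None; str inputs pass through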
lmnr/sdk/utils.py ADDED
@@ -0,0 +1,102 @@
+ import asyncio
+ import copy
+ import datetime
+ import dataclasses
+ import enum
+ import inspect
+ import pydantic
+ import queue
+ import typing
+ import uuid
+
+ from .providers import Provider, OpenAI
+
+
+ def is_method(func: typing.Callable) -> bool:
+     # inspect.ismethod is True for bound methods only, but in the decorator,
+     # the method is not bound yet, so we need to check if the first parameter
+     # is either 'self' or 'cls'. This relies only on naming conventions.
+
+     # `signature._parameters` is an OrderedDict,
+     # so the order of insertion is preserved
+     params = list(inspect.signature(func).parameters)
+     return len(params) > 0 and params[0] in ["self", "cls"]
+
+
+ def is_async(func: typing.Callable) -> bool:
+     # `__wrapped__` is set automatically by `functools.wraps` and `functools.update_wrapper`,
+     # so we can use it to get the original function
+     while hasattr(func, "__wrapped__"):
+         func = func.__wrapped__
+
+     # Check if the function is asynchronous
+     if asyncio.iscoroutinefunction(func):
+         return True
+
+     # Fallback: check the coroutine flag on the function's code object. This covers
+     # cases where the decorator did not properly use `functools.wraps` or `functools.update_wrapper`
+     CO_COROUTINE = inspect.CO_COROUTINE
+     return (func.__code__.co_flags & CO_COROUTINE) != 0
+
+
+ def is_async_iterator(o: typing.Any) -> bool:
+     return hasattr(o, "__aiter__") and hasattr(o, "__anext__")
+
+
+ def is_iterator(o: typing.Any) -> bool:
+     return hasattr(o, "__iter__") and hasattr(o, "__next__")
+
+
+ def to_dict(obj: typing.Any) -> dict[str, typing.Any]:
+     def to_dict_inner(o: typing.Any):
+         if isinstance(o, (datetime.datetime, datetime.date)):
+             return o.strftime("%Y-%m-%dT%H:%M:%S.%f%z")
+         elif o is None:
+             return None
+         elif isinstance(o, (int, float, str, bool)):
+             return o
+         elif isinstance(o, uuid.UUID):
+             return str(o)  # same as the fallback str() below, but explicit
+         elif isinstance(o, enum.Enum):
+             return o.value
+         elif dataclasses.is_dataclass(o):
+             return dataclasses.asdict(o)
+         elif isinstance(o, bytes):
+             return o.decode("utf-8")
+         elif isinstance(o, pydantic.BaseModel):
+             return o.model_dump()
+         elif isinstance(o, (tuple, set, frozenset)):
+             return [to_dict_inner(item) for item in o]
+         elif isinstance(o, list):
+             return [to_dict_inner(item) for item in o]
+         elif isinstance(o, dict):
+             return {to_dict_inner(k): to_dict_inner(v) for k, v in o.items()}
+         elif isinstance(o, queue.Queue):
+             return type(o).__name__
+
+         return str(o)
+
+     for key in obj.keys():
+         obj[key] = to_dict_inner(obj[key])
+
+     return obj
+
+
+ def get_input_from_func_args(
+     func: typing.Callable,
+     is_method: bool = False,
+     func_args: list[typing.Any] = [],
+     func_kwargs: dict[str, typing.Any] = {},
+ ) -> dict[str, typing.Any]:
+     # Remove the implicitly passed "self" or "cls" argument for instance or class methods
+     res = copy.deepcopy(func_kwargs)
+     for i, k in enumerate(inspect.signature(func).parameters.keys()):
+         if is_method and k in ["self", "cls"]:
+             continue
+         res[k] = func_args[i]
+     return res
+
+
+ PROVIDER_NAME_TO_OBJECT: dict[str, Provider] = {
+     "openai": OpenAI(),
+ }
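
Finally, a sketch of the two workhorses here (class and values are illustrative, import path assumed): to_dict makes an attribute dict JSON-safe, and get_input_from_func_args reconstructs a decorated function's inputs keyed by parameter name.

# Hypothetical sketch: JSON-safe conversion and argument capture.
import datetime
import uuid
from lmnr.sdk.utils import get_input_from_func_args, is_method, to_dict

to_dict({"id": uuid.uuid4(), "at": datetime.datetime.now(datetime.timezone.utc)})
# -> {"id": "<uuid as str>", "at": "2024-...T...+0000"}

class Greeter:
    def greet(self, name, punctuation="!"):
        return f"Hello, {name}{punctuation}"

g = Greeter()
get_input_from_func_args(Greeter.greet, is_method(Greeter.greet), [g, "Ada", "?"])
# -> {"name": "Ada", "punctuation": "?"}  ("self" is dropped for methods)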