uipath-langchain 0.0.142__py3-none-any.whl → 0.0.143__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.

Potentially problematic release.


This version of uipath-langchain might be problematic.

uipath_langchain/_cli/cli_debug.py
@@ -12,8 +12,9 @@ from uipath._cli._runtime._contracts import (
     UiPathRuntimeFactory,
 )
 from uipath._cli.middlewares import MiddlewareResult
+from uipath.tracing import LlmOpsHttpExporter
 
-from .._tracing import LangChainExporter, _instrument_traceable_attributes
+from .._tracing import _instrument_traceable_attributes
 from ._runtime._exception import LangGraphRuntimeError
 from ._runtime._runtime import (  # type: ignore[attr-defined]
     LangGraphRuntimeContext,
@@ -61,7 +62,9 @@ def langgraph_debug_middleware(
         )
 
         if context.job_id:
-            runtime_factory.add_span_exporter(LangChainExporter())
+            runtime_factory.add_span_exporter(
+                LlmOpsHttpExporter(extra_process_spans=True)
+            )
 
         runtime_factory.add_instrumentor(LangChainInstrumentor, get_current_span)
 
uipath_langchain/_cli/cli_eval.py
@@ -15,12 +15,12 @@ from uipath._cli._utils._eval_set import EvalHelpers
 from uipath._cli.middlewares import MiddlewareResult
 from uipath._events._event_bus import EventBus
 from uipath.eval._helpers import auto_discover_entrypoint
+from uipath.tracing import LlmOpsHttpExporter
 
 from uipath_langchain._cli._runtime._context import LangGraphRuntimeContext
 from uipath_langchain._cli._runtime._runtime import LangGraphScriptRuntime
 from uipath_langchain._cli._utils._graph import LangGraphConfig
 from uipath_langchain._tracing import (
-    LangChainExporter,
     _instrument_traceable_attributes,
 )
 
@@ -41,7 +41,7 @@ def langgraph_eval_middleware(
 
     if kwargs.get("register_progress_reporter", False):
         progress_reporter = StudioWebProgressReporter(
-            spans_exporter=LangChainExporter()
+            spans_exporter=LlmOpsHttpExporter(extra_process_spans=True)
        )
         asyncio.run(progress_reporter.subscribe_to_eval_runtime_events(event_bus))
     console_reporter = ConsoleProgressReporter()
@@ -76,7 +76,9 @@ def langgraph_eval_middleware(
         )
 
         if eval_context.job_id:
-            runtime_factory.add_span_exporter(LangChainExporter())
+            runtime_factory.add_span_exporter(
+                LlmOpsHttpExporter(extra_process_spans=True)
+            )
 
         runtime_factory.add_instrumentor(LangChainInstrumentor, get_current_span)
 
uipath_langchain/_cli/cli_run.py
@@ -13,8 +13,11 @@ from uipath._cli._runtime._contracts import (
 )
 from uipath._cli.middlewares import MiddlewareResult
 from uipath._events._events import UiPathAgentStateEvent
+from uipath.tracing import JsonLinesFileExporter, LlmOpsHttpExporter
 
-from .._tracing import LangChainExporter, _instrument_traceable_attributes
+from .._tracing import (
+    _instrument_traceable_attributes,
+)
 from ._runtime._exception import LangGraphRuntimeError
 from ._runtime._runtime import (  # type: ignore[attr-defined]
     LangGraphRuntimeContext,
@@ -24,7 +27,11 @@ from ._utils._graph import LangGraphConfig
 
 
 def langgraph_run_middleware(
-    entrypoint: Optional[str], input: Optional[str], resume: bool, **kwargs
+    entrypoint: Optional[str],
+    input: Optional[str],
+    resume: bool,
+    trace_file: Optional[str] = None,
+    **kwargs,
 ) -> MiddlewareResult:
     """Middleware to handle LangGraph execution"""
     config = LangGraphConfig()
@@ -61,8 +68,13 @@ def langgraph_run_middleware(
 
             runtime_factory.add_instrumentor(LangChainInstrumentor, get_current_span)
 
+            if trace_file:
+                runtime_factory.add_span_exporter(JsonLinesFileExporter(trace_file))
+
             if context.job_id:
-                runtime_factory.add_span_exporter(LangChainExporter())
+                runtime_factory.add_span_exporter(
+                    LlmOpsHttpExporter(extra_process_spans=True)
+                )
             await runtime_factory.execute(context)
         else:
             debug_bridge: UiPathDebugBridge = ConsoleDebugBridge()
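
The same exporter swap appears in cli_debug.py and cli_eval.py above; cli_run.py additionally gains the trace_file parameter. The following is only a rough, illustrative sketch of the resulting wiring (configure_exporters is a hypothetical helper, and runtime_factory/context/trace_file stand in for the objects handled by the middleware; only the imports and the two add_span_exporter calls come from the diff):

from typing import Optional

from uipath.tracing import JsonLinesFileExporter, LlmOpsHttpExporter


def configure_exporters(runtime_factory, context, trace_file: Optional[str]) -> None:
    """Hypothetical helper mirroring the exporter wiring added in this release."""
    # New in 0.0.143: optionally write spans to a local JSON Lines file.
    if trace_file:
        runtime_factory.add_span_exporter(JsonLinesFileExporter(trace_file))

    # When running as a job, ship spans via the shared uipath exporter;
    # extra_process_spans=True presumably takes over the LangChain-specific
    # span post-processing previously done by the removed LangChainExporter.
    if context.job_id:
        runtime_factory.add_span_exporter(
            LlmOpsHttpExporter(extra_process_spans=True)
        )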
uipath_langchain/_tracing/__init__.py
@@ -1,4 +1,5 @@
 from ._instrument_traceable import _instrument_traceable_attributes
-from ._oteladapter import LangChainExporter
 
-__all__ = ["LangChainExporter", "_instrument_traceable_attributes"]
+__all__ = [
+    "_instrument_traceable_attributes",
+]
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath-langchain
-Version: 0.0.142
+Version: 0.0.143
 Summary: UiPath Langchain
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-langchain-python
@@ -26,7 +26,7 @@ Requires-Dist: openai>=1.65.5
 Requires-Dist: openinference-instrumentation-langchain>=0.1.50
 Requires-Dist: pydantic-settings>=2.6.0
 Requires-Dist: python-dotenv>=1.0.1
-Requires-Dist: uipath<2.2.0,>=2.1.101
+Requires-Dist: uipath<2.2.0,>=2.1.103
 Provides-Extra: langchain
 Description-Content-Type: text/markdown
 
@@ -2,12 +2,12 @@ uipath_langchain/__init__.py,sha256=VBrvQn7d3nuOdN7zEnV2_S-uhmkjgEIlXiFVeZxZakQ,
 uipath_langchain/middlewares.py,sha256=x3U_tmDIyMXPLzq6n-oNRAnpAF6pKa9wfkPYwE-oUfo,848
 uipath_langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 uipath_langchain/_cli/__init__.py,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtGZhfo,24
-uipath_langchain/_cli/cli_debug.py,sha256=RQqBVne3OdXYPvgl5f7kIrV5zztA8W8LMXWex7oOQeU,3221
+uipath_langchain/_cli/cli_debug.py,sha256=zaB-W3_29FsCqF-YZ3EsayyxC957tg4tOjdcdX8ew-M,3311
 uipath_langchain/_cli/cli_dev.py,sha256=l3XFHrh-0OUFJq3zLMKuzedJAluGQBIZQTHP1KWOmpw,1725
-uipath_langchain/_cli/cli_eval.py,sha256=r8mGlKh-ymxfKrvrU4n0Hg3pQv36c_NhTNR_eokyQEM,3650
+uipath_langchain/_cli/cli_eval.py,sha256=yzxOz-JOMMl1fejZNVQYlBSo-yUIxArtfp2EW1Ow6j4,3753
 uipath_langchain/_cli/cli_init.py,sha256=B-Ht1lz4HNlpYELZU7DLNhSrhGJbsaCdU9UMO2iHUgM,12654
 uipath_langchain/_cli/cli_new.py,sha256=KKLxCzz7cDQ__rRr_a496IHWlSQXhmrBNgmKHnXAnTY,2336
-uipath_langchain/_cli/cli_run.py,sha256=-k7QsfHWRQUAvVB6l533SCC9Xcxm7NSWq_oR0iy6THY,3426
+uipath_langchain/_cli/cli_run.py,sha256=DIsAKsbQ8gTRz44q9ZV3jBjrbM8bhS6lEQ3dd4joDFU,3712
 uipath_langchain/_cli/_runtime/_context.py,sha256=mjmGEogKiO8tUV878BgV9rFIeA9MCmEH6hgs5W_dm4g,328
 uipath_langchain/_cli/_runtime/_conversation.py,sha256=ayghRqhyLeVUZg1WHnpeOYtPNhRwDOl4z8OSYiJkWSU,11529
 uipath_langchain/_cli/_runtime/_exception.py,sha256=USKkLYkG-dzjX3fEiMMOHnVUpiXJs_xF0OQXCCOvbYM,546
@@ -20,9 +20,8 @@ uipath_langchain/_cli/_templates/main.py.template,sha256=GpSblGH2hwS9ibqQmX2iB2n
 uipath_langchain/_cli/_utils/_graph.py,sha256=nMJWy8FmaD9rqPUY2lHc5uVpUzbXD1RO12uJnhe0kdo,6803
 uipath_langchain/_resources/AGENTS.md,sha256=5VmIfaQ6H91VxInnxFmJklURXeWIIQpGQTYBEmvvoVA,1060
 uipath_langchain/_resources/REQUIRED_STRUCTURE.md,sha256=BRmWWFtM0qNXj5uumALVxq9h6pifJDGh5NzuyctuH1Q,2569
-uipath_langchain/_tracing/__init__.py,sha256=UqrLc_WimpzKY82M0LJsgJ-HFQUQFjOmOlD1XQ8V-R4,181
+uipath_langchain/_tracing/__init__.py,sha256=C2dRvQ2ynxCmyICgE-rJHimWKEcFRME_o9gfX84Mb3Y,123
 uipath_langchain/_tracing/_instrument_traceable.py,sha256=8f9FyAKWE6kH1N8ErbpwqZHAzNjGwbLjQn7jdX5yAgA,4343
-uipath_langchain/_tracing/_oteladapter.py,sha256=PD0gsC39ZNvrm0gsfnt1ti6DEy56sBA9sIoxaAbHFFM,8887
 uipath_langchain/_tracing/_utils.py,sha256=r_fiSk3HDDAcePY_UbbEYiSbNqzn5gFeMPYBDvGrFx0,902
 uipath_langchain/_utils/__init__.py,sha256=-w-4TD9ZnJDCpj4VIPXhJciukrmDJJbmnOFnhAkAaEU,81
 uipath_langchain/_utils/_request_mixin.py,sha256=sYvvn3_fUJxtF893xFpVGwJx2YoEbw1m5gp_U_lWjR8,20092
@@ -38,8 +37,8 @@ uipath_langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3
 uipath_langchain/tools/preconfigured.py,sha256=SyvrLrM1kezZxVVytgScVO8nBfVYfFGobWjY7erzsYU,7490
 uipath_langchain/vectorstores/__init__.py,sha256=w8qs1P548ud1aIcVA_QhBgf_jZDrRMK5Lono78yA8cs,114
 uipath_langchain/vectorstores/context_grounding_vectorstore.py,sha256=TncIXG-YsUlO0R5ZYzWsM-Dj1SVCZbzmo2LraVxXelc,9559
-uipath_langchain-0.0.142.dist-info/METADATA,sha256=tvJ8KYbN0xmVw3LNm-y7LfazY0wBVPTCswXlNkulGmw,4276
-uipath_langchain-0.0.142.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath_langchain-0.0.142.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
-uipath_langchain-0.0.142.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
-uipath_langchain-0.0.142.dist-info/RECORD,,
+uipath_langchain-0.0.143.dist-info/METADATA,sha256=lh7djtB3vKXf2xB6b8BT9IksQGlun2fj2UiNFIver-E,4276
+uipath_langchain-0.0.143.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath_langchain-0.0.143.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
+uipath_langchain-0.0.143.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
+uipath_langchain-0.0.143.dist-info/RECORD,,
uipath_langchain/_tracing/_oteladapter.py (removed)
@@ -1,234 +0,0 @@
-import json
-import logging
-from typing import Any, Dict, List, Optional
-
-from opentelemetry.sdk.trace.export import SpanExportResult
-from uipath.tracing import LlmOpsHttpExporter
-
-logger = logging.getLogger(__name__)
-
-
-def _safe_parse_json(s: Any) -> Any:
-    """Safely parse a JSON string, returning the original if not a string or on error."""
-    if not isinstance(s, str):
-        return s
-    try:
-        return json.loads(s)
-    except (json.JSONDecodeError, TypeError):
-        return s
-
-
-def _get_llm_messages(attributes: Dict[str, Any], prefix: str) -> List[Dict[str, Any]]:
-    """Extracts and reconstructs LLM messages from flattened attributes."""
-    messages: dict[int, dict[str, Any]] = {}
-    message_prefix = f"{prefix}."
-
-    for key, value in attributes.items():
-        if key.startswith(message_prefix):
-            parts = key[len(message_prefix) :].split(".")
-            if len(parts) >= 2 and parts[0].isdigit():
-                index = int(parts[0])
-                if index not in messages:
-                    messages[index] = {}
-                current: Any = messages[index]
-
-                for i, part in enumerate(parts[1:-1]):
-                    key_part: str | int = part
-                    if part.isdigit() and (
-                        i + 2 < len(parts) and parts[i + 2].isdigit()
-                    ):
-                        key_part = int(part)
-
-                    if isinstance(current, dict):
-                        if key_part not in current:
-                            current[key_part] = {}
-                        current = current[key_part]
-                    elif isinstance(current, list) and isinstance(key_part, int):
-                        if key_part >= len(current):
-                            current.append({})
-                        current = current[key_part]
-
-                current[parts[-1]] = value
-
-    # Convert dict to list, ordered by index
-    return [messages[i] for i in sorted(messages.keys())]
-
-
-class LangChainExporter(LlmOpsHttpExporter):
-    # Mapping of old attribute names to new attribute names or (new name, function)
-    ATTRIBUTE_MAPPING: dict[str, str | tuple[str, Any]] = {
-        "input.value": ("input", _safe_parse_json),
-        "output.value": ("output", _safe_parse_json),
-        "llm.model_name": "model",
-    }
-
-    # Mapping of span types
-    SPAN_TYPE_MAPPING: dict[str, str] = {
-        "LLM": "completion",
-        "TOOL": "toolCall",
-        # Add more mappings as needed
-    }
-
-    class Status:
-        SUCCESS = 1
-        ERROR = 2
-        INTERRUPTED = 3
-
-    def __init__(self, *args: Any, **kwargs: Any) -> None:
-        super().__init__(*args, **kwargs)
-
-    def _map_llm_call_attributes(self, attributes: Dict[str, Any]) -> Dict[str, Any]:
-        """Maps attributes for LLM calls, handling flattened keys."""
-        result = attributes.copy()  # Keep original attributes including basic mappings
-
-        # Token Usage
-        token_keys = {
-            "llm.token_count.prompt": "promptTokens",
-            "llm.token_count.completion": "completionTokens",
-            "llm.token_count.total": "totalTokens",
-        }
-        usage = {
-            new_key: attributes.get(old_key)
-            for old_key, new_key in token_keys.items()
-            if old_key in attributes
-        }
-        if usage:
-            result["usage"] = usage
-
-        # Input/Output Messages
-        result["input"] = _get_llm_messages(attributes, "llm.input_messages")
-        output_messages = _get_llm_messages(attributes, "llm.output_messages")
-        result["output"] = output_messages
-
-        # Invocation Parameters
-        invocation_params = _safe_parse_json(
-            attributes.get("llm.invocation_parameters", "{}")
-        )
-        if isinstance(invocation_params, dict):
-            result["model"] = invocation_params.get("model", result.get("model"))
-            settings: dict[str, Any] = {}
-            if "max_tokens" in invocation_params:
-                settings["maxTokens"] = invocation_params["max_tokens"]
-            if "temperature" in invocation_params:
-                settings["temperature"] = invocation_params["temperature"]
-            if settings:
-                result["settings"] = settings
-
-        # Tool Calls
-        tool_calls: list[dict[str, Any]] = []
-        for msg in output_messages:
-            # Ensure msg is a dictionary before proceeding
-            if not isinstance(msg, dict):
-                continue
-            msg_tool_calls = msg.get("message", {}).get("tool_calls", [])
-
-            # Ensure msg_tool_calls is a list
-            if not isinstance(msg_tool_calls, list):
-                continue
-
-            for tc in msg_tool_calls:
-                if not isinstance(tc, dict):
-                    continue
-                tool_call_data = tc.get("tool_call", {})
-                if not isinstance(tool_call_data, dict):
-                    continue
-                tool_calls.append(
-                    {
-                        "id": tool_call_data.get("id"),
-                        "name": tool_call_data.get("function", {}).get("name"),
-                        "arguments": _safe_parse_json(
-                            tool_call_data.get("function", {}).get("arguments", "{}")
-                        ),
-                    }
-                )
-        if tool_calls:
-            result["toolCalls"] = tool_calls
-
-        return result
-
-    def _map_tool_call_attributes(self, attributes: Dict[str, Any]) -> Dict[str, Any]:
-        """Maps attributes for tool calls."""
-        result = attributes.copy()  # Keep original attributes
-
-        result["type"] = "toolCall"
-        result["callId"] = attributes.get("call_id") or attributes.get("id")
-        result["toolName"] = attributes.get("tool.name")
-        result["arguments"] = _safe_parse_json(
-            attributes.get("input", attributes.get("input.value", "{}"))
-        )
-        result["toolType"] = "Integration"
-        result["result"] = _safe_parse_json(
-            attributes.get("output", attributes.get("output.value"))
-        )
-        result["error"] = None
-
-        return result
-
-    def _determine_status(self, error: Optional[str]) -> int:
-        if error:
-            if error and error.startswith("GraphInterrupt("):
-                return self.Status.INTERRUPTED
-            return self.Status.ERROR
-        return self.Status.SUCCESS
-
-    def _process_span_attributes(self, span_data: Dict[str, Any]) -> Dict[str, Any]:
-        """Extracts, transforms, and maps attributes for a span."""
-        if "Attributes" not in span_data:
-            return span_data
-
-        attributes_val = span_data["Attributes"]
-        if isinstance(attributes_val, str):
-            try:
-                attributes: Dict[str, Any] = json.loads(attributes_val)
-            except json.JSONDecodeError as e:
-                logger.warning(f"Failed to parse attributes JSON: {e}")
-                return span_data
-        elif isinstance(attributes_val, dict):
-            attributes = attributes_val
-        else:
-            return span_data
-
-        # Determine SpanType
-        if "openinference.span.kind" in attributes:
-            span_type = attributes["openinference.span.kind"]
-            span_data["SpanType"] = self.SPAN_TYPE_MAPPING.get(span_type, span_type)
-
-        # Apply basic attribute mapping
-        for old_key, mapping in self.ATTRIBUTE_MAPPING.items():
-            if old_key in attributes:
-                if isinstance(mapping, tuple):
-                    new_key, func = mapping
-                    attributes[new_key] = func(attributes[old_key])
-                else:
-                    new_key = mapping
-                    attributes[new_key] = attributes[old_key]
-
-        # Apply detailed mapping based on SpanType
-        span_type = span_data.get("SpanType")
-        if span_type == "completion":
-            processed_attributes = self._map_llm_call_attributes(attributes)
-        elif span_type == "toolCall":
-            processed_attributes = self._map_tool_call_attributes(attributes)
-        else:
-            processed_attributes = attributes.copy()
-
-        span_data["Attributes"] = json.dumps(processed_attributes)
-
-        # Determine status based on error information
-        error = attributes.get("error") or attributes.get("exception.message")
-        status = self._determine_status(error)
-        span_data["Status"] = status
-
-        return span_data
-
-    def _send_with_retries(
-        self, url: str, payload: List[Dict[str, Any]], max_retries: int = 4
-    ) -> SpanExportResult:
-        # Transform attributes in each span's payload before sending
-        transformed_payload = [self._process_span_attributes(span) for span in payload]
-
-        return super()._send_with_retries(
-            url=url,
-            payload=transformed_payload,
-            max_retries=max_retries,
-        )
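
For context on what was dropped: the removed _get_llm_messages helper rebuilt nested message objects from the flattened span attributes emitted by the OpenInference LangChain instrumentation. An illustrative example of that transformation follows; the attribute values are made up, and only the key layout mirrors the removed code:

# Flattened span attributes, keyed as "<prefix>.<index>.message.<field>":
attributes = {
    "llm.input_messages.0.message.role": "user",
    "llm.input_messages.0.message.content": "What is the weather?",
    "llm.input_messages.1.message.role": "assistant",
    "llm.input_messages.1.message.content": "Let me check.",
}

# _get_llm_messages(attributes, "llm.input_messages") grouped keys by index
# and nested the remaining key parts, yielding:
# [
#     {"message": {"role": "user", "content": "What is the weather?"}},
#     {"message": {"role": "assistant", "content": "Let me check."}},
# ]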