uipath-langchain 0.0.142__py3-none-any.whl → 0.0.144__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of uipath-langchain might be problematic.

Files changed (35):
  1. uipath_langchain/_cli/_runtime/_exception.py +31 -5
  2. uipath_langchain/_cli/_runtime/_graph_resolver.py +10 -12
  3. uipath_langchain/_cli/_runtime/_input.py +2 -2
  4. uipath_langchain/_cli/_runtime/_output.py +5 -8
  5. uipath_langchain/_cli/_runtime/_runtime.py +12 -6
  6. uipath_langchain/_cli/cli_debug.py +5 -2
  7. uipath_langchain/_cli/cli_eval.py +5 -3
  8. uipath_langchain/_cli/cli_run.py +15 -3
  9. uipath_langchain/_tracing/__init__.py +3 -2
  10. uipath_langchain/_utils/_request_mixin.py +5 -2
  11. uipath_langchain/agent/react/__init__.py +12 -0
  12. uipath_langchain/agent/react/agent.py +76 -0
  13. uipath_langchain/agent/react/constants.py +2 -0
  14. uipath_langchain/agent/react/exceptions.py +11 -0
  15. uipath_langchain/agent/react/init_node.py +16 -0
  16. uipath_langchain/agent/react/llm_node.py +44 -0
  17. uipath_langchain/agent/react/router.py +97 -0
  18. uipath_langchain/agent/react/state.py +18 -0
  19. uipath_langchain/agent/react/terminate_node.py +53 -0
  20. uipath_langchain/agent/react/tools/__init__.py +7 -0
  21. uipath_langchain/agent/react/tools/tools.py +50 -0
  22. uipath_langchain/agent/react/utils.py +39 -0
  23. uipath_langchain/agent/tools/__init__.py +8 -0
  24. uipath_langchain/agent/tools/context_tool.py +42 -0
  25. uipath_langchain/agent/tools/process_tool.py +51 -0
  26. uipath_langchain/agent/tools/tool_factory.py +39 -0
  27. uipath_langchain/agent/tools/tool_node.py +22 -0
  28. uipath_langchain/agent/tools/utils.py +11 -0
  29. {uipath_langchain-0.0.142.dist-info → uipath_langchain-0.0.144.dist-info}/METADATA +2 -2
  30. uipath_langchain-0.0.144.dist-info/RECORD +62 -0
  31. uipath_langchain/_tracing/_oteladapter.py +0 -234
  32. uipath_langchain-0.0.142.dist-info/RECORD +0 -45
  33. {uipath_langchain-0.0.142.dist-info → uipath_langchain-0.0.144.dist-info}/WHEEL +0 -0
  34. {uipath_langchain-0.0.142.dist-info → uipath_langchain-0.0.144.dist-info}/entry_points.txt +0 -0
  35. {uipath_langchain-0.0.142.dist-info → uipath_langchain-0.0.144.dist-info}/licenses/LICENSE +0 -0

uipath_langchain/agent/react/state.py
@@ -0,0 +1,18 @@
+ from __future__ import annotations
+
+ from enum import StrEnum
+
+ from langgraph.graph import MessagesState
+
+
+ class AgentGraphState(MessagesState):
+     """Agent Graph state for standard loop execution."""
+
+     pass
+
+
+ class AgentGraphNode(StrEnum):
+     INIT = "init"
+     AGENT = "agent"
+     TOOLS = "tools"
+     TERMINATE = "terminate"

uipath_langchain/agent/react/terminate_node.py
@@ -0,0 +1,53 @@
+ """Termination node for the Agent graph."""
+
+ from __future__ import annotations
+
+ from langchain_core.messages import AIMessage
+ from pydantic import BaseModel
+ from uipath._cli._runtime._contracts import UiPathErrorCode
+ from uipath.agent.react import END_EXECUTION_TOOL, RAISE_ERROR_TOOL
+
+ from .exceptions import (
+     AgentNodeRoutingException,
+     AgentTerminationException,
+ )
+ from .state import AgentGraphState
+
+
+ def create_terminate_node(
+     response_schema: type[BaseModel] | None = None,
+ ):
+     """Validates and returns end_execution args, or raises AgentTerminationException for raise_error."""
+
+     def terminate_node(state: AgentGraphState):
+         last_message = state["messages"][-1]
+         if not isinstance(last_message, AIMessage):
+             raise AgentNodeRoutingException(
+                 f"Expected last message to be AIMessage, got {type(last_message).__name__}"
+             )
+
+         for tool_call in last_message.tool_calls:
+             tool_name = tool_call["name"]
+
+             if tool_name == END_EXECUTION_TOOL.name:
+                 args = tool_call["args"]
+                 output_schema = response_schema or END_EXECUTION_TOOL.args_schema
+                 validated = output_schema.model_validate(args)
+                 return validated.model_dump()
+
+             if tool_name == RAISE_ERROR_TOOL.name:
+                 error_message = tool_call["args"].get(
+                     "message", "The LLM did not set the error message"
+                 )
+                 detail = tool_call["args"].get("details", "")
+                 raise AgentTerminationException(
+                     code=UiPathErrorCode.EXECUTION_ERROR,
+                     title=error_message,
+                     detail=detail,
+                 )
+
+         raise AgentNodeRoutingException(
+             "No control flow tool call found in terminate node. Unexpected state."
+         )
+
+     return terminate_node
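
For orientation (not part of the diff): a minimal sketch of how the factory above might be used. InvoiceResult is a hypothetical agent output schema.

from pydantic import BaseModel

class InvoiceResult(BaseModel):
    total: float
    approved: bool

# The returned node validates end_execution arguments against InvoiceResult
# and raises AgentTerminationException when the model calls raise_error.
terminate_node = create_terminate_node(response_schema=InvoiceResult)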

uipath_langchain/agent/react/tools/__init__.py
@@ -0,0 +1,7 @@
+ from .tools import (
+     create_flow_control_tools,
+ )
+
+ __all__ = [
+     "create_flow_control_tools",
+ ]

uipath_langchain/agent/react/tools/tools.py
@@ -0,0 +1,50 @@
+ """Control flow tools for agent execution."""
+
+ from typing import Any
+
+ from langchain_core.tools import BaseTool, StructuredTool
+ from pydantic import BaseModel
+ from uipath.agent.react import (
+     END_EXECUTION_TOOL,
+     RAISE_ERROR_TOOL,
+ )
+
+
+ def create_end_execution_tool(
+     agent_output_schema: type[BaseModel] | None = None,
+ ) -> StructuredTool:
+     """Never executed - routing intercepts and extracts args for successful termination."""
+     input_schema = agent_output_schema or END_EXECUTION_TOOL.args_schema
+
+     async def end_execution_fn(**kwargs: Any) -> dict[str, Any]:
+         return kwargs
+
+     return StructuredTool(
+         name=END_EXECUTION_TOOL.name,
+         description=END_EXECUTION_TOOL.description,
+         args_schema=input_schema,
+         coroutine=end_execution_fn,
+     )
+
+
+ def create_raise_error_tool() -> StructuredTool:
+     """Never executed - routing intercepts and raises AgentTerminationException."""
+
+     async def raise_error_fn(**kwargs: Any) -> dict[str, Any]:
+         return kwargs
+
+     return StructuredTool(
+         name=RAISE_ERROR_TOOL.name,
+         description=RAISE_ERROR_TOOL.description,
+         args_schema=RAISE_ERROR_TOOL.args_schema,
+         coroutine=raise_error_fn,
+     )
+
+
+ def create_flow_control_tools(
+     agent_output_schema: type[BaseModel] | None = None,
+ ) -> list[BaseTool]:
+     return [
+         create_end_execution_tool(agent_output_schema),
+         create_raise_error_tool(),
+     ]
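
A hedged usage sketch (not part of the diff): per the docstrings above, these tools are only presented to the model and never actually executed, so they are simply bound alongside the real tools. Here llm and domain_tools are placeholders.

flow_control_tools = create_flow_control_tools()  # default end_execution schema
llm_with_tools = llm.bind_tools([*domain_tools, *flow_control_tools])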

uipath_langchain/agent/react/utils.py
@@ -0,0 +1,39 @@
+ """ReAct Agent loop utilities."""
+
+ from typing import Any, Sequence
+
+ from jsonschema_pydantic import jsonschema_to_pydantic # type: ignore[import-untyped]
+ from langchain_core.messages import AIMessage, BaseMessage
+ from pydantic import BaseModel
+ from uipath.agent.react import END_EXECUTION_TOOL
+
+
+ def resolve_output_model(
+     output_schema: dict[str, Any] | None,
+ ) -> type[BaseModel]:
+     """Fallback to default end_execution tool schema when no agent output schema is provided."""
+     if output_schema:
+         return jsonschema_to_pydantic(output_schema)
+
+     return END_EXECUTION_TOOL.args_schema
+
+
+ def count_successive_completions(messages: Sequence[BaseMessage]) -> int:
+     """Count consecutive AIMessages without tool calls at end of message history."""
+     if not messages:
+         return 0
+
+     count = 0
+     for message in reversed(messages):
+         if not isinstance(message, AIMessage):
+             break
+
+         if message.tool_calls:
+             break
+
+         if not message.content:
+             break
+
+         count += 1
+
+     return count
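
A small illustrative example (not part of the diff) of count_successive_completions, which counts only trailing AIMessages that carry content and no tool calls:

from langchain_core.messages import AIMessage, HumanMessage
from uipath_langchain.agent.react.utils import count_successive_completions

history = [
    HumanMessage(content="hi"),
    AIMessage(content="first answer"),
    AIMessage(content="second answer"),
]
count_successive_completions(history)  # -> 2 (the two trailing completions)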

uipath_langchain/agent/tools/__init__.py
@@ -0,0 +1,8 @@
+ """Tool creation and management for LowCode agents."""
+
+ from .tool_factory import (
+     create_tools_from_resources,
+ )
+ from .tool_node import create_tool_node
+
+ __all__ = ["create_tools_from_resources", "create_tool_node"]

uipath_langchain/agent/tools/context_tool.py
@@ -0,0 +1,42 @@
+ """Context tool creation for semantic index retrieval."""
+
+ from __future__ import annotations
+
+ import json
+
+ from langchain_core.tools import StructuredTool
+ from pydantic import BaseModel, Field
+ from uipath.agent.models.agent import AgentContextResourceConfig
+
+ from uipath_langchain.retrievers import ContextGroundingRetriever
+
+ from .utils import sanitize_tool_name
+
+
+ def create_context_tool(resource: AgentContextResourceConfig) -> StructuredTool:
+     tool_name = sanitize_tool_name(resource.name)
+     retriever = ContextGroundingRetriever(
+         index_name=resource.index_name,
+         folder_path=resource.folder_path,
+         number_of_results=resource.settings.result_count,
+     )
+
+     async def context_tool_fn(query: str) -> str:
+         docs = await retriever.ainvoke(query)
+
+         if not docs:
+             return ""
+
+         return json.dumps([doc.model_dump() for doc in docs], indent=2)
+
+     class ContextInputSchemaModel(BaseModel):
+         query: str = Field(
+             ..., description="The query to search for in the knowledge base"
+         )
+
+     return StructuredTool(
+         name=tool_name,
+         description=resource.description,
+         args_schema=ContextInputSchemaModel,
+         coroutine=context_tool_fn,
+     )

uipath_langchain/agent/tools/process_tool.py
@@ -0,0 +1,51 @@
+ """Process tool creation for UiPath process execution."""
+
+ from __future__ import annotations
+
+ from typing import Any, Type
+
+ from jsonschema_pydantic import jsonschema_to_pydantic # type: ignore[import-untyped]
+ from langchain_core.tools import StructuredTool
+ from langgraph.types import interrupt
+ from pydantic import BaseModel
+ from uipath.agent.models.agent import AgentProcessToolResourceConfig
+ from uipath.models import InvokeProcess
+
+ from .utils import sanitize_tool_name
+
+
+ def create_process_tool(resource: AgentProcessToolResourceConfig) -> StructuredTool:
+     """Uses interrupt() to suspend graph execution until process completes (handled by runtime)."""
+     tool_name: str = sanitize_tool_name(resource.name)
+     process_name = resource.properties.process_name
+     folder_path = resource.properties.folder_path
+
+     input_model: Type[BaseModel] = jsonschema_to_pydantic(resource.input_schema)
+     output_model: Type[BaseModel] = jsonschema_to_pydantic(resource.output_schema)
+
+     async def process_tool_fn(**kwargs: Any):
+         try:
+             result = interrupt(
+                 InvokeProcess(
+                     name=process_name,
+                     input_arguments=kwargs,
+                     process_folder_path=folder_path,
+                     process_folder_key=None,
+                 )
+             )
+         except Exception:
+             raise
+
+         return result
+
+     class ProcessTool(StructuredTool):
+         """Process tool with OutputType for schema compatibility."""
+
+         OutputType: Type[BaseModel] = output_model
+
+     return ProcessTool(
+         name=tool_name,
+         description=resource.description,
+         args_schema=input_model,
+         coroutine=process_tool_fn,
+     )

uipath_langchain/agent/tools/tool_factory.py
@@ -0,0 +1,39 @@
+ """Factory functions for creating tools from agent resources."""
+
+ from __future__ import annotations
+
+ from langchain_core.tools import BaseTool, StructuredTool
+ from uipath.agent.models.agent import (
+     AgentContextResourceConfig,
+     AgentProcessToolResourceConfig,
+     BaseAgentResourceConfig,
+     LowCodeAgentDefinition,
+ )
+
+ from .context_tool import create_context_tool
+ from .process_tool import create_process_tool
+
+
+ async def create_tools_from_resources(
+     agent: LowCodeAgentDefinition,
+ ) -> list[BaseTool]:
+     tools: list[BaseTool] = []
+
+     for resource in agent.resources:
+         tool = await _build_tool_for_resource(resource)
+         if tool is not None:
+             tools.append(tool)
+
+     return tools
+
+
+ async def _build_tool_for_resource(
+     resource: BaseAgentResourceConfig,
+ ) -> StructuredTool | None:
+     if isinstance(resource, AgentProcessToolResourceConfig):
+         return create_process_tool(resource)
+
+     elif isinstance(resource, AgentContextResourceConfig):
+         return create_context_tool(resource)
+
+     return None
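
A hedged sketch (not part of the diff): resources of unrecognized types are skipped, so the result contains only process and context tools. Here agent is assumed to be an already-loaded LowCodeAgentDefinition.

tools = await create_tools_from_resources(agent)
tools_by_name = {tool.name: tool for tool in tools}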

uipath_langchain/agent/tools/tool_node.py
@@ -0,0 +1,22 @@
+ """Tool node factory wiring directly to LangGraph's ToolNode."""
+
+ from collections.abc import Sequence
+
+ from langchain_core.tools import BaseTool
+ from langgraph.prebuilt import ToolNode
+
+
+ def create_tool_node(tools: Sequence[BaseTool]) -> dict[str, ToolNode]:
+     """Create individual ToolNode for each tool.
+
+     Args:
+         tools: Sequence of tools to create nodes for.
+
+     Returns:
+         Dict mapping tool.name -> ToolNode([tool]).
+         Each tool gets its own dedicated node for middleware composition.
+
+     Note:
+         handle_tool_errors=False delegates error handling to LangGraph's error boundary.
+     """
+     return {tool.name: ToolNode([tool], handle_tool_errors=False) for tool in tools}
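
A brief sketch (not part of the diff) of the resulting shape; add is a throwaway example tool:

from langchain_core.tools import tool
from uipath_langchain.agent.tools.tool_node import create_tool_node

@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

nodes = create_tool_node([add])
# nodes -> {"add": <ToolNode wrapping [add], handle_tool_errors=False>}
# Each entry can then be registered as its own node in the graph.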

uipath_langchain/agent/tools/utils.py
@@ -0,0 +1,11 @@
+ """Tool-related utility functions."""
+
+ import re
+
+
+ def sanitize_tool_name(name: str) -> str:
+     """Sanitize tool name for LLM compatibility (alphanumeric, underscore, hyphen only, max 64 chars)."""
+     trim_whitespaces = "_".join(name.split())
+     sanitized_tool_name = re.sub(r"[^a-zA-Z0-9_-]", "", trim_whitespaces)
+     sanitized_tool_name = sanitized_tool_name[:64]
+     return sanitized_tool_name
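
For reference (not part of the diff), how the helper behaves given the regex above, assuming the module is importable at this path:

from uipath_langchain.agent.tools.utils import sanitize_tool_name

# Whitespace runs become underscores, disallowed characters are dropped,
# and the result is truncated to 64 characters.
assert sanitize_tool_name("My Process (v2)!") == "My_Process_v2"
assert len(sanitize_tool_name("x" * 100)) == 64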

{uipath_langchain-0.0.142.dist-info → uipath_langchain-0.0.144.dist-info}/METADATA
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: uipath-langchain
- Version: 0.0.142
+ Version: 0.0.144
  Summary: UiPath Langchain
  Project-URL: Homepage, https://uipath.com
  Project-URL: Repository, https://github.com/UiPath/uipath-langchain-python
@@ -26,7 +26,7 @@ Requires-Dist: openai>=1.65.5
  Requires-Dist: openinference-instrumentation-langchain>=0.1.50
  Requires-Dist: pydantic-settings>=2.6.0
  Requires-Dist: python-dotenv>=1.0.1
- Requires-Dist: uipath<2.2.0,>=2.1.101
+ Requires-Dist: uipath<2.2.0,>=2.1.103
  Provides-Extra: langchain
  Description-Content-Type: text/markdown

uipath_langchain-0.0.144.dist-info/RECORD
@@ -0,0 +1,62 @@
+ uipath_langchain/__init__.py,sha256=VBrvQn7d3nuOdN7zEnV2_S-uhmkjgEIlXiFVeZxZakQ,80
+ uipath_langchain/middlewares.py,sha256=x3U_tmDIyMXPLzq6n-oNRAnpAF6pKa9wfkPYwE-oUfo,848
+ uipath_langchain/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ uipath_langchain/_cli/__init__.py,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtGZhfo,24
+ uipath_langchain/_cli/cli_debug.py,sha256=zaB-W3_29FsCqF-YZ3EsayyxC957tg4tOjdcdX8ew-M,3311
+ uipath_langchain/_cli/cli_dev.py,sha256=l3XFHrh-0OUFJq3zLMKuzedJAluGQBIZQTHP1KWOmpw,1725
+ uipath_langchain/_cli/cli_eval.py,sha256=yzxOz-JOMMl1fejZNVQYlBSo-yUIxArtfp2EW1Ow6j4,3753
+ uipath_langchain/_cli/cli_init.py,sha256=B-Ht1lz4HNlpYELZU7DLNhSrhGJbsaCdU9UMO2iHUgM,12654
+ uipath_langchain/_cli/cli_new.py,sha256=KKLxCzz7cDQ__rRr_a496IHWlSQXhmrBNgmKHnXAnTY,2336
+ uipath_langchain/_cli/cli_run.py,sha256=DIsAKsbQ8gTRz44q9ZV3jBjrbM8bhS6lEQ3dd4joDFU,3712
+ uipath_langchain/_cli/_runtime/_context.py,sha256=mjmGEogKiO8tUV878BgV9rFIeA9MCmEH6hgs5W_dm4g,328
+ uipath_langchain/_cli/_runtime/_conversation.py,sha256=ayghRqhyLeVUZg1WHnpeOYtPNhRwDOl4z8OSYiJkWSU,11529
+ uipath_langchain/_cli/_runtime/_exception.py,sha256=xHKeu8njByiMcObbggyZk0cXYXX5BjLLF9PtSJtB4_Q,1358
+ uipath_langchain/_cli/_runtime/_graph_resolver.py,sha256=c-JrsX7rx_CflDPfKhz9q-PgBrgI2IOBcYwiffwddh8,5457
+ uipath_langchain/_cli/_runtime/_input.py,sha256=HAJUxjNmOg9q7l_ebF1AzIKL5_ysXyjk1bWXHsjhEPI,5761
+ uipath_langchain/_cli/_runtime/_output.py,sha256=2VvdW4olv7Vd0c4grtTQazXxfBbcuocgSSP6V2P8uHE,4887
+ uipath_langchain/_cli/_runtime/_runtime.py,sha256=HFWU2h864Ifq0gK9-_Jy6sbg0CMxmFExSdSEiUArChE,18548
+ uipath_langchain/_cli/_templates/langgraph.json.template,sha256=eeh391Gta_hoRgaNaZ58nW1LNvCVXA7hlAH6l7Veous,107
+ uipath_langchain/_cli/_templates/main.py.template,sha256=GpSblGH2hwS9ibqQmX2iB2nsmOA5zDfEEF4ChLiMxbQ,875
+ uipath_langchain/_cli/_utils/_graph.py,sha256=nMJWy8FmaD9rqPUY2lHc5uVpUzbXD1RO12uJnhe0kdo,6803
+ uipath_langchain/_resources/AGENTS.md,sha256=5VmIfaQ6H91VxInnxFmJklURXeWIIQpGQTYBEmvvoVA,1060
+ uipath_langchain/_resources/REQUIRED_STRUCTURE.md,sha256=BRmWWFtM0qNXj5uumALVxq9h6pifJDGh5NzuyctuH1Q,2569
+ uipath_langchain/_tracing/__init__.py,sha256=C2dRvQ2ynxCmyICgE-rJHimWKEcFRME_o9gfX84Mb3Y,123
+ uipath_langchain/_tracing/_instrument_traceable.py,sha256=8f9FyAKWE6kH1N8ErbpwqZHAzNjGwbLjQn7jdX5yAgA,4343
+ uipath_langchain/_tracing/_utils.py,sha256=r_fiSk3HDDAcePY_UbbEYiSbNqzn5gFeMPYBDvGrFx0,902
+ uipath_langchain/_utils/__init__.py,sha256=-w-4TD9ZnJDCpj4VIPXhJciukrmDJJbmnOFnhAkAaEU,81
+ uipath_langchain/_utils/_request_mixin.py,sha256=_drxHTRpfyVn3g3ppKgn466EBaUWH83qyeGKLY41CGY,20142
+ uipath_langchain/_utils/_settings.py,sha256=2fExMQJ88YptfldmzMfZIpsx-m1gfMkeYGf5t6KIe0A,3084
+ uipath_langchain/_utils/_sleep_policy.py,sha256=e9pHdjmcCj4CVoFM1jMyZFelH11YatsgWfpyrfXzKBQ,1251
+ uipath_langchain/agent/react/__init__.py,sha256=rfVB6PQWUhPHff3J1BnPMBKBMaHfgEkII1gXwjiqUMY,272
+ uipath_langchain/agent/react/agent.py,sha256=cU9ZiXi7EfV1pMf9nZBO8LhjYUmRsncyBk9k1pEBRh8,2455
+ uipath_langchain/agent/react/constants.py,sha256=B2yqryh242DETslaRYacUPbVdpjvvApjsBira_qhQwk,61
+ uipath_langchain/agent/react/exceptions.py,sha256=b3lDhrIIHFljlLK3zXPznT7fYzfMRjSd8JfF4247tbI,226
+ uipath_langchain/agent/react/init_node.py,sha256=zfPKgxi_mWsX7nBcK6wpqcDjHx8Q61TSnXFcTPLUd28,389
+ uipath_langchain/agent/react/llm_node.py,sha256=jkbfzPNn6rNubgncPlPDQRNuk-sJbj08r95JfWxxWL8,1491
+ uipath_langchain/agent/react/router.py,sha256=Ttq5O1_8t-z7pQ9tGhiaMmd_Da7_TWULLcgOED7gw_A,3626
+ uipath_langchain/agent/react/state.py,sha256=EnkGXFlmMtJUy7BTZrbYlGBvvAZ70_HwKPW8n6uwjz0,330
+ uipath_langchain/agent/react/terminate_node.py,sha256=Uuc-0z4qcPjHB_qZlaEaM2mK1ymCuJJludS7LopyCZg,1898
+ uipath_langchain/agent/react/utils.py,sha256=0kZoEkGzddtTZSlGQcqbaPHH5MVtZegq0kBI5_vreGA,1060
+ uipath_langchain/agent/react/tools/__init__.py,sha256=LGfG8Dc32ffKdXQyMI2oYzhNnTs1wbzsddXz6eU-0MY,102
+ uipath_langchain/agent/react/tools/tools.py,sha256=vFBGnFrGocX__sotKisMJr2lxRRVqA0-uThzzhPADIw,1443
+ uipath_langchain/agent/tools/__init__.py,sha256=GqQZLoMxkujjsButdLruxgzmQLX9k0YPtBaBAULSV2o,222
+ uipath_langchain/agent/tools/context_tool.py,sha256=Xd2qvc52Ks2eFXF2n-i0d8QDnbHXls_V8jyyNXwdySI,1243
+ uipath_langchain/agent/tools/process_tool.py,sha256=3OPxkIAJw_haohDeyDwSo8CIz9300XSvqIz5yjItspk,1715
+ uipath_langchain/agent/tools/tool_factory.py,sha256=NfhUU7EWf-zkt4xglkzp8ReOYDvKmoOTiz-OBh3ACWs,1053
+ uipath_langchain/agent/tools/tool_node.py,sha256=TnXsjoShvhsoBuV5RoUVoJCc2zYPKSnJYSC9MGJoeOk,707
+ uipath_langchain/agent/tools/utils.py,sha256=DsFeZ7kDzFaZ0bGHQN6TlGMJ90wYr7P1Vo1rpHPHWws,401
+ uipath_langchain/chat/__init__.py,sha256=WDcvy91ixvZ3Mq7Ae94g5CjyQwXovDBnEv1NlD5SXBE,116
+ uipath_langchain/chat/models.py,sha256=PifcbDURqfttqVYKSnzdbOdbSiLiwHfQ6lWgVAtoLj8,16407
+ uipath_langchain/embeddings/__init__.py,sha256=QICtYB58ZyqFfDQrEaO8lTEgAU5NuEKlR7iIrS0OBtc,156
+ uipath_langchain/embeddings/embeddings.py,sha256=45gKyb6HVKigwE-0CXeZcAk33c0mteaEdPGa8hviqcw,4339
+ uipath_langchain/retrievers/__init__.py,sha256=rOn7PyyHgZ4pMnXWPkGqmuBmx8eGuo-Oyndo7Wm9IUU,108
+ uipath_langchain/retrievers/context_grounding_retriever.py,sha256=YLCIwy89LhLnNqcM0YJ5mZoeNyCs5UiKD3Wly8gnW1E,2239
+ uipath_langchain/tools/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ uipath_langchain/tools/preconfigured.py,sha256=SyvrLrM1kezZxVVytgScVO8nBfVYfFGobWjY7erzsYU,7490
+ uipath_langchain/vectorstores/__init__.py,sha256=w8qs1P548ud1aIcVA_QhBgf_jZDrRMK5Lono78yA8cs,114
+ uipath_langchain/vectorstores/context_grounding_vectorstore.py,sha256=TncIXG-YsUlO0R5ZYzWsM-Dj1SVCZbzmo2LraVxXelc,9559
+ uipath_langchain-0.0.144.dist-info/METADATA,sha256=fnn1iHs-_Q0lyubeoDFTb6c5LGZaWZbpxDnMytjjbRg,4276
+ uipath_langchain-0.0.144.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ uipath_langchain-0.0.144.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
+ uipath_langchain-0.0.144.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
+ uipath_langchain-0.0.144.dist-info/RECORD,,

uipath_langchain/_tracing/_oteladapter.py
@@ -1,234 +0,0 @@
- import json
- import logging
- from typing import Any, Dict, List, Optional
-
- from opentelemetry.sdk.trace.export import SpanExportResult
- from uipath.tracing import LlmOpsHttpExporter
-
- logger = logging.getLogger(__name__)
-
-
- def _safe_parse_json(s: Any) -> Any:
-     """Safely parse a JSON string, returning the original if not a string or on error."""
-     if not isinstance(s, str):
-         return s
-     try:
-         return json.loads(s)
-     except (json.JSONDecodeError, TypeError):
-         return s
-
-
- def _get_llm_messages(attributes: Dict[str, Any], prefix: str) -> List[Dict[str, Any]]:
-     """Extracts and reconstructs LLM messages from flattened attributes."""
-     messages: dict[int, dict[str, Any]] = {}
-     message_prefix = f"{prefix}."
-
-     for key, value in attributes.items():
-         if key.startswith(message_prefix):
-             parts = key[len(message_prefix) :].split(".")
-             if len(parts) >= 2 and parts[0].isdigit():
-                 index = int(parts[0])
-                 if index not in messages:
-                     messages[index] = {}
-                 current: Any = messages[index]
-
-                 for i, part in enumerate(parts[1:-1]):
-                     key_part: str | int = part
-                     if part.isdigit() and (
-                         i + 2 < len(parts) and parts[i + 2].isdigit()
-                     ):
-                         key_part = int(part)
-
-                     if isinstance(current, dict):
-                         if key_part not in current:
-                             current[key_part] = {}
-                         current = current[key_part]
-                     elif isinstance(current, list) and isinstance(key_part, int):
-                         if key_part >= len(current):
-                             current.append({})
-                         current = current[key_part]
-
-                 current[parts[-1]] = value
-
-     # Convert dict to list, ordered by index
-     return [messages[i] for i in sorted(messages.keys())]
-
-
- class LangChainExporter(LlmOpsHttpExporter):
-     # Mapping of old attribute names to new attribute names or (new name, function)
-     ATTRIBUTE_MAPPING: dict[str, str | tuple[str, Any]] = {
-         "input.value": ("input", _safe_parse_json),
-         "output.value": ("output", _safe_parse_json),
-         "llm.model_name": "model",
-     }
-
-     # Mapping of span types
-     SPAN_TYPE_MAPPING: dict[str, str] = {
-         "LLM": "completion",
-         "TOOL": "toolCall",
-         # Add more mappings as needed
-     }
-
-     class Status:
-         SUCCESS = 1
-         ERROR = 2
-         INTERRUPTED = 3
-
-     def __init__(self, *args: Any, **kwargs: Any) -> None:
-         super().__init__(*args, **kwargs)
-
-     def _map_llm_call_attributes(self, attributes: Dict[str, Any]) -> Dict[str, Any]:
-         """Maps attributes for LLM calls, handling flattened keys."""
-         result = attributes.copy() # Keep original attributes including basic mappings
-
-         # Token Usage
-         token_keys = {
-             "llm.token_count.prompt": "promptTokens",
-             "llm.token_count.completion": "completionTokens",
-             "llm.token_count.total": "totalTokens",
-         }
-         usage = {
-             new_key: attributes.get(old_key)
-             for old_key, new_key in token_keys.items()
-             if old_key in attributes
-         }
-         if usage:
-             result["usage"] = usage
-
-         # Input/Output Messages
-         result["input"] = _get_llm_messages(attributes, "llm.input_messages")
-         output_messages = _get_llm_messages(attributes, "llm.output_messages")
-         result["output"] = output_messages
-
-         # Invocation Parameters
-         invocation_params = _safe_parse_json(
-             attributes.get("llm.invocation_parameters", "{}")
-         )
-         if isinstance(invocation_params, dict):
-             result["model"] = invocation_params.get("model", result.get("model"))
-             settings: dict[str, Any] = {}
-             if "max_tokens" in invocation_params:
-                 settings["maxTokens"] = invocation_params["max_tokens"]
-             if "temperature" in invocation_params:
-                 settings["temperature"] = invocation_params["temperature"]
-             if settings:
-                 result["settings"] = settings
-
-         # Tool Calls
-         tool_calls: list[dict[str, Any]] = []
-         for msg in output_messages:
-             # Ensure msg is a dictionary before proceeding
-             if not isinstance(msg, dict):
-                 continue
-             msg_tool_calls = msg.get("message", {}).get("tool_calls", [])
-
-             # Ensure msg_tool_calls is a list
-             if not isinstance(msg_tool_calls, list):
-                 continue
-
-             for tc in msg_tool_calls:
-                 if not isinstance(tc, dict):
-                     continue
-                 tool_call_data = tc.get("tool_call", {})
-                 if not isinstance(tool_call_data, dict):
-                     continue
-                 tool_calls.append(
-                     {
-                         "id": tool_call_data.get("id"),
-                         "name": tool_call_data.get("function", {}).get("name"),
-                         "arguments": _safe_parse_json(
-                             tool_call_data.get("function", {}).get("arguments", "{}")
-                         ),
-                     }
-                 )
-         if tool_calls:
-             result["toolCalls"] = tool_calls
-
-         return result
-
-     def _map_tool_call_attributes(self, attributes: Dict[str, Any]) -> Dict[str, Any]:
-         """Maps attributes for tool calls."""
-         result = attributes.copy() # Keep original attributes
-
-         result["type"] = "toolCall"
-         result["callId"] = attributes.get("call_id") or attributes.get("id")
-         result["toolName"] = attributes.get("tool.name")
-         result["arguments"] = _safe_parse_json(
-             attributes.get("input", attributes.get("input.value", "{}"))
-         )
-         result["toolType"] = "Integration"
-         result["result"] = _safe_parse_json(
-             attributes.get("output", attributes.get("output.value"))
-         )
-         result["error"] = None
-
-         return result
-
-     def _determine_status(self, error: Optional[str]) -> int:
-         if error:
-             if error and error.startswith("GraphInterrupt("):
-                 return self.Status.INTERRUPTED
-             return self.Status.ERROR
-         return self.Status.SUCCESS
-
-     def _process_span_attributes(self, span_data: Dict[str, Any]) -> Dict[str, Any]:
-         """Extracts, transforms, and maps attributes for a span."""
-         if "Attributes" not in span_data:
-             return span_data
-
-         attributes_val = span_data["Attributes"]
-         if isinstance(attributes_val, str):
-             try:
-                 attributes: Dict[str, Any] = json.loads(attributes_val)
-             except json.JSONDecodeError as e:
-                 logger.warning(f"Failed to parse attributes JSON: {e}")
-                 return span_data
-         elif isinstance(attributes_val, dict):
-             attributes = attributes_val
-         else:
-             return span_data
-
-         # Determine SpanType
-         if "openinference.span.kind" in attributes:
-             span_type = attributes["openinference.span.kind"]
-             span_data["SpanType"] = self.SPAN_TYPE_MAPPING.get(span_type, span_type)
-
-         # Apply basic attribute mapping
-         for old_key, mapping in self.ATTRIBUTE_MAPPING.items():
-             if old_key in attributes:
-                 if isinstance(mapping, tuple):
-                     new_key, func = mapping
-                     attributes[new_key] = func(attributes[old_key])
-                 else:
-                     new_key = mapping
-                     attributes[new_key] = attributes[old_key]
-
-         # Apply detailed mapping based on SpanType
-         span_type = span_data.get("SpanType")
-         if span_type == "completion":
-             processed_attributes = self._map_llm_call_attributes(attributes)
-         elif span_type == "toolCall":
-             processed_attributes = self._map_tool_call_attributes(attributes)
-         else:
-             processed_attributes = attributes.copy()
-
-         span_data["Attributes"] = json.dumps(processed_attributes)
-
-         # Determine status based on error information
-         error = attributes.get("error") or attributes.get("exception.message")
-         status = self._determine_status(error)
-         span_data["Status"] = status
-
-         return span_data
-
-     def _send_with_retries(
-         self, url: str, payload: List[Dict[str, Any]], max_retries: int = 4
-     ) -> SpanExportResult:
-         # Transform attributes in each span's payload before sending
-         transformed_payload = [self._process_span_attributes(span) for span in payload]
-
-         return super()._send_with_retries(
-             url=url,
-             payload=transformed_payload,
-             max_retries=max_retries,
-         )
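
For context on the removed exporter (not part of the diff), a small example of the flattened openinference attribute layout that _get_llm_messages reconstructed:

attrs = {
    "llm.input_messages.0.message.role": "user",
    "llm.input_messages.0.message.content": "hello",
}
_get_llm_messages(attrs, "llm.input_messages")
# -> [{"message": {"role": "user", "content": "hello"}}]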