uipath-langchain 0.0.124__py3-none-any.whl → 0.0.126__py3-none-any.whl

This diff shows the contents of two publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between those versions as published.

Potentially problematic release.



@@ -0,0 +1,256 @@
+import uuid
+from datetime import datetime
+from typing import Optional
+
+from langchain_core.messages import (
+    AIMessage,
+    AIMessageChunk,
+    BaseMessage,
+    HumanMessage,
+    ToolMessage,
+)
+from uipath.agent.conversation import (
+    UiPathConversationContentPartChunkEvent,
+    UiPathConversationContentPartEndEvent,
+    UiPathConversationContentPartEvent,
+    UiPathConversationContentPartStartEvent,
+    UiPathConversationEvent,
+    UiPathConversationExchangeEvent,
+    UiPathConversationMessageEndEvent,
+    UiPathConversationMessageEvent,
+    UiPathConversationMessageStartEvent,
+    UiPathConversationToolCallEndEvent,
+    UiPathConversationToolCallEvent,
+    UiPathConversationToolCallStartEvent,
+    UiPathInlineValue,
+)
+
+
+def _new_id() -> str:
+    return str(uuid.uuid4())
+
+
+def _wrap_in_conversation_event(
+    msg_event: UiPathConversationMessageEvent,
+    exchange_id: Optional[str] = None,
+    conversation_id: Optional[str] = None,
+) -> UiPathConversationEvent:
+    """Helper to wrap a message event into a conversation-level event."""
+    return UiPathConversationEvent(
+        conversation_id=conversation_id or _new_id(),
+        exchange=UiPathConversationExchangeEvent(
+            exchange_id=exchange_id or _new_id(),
+            message=msg_event,
+        ),
+    )
+
+
+def _extract_text(content) -> str:
+    """Normalize LangGraph message.content to plain text."""
+    if isinstance(content, str):
+        return content
+    if isinstance(content, list):
+        return "".join(
+            part.get("text", "")
+            for part in content
+            if isinstance(part, dict) and part.get("type") == "text"
+        )
+    return str(content or "")
+
+
+def map_message(
+    message: BaseMessage,
+    exchange_id: Optional[str] = None,
+    conversation_id: Optional[str] = None,
+) -> Optional[UiPathConversationEvent]:
+    """Convert LangGraph BaseMessage (chunk or full) into a UiPathConversationEvent."""
+    message_id = getattr(message, "id", None) or _new_id()
+    timestamp = datetime.now().isoformat()
+
+    # --- Streaming AIMessageChunk ---
+    if isinstance(message, AIMessageChunk):
+        msg_event = UiPathConversationMessageEvent(
+            message_id=message.id or _new_id(),
+        )
+
+        if message.content == []:
+            msg_event.start = UiPathConversationMessageStartEvent(
+                role="assistant", timestamp=timestamp
+            )
+            msg_event.content_part = UiPathConversationContentPartEvent(
+                content_part_id=f"chunk-{message.id}-{0}",
+                start=UiPathConversationContentPartStartEvent(mime_type="text/plain"),
+            )
+
+        elif isinstance(message.content, list) and message.content:
+            for chunk in message.content:
+                if not isinstance(chunk, dict):
+                    continue
+                idx = chunk.get("index", 0)
+                ctype = chunk.get("type")
+                id = chunk.get("id", f"chunk-{message.id}-{idx}")
+
+                # Start of a tool call
+                if ctype == "tool_use":
+                    msg_event.tool_call = UiPathConversationToolCallEvent(
+                        tool_call_id=id,
+                        start=UiPathConversationToolCallStartEvent(
+                            tool_name=chunk.get("name") or "",
+                            arguments=UiPathInlineValue(inline=""),
+                            timestamp=timestamp,
+                        ),
+                    )
+
+                # JSON args streaming (content part for tool args)
+                elif ctype == "input_json_delta":
+                    text = chunk.get("partial_json", "")
+                    # first delta: emit content part start + first chunk
+                    if text == "":
+                        msg_event.content_part = UiPathConversationContentPartEvent(
+                            content_part_id=id,
+                            start=UiPathConversationContentPartStartEvent(
+                                mime_type="application/json"
+                            ),
+                        )
+                    else:
+                        msg_event.content_part = UiPathConversationContentPartEvent(
+                            content_part_id=id,
+                            chunk=UiPathConversationContentPartChunkEvent(
+                                data=text,
+                                content_part_sequence=idx,
+                            ),
+                        )
+
+                # Plain text from assistant
+                elif ctype == "text":
+                    text = chunk.get("text", "")
+                    msg_event.content_part = UiPathConversationContentPartEvent(
+                        content_part_id=id,
+                        chunk=UiPathConversationContentPartChunkEvent(
+                            data=text,
+                            content_part_sequence=idx,
+                        ),
+                    )
+
+        stop_reason = message.response_metadata.get("stop_reason")
+        if not message.content and stop_reason in ("tool_use", "end_turn"):
+            msg_event.end = UiPathConversationMessageEndEvent(timestamp=timestamp)
+
+        if (
+            msg_event.start
+            or msg_event.content_part
+            or msg_event.tool_call
+            or msg_event.end
+        ):
+            return _wrap_in_conversation_event(msg_event, exchange_id, conversation_id)
+
+        return None
+
+    text_content = _extract_text(message.content)
+
+    # --- HumanMessage ---
+    if isinstance(message, HumanMessage):
+        return _wrap_in_conversation_event(
+            UiPathConversationMessageEvent(
+                message_id=message_id,
+                start=UiPathConversationMessageStartEvent(
+                    role="user", timestamp=timestamp
+                ),
+                content_part=UiPathConversationContentPartEvent(
+                    content_part_id=f"cp-{message_id}",
+                    start=UiPathConversationContentPartStartEvent(
+                        mime_type="text/plain"
+                    ),
+                    chunk=UiPathConversationContentPartChunkEvent(data=text_content),
+                    end=UiPathConversationContentPartEndEvent(),
+                ),
+                end=UiPathConversationMessageEndEvent(),
+            ),
+            exchange_id,
+            conversation_id,
+        )
+
+    # --- AIMessage ---
+    if isinstance(message, AIMessage):
+        # Extract first tool call if present
+        tool_calls = getattr(message, "tool_calls", []) or []
+        first_tc = tool_calls[0] if tool_calls else None
+
+        return _wrap_in_conversation_event(
+            UiPathConversationMessageEvent(
+                message_id=message_id,
+                start=UiPathConversationMessageStartEvent(
+                    role="assistant", timestamp=timestamp
+                ),
+                content_part=(
+                    UiPathConversationContentPartEvent(
+                        content_part_id=f"cp-{message_id}",
+                        start=UiPathConversationContentPartStartEvent(
+                            mime_type="text/plain"
+                        ),
+                        chunk=UiPathConversationContentPartChunkEvent(
+                            data=text_content
+                        ),
+                        end=UiPathConversationContentPartEndEvent(),
+                    )
+                    if text_content
+                    else None
+                ),
+                tool_call=(
+                    UiPathConversationToolCallEvent(
+                        tool_call_id=first_tc.get("id") or _new_id(),
+                        start=UiPathConversationToolCallStartEvent(
+                            tool_name=first_tc.get("name"),
+                            arguments=UiPathInlineValue(
+                                inline=str(first_tc.get("args", ""))
+                            ),
+                            timestamp=timestamp,
+                        ),
+                    )
+                    if first_tc
+                    else None
+                ),
+                end=UiPathConversationMessageEndEvent(),
+            ),
+            exchange_id,
+            conversation_id,
+        )
+
+    # --- ToolMessage ---
+    if isinstance(message, ToolMessage):
+        return _wrap_in_conversation_event(
+            UiPathConversationMessageEvent(
+                message_id=message_id,
+                tool_call=UiPathConversationToolCallEvent(
+                    tool_call_id=message.tool_call_id,
+                    start=UiPathConversationToolCallStartEvent(
+                        tool_name=message.name or "",
+                        arguments=UiPathInlineValue(inline=""),
+                        timestamp=timestamp,
+                    ),
+                    end=UiPathConversationToolCallEndEvent(
+                        timestamp=timestamp,
+                        result=UiPathInlineValue(inline=message.content),
+                    ),
+                ),
+            ),
+            exchange_id,
+            conversation_id,
+        )
+
+    # --- Fallback ---
+    return _wrap_in_conversation_event(
+        UiPathConversationMessageEvent(
+            message_id=message_id,
+            start=UiPathConversationMessageStartEvent(
+                role="assistant", timestamp=timestamp
+            ),
+            content_part=UiPathConversationContentPartEvent(
+                content_part_id=f"cp-{message_id}",
+                chunk=UiPathConversationContentPartChunkEvent(data=text_content),
+            ),
+            end=UiPathConversationMessageEndEvent(),
+        ),
+        exchange_id,
+        conversation_id,
+    )
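
The new _conversation.py module exposes map_message, which converts LangChain messages into UiPath conversation events. Below is a minimal usage sketch, not shipped in the package: the ids and message contents are illustrative placeholders, and the import path follows the module location recorded in this wheel.

# Hypothetical driver for map_message; ids and contents are placeholders.
from langchain_core.messages import AIMessage, HumanMessage

from uipath_langchain._cli._runtime._conversation import map_message

CONVERSATION_ID = "conv-123"  # placeholder; the runtime takes this from the execution context
EXCHANGE_ID = "exch-456"      # placeholder

messages = [
    HumanMessage(content="What is the status of order 42?", id="msg-1"),
    AIMessage(
        content="Let me look that up.",
        id="msg-2",
        tool_calls=[{"name": "get_order_status", "args": {"order_id": 42}, "id": "tc-1"}],
    ),
]

for msg in messages:
    event = map_message(msg, exchange_id=EXCHANGE_ID, conversation_id=CONVERSATION_ID)
    if event:
        # Each UiPathConversationEvent wraps an exchange that carries the message event.
        print(event.exchange.message.message_id)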
@@ -20,6 +20,7 @@ from ..._utils import _instrument_traceable_attributes
 from ...tracers import AsyncUiPathTracer
 from .._utils._graph import LangGraphConfig
 from ._context import LangGraphRuntimeContext
+from ._conversation import map_message
 from ._exception import LangGraphRuntimeError
 from ._input import LangGraphInputProcessor
 from ._output import LangGraphOutputProcessor
@@ -57,11 +58,6 @@ class LangGraphRuntime(UiPathBaseRuntime):
         tracer = None

         try:
-            if self.context.resume is False and self.context.job_id is None:
-                # Delete the previous graph state file at debug time
-                if os.path.exists(self.state_file_path):
-                    os.remove(self.state_file_path)
-
             async with AsyncSqliteSaver.from_conn_string(
                 self.state_file_path
             ) as memory:
@@ -86,9 +82,11 @@ class LangGraphRuntime(UiPathBaseRuntime):

                 graph_config: RunnableConfig = {
                     "configurable": {
-                        "thread_id": self.context.job_id
-                        if self.context.job_id
-                        else "default"
+                        "thread_id": (
+                            self.context.execution_id
+                            or self.context.job_id
+                            or "default"
+                        )
                     },
                     "callbacks": callbacks,
                 }
@@ -101,8 +99,27 @@ class LangGraphRuntime(UiPathBaseRuntime):
                 if max_concurrency is not None:
                     graph_config["max_concurrency"] = int(max_concurrency)

+                if self.context.chat_handler:
+                    async for stream_chunk in graph.astream(
+                        processed_input,
+                        graph_config,
+                        stream_mode="messages",
+                        subgraphs=True,
+                    ):
+                        if not isinstance(stream_chunk, tuple) or len(stream_chunk) < 2:
+                            continue
+
+                        _, (message, _) = stream_chunk
+                        event = map_message(
+                            message=message,
+                            conversation_id=self.context.execution_id,
+                            exchange_id=self.context.execution_id,
+                        )
+                        if event:
+                            self.context.chat_handler.on_event(event)
+
                 # Stream the output at debug time
-                if self.is_debug_run():
+                elif self.is_debug_run():
                     # Get final chunk while streaming
                     final_chunk = None
                     async for stream_chunk in graph.astream(
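
The chat-handler branch added above relies on the shape of LangGraph's message streaming: with subgraphs=True each item yielded by graph.astream is a (namespace, payload) tuple, and with stream_mode="messages" the payload is a (message_chunk, metadata) pair, which is why the loop destructures _, (message, _). Here is a standalone restatement of that loop as a sketch, assuming the compiled graph, inputs, config, and chat handler are supplied by the caller.

# Hypothetical helper mirroring the runtime loop above; graph, inputs, config,
# and chat_handler are assumed to be provided by the caller.
from typing import Any, Optional

from uipath_langchain._cli._runtime._conversation import map_message


async def forward_messages(
    graph: Any,
    inputs: Any,
    config: dict,
    chat_handler: Any,
    execution_id: Optional[str] = None,
) -> None:
    async for stream_chunk in graph.astream(
        inputs, config, stream_mode="messages", subgraphs=True
    ):
        # Each item is (namespace, (message_chunk, metadata)) when subgraphs=True.
        if not isinstance(stream_chunk, tuple) or len(stream_chunk) < 2:
            continue
        _, (message, _) = stream_chunk
        event = map_message(
            message=message,
            conversation_id=execution_id,
            exchange_id=execution_id,
        )
        if event:
            chat_handler.on_event(event)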
@@ -1,4 +1,5 @@
 import asyncio
+import os
 from os import environ as env
 from typing import Optional

@@ -40,7 +41,8 @@ def langgraph_run_middleware(
        context.langgraph_config = config
        context.debug = kwargs.get("debug", False)
        context.logs_min_level = env.get("LOG_LEVEL", "INFO")
-        context.job_id = env.get("UIPATH_JOB_KEY")
+        context.job_id = env.get("UIPATH_JOB_KEY", None)
+        context.execution_id = env.get("UIPATH_JOB_KEY", None)
        context.trace_id = env.get("UIPATH_TRACE_ID")
        context.is_eval_run = kwargs.get("is_eval_run", False)
        context.tracing_enabled = tracing
@@ -64,6 +66,10 @@ def langgraph_run_middleware(
         env["UIPATH_REQUESTING_FEATURE"] = "langgraph-agent"

         async with LangGraphRuntime.from_context(context) as runtime:
+            if context.resume is False and context.job_id is None:
+                # Delete the previous graph state file at debug time
+                if os.path.exists(runtime.state_file_path):
+                    os.remove(runtime.state_file_path)
             await runtime.execute()

     asyncio.run(execute())
@@ -9,7 +9,7 @@ import httpx
 import openai
 from langchain_core.embeddings import Embeddings
 from langchain_core.language_models.chat_models import _cleanup_llm_representation
-from pydantic import BaseModel, Field, SecretStr
+from pydantic import BaseModel, ConfigDict, Field, SecretStr
 from tenacity import (
     AsyncRetrying,
     Retrying,
@@ -37,8 +37,7 @@ def get_from_uipath_url():


 class UiPathRequestMixin(BaseModel):
-    class Config:
-        arbitrary_types_allowed = True
+    model_config = ConfigDict(arbitrary_types_allowed=True)

     default_headers: Optional[Mapping[str, str]] = {
         "X-UiPath-Streaming-Enabled": "false",
@@ -0,0 +1,191 @@
+from enum import Enum
+from typing import Annotated, Any, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, ConfigDict, Field
+
+
+class AgentMessageRole(str, Enum):
+    """Enum for message roles"""
+
+    SYSTEM = "System"
+    USER = "User"
+
+
+class AgentMessage(BaseModel):
+    """Message model for agent conversations"""
+
+    role: AgentMessageRole
+    content: str
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentSettings(BaseModel):
+    """Settings for agent configuration"""
+
+    engine: str = Field(..., description="Engine type, e.g., 'basic-v1'")
+    model: str = Field(..., description="LLM model identifier")
+    max_tokens: int = Field(
+        ..., alias="maxTokens", description="Maximum number of tokens"
+    )
+    temperature: float = Field(..., description="Temperature for response generation")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentResourceType(str, Enum):
+    """Enum for resource types"""
+
+    TOOL = "tool"
+    CONTEXT = "context"
+    ESCALATION = "escalation"
+
+
+class AgentBaseResourceConfig(BaseModel):
+    """Base resource model with common properties"""
+
+    name: str
+    description: str
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentUnknownResourceConfig(AgentBaseResourceConfig):
+    """Fallback for unknown or future resource types"""
+
+    resource_type: str = Field(alias="$resourceType")
+
+    model_config = ConfigDict(extra="allow")
+
+
+class AgentToolSettings(BaseModel):
+    """Settings for tool configuration"""
+
+    max_attempts: int = Field(0, alias="maxAttempts")
+    retry_delay: int = Field(0, alias="retryDelay")
+    timeout: int = Field(0)
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentToolProperties(BaseModel):
+    """Properties specific to tool configuration"""
+
+    folder_path: Optional[str] = Field(None, alias="folderPath")
+    process_name: Optional[str] = Field(None, alias="processName")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentToolResourceConfig(AgentBaseResourceConfig):
+    """Tool resource with tool-specific properties"""
+
+    resource_type: Literal[AgentResourceType.TOOL] = Field(alias="$resourceType")
+    type: str = Field(..., description="Tool type")
+    arguments: Dict[str, Any] = Field(
+        default_factory=dict, description="Tool arguments"
+    )
+    input_schema: Dict[str, Any] = Field(
+        ..., alias="inputSchema", description="Input schema for the tool"
+    )
+    output_schema: Dict[str, Any] = Field(
+        ..., alias="outputSchema", description="Output schema for the tool"
+    )
+    properties: AgentToolProperties = Field(..., description="Tool-specific properties")
+    settings: AgentToolSettings = Field(
+        default_factory=AgentToolSettings, description="Tool settings"
+    )
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentContextSettings(BaseModel):
+    """Settings for context configuration"""
+
+    result_count: int = Field(alias="resultCount")
+    retrieval_mode: Literal["Semantic", "Structured"] = Field(alias="retrievalMode")
+    threshold: float = Field(default=0)
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentContextResourceConfig(AgentBaseResourceConfig):
+    """Context resource with context-specific properties"""
+
+    resource_type: Literal[AgentResourceType.CONTEXT] = Field(alias="$resourceType")
+    folder_path: str = Field(alias="folderPath")
+    index_name: str = Field(alias="indexName")
+    settings: AgentContextSettings = Field(..., description="Context settings")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+class AgentEscalationResourceConfig(AgentBaseResourceConfig):
+    """Escalation resource with escalation-specific properties"""
+
+    resource_type: Literal[AgentResourceType.ESCALATION] = Field(alias="$resourceType")
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
+
+
+# Discriminated union for known types
+KnownAgentResourceConfig = Annotated[
+    Union[
+        AgentToolResourceConfig,
+        AgentContextResourceConfig,
+        AgentEscalationResourceConfig,
+    ],
+    Field(discriminator="resource_type"),
+]
+
+# Final union includes unknowns as a catch-all
+AgentResourceConfig = Union[
+    KnownAgentResourceConfig,
+    AgentUnknownResourceConfig,
+]
+
+
+class AgentConfig(BaseModel):
+    """Main agent model"""
+
+    id: str = Field(..., description="Agent id or project name")
+    name: str = Field(..., description="Agent name or project name")
+    input_schema: Dict[str, Any] = Field(
+        ..., alias="inputSchema", description="JSON schema for input arguments"
+    )
+    output_schema: Dict[str, Any] = Field(
+        ..., alias="outputSchema", description="JSON schema for output arguments"
+    )
+    messages: List[AgentMessage] = Field(
+        ..., description="List of system and user messages"
+    )
+    features: List[Any] = Field(
+        default_factory=list, description="Currently empty feature list"
+    )
+    version: str = Field("1.0.0", description="Agent version")
+    settings: AgentSettings = Field(..., description="Agent settings configuration")
+    resources: List[AgentResourceConfig] = Field(
+        ..., description="List of tools, context, and escalation resources"
+    )
+
+    model_config = ConfigDict(
+        validate_by_name=True, validate_by_alias=True, extra="allow"
+    )
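
The new builder/agent_config.py models are alias-aware Pydantic models with a discriminated union over $resourceType. The sketch below shows how a raw agent definition might be validated against them; the payload values are invented for illustration and are not taken from a real project.

# Hypothetical validation of a minimal agent definition; all values are placeholders.
from uipath_langchain.builder.agent_config import AgentConfig

raw_config = {
    "id": "invoice-agent",
    "name": "Invoice Agent",
    "inputSchema": {"type": "object", "properties": {"invoiceId": {"type": "string"}}},
    "outputSchema": {"type": "object", "properties": {"status": {"type": "string"}}},
    "messages": [
        {"role": "System", "content": "You are an invoice processing agent."},
        {"role": "User", "content": "Process invoice {{invoiceId}}."},
    ],
    "settings": {
        "engine": "basic-v1",
        "model": "gpt-4o",
        "maxTokens": 4096,
        "temperature": 0.0,
    },
    "resources": [
        {
            "$resourceType": "tool",
            "name": "GetInvoice",
            "description": "Fetch an invoice by id",
            "type": "process",
            "inputSchema": {"type": "object"},
            "outputSchema": {"type": "object"},
            "properties": {"folderPath": "Shared", "processName": "Get.Invoice"},
        }
    ],
}

agent = AgentConfig.model_validate(raw_config)
print(agent.settings.max_tokens)          # populated via the maxTokens alias
print(type(agent.resources[0]).__name__)  # expected: AgentToolResourceConfig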
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: uipath-langchain
-Version: 0.0.124
+Version: 0.0.126
 Summary: UiPath Langchain
 Project-URL: Homepage, https://uipath.com
 Project-URL: Repository, https://github.com/UiPath/uipath-langchain-python
@@ -25,7 +25,7 @@ Requires-Dist: openai>=1.65.5
 Requires-Dist: openinference-instrumentation-langchain>=0.1.50
 Requires-Dist: pydantic-settings>=2.6.0
 Requires-Dist: python-dotenv>=1.0.1
-Requires-Dist: uipath<2.2.0,>=2.1.30
+Requires-Dist: uipath<2.2.0,>=2.1.38
 Provides-Extra: langchain
 Description-Content-Type: text/markdown

@@ -4,19 +4,21 @@ uipath_langchain/_cli/__init__.py,sha256=juqd9PbXs4yg45zMJ7BHAOPQjb7sgEbWE9InBtG
 uipath_langchain/_cli/cli_dev.py,sha256=VlI8qgCw-63I97tp_9lbXs-CVcNSjpd2sC13YNZAIuU,1401
 uipath_langchain/_cli/cli_init.py,sha256=xhxJ8tuMSrVUNHvltgyPpOrvgMA-wq9shHeYYwvHILs,8199
 uipath_langchain/_cli/cli_new.py,sha256=dL8-Rri6u67ZZdbb4nT38A5xD_Q3fVnG0UK9VSeKaqg,2563
-uipath_langchain/_cli/cli_run.py,sha256=Jr-nxzBaSPQoVrdV1nQBSDVFElFV3N0a0uNDDrdSc8I,3375
+uipath_langchain/_cli/cli_run.py,sha256=R-cUi3lO3Qd4ysTXD7PW4sa1RsB427v_Y6xUQxWijfQ,3725
 uipath_langchain/_cli/_runtime/_context.py,sha256=yyzYJDmk2fkH4T5gm4cLGRyXtjLESrpzHBT9euqluTA,817
+uipath_langchain/_cli/_runtime/_conversation.py,sha256=qobBjb3PEDRQVhrcy8vUXXuzBneELqwphDAU7xEUOqA,9683
 uipath_langchain/_cli/_runtime/_exception.py,sha256=USKkLYkG-dzjX3fEiMMOHnVUpiXJs_xF0OQXCCOvbYM,546
 uipath_langchain/_cli/_runtime/_input.py,sha256=vZ8vfVxvPSaPWmIPghvNx1VRKzbalHsKUMBPiKDvJWM,5492
 uipath_langchain/_cli/_runtime/_output.py,sha256=yJOZPWv2FRUJWv1NRs9JmpB4QMTDXu8jrxoaKrfJvzw,9078
-uipath_langchain/_cli/_runtime/_runtime.py,sha256=E4aEX4eTjK2XymdKYD1fXNz668S4U2CwEmfL7HSNxjo,14692
+uipath_langchain/_cli/_runtime/_runtime.py,sha256=9X_8YEny238V1sTb4cjkpd6J69DYQWo6eYVH9kA9gEQ,15383
 uipath_langchain/_cli/_templates/langgraph.json.template,sha256=eeh391Gta_hoRgaNaZ58nW1LNvCVXA7hlAH6l7Veous,107
 uipath_langchain/_cli/_templates/main.py.template,sha256=nMJQIYPlRk90iANfNVpkJ2EQX20Dxsyq92-BucEz_UM,1189
 uipath_langchain/_cli/_utils/_graph.py,sha256=JPShHNl0UQvl4AdjLIqLpNt_JAjpWH9WWF22Gs47Xew,7445
 uipath_langchain/_utils/__init__.py,sha256=WoY66enCygRXTh6v5B1UrRcFCnQYuPJ8oqDkwomXzLc,194
-uipath_langchain/_utils/_request_mixin.py,sha256=Tr57358_dwb7SQ1OU75XjdFVWEIr05-QJifeJfHHZgc,19680
+uipath_langchain/_utils/_request_mixin.py,sha256=ddKFs_0mjoFCmvPTiOTPJh1IIqYUo5CUka-B7zAZphE,19695
 uipath_langchain/_utils/_settings.py,sha256=2fExMQJ88YptfldmzMfZIpsx-m1gfMkeYGf5t6KIe0A,3084
 uipath_langchain/_utils/_sleep_policy.py,sha256=e9pHdjmcCj4CVoFM1jMyZFelH11YatsgWfpyrfXzKBQ,1251
+uipath_langchain/builder/agent_config.py,sha256=b9WODKPjvufj41Ow_dQn5CnaTAjAZyQoNhuAl8vfiso,5809
 uipath_langchain/chat/__init__.py,sha256=WDcvy91ixvZ3Mq7Ae94g5CjyQwXovDBnEv1NlD5SXBE,116
 uipath_langchain/chat/models.py,sha256=m5PRAFXzUamt6-1K9uSlWUvZg_NfVyYHkgoQDJ-1rGs,10527
 uipath_langchain/embeddings/__init__.py,sha256=QICtYB58ZyqFfDQrEaO8lTEgAU5NuEKlR7iIrS0OBtc,156
@@ -30,8 +32,8 @@ uipath_langchain/tracers/_instrument_traceable.py,sha256=0e841zVzcPWjOGtmBx0GeHb
 uipath_langchain/tracers/_utils.py,sha256=JOT1tKMdvqjMDtj2WbmbOWMeMlTXBWavxWpogX7KlRA,1543
 uipath_langchain/vectorstores/__init__.py,sha256=w8qs1P548ud1aIcVA_QhBgf_jZDrRMK5Lono78yA8cs,114
 uipath_langchain/vectorstores/context_grounding_vectorstore.py,sha256=TncIXG-YsUlO0R5ZYzWsM-Dj1SVCZbzmo2LraVxXelc,9559
-uipath_langchain-0.0.124.dist-info/METADATA,sha256=zIj8p28Nc1KOTVJ7wjJNCXbsyMAmbx995lA8GdAoeUs,4235
-uipath_langchain-0.0.124.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-uipath_langchain-0.0.124.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
-uipath_langchain-0.0.124.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
-uipath_langchain-0.0.124.dist-info/RECORD,,
+uipath_langchain-0.0.126.dist-info/METADATA,sha256=rzCwILV0ipPfMnQks58Uh3J2KVNeb9F7FmymzJG0plo,4235
+uipath_langchain-0.0.126.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+uipath_langchain-0.0.126.dist-info/entry_points.txt,sha256=FUtzqGOEntlJKMJIXhQUfT7ZTbQmGhke1iCmDWZaQZI,81
+uipath_langchain-0.0.126.dist-info/licenses/LICENSE,sha256=JDpt-uotAkHFmxpwxi6gwx6HQ25e-lG4U_Gzcvgp7JY,1063
+uipath_langchain-0.0.126.dist-info/RECORD,,