uipath-langchain 0.1.28__py3-none-any.whl → 0.3.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. uipath_langchain/_cli/_templates/langgraph.json.template +2 -4
  2. uipath_langchain/_cli/cli_new.py +1 -2
  3. uipath_langchain/_utils/_request_mixin.py +8 -0
  4. uipath_langchain/_utils/_settings.py +3 -2
  5. uipath_langchain/agent/guardrails/__init__.py +0 -16
  6. uipath_langchain/agent/guardrails/actions/__init__.py +2 -0
  7. uipath_langchain/agent/guardrails/actions/block_action.py +1 -1
  8. uipath_langchain/agent/guardrails/actions/escalate_action.py +265 -138
  9. uipath_langchain/agent/guardrails/actions/filter_action.py +290 -0
  10. uipath_langchain/agent/guardrails/actions/log_action.py +1 -1
  11. uipath_langchain/agent/guardrails/guardrail_nodes.py +193 -42
  12. uipath_langchain/agent/guardrails/guardrails_factory.py +235 -14
  13. uipath_langchain/agent/guardrails/types.py +0 -12
  14. uipath_langchain/agent/guardrails/utils.py +177 -0
  15. uipath_langchain/agent/react/agent.py +24 -9
  16. uipath_langchain/agent/react/constants.py +1 -2
  17. uipath_langchain/agent/react/file_type_handler.py +123 -0
  18. uipath_langchain/agent/{guardrails → react/guardrails}/guardrails_subgraph.py +119 -25
  19. uipath_langchain/agent/react/init_node.py +16 -1
  20. uipath_langchain/agent/react/job_attachments.py +125 -0
  21. uipath_langchain/agent/react/json_utils.py +183 -0
  22. uipath_langchain/agent/react/jsonschema_pydantic_converter.py +76 -0
  23. uipath_langchain/agent/react/llm_node.py +41 -10
  24. uipath_langchain/agent/react/llm_with_files.py +76 -0
  25. uipath_langchain/agent/react/router.py +48 -37
  26. uipath_langchain/agent/react/types.py +19 -1
  27. uipath_langchain/agent/react/utils.py +30 -4
  28. uipath_langchain/agent/tools/__init__.py +7 -1
  29. uipath_langchain/agent/tools/context_tool.py +151 -1
  30. uipath_langchain/agent/tools/escalation_tool.py +46 -15
  31. uipath_langchain/agent/tools/integration_tool.py +20 -16
  32. uipath_langchain/agent/tools/internal_tools/__init__.py +5 -0
  33. uipath_langchain/agent/tools/internal_tools/analyze_files_tool.py +113 -0
  34. uipath_langchain/agent/tools/internal_tools/internal_tool_factory.py +54 -0
  35. uipath_langchain/agent/tools/mcp_tool.py +86 -0
  36. uipath_langchain/agent/tools/process_tool.py +8 -1
  37. uipath_langchain/agent/tools/static_args.py +18 -40
  38. uipath_langchain/agent/tools/tool_factory.py +13 -5
  39. uipath_langchain/agent/tools/tool_node.py +133 -4
  40. uipath_langchain/agent/tools/utils.py +31 -0
  41. uipath_langchain/agent/wrappers/__init__.py +6 -0
  42. uipath_langchain/agent/wrappers/job_attachment_wrapper.py +62 -0
  43. uipath_langchain/agent/wrappers/static_args_wrapper.py +34 -0
  44. uipath_langchain/chat/__init__.py +4 -0
  45. uipath_langchain/chat/bedrock.py +16 -0
  46. uipath_langchain/chat/mapper.py +60 -42
  47. uipath_langchain/chat/openai.py +56 -26
  48. uipath_langchain/chat/supported_models.py +9 -0
  49. uipath_langchain/chat/vertex.py +62 -46
  50. uipath_langchain/embeddings/embeddings.py +18 -12
  51. uipath_langchain/runtime/factory.py +10 -5
  52. uipath_langchain/runtime/runtime.py +38 -35
  53. uipath_langchain/runtime/schema.py +72 -16
  54. uipath_langchain/runtime/storage.py +178 -71
  55. {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/METADATA +7 -4
  56. uipath_langchain-0.3.1.dist-info/RECORD +90 -0
  57. uipath_langchain-0.1.28.dist-info/RECORD +0 -76
  58. {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/WHEEL +0 -0
  59. {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/entry_points.txt +0 -0
  60. {uipath_langchain-0.1.28.dist-info → uipath_langchain-0.3.1.dist-info}/licenses/LICENSE +0 -0
uipath_langchain/agent/{guardrails → react/guardrails}/guardrails_subgraph.py
@@ -1,24 +1,31 @@
  from functools import partial
- from typing import Any, Callable, Sequence
+ from typing import Any, Callable, Mapping, Sequence

+ from langgraph._internal._runnable import RunnableCallable
  from langgraph.constants import END, START
  from langgraph.graph import StateGraph
- from langgraph.prebuilt import ToolNode
+ from uipath.core.guardrails import DeterministicGuardrail
  from uipath.platform.guardrails import (
      BaseGuardrail,
      BuiltInValidatorGuardrail,
      GuardrailScope,
  )

- from uipath_langchain.agent.guardrails.types import ExecutionStage
-
- from .actions.base_action import GuardrailAction, GuardrailActionNode
- from .guardrail_nodes import (
-     create_agent_guardrail_node,
+ from uipath_langchain.agent.guardrails.actions.base_action import (
+     GuardrailAction,
+     GuardrailActionNode,
+ )
+ from uipath_langchain.agent.guardrails.guardrail_nodes import (
+     create_agent_init_guardrail_node,
+     create_agent_terminate_guardrail_node,
      create_llm_guardrail_node,
      create_tool_guardrail_node,
  )
- from .types import AgentGuardrailsGraphState
+ from uipath_langchain.agent.guardrails.types import ExecutionStage
+ from uipath_langchain.agent.react.types import (
+     AgentGraphState,
+     AgentGuardrailsGraphState,
+ )

  _VALIDATOR_ALLOWED_STAGES = {
      "prompt_injection": {ExecutionStage.PRE_EXECUTION},
@@ -110,7 +117,7 @@ def _create_guardrails_subgraph(
              ExecutionStage.POST_EXECUTION,
              node_factory,
              END,
-             inner_node,
+             inner_name,
          )
          subgraph.add_edge(inner_name, first_post_exec_guardrail_node)
      else:
@@ -197,10 +204,21 @@ def create_llm_guardrails_subgraph(
      llm_node: tuple[str, Any],
      guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
  ):
+     """Create a guarded LLM node.
+
+     Args:
+         llm_node: Tuple of (node_name, node_callable) for the LLM node.
+         guardrails: Optional sequence of (guardrail, action) tuples.
+
+     Returns:
+         Either the original node callable (if no applicable guardrails) or a compiled
+         LangGraph subgraph that enforces the configured guardrails.
+     """
      applicable_guardrails = [
          (guardrail, _)
          for (guardrail, _) in (guardrails or [])
          if GuardrailScope.LLM in guardrail.selector.scopes
+         and not isinstance(guardrail, DeterministicGuardrail)
      ]
      if applicable_guardrails is None or len(applicable_guardrails) == 0:
          return llm_node[1]
@@ -215,13 +233,19 @@ def create_llm_guardrails_subgraph(


  def create_tools_guardrails_subgraph(
-     tool_nodes: dict[str, ToolNode],
+     tool_nodes: Mapping[str, RunnableCallable],
      guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
- ) -> dict[str, ToolNode]:
-     """Create tool nodes with guardrails.
+ ) -> dict[str, RunnableCallable]:
+     """Create tool nodes with guardrails applied.
      Args:
+         tool_nodes: Mapping of tool name to a LangGraph `ToolNode`.
+         guardrails: Optional sequence of (guardrail, action) tuples.
+
+     Returns:
+         A mapping of tool name to either the original `ToolNode` or a compiled subgraph
+         that enforces the matching tool guardrails.
      """
-     result: dict[str, ToolNode] = {}
+     result: dict[str, RunnableCallable] = {}
      for tool_name, tool_node in tool_nodes.items():
          subgraph = create_tool_guardrails_subgraph(
              (tool_name, tool_node),
@@ -232,37 +256,107 @@ def create_tools_guardrails_subgraph(
      return result


- def create_agent_guardrails_subgraph(
-     agent_node: tuple[str, Any],
+ def create_agent_init_guardrails_subgraph(
+     init_node: tuple[str, Any],
      guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
-     execution_stage: ExecutionStage,
- ):
-     """Create a subgraph for AGENT-scoped guardrails that applies checks at the specified stage.
+ ) -> Any:
+     """Create a subgraph for the INIT node and apply AGENT guardrails after INIT.
+
+     This subgraph intentionally **runs the INIT node first** (so it can seed/normalize
+     the agent state), and then evaluates guardrails as **PRE_EXECUTION**. This lets
+     guardrails intended to run "before agent execution" validate the post-init state.
+
+     Args:
+         init_node: Tuple of (node_name, node_callable) for the INIT node.
+         guardrails: Optional sequence of (guardrail, action) tuples.

-     This is intended for wrapping nodes like INIT or TERMINATE, where guardrails should run
-     either before (pre-execution) or after (post-execution) the node logic.
+     Returns:
+         Either the original node callable (if no applicable guardrails) or a compiled
+         LangGraph subgraph that runs INIT then enforces PRE_EXECUTION AGENT guardrails.
      """
      applicable_guardrails = [
          (guardrail, _)
          for (guardrail, _) in (guardrails or [])
          if GuardrailScope.AGENT in guardrail.selector.scopes
+         and not isinstance(guardrail, DeterministicGuardrail)
      ]
+     applicable_guardrails = _filter_guardrails_by_stage(
+         applicable_guardrails, ExecutionStage.PRE_EXECUTION
+     )
      if applicable_guardrails is None or len(applicable_guardrails) == 0:
-         return agent_node[1]
+         return init_node[1]

-     return _create_guardrails_subgraph(
-         main_inner_node=agent_node,
+     inner_name, inner_node = init_node
+     subgraph = StateGraph(AgentGuardrailsGraphState)
+     subgraph.add_node(inner_name, inner_node)
+     subgraph.add_edge(START, inner_name)
+
+     first_guardrail_node = _build_guardrail_node_chain(
+         subgraph=subgraph,
          guardrails=applicable_guardrails,
          scope=GuardrailScope.AGENT,
-         execution_stages=[execution_stage],
-         node_factory=create_agent_guardrail_node,
+         execution_stage=ExecutionStage.PRE_EXECUTION,
+         node_factory=create_agent_init_guardrail_node,
+         next_node=END,
+         guarded_node_name=inner_name,
      )
+     subgraph.add_edge(inner_name, first_guardrail_node)
+     return subgraph.compile()
+
+
+ def create_agent_terminate_guardrails_subgraph(
+     terminate_node: tuple[str, Any],
+     guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
+ ):
+     """Create a subgraph for TERMINATE node that applies guardrails on the agent result."""
+     node_name, node_func = terminate_node
+
+     def terminate_wrapper(state: Any) -> dict[str, Any]:
+         # Call original terminate node
+         result = node_func(state)
+         # Store result in state
+         return {"agent_result": result, "messages": state.messages}
+
+     applicable_guardrails = [
+         (guardrail, _)
+         for (guardrail, _) in (guardrails or [])
+         if GuardrailScope.AGENT in guardrail.selector.scopes
+         and not isinstance(guardrail, DeterministicGuardrail)
+     ]
+     if applicable_guardrails is None or len(applicable_guardrails) == 0:
+         return terminate_node[1]
+
+     subgraph = _create_guardrails_subgraph(
+         main_inner_node=(node_name, terminate_wrapper),
+         guardrails=applicable_guardrails,
+         scope=GuardrailScope.AGENT,
+         execution_stages=[ExecutionStage.POST_EXECUTION],
+         node_factory=create_agent_terminate_guardrail_node,
+     )
+
+     async def run_terminate_subgraph(
+         state: AgentGraphState,
+     ) -> dict[str, Any]:
+         result_state = await subgraph.ainvoke(state)
+         return result_state["agent_result"]
+
+     return run_terminate_subgraph


  def create_tool_guardrails_subgraph(
      tool_node: tuple[str, Any],
      guardrails: Sequence[tuple[BaseGuardrail, GuardrailAction]] | None,
  ):
+     """Create a guarded tool node.
+
+     Args:
+         tool_node: Tuple of (tool_name, tool_node_callable).
+         guardrails: Optional sequence of (guardrail, action) tuples.
+
+     Returns:
+         Either the original tool node callable (if no matching guardrails) or a compiled
+         LangGraph subgraph that enforces the matching tool guardrails.
+     """
      tool_name, _ = tool_node
      applicable_guardrails = [
          (guardrail, action)
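For orientation, here is a minimal wiring sketch (not taken from the diff) of how the factories above might be composed when building an agent graph. Only the two imported factory functions come from the package; `build_guarded_graph`, `llm_node_fn`, `tool_node_map`, and `guardrail_pairs` are placeholders, and the real agent adds its own routing via its router node.

# Hypothetical wiring sketch; all names except the two imported factories are placeholders.
from langgraph.constants import START
from langgraph.graph import StateGraph

from uipath_langchain.agent.react.guardrails.guardrails_subgraph import (
    create_llm_guardrails_subgraph,
    create_tools_guardrails_subgraph,
)


def build_guarded_graph(state_schema, llm_node_fn, tool_node_map, guardrail_pairs):
    # Each factory returns the original node unchanged when no guardrail applies,
    # so the same wiring works with or without configured guardrails.
    guarded_llm = create_llm_guardrails_subgraph(("llm", llm_node_fn), guardrail_pairs)
    guarded_tools = create_tools_guardrails_subgraph(tool_node_map, guardrail_pairs)

    graph = StateGraph(state_schema)
    graph.add_node("llm", guarded_llm)
    for tool_name, tool_node in guarded_tools.items():
        graph.add_node(tool_name, tool_node)
    graph.add_edge(START, "llm")
    # Conditional routing from "llm" to the tool nodes and compile() are left to
    # the caller; in the package that is handled by the agent builder and router.
    return graph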
uipath_langchain/agent/react/init_node.py
@@ -3,11 +3,17 @@
  from typing import Any, Callable, Sequence

  from langchain_core.messages import HumanMessage, SystemMessage
+ from pydantic import BaseModel
+
+ from .job_attachments import (
+     get_job_attachments,
+ )


  def create_init_node(
      messages: Sequence[SystemMessage | HumanMessage]
      | Callable[[Any], Sequence[SystemMessage | HumanMessage]],
+     input_schema: type[BaseModel] | None,
  ):
      def graph_state_init(state: Any):
          if callable(messages):
@@ -15,6 +21,15 @@ def create_init_node(
          else:
              resolved_messages = messages

-         return {"messages": list(resolved_messages)}
+         schema = input_schema if input_schema is not None else BaseModel
+         job_attachments = get_job_attachments(schema, state)
+         job_attachments_dict = {
+             str(att.id): att for att in job_attachments if att.id is not None
+         }
+
+         return {
+             "messages": list(resolved_messages),
+             "job_attachments": job_attachments_dict,
+         }

      return graph_state_init
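As a rough illustration of the new `input_schema` plumbing, the sketch below defines a hand-written input model whose field type is literally named `Job_attachment`, since that class name is what the type matcher in the new json_utils module keys on. The field names, the example values, and the assumption that `uipath.platform.attachments.Attachment` validates from just `id` and `name` are illustrative, not taken from the diff.

# Illustrative sketch of create_init_node with a job-attachment field in the input schema.
from typing import Optional

from langchain_core.messages import SystemMessage
from pydantic import BaseModel

from uipath_langchain.agent.react.init_node import create_init_node


class Job_attachment(BaseModel):  # class name must be "Job_attachment" for path discovery
    id: Optional[str] = None
    name: Optional[str] = None


class AgentInput(BaseModel):
    question: str
    attachment: Optional[Job_attachment] = None


init_node = create_init_node(
    [SystemMessage(content="You are a helpful agent.")],
    input_schema=AgentInput,
)

# The init node scans the incoming state for Job_attachment-typed fields and
# indexes them by id so later tool calls can reference attachments by UUID.
state = {
    "question": "Summarize the attached report",
    "attachment": {"id": "123e4567-e89b-12d3-a456-426614174000", "name": "report.pdf"},
}
result = init_node(state)
# result is roughly {"messages": [...], "job_attachments": {"123e4567-...": Attachment(...)}},
# assuming Attachment accepts these fields; the real Attachment model may require more.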
uipath_langchain/agent/react/job_attachments.py (new file)
@@ -0,0 +1,125 @@
+ """Job attachment utilities for ReAct Agent."""
+
+ import copy
+ import uuid
+ from typing import Any
+
+ from jsonpath_ng import parse  # type: ignore[import-untyped]
+ from pydantic import BaseModel
+ from uipath.platform.attachments import Attachment
+
+ from .json_utils import extract_values_by_paths, get_json_paths_by_type
+
+
+ def get_job_attachments(
+     schema: type[BaseModel],
+     data: dict[str, Any] | BaseModel,
+ ) -> list[Attachment]:
+     """Extract job attachments from data based on schema and convert to Attachment objects.
+
+     Args:
+         schema: The Pydantic model class defining the data structure
+         data: The data object (dict or Pydantic model) to extract attachments from
+
+     Returns:
+         List of Attachment objects
+     """
+     job_attachment_paths = get_job_attachment_paths(schema)
+     job_attachments = extract_values_by_paths(data, job_attachment_paths)
+
+     result = []
+     for attachment in job_attachments:
+         result.append(Attachment.model_validate(attachment, from_attributes=True))
+
+     return result
+
+
+ def get_job_attachment_paths(model: type[BaseModel]) -> list[str]:
+     """Get JSONPath expressions for all job attachment fields in a Pydantic model.
+
+     Args:
+         model: The Pydantic model class to analyze
+
+     Returns:
+         List of JSONPath expressions pointing to job attachment fields
+     """
+     return get_json_paths_by_type(model, "Job_attachment")
+
+
+ def replace_job_attachment_ids(
+     json_paths: list[str],
+     tool_args: dict[str, Any],
+     state: dict[str, Attachment],
+     errors: list[str],
+ ) -> dict[str, Any]:
+     """Replace job attachment IDs in tool_args with full attachment objects from state.
+
+     For each JSON path, this function finds matching objects in tool_args and
+     replaces them with corresponding attachment objects from state. The matching
+     is done by looking up the object's 'ID' field in the state dictionary.
+
+     If an ID is not a valid UUID or is not present in state, an error message
+     is added to the errors list.
+
+     Args:
+         json_paths: List of JSONPath expressions (e.g., ["$.attachment", "$.attachments[*]"])
+         tool_args: The dictionary containing tool arguments to modify
+         state: Dictionary mapping attachment UUID strings to Attachment objects
+         errors: List to collect error messages for invalid or missing IDs
+
+     Returns:
+         Modified copy of tool_args with attachment IDs replaced by full objects
+
+     Example:
+         >>> state = {
+         ...     "123e4567-e89b-12d3-a456-426614174000": Attachment(id="123e4567-e89b-12d3-a456-426614174000", name="file1.pdf"),
+         ...     "223e4567-e89b-12d3-a456-426614174001": Attachment(id="223e4567-e89b-12d3-a456-426614174001", name="file2.pdf")
+         ... }
+         >>> tool_args = {
+         ...     "attachment": {"ID": "123"},
+         ...     "other_field": "value"
+         ... }
+         >>> paths = ['$.attachment']
+         >>> errors = []
+         >>> replace_job_attachment_ids(paths, tool_args, state, errors)
+         {'attachment': {'ID': '123', 'name': 'file1.pdf', ...}, 'other_field': 'value'}
+     """
+     result = copy.deepcopy(tool_args)
+
+     for json_path in json_paths:
+         expr = parse(json_path)
+         matches = expr.find(result)
+
+         for match in matches:
+             current_value = match.value
+
+             if isinstance(current_value, dict) and "ID" in current_value:
+                 attachment_id_str = str(current_value["ID"])
+
+                 try:
+                     uuid.UUID(attachment_id_str)
+                 except (ValueError, AttributeError):
+                     errors.append(
+                         _create_job_attachment_error_message(attachment_id_str)
+                     )
+                     continue
+
+                 if attachment_id_str in state:
+                     replacement_value = state[attachment_id_str]
+                     match.full_path.update(
+                         result, replacement_value.model_dump(by_alias=True, mode="json")
+                     )
+                 else:
+                     errors.append(
+                         _create_job_attachment_error_message(attachment_id_str)
+                     )
+
+     return result
+
+
+ def _create_job_attachment_error_message(attachment_id_str: str) -> str:
+     return (
+         f"Could not find JobAttachment with ID='{attachment_id_str}'. "
+         f"Try invoking the tool again and please make sure that you pass "
+         f"valid JobAttachment IDs associated with existing JobAttachments in the current context."
+     )
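A short usage sketch for `replace_job_attachment_ids`, mirroring the docstring above; like the docstring, it assumes `Attachment` can be constructed from `id` and `name` alone, and the tool-argument shape is illustrative.

# Usage sketch: resolve an LLM-provided attachment ID back to the full attachment.
from uipath.platform.attachments import Attachment

from uipath_langchain.agent.react.job_attachments import replace_job_attachment_ids

attachment_id = "123e4567-e89b-12d3-a456-426614174000"
state = {attachment_id: Attachment(id=attachment_id, name="report.pdf")}

tool_args = {"attachment": {"ID": attachment_id}, "comment": "please summarize"}
errors: list[str] = []

resolved = replace_job_attachment_ids(["$.attachment"], tool_args, state, errors)
# resolved["attachment"] now holds the serialized Attachment (model_dump, by_alias);
# tool_args itself is untouched because the function deep-copies its input.
# A malformed or unknown ID would leave the value as-is and append a message to `errors`.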
uipath_langchain/agent/react/json_utils.py (new file)
@@ -0,0 +1,183 @@
+ import sys
+ from typing import Any, ForwardRef, Union, get_args, get_origin
+
+ from jsonpath_ng import parse  # type: ignore[import-untyped]
+ from pydantic import BaseModel
+
+
+ def get_json_paths_by_type(model: type[BaseModel], type_name: str) -> list[str]:
+     """Get JSONPath expressions for all fields that reference a specific type.
+
+     This function recursively traverses nested Pydantic models to find all paths
+     that lead to fields of the specified type.
+
+     Args:
+         model: A Pydantic model class
+         type_name: The name of the type to search for (e.g., "Job_attachment")
+
+     Returns:
+         List of JSONPath expressions using standard JSONPath syntax.
+         For array fields, uses [*] to indicate all array elements.
+
+     Example:
+         >>> schema = {
+         ...     "type": "object",
+         ...     "properties": {
+         ...         "attachment": {"$ref": "#/definitions/job-attachment"},
+         ...         "attachments": {
+         ...             "type": "array",
+         ...             "items": {"$ref": "#/definitions/job-attachment"}
+         ...         }
+         ...     },
+         ...     "definitions": {
+         ...         "job-attachment": {"type": "object", "properties": {"id": {"type": "string"}}}
+         ...     }
+         ... }
+         >>> model = transform(schema)
+         >>> _get_json_paths_by_type(model, "Job_attachment")
+         ['$.attachment', '$.attachments[*]']
+     """
+
+     def _recursive_search(
+         current_model: type[BaseModel], current_path: str
+     ) -> list[str]:
+         """Recursively search for fields of the target type."""
+         json_paths = []
+
+         target_type = _get_target_type(current_model, type_name)
+         matches_type = _create_type_matcher(type_name, target_type)
+
+         for field_name, field_info in current_model.model_fields.items():
+             annotation = field_info.annotation
+
+             if current_path:
+                 field_path = f"{current_path}.{field_name}"
+             else:
+                 field_path = f"$.{field_name}"
+
+             annotation = _unwrap_optional(annotation)
+             origin = get_origin(annotation)
+
+             if matches_type(annotation):
+                 json_paths.append(field_path)
+                 continue
+
+             if origin is list:
+                 args = get_args(annotation)
+                 if args:
+                     list_item_type = args[0]
+                     if matches_type(list_item_type):
+                         json_paths.append(f"{field_path}[*]")
+                         continue
+
+                     if _is_pydantic_model(list_item_type):
+                         nested_paths = _recursive_search(
+                             list_item_type, f"{field_path}[*]"
+                         )
+                         json_paths.extend(nested_paths)
+                 continue
+
+             if _is_pydantic_model(annotation):
+                 nested_paths = _recursive_search(annotation, field_path)
+                 json_paths.extend(nested_paths)
+
+         return json_paths
+
+     return _recursive_search(model, "")
+
+
+ def extract_values_by_paths(
+     obj: dict[str, Any] | BaseModel, json_paths: list[str]
+ ) -> list[Any]:
+     """Extract values from an object using JSONPath expressions.
+
+     Args:
+         obj: The object (dict or Pydantic model) to extract values from
+         json_paths: List of JSONPath expressions. **Paths are assumed to be disjoint**
+             (non-overlapping). If paths overlap, duplicate values will be returned.
+
+     Returns:
+         List of all extracted values (flattened)
+
+     Example:
+         >>> obj = {
+         ...     "attachment": {"id": "123"},
+         ...     "attachments": [{"id": "456"}, {"id": "789"}]
+         ... }
+         >>> paths = ['$.attachment', '$.attachments[*]']
+         >>> _extract_values_by_paths(obj, paths)
+         [{'id': '123'}, {'id': '456'}, {'id': '789'}]
+     """
+     data = obj.model_dump() if isinstance(obj, BaseModel) else obj
+
+     results = []
+     for json_path in json_paths:
+         expr = parse(json_path)
+         matches = expr.find(data)
+         results.extend([match.value for match in matches])
+
+     return results
+
+
+ def _get_target_type(model: type[BaseModel], type_name: str) -> Any:
+     """Get the target type from the model's module.
+
+     Args:
+         model: A Pydantic model class
+         type_name: The name of the type to search for
+
+     Returns:
+         The target type if found, None otherwise
+     """
+     model_module = sys.modules.get(model.__module__)
+     if model_module and hasattr(model_module, type_name):
+         return getattr(model_module, type_name)
+     return None
+
+
+ def _create_type_matcher(type_name: str, target_type: Any) -> Any:
+     """Create a function that checks if an annotation matches the target type.
+
+     Args:
+         type_name: The name of the type to match
+         target_type: The actual type object (can be None)
+
+     Returns:
+         A function that takes an annotation and returns True if it matches
+     """
+
+     def matches_type(annotation: Any) -> bool:
+         """Check if an annotation matches the target type name."""
+         if isinstance(annotation, ForwardRef):
+             return annotation.__forward_arg__ == type_name
+         if isinstance(annotation, str):
+             return annotation == type_name
+         if hasattr(annotation, "__name__") and annotation.__name__ == type_name:
+             return True
+         if target_type is not None and annotation is target_type:
+             return True
+         return False
+
+     return matches_type
+
+
+ def _unwrap_optional(annotation: Any) -> Any:
+     """Unwrap Optional/Union types to get the underlying type.
+
+     Args:
+         annotation: The type annotation to unwrap
+
+     Returns:
+         The unwrapped type, or the original if not Optional/Union
+     """
+     origin = get_origin(annotation)
+     if origin is Union:
+         args = get_args(annotation)
+         non_none_args = [arg for arg in args if arg is not type(None)]
+         if non_none_args:
+             return non_none_args[0]
+     return annotation
+
+
+ def _is_pydantic_model(annotation: Any) -> bool:
+     return isinstance(annotation, type) and issubclass(annotation, BaseModel)
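To make the traversal concrete, here is a self-contained sketch using hand-written models (nothing UiPath-specific); the class name `Job_attachment` is what `get_json_paths_by_type` matches on, and the model/field names are illustrative.

# Self-contained sketch of the path-discovery and extraction helpers above.
from typing import Optional

from pydantic import BaseModel

from uipath_langchain.agent.react.json_utils import (
    extract_values_by_paths,
    get_json_paths_by_type,
)


class Job_attachment(BaseModel):
    id: Optional[str] = None


class Report(BaseModel):
    title: str
    attachments: list[Job_attachment] = []


class AgentInput(BaseModel):
    attachment: Optional[Job_attachment] = None
    report: Report


# Discover every field (including nested and list fields) typed as Job_attachment.
paths = get_json_paths_by_type(AgentInput, "Job_attachment")
# ['$.attachment', '$.report.attachments[*]']

data = {
    "attachment": {"id": "a-1"},
    "report": {"title": "Q3", "attachments": [{"id": "a-2"}, {"id": "a-3"}]},
}
values = extract_values_by_paths(data, paths)
# [{'id': 'a-1'}, {'id': 'a-2'}, {'id': 'a-3'}]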
uipath_langchain/agent/react/jsonschema_pydantic_converter.py (new file)
@@ -0,0 +1,76 @@
+ import inspect
+ import sys
+ from types import ModuleType
+ from typing import Any, Type, get_args, get_origin
+
+ from jsonschema_pydantic_converter import transform_with_modules
+ from pydantic import BaseModel
+
+ # Shared pseudo-module for all dynamically created types
+ # This allows get_type_hints() to resolve forward references
+ _DYNAMIC_MODULE_NAME = "jsonschema_pydantic_converter._dynamic"
+
+
+ def _get_or_create_dynamic_module() -> ModuleType:
+     """Get or create the shared pseudo-module for dynamic types."""
+     if _DYNAMIC_MODULE_NAME not in sys.modules:
+         pseudo_module = ModuleType(_DYNAMIC_MODULE_NAME)
+         pseudo_module.__doc__ = (
+             "Shared module for dynamically generated Pydantic models from JSON schemas"
+         )
+         sys.modules[_DYNAMIC_MODULE_NAME] = pseudo_module
+     return sys.modules[_DYNAMIC_MODULE_NAME]
+
+
+ def create_model(
+     schema: dict[str, Any],
+ ) -> Type[BaseModel]:
+     model, namespace = transform_with_modules(schema)
+     corrected_namespace: dict[str, Any] = {}
+
+     def collect_types(annotation: Any) -> None:
+         """Recursively collect all BaseModel types from an annotation."""
+         # Unwrap generic types like List, Optional, etc.
+         origin = get_origin(annotation)
+         if origin is not None:
+             for arg in get_args(annotation):
+                 collect_types(arg)
+
+         elif inspect.isclass(annotation) and issubclass(annotation, BaseModel):
+             # Find the original name for this type from the namespace
+             for type_name, type_def in namespace.items():
+                 # Match by class name since rebuild may create new instances
+                 if (
+                     hasattr(annotation, "__name__")
+                     and hasattr(type_def, "__name__")
+                     and annotation.__name__ == type_def.__name__
+                 ):
+                     # Store the actual annotation type, not the old namespace one
+                     annotation.__name__ = type_name
+                     corrected_namespace[type_name] = annotation
+                     break
+
+     # Collect all types from field annotations
+     for field_info in model.model_fields.values():
+         collect_types(field_info.annotation)
+
+     # Get the shared pseudo-module and populate it with this schema's types
+     # This ensures that forward references can be resolved by get_type_hints()
+     # when the model is used with external libraries (e.g., LangGraph)
+     pseudo_module = _get_or_create_dynamic_module()
+
+     # Populate the pseudo-module with all types from the namespace
+     # Use the original names so forward references resolve correctly
+     for type_name, type_def in corrected_namespace.items():
+         setattr(pseudo_module, type_name, type_def)
+
+     setattr(pseudo_module, model.__name__, model)
+
+     # Update the model's __module__ to point to the shared pseudo-module
+     model.__module__ = _DYNAMIC_MODULE_NAME
+
+     # Update the __module__ of all generated types in the namespace
+     for type_def in corrected_namespace.values():
+         if inspect.isclass(type_def) and issubclass(type_def, BaseModel):
+             type_def.__module__ = _DYNAMIC_MODULE_NAME
+     return model
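Finally, a hedged sketch of how `create_model` is meant to be used. The exact JSON-schema dialect accepted by the underlying `jsonschema_pydantic_converter` package is an assumption here, and the schema and field names below are illustrative only.

# Hedged usage sketch for create_model(); the schema shape is an assumption.
from uipath_langchain.agent.react.jsonschema_pydantic_converter import create_model

schema = {
    "title": "AgentInput",
    "type": "object",
    "properties": {
        "question": {"type": "string"},
    },
    "required": ["question"],
}

InputModel = create_model(schema)
# The generated model (and any nested generated types) is registered under the shared
# pseudo-module, so get_type_hints() can resolve forward references when the model is
# handed to LangGraph as an input/state schema.
instance = InputModel(question="What changed between 0.1.28 and 0.3.1?")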