lmnr 0.2.4__tar.gz → 0.2.6__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33) hide show
  1. {lmnr-0.2.4 → lmnr-0.2.6}/PKG-INFO +6 -13
  2. {lmnr-0.2.4 → lmnr-0.2.6}/README.md +5 -12
  3. {lmnr-0.2.4 → lmnr-0.2.6}/pyproject.toml +2 -3
  4. lmnr-0.2.6/src/lmnr/cli/parser/nodes/input.py +26 -0
  5. lmnr-0.2.6/src/lmnr/cli/parser/nodes/llm.py +51 -0
  6. lmnr-0.2.6/src/lmnr/cli/parser/nodes/output.py +27 -0
  7. lmnr-0.2.6/src/lmnr/cli/parser/nodes/semantic_search.py +81 -0
  8. lmnr-0.2.6/src/lmnr/cli/parser/nodes/types.py +91 -0
  9. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/engine.py +2 -1
  10. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/nodes/functions.py +36 -0
  11. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/sdk/endpoint.py +31 -11
  12. lmnr-0.2.6/src/lmnr/sdk/remote_debugger.py +139 -0
  13. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/types.py +38 -11
  14. lmnr-0.2.4/src/lmnr/cli/parser/nodes/types.py +0 -157
  15. lmnr-0.2.4/src/lmnr/sdk/remote_debugger.py +0 -87
  16. {lmnr-0.2.4 → lmnr-0.2.6}/LICENSE +0 -0
  17. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/__init__.py +0 -0
  18. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/__init__.py +0 -0
  19. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/__main__.py +0 -0
  20. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/cli.py +0 -0
  21. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/cookiecutter.json +0 -0
  22. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/parser/__init__.py +0 -0
  23. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/parser/nodes/__init__.py +0 -0
  24. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/parser/parser.py +0 -0
  25. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/parser/utils.py +0 -0
  26. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/__init__.py +0 -0
  27. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/__init__.py +0 -0
  28. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/action.py +0 -0
  29. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/state.py +0 -0
  30. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/task.py +0 -0
  31. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/__init__.py +0 -0
  32. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/{{cookiecutter.pipeline_dir_name}}.py +0 -0
  33. {lmnr-0.2.4 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/types.py +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: lmnr
3
- Version: 0.2.4
3
+ Version: 0.2.6
4
4
  Summary: Python SDK for Laminar AI
5
5
  License: Apache-2.0
6
6
  Author: lmnr.ai
@@ -17,11 +17,12 @@ Requires-Dist: cookiecutter (>=2.6.0,<3.0.0)
17
17
  Requires-Dist: pydantic (>=2.7.4,<3.0.0)
18
18
  Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
19
19
  Requires-Dist: requests (>=2.32.3,<3.0.0)
20
- Requires-Dist: urllib3 (==1.26.6)
21
20
  Requires-Dist: websockets (>=12.0,<13.0)
22
21
  Description-Content-Type: text/markdown
23
22
 
24
- # Python SDK for Laminar AI
23
+ # Laminar AI
24
+
25
+ This repo provides core for code generation, Laminar CLI, and Laminar SDK.
25
26
 
26
27
  ## Quickstart
27
28
  ```sh
@@ -121,9 +122,8 @@ Example:
121
122
  from lmnr import LaminarRemoteDebugger, NodeInput
122
123
 
123
124
  # adding **kwargs is safer, in case an LLM produces more arguments than needed
124
- def my_tool(arg1: string, arg2: string, **kwargs) -> NodeInput {
125
+ def my_tool(arg1: string, arg2: string, **kwargs) -> NodeInput:
125
126
  return f'{arg1}&{arg2}'
126
- }
127
127
 
128
128
  debugger = LaminarRemoteDebugger('<YOUR_PROJECT_API_KEY>', [my_tool])
129
129
  session_id = debugger.start() # the session id will also be printed to console
@@ -140,14 +140,6 @@ Set up `DEBUGGER_SESSION_ID` environment variable in your pipeline.
140
140
 
141
141
  You can run as many sessions as you need, experimenting with your flows.
142
142
 
143
- #### 5. Stop the debugger
144
-
145
- In order to stop the session, do
146
-
147
- ```python
148
- debugger.stop()
149
- ```
150
-
151
143
  ## CLI for code generation
152
144
 
153
145
  ### Basic usage
@@ -184,3 +176,4 @@ print(f"RESULT:\n{res}")
184
176
  ## PROJECT_API_KEY
185
177
 
186
178
  Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `PROJECT_API_KEY`.
179
+
@@ -1,4 +1,6 @@
1
- # Python SDK for Laminar AI
1
+ # Laminar AI
2
+
3
+ This repo provides core for code generation, Laminar CLI, and Laminar SDK.
2
4
 
3
5
  ## Quickstart
4
6
  ```sh
@@ -98,9 +100,8 @@ Example:
98
100
  from lmnr import LaminarRemoteDebugger, NodeInput
99
101
 
100
102
  # adding **kwargs is safer, in case an LLM produces more arguments than needed
101
- def my_tool(arg1: string, arg2: string, **kwargs) -> NodeInput {
103
+ def my_tool(arg1: string, arg2: string, **kwargs) -> NodeInput:
102
104
  return f'{arg1}&{arg2}'
103
- }
104
105
 
105
106
  debugger = LaminarRemoteDebugger('<YOUR_PROJECT_API_KEY>', [my_tool])
106
107
  session_id = debugger.start() # the session id will also be printed to console
@@ -117,14 +118,6 @@ Set up `DEBUGGER_SESSION_ID` environment variable in your pipeline.
117
118
 
118
119
  You can run as many sessions as you need, experimenting with your flows.
119
120
 
120
- #### 5. Stop the debugger
121
-
122
- In order to stop the session, do
123
-
124
- ```python
125
- debugger.stop()
126
- ```
127
-
128
121
  ## CLI for code generation
129
122
 
130
123
  ### Basic usage
@@ -160,4 +153,4 @@ print(f"RESULT:\n{res}")
160
153
 
161
154
  ## PROJECT_API_KEY
162
155
 
163
- Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `PROJECT_API_KEY`.
156
+ Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `PROJECT_API_KEY`.
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "lmnr"
3
- version = "0.2.4"
3
+ version = "0.2.5"
4
4
  description = "Python SDK for Laminar AI"
5
5
  authors = [
6
6
  { name = "lmnr.ai", email = "founders@lmnr.ai" }
@@ -11,7 +11,7 @@ license = "Apache-2.0"
11
11
 
12
12
  [tool.poetry]
13
13
  name = "lmnr"
14
- version = "0.2.4"
14
+ version = "0.2.6"
15
15
  description = "Python SDK for Laminar AI"
16
16
  authors = ["lmnr.ai"]
17
17
  readme = "README.md"
@@ -23,7 +23,6 @@ black = "^24.4.2"
23
23
  pydantic = "^2.7.4"
24
24
  click = "^8.1.7"
25
25
  requests = "^2.32.3"
26
- urllib3 = "1.26.6"
27
26
  websockets = "^12.0"
28
27
  cookiecutter = "^2.6.0"
29
28
  python-dotenv = "^1.0.1"
@@ -0,0 +1,26 @@
1
+ from dataclasses import dataclass
2
+ from typing import Optional
3
+ import uuid
4
+
5
+ from lmnr.cli.parser.nodes import Handle, HandleType, NodeFunctions
6
+ from lmnr.types import NodeInput
7
+
8
+
9
+ @dataclass
10
+ class InputNode(NodeFunctions):
11
+ id: uuid.UUID
12
+ name: str
13
+ outputs: list[Handle]
14
+ input: Optional[NodeInput]
15
+ input_type: HandleType
16
+
17
+ def handles_mapping(
18
+ self, output_handle_id_to_node_name: dict[str, str]
19
+ ) -> list[tuple[str, str]]:
20
+ return []
21
+
22
+ def node_type(self) -> str:
23
+ return "Input"
24
+
25
+ def config(self) -> dict:
26
+ return {}
@@ -0,0 +1,51 @@
1
+ from dataclasses import dataclass
2
+ from typing import Optional
3
+ import uuid
4
+
5
+ from lmnr.cli.parser.nodes import Handle, NodeFunctions
6
+ from lmnr.cli.parser.utils import map_handles
7
+
8
+
9
+ @dataclass
10
+ class LLMNode(NodeFunctions):
11
+ id: uuid.UUID
12
+ name: str
13
+ inputs: list[Handle]
14
+ dynamic_inputs: list[Handle]
15
+ outputs: list[Handle]
16
+ inputs_mappings: dict[uuid.UUID, uuid.UUID]
17
+ prompt: str
18
+ model: str
19
+ model_params: Optional[str]
20
+ stream: bool
21
+ structured_output_enabled: bool
22
+ structured_output_max_retries: int
23
+ structured_output_schema: Optional[str]
24
+ structured_output_schema_target: Optional[str]
25
+
26
+ def handles_mapping(
27
+ self, output_handle_id_to_node_name: dict[str, str]
28
+ ) -> list[tuple[str, str]]:
29
+ combined_inputs = self.inputs + self.dynamic_inputs
30
+ return map_handles(
31
+ combined_inputs, self.inputs_mappings, output_handle_id_to_node_name
32
+ )
33
+
34
+ def node_type(self) -> str:
35
+ return "LLM"
36
+
37
+ def config(self) -> dict:
38
+ # For easier access in the template separate the provider and model here
39
+ provider, model = self.model.split(":", maxsplit=1)
40
+
41
+ return {
42
+ "prompt": self.prompt,
43
+ "provider": provider,
44
+ "model": model,
45
+ "model_params": self.model_params,
46
+ "stream": self.stream,
47
+ "structured_output_enabled": self.structured_output_enabled,
48
+ "structured_output_max_retries": self.structured_output_max_retries,
49
+ "structured_output_schema": self.structured_output_schema,
50
+ "structured_output_schema_target": self.structured_output_schema_target,
51
+ }
@@ -0,0 +1,27 @@
1
+ from dataclasses import dataclass
2
+ import uuid
3
+
4
+ from lmnr.cli.parser.nodes import Handle, NodeFunctions
5
+ from lmnr.cli.parser.utils import map_handles
6
+
7
+
8
+ @dataclass
9
+ class OutputNode(NodeFunctions):
10
+ id: uuid.UUID
11
+ name: str
12
+ inputs: list[Handle]
13
+ outputs: list[Handle]
14
+ inputs_mappings: dict[uuid.UUID, uuid.UUID]
15
+
16
+ def handles_mapping(
17
+ self, output_handle_id_to_node_name: dict[str, str]
18
+ ) -> list[tuple[str, str]]:
19
+ return map_handles(
20
+ self.inputs, self.inputs_mappings, output_handle_id_to_node_name
21
+ )
22
+
23
+ def node_type(self) -> str:
24
+ return "Output"
25
+
26
+ def config(self) -> dict:
27
+ return {}
@@ -0,0 +1,81 @@
1
+ from dataclasses import dataclass
2
+ from datetime import datetime
3
+
4
+ import uuid
5
+
6
+ from lmnr.cli.parser.nodes import Handle, NodeFunctions
7
+ from lmnr.cli.parser.utils import map_handles
8
+
9
+
10
+ @dataclass
11
+ class FileMetadata:
12
+ id: uuid.UUID
13
+ created_at: datetime
14
+ project_id: uuid.UUID
15
+ filename: str
16
+
17
+
18
+ @dataclass
19
+ class Dataset:
20
+ id: uuid.UUID
21
+ created_at: datetime
22
+ project_id: uuid.UUID
23
+ name: str
24
+
25
+
26
+ @dataclass
27
+ class SemanticSearchDatasource:
28
+ type: str
29
+ id: uuid.UUID
30
+ # TODO: Paste other fields here, use Union[FileMetadata, Dataset]
31
+
32
+ @classmethod
33
+ def from_dict(cls, datasource_dict: dict) -> "SemanticSearchDatasource":
34
+ if datasource_dict["type"] == "File":
35
+ return cls(
36
+ type="File",
37
+ id=uuid.UUID(datasource_dict["id"]),
38
+ )
39
+ elif datasource_dict["type"] == "Dataset":
40
+ return cls(
41
+ type="Dataset",
42
+ id=uuid.UUID(datasource_dict["id"]),
43
+ )
44
+ else:
45
+ raise ValueError(
46
+ f"Invalid SemanticSearchDatasource type: {datasource_dict['type']}"
47
+ )
48
+
49
+
50
+ @dataclass
51
+ class SemanticSearchNode(NodeFunctions):
52
+ id: uuid.UUID
53
+ name: str
54
+ inputs: list[Handle]
55
+ outputs: list[Handle]
56
+ inputs_mappings: dict[uuid.UUID, uuid.UUID]
57
+ limit: int
58
+ threshold: float
59
+ template: str
60
+ datasources: list[SemanticSearchDatasource]
61
+
62
+ def handles_mapping(
63
+ self, output_handle_id_to_node_name: dict[str, str]
64
+ ) -> list[tuple[str, str]]:
65
+ return map_handles(
66
+ self.inputs, self.inputs_mappings, output_handle_id_to_node_name
67
+ )
68
+
69
+ def node_type(self) -> str:
70
+ return "SemanticSearch"
71
+
72
+ def config(self) -> dict:
73
+ return {
74
+ "limit": self.limit,
75
+ "threshold": self.threshold,
76
+ "template": self.template,
77
+ "datasource_ids": [str(datasource.id) for datasource in self.datasources],
78
+ "datasource_ids_list": str(
79
+ [str(datasource.id) for datasource in self.datasources]
80
+ ),
81
+ }
@@ -0,0 +1,91 @@
1
+ from typing import Any, Union
2
+ import uuid
3
+
4
+ from lmnr.cli.parser.nodes import Handle
5
+ from lmnr.cli.parser.nodes.input import InputNode
6
+ from lmnr.cli.parser.nodes.llm import LLMNode
7
+ from lmnr.cli.parser.nodes.output import OutputNode
8
+ from lmnr.cli.parser.nodes.semantic_search import (
9
+ SemanticSearchDatasource,
10
+ SemanticSearchNode,
11
+ )
12
+ from lmnr.types import NodeInput, ChatMessage
13
+
14
+
15
+ def node_input_from_json(json_val: Any) -> NodeInput:
16
+ if isinstance(json_val, str):
17
+ return json_val
18
+ elif isinstance(json_val, list):
19
+ return [ChatMessage.model_validate(msg) for msg in json_val]
20
+ else:
21
+ raise ValueError(f"Invalid NodeInput value: {json_val}")
22
+
23
+
24
+ Node = Union[InputNode, OutputNode, LLMNode, SemanticSearchNode]
25
+
26
+
27
+ def node_from_dict(node_dict: dict) -> Node:
28
+ if node_dict["type"] == "Input":
29
+ return InputNode(
30
+ id=uuid.UUID(node_dict["id"]),
31
+ name=node_dict["name"],
32
+ outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
33
+ input=node_input_from_json(node_dict["input"]),
34
+ input_type=node_dict["inputType"],
35
+ )
36
+ elif node_dict["type"] == "Output":
37
+ return OutputNode(
38
+ id=uuid.UUID(node_dict["id"]),
39
+ name=node_dict["name"],
40
+ inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
41
+ outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
42
+ inputs_mappings={
43
+ uuid.UUID(k): uuid.UUID(v)
44
+ for k, v in node_dict["inputsMappings"].items()
45
+ },
46
+ )
47
+ elif node_dict["type"] == "LLM":
48
+ return LLMNode(
49
+ id=uuid.UUID(node_dict["id"]),
50
+ name=node_dict["name"],
51
+ inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
52
+ dynamic_inputs=[
53
+ Handle.from_dict(handle) for handle in node_dict["dynamicInputs"]
54
+ ],
55
+ outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
56
+ inputs_mappings={
57
+ uuid.UUID(k): uuid.UUID(v)
58
+ for k, v in node_dict["inputsMappings"].items()
59
+ },
60
+ prompt=node_dict["prompt"],
61
+ model=node_dict["model"],
62
+ model_params=(
63
+ node_dict["modelParams"] if "modelParams" in node_dict else None
64
+ ),
65
+ stream=False,
66
+ # TODO: Implement structured output
67
+ structured_output_enabled=False,
68
+ structured_output_max_retries=3,
69
+ structured_output_schema=None,
70
+ structured_output_schema_target=None,
71
+ )
72
+ elif node_dict["type"] == "SemanticSearch":
73
+ return SemanticSearchNode(
74
+ id=uuid.UUID(node_dict["id"]),
75
+ name=node_dict["name"],
76
+ inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
77
+ outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
78
+ inputs_mappings={
79
+ uuid.UUID(k): uuid.UUID(v)
80
+ for k, v in node_dict["inputsMappings"].items()
81
+ },
82
+ limit=node_dict["limit"],
83
+ threshold=node_dict["threshold"],
84
+ template=node_dict["template"],
85
+ datasources=[
86
+ SemanticSearchDatasource.from_dict(ds)
87
+ for ds in node_dict["datasources"]
88
+ ],
89
+ )
90
+ else:
91
+ raise ValueError(f"Node type {node_dict['type']} not supported")
@@ -9,7 +9,8 @@ import queue
9
9
  from .task import Task
10
10
  from .action import NodeRunError, RunOutput
11
11
  from .state import State
12
- from lmnr_engine.types import Message, NodeInput
12
+ from lmnr.types import NodeInput
13
+ from lmnr_engine.types import Message
13
14
 
14
15
 
15
16
  logger = logging.getLogger(__name__)
@@ -132,6 +132,42 @@ def {{task.function_name}}({{ task.handle_args }}, _env: dict[str, str]) -> RunO
132
132
  return RunOutput(status="Success", output=completion_message)
133
133
 
134
134
 
135
+ {% elif task.node_type == "SemanticSearch" %}
136
+ def {{task.function_name}}(query: NodeInput, _env: dict[str, str]) -> RunOutput:
137
+ {% set datasources_length=task.config.datasource_ids|length %}
138
+ {% if datasources_length == 0 %}
139
+ raise NodeRunError("No datasources provided")
140
+ {% endif %}
141
+
142
+ headers = {
143
+ "Authorization": f"Bearer {_env['LMNR_PROJECT_API_KEY']}",
144
+ }
145
+ data = {
146
+ "query": query,
147
+ "limit": {{ task.config.limit }},
148
+ "threshold": {{ task.config.threshold }},
149
+ "datasourceIds": {{ task.config.datasource_ids_list }},
150
+ }
151
+ query_res = requests.post("https://api.lmnr.ai/v2/semantic-search", headers=headers, json=data)
152
+ if query_res.status_code != 200:
153
+ raise NodeRunError(f"Vector search request failed: {query_res.text}")
154
+
155
+ results = query_res.json()
156
+
157
+ def render_query_res_point(template: str, point: dict, relevance_index: int) -> str:
158
+ data = point["data"]
159
+ data["relevance_index"] = relevance_index
160
+ res = template
161
+ for key, value in data.items():
162
+ res = res.replace("{{'{{'}}" + key + "{{'}}'}}", str(value))
163
+ return res
164
+
165
+ rendered_res_points = [render_query_res_point("""{{task.config.template}}""", res_point, index + 1) for (index, res_point) in enumerate(results)]
166
+ output = "\n".join(rendered_res_points)
167
+
168
+ return RunOutput(status="Success", output=output)
169
+
170
+
135
171
  {% elif task.node_type == "Output" %}
136
172
  def {{task.function_name}}(output: NodeInput, _env: dict[str, str]) -> RunOutput:
137
173
  return RunOutput(status="Success", output=output)
@@ -4,7 +4,7 @@ import pydantic
4
4
  import requests
5
5
  from lmnr.types import (
6
6
  EndpointRunError, EndpointRunResponse, NodeInput, EndpointRunRequest,
7
- ToolCall, SDKError
7
+ ToolCallError, ToolCallRequest, ToolCallResponse, SDKError
8
8
  )
9
9
  from typing import Callable, Optional
10
10
  from websockets.sync.client import connect
@@ -126,31 +126,52 @@ class Laminar:
126
126
  }
127
127
  ) as websocket:
128
128
  websocket.send(request.model_dump_json())
129
+ req_id = None
129
130
 
130
131
  while True:
131
132
  message = websocket.recv()
132
133
  try:
133
- tool_call = ToolCall.model_validate_json(message)
134
+ tool_call = ToolCallRequest.model_validate_json(message)
135
+ req_id = tool_call.req_id
134
136
  matching_tools = [
135
137
  tool for tool in tools
136
- if tool.__name__ == tool_call.function.name
138
+ if tool.__name__ == tool_call.toolCall.function.name
137
139
  ]
138
140
  if not matching_tools:
139
141
  raise SDKError(
140
- f'Tool {tool_call.function.name} not found.'
142
+ f'Tool {tool_call.toolCall.function.name} not found.'
141
143
  ' Registered tools: '
142
144
  f'{", ".join([tool.__name__ for tool in tools])}'
143
145
  )
144
146
  tool = matching_tools[0]
145
- if tool.__name__ == tool_call.function.name:
146
147
  # default the arguments to an empty dictionary
148
+ if tool.__name__ == tool_call.toolCall.function.name:
147
149
  arguments = {}
148
150
  try:
149
- arguments = json.loads(tool_call.function.arguments)
151
+ arguments = json.loads(tool_call.toolCall.function.arguments)
150
152
  except:
151
153
  pass
152
- response = tool(**arguments)
153
- websocket.send(json.dumps(response))
154
+ try:
155
+ response = tool(**arguments)
156
+ except Exception as e:
157
+ error_message = 'Error occurred while running tool' +\
158
+ f'{tool.__name__}: {e}'
159
+ e = ToolCallError(error=error_message, reqId=req_id)
160
+ websocket.send(e.model_dump_json())
161
+ formatted_response = None
162
+ try:
163
+ formatted_response = ToolCallResponse(
164
+ reqId=tool_call.reqId,
165
+ response=response
166
+ )
167
+ except pydantic.ValidationError as e:
168
+ formatted_response = ToolCallResponse(
169
+ reqId=tool_call.reqId,
170
+ response=str(response)
171
+ )
172
+ websocket.send(
173
+ formatted_response.model_dump_json()
174
+ )
154
175
  except pydantic.ValidationError as e:
155
176
  message_json = json.loads(message)
156
177
  keys = list(message_json.keys())
@@ -161,6 +182,5 @@ class Laminar:
161
182
  result = EndpointRunResponse.model_validate(message_json)
162
183
  websocket.close()
163
184
  return result
164
- except Exception:
165
- websocket.close()
166
- raise SDKError('Error communicating to backend through websocket')
185
+ except Exception as e:
186
+ raise SDKError(f'Error communicating to backend through websocket {e}')
@@ -0,0 +1,139 @@
1
+ from typing import Callable, Optional
2
+ from websockets.sync.client import connect
3
+ import pydantic
4
+ import websockets
5
+ from lmnr.types import (
6
+ DeregisterDebuggerRequest, NodeInput, RegisterDebuggerRequest,
7
+ SDKError, ToolCallError, ToolCallRequest, ToolCallResponse
8
+ )
9
+ import uuid
10
+ import json
11
+ from threading import Thread
12
+
13
+ class RemoteDebugger:
14
+ def __init__(
15
+ self,
16
+ project_api_key: str,
17
+ tools: list[Callable[..., NodeInput]] = []
18
+ ):
19
+ self.project_api_key = project_api_key
20
+ self.url = 'wss://api.lmnr.ai/v2/endpoint/ws'
21
+ self.tools = tools
22
+ self.thread = Thread(target=self._run)
23
+ self.stop_flag = False
24
+ self.session = None
25
+
26
+ def start(self) -> Optional[str]:
27
+ self.stop_flag = False
28
+ self.session = self._generate_session_id()
29
+ self.thread.start()
30
+ return self.session
31
+
32
+ def stop(self):
33
+ self.stop_flag = True
34
+ self.thread.join()
35
+ self.session = None
36
+ # python allows running threads only once, so we need to create
37
+ # a new thread
38
+ # in case the user wants to start the debugger again
39
+ self.thread = Thread(target=self._run)
40
+
41
+ def get_session_id(self) -> str:
42
+ return self.session
43
+
44
+ def _run(self):
45
+ request = RegisterDebuggerRequest(debuggerSessionId=self.session)
46
+ with connect(
47
+ self.url,
48
+ additional_headers={
49
+ 'Authorization': f'Bearer {self.project_api_key}'
50
+ }
51
+ ) as websocket:
52
+ websocket.send(request.model_dump_json())
53
+ print(self._format_session_id_and_registerd_functions())
54
+ req_id = None
55
+
56
+ while not self.stop_flag:
57
+ try:
58
+ # blocks the thread until a message
59
+ # is received or a timeout (3 seconds) occurs
60
+ message = websocket.recv(3)
61
+ except TimeoutError:
62
+ continue
63
+ except websockets.exceptions.ConnectionClosedError:
64
+ print("Connection closed. Please restart the debugger.")
65
+ return
66
+ try:
67
+ tool_call = ToolCallRequest.model_validate_json(message)
68
+ req_id = tool_call.reqId
69
+ except:
70
+ raise SDKError(f'Invalid message received:\n{message}')
71
+ matching_tools = [
72
+ tool for tool in self.tools
73
+ if tool.__name__ == tool_call.toolCall.function.name
74
+ ]
75
+ if not matching_tools:
76
+ error_message = \
77
+ f'Tool {tool_call.toolCall.function.name} not found' +\
78
+ '. Registered tools: ' +\
79
+ {", ".join([tool.__name__ for tool in self.tools])}
80
+ e = ToolCallError(error=error_message, reqId=req_id)
81
+ websocket.send(e.model_dump_json())
82
+ continue
83
+ tool = matching_tools[0]
84
+ if tool.__name__ == tool_call.toolCall.function.name:
85
+ # default the arguments to an empty dictionary
86
+ arguments = {}
87
+ try:
88
+ arguments = json.loads(
89
+ tool_call.toolCall.function.arguments)
90
+ except:
91
+ pass
92
+ try:
93
+ response = tool(**arguments)
94
+ except Exception as e:
95
+ error_message = 'Error occurred while running tool' +\
96
+ f'{tool.__name__}: {e}'
97
+ e = ToolCallError(error=error_message, reqId=req_id)
98
+ websocket.send(e.model_dump_json())
99
+ formatted_response = None
100
+ try:
101
+ formatted_response = ToolCallResponse(
102
+ reqId=tool_call.reqId,
103
+ response=response
104
+ )
105
+ except pydantic.ValidationError as e:
106
+ formatted_response = ToolCallResponse(
107
+ reqId=tool_call.reqId,
108
+ response=str(response)
109
+ )
110
+ websocket.send(
111
+ formatted_response.model_dump_json()
112
+ )
113
+ websocket.send(
114
+ DeregisterDebuggerRequest(
115
+ debuggerSessionId=self.session,
116
+ deregister=True
117
+ ).model_dump_json()
118
+ )
119
+
120
+ def _generate_session_id(self) -> str:
121
+ return uuid.uuid4().urn[9:]
122
+
123
+ def _format_session_id_and_registerd_functions(self) -> str:
124
+ registered_functions = \
125
+ ',\n'.join(['- ' + tool.__name__ for tool in self.tools])
126
+ return \
127
+ f"""
128
+ ========================================
129
+ Debugger Session ID:
130
+ {self.session}
131
+ ========================================
132
+
133
+ Registered functions:
134
+ {registered_functions}
135
+
136
+ ========================================
137
+ """
138
+
139
+
@@ -1,13 +1,16 @@
1
-
2
1
  import requests
3
2
  import pydantic
3
+ import uuid
4
4
  from typing import Union, Optional
5
5
 
6
+
6
7
  class ChatMessage(pydantic.BaseModel):
7
8
  role: str
8
9
  content: str
9
10
 
10
- NodeInput = Union[str, list[ChatMessage]] # TypeAlias
11
+
12
+ NodeInput = Union[str, list[ChatMessage]] # TypeAlias
13
+
11
14
 
12
15
  class EndpointRunRequest(pydantic.BaseModel):
13
16
  inputs: dict[str, NodeInput]
@@ -15,10 +18,12 @@ class EndpointRunRequest(pydantic.BaseModel):
15
18
  env: dict[str, str] = pydantic.Field(default_factory=dict)
16
19
  metadata: dict[str, str] = pydantic.Field(default_factory=dict)
17
20
 
21
+
18
22
  class EndpointRunResponse(pydantic.BaseModel):
19
23
  outputs: dict[str, dict[str, NodeInput]]
20
24
  run_id: str
21
25
 
26
+
22
27
  class EndpointRunError(Exception):
23
28
  error_code: str
24
29
  error_message: str
@@ -26,35 +31,57 @@ class EndpointRunError(Exception):
26
31
  def __init__(self, response: requests.Response):
27
32
  try:
28
33
  resp_json = response.json()
29
- self.error_code = resp_json['error_code']
30
- self.error_message = resp_json['error_message']
34
+ self.error_code = resp_json["error_code"]
35
+ self.error_message = resp_json["error_message"]
31
36
  super().__init__(self.error_message)
32
- except:
37
+ except Exception:
33
38
  super().__init__(response.text)
34
-
39
+
35
40
  def __str__(self) -> str:
36
41
  try:
37
- return str({'error_code': self.error_code, 'error_message': self.error_message})
38
- except:
42
+ return str(
43
+ {"error_code": self.error_code, "error_message": self.error_message}
44
+ )
45
+ except Exception:
39
46
  return super().__str__()
40
-
47
+
48
+
41
49
  class SDKError(Exception):
42
- def __init__(self, error_mesasge: str):
43
- super().__init__(error_mesasge)
50
+ def __init__(self, error_message: str):
51
+ super().__init__(error_message)
52
+
44
53
 
45
54
  class ToolCallRequest(pydantic.BaseModel):
46
55
  name: str
47
56
  arguments: str
48
57
 
58
+
49
59
  class ToolCall(pydantic.BaseModel):
50
60
  id: Optional[str]
51
61
  type: Optional[str]
52
62
  function: ToolCallRequest
53
63
 
64
+
54
65
  # TODO: allow snake_case and manually convert to camelCase
66
+ class ToolCallRequest(pydantic.BaseModel):
67
+ reqId: uuid.UUID
68
+ toolCall: ToolCall
69
+
70
+
71
+ class ToolCallResponse(pydantic.BaseModel):
72
+ reqId: uuid.UUID
73
+ response: NodeInput
74
+
75
+
76
+ class ToolCallError(pydantic.BaseModel):
77
+ reqId: uuid.UUID
78
+ error: str
79
+
80
+
55
81
  class RegisterDebuggerRequest(pydantic.BaseModel):
56
82
  debuggerSessionId: str
57
83
 
84
+
58
85
  class DeregisterDebuggerRequest(pydantic.BaseModel):
59
86
  debuggerSessionId: str
60
87
  deregister: bool
@@ -1,157 +0,0 @@
1
- from dataclasses import dataclass
2
- from typing import Any, Optional, Union
3
- import uuid
4
- from lmnr.cli.parser.nodes import Handle, HandleType, NodeFunctions
5
- from lmnr.cli.parser.utils import map_handles
6
- from lmnr.types import NodeInput, ChatMessage
7
-
8
-
9
- def node_input_from_json(json_val: Any) -> NodeInput:
10
- if isinstance(json_val, str):
11
- return json_val
12
- elif isinstance(json_val, list):
13
- return [ChatMessage.model_validate(msg) for msg in json_val]
14
- else:
15
- raise ValueError(f"Invalid NodeInput value: {json_val}")
16
-
17
-
18
- # TODO: Convert to Pydantic
19
- @dataclass
20
- class InputNode(NodeFunctions):
21
- id: uuid.UUID
22
- name: str
23
- outputs: list[Handle]
24
- input: Optional[NodeInput]
25
- input_type: HandleType
26
-
27
- def handles_mapping(
28
- self, output_handle_id_to_node_name: dict[str, str]
29
- ) -> list[tuple[str, str]]:
30
- return []
31
-
32
- def node_type(self) -> str:
33
- return "Input"
34
-
35
- def config(self) -> dict:
36
- return {}
37
-
38
-
39
- # TODO: Convert to Pydantic
40
- @dataclass
41
- class LLMNode(NodeFunctions):
42
- id: uuid.UUID
43
- name: str
44
- inputs: list[Handle]
45
- dynamic_inputs: list[Handle]
46
- outputs: list[Handle]
47
- inputs_mappings: dict[uuid.UUID, uuid.UUID]
48
- prompt: str
49
- model: str
50
- model_params: Optional[str]
51
- stream: bool
52
- structured_output_enabled: bool
53
- structured_output_max_retries: int
54
- structured_output_schema: Optional[str]
55
- structured_output_schema_target: Optional[str]
56
-
57
- def handles_mapping(
58
- self, output_handle_id_to_node_name: dict[str, str]
59
- ) -> list[tuple[str, str]]:
60
- combined_inputs = self.inputs + self.dynamic_inputs
61
- return map_handles(
62
- combined_inputs, self.inputs_mappings, output_handle_id_to_node_name
63
- )
64
-
65
- def node_type(self) -> str:
66
- return "LLM"
67
-
68
- def config(self) -> dict:
69
- # For easier access in the template separate the provider and model here
70
- provider, model = self.model.split(":", maxsplit=1)
71
-
72
- return {
73
- "prompt": self.prompt,
74
- "provider": provider,
75
- "model": model,
76
- "model_params": self.model_params,
77
- "stream": self.stream,
78
- "structured_output_enabled": self.structured_output_enabled,
79
- "structured_output_max_retries": self.structured_output_max_retries,
80
- "structured_output_schema": self.structured_output_schema,
81
- "structured_output_schema_target": self.structured_output_schema_target,
82
- }
83
-
84
-
85
- # TODO: Convert to Pydantic
86
- @dataclass
87
- class OutputNode(NodeFunctions):
88
- id: uuid.UUID
89
- name: str
90
- inputs: list[Handle]
91
- outputs: list[Handle]
92
- inputs_mappings: dict[uuid.UUID, uuid.UUID]
93
-
94
- def handles_mapping(
95
- self, output_handle_id_to_node_name: dict[str, str]
96
- ) -> list[tuple[str, str]]:
97
- return map_handles(
98
- self.inputs, self.inputs_mappings, output_handle_id_to_node_name
99
- )
100
-
101
- def node_type(self) -> str:
102
- return "Output"
103
-
104
- def config(self) -> dict:
105
- return {}
106
-
107
-
108
- Node = Union[InputNode, OutputNode, LLMNode]
109
-
110
-
111
- def node_from_dict(node_dict: dict) -> Node:
112
- if node_dict["type"] == "Input":
113
- return InputNode(
114
- id=uuid.UUID(node_dict["id"]),
115
- name=node_dict["name"],
116
- outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
117
- input=node_input_from_json(node_dict["input"]),
118
- input_type=node_dict["inputType"],
119
- )
120
- elif node_dict["type"] == "Output":
121
- return OutputNode(
122
- id=uuid.UUID(node_dict["id"]),
123
- name=node_dict["name"],
124
- inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
125
- outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
126
- inputs_mappings={
127
- uuid.UUID(k): uuid.UUID(v)
128
- for k, v in node_dict["inputsMappings"].items()
129
- },
130
- )
131
- elif node_dict["type"] == "LLM":
132
- return LLMNode(
133
- id=uuid.UUID(node_dict["id"]),
134
- name=node_dict["name"],
135
- inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
136
- dynamic_inputs=[
137
- Handle.from_dict(handle) for handle in node_dict["dynamicInputs"]
138
- ],
139
- outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
140
- inputs_mappings={
141
- uuid.UUID(k): uuid.UUID(v)
142
- for k, v in node_dict["inputsMappings"].items()
143
- },
144
- prompt=node_dict["prompt"],
145
- model=node_dict["model"],
146
- model_params=(
147
- node_dict["modelParams"] if "modelParams" in node_dict else None
148
- ),
149
- stream=False,
150
- # TODO: Implement structured output
151
- structured_output_enabled=False,
152
- structured_output_max_retries=3,
153
- structured_output_schema=None,
154
- structured_output_schema_target=None,
155
- )
156
- else:
157
- raise ValueError(f"Node type {node_dict['type']} not supported")
@@ -1,87 +0,0 @@
1
- from typing import Callable, Optional
2
- from websockets.sync.client import connect
3
- from lmnr.types import DeregisterDebuggerRequest, NodeInput, RegisterDebuggerRequest, SDKError, ToolCall
4
- import uuid
5
- import json
6
- from threading import Thread
7
-
8
- class RemoteDebugger:
9
- def __init__(self, project_api_key: str, tools: list[Callable[..., NodeInput]] = []):
10
- self.project_api_key = project_api_key
11
- self.url = 'wss://api.lmnr.ai/v2/endpoint/ws'
12
- self.tools = tools
13
- self.thread = Thread(target=self._run)
14
- self.stop_flag = False
15
- self.session = None
16
-
17
- def start(self) -> Optional[str]:
18
- self.stop_flag = False
19
- self.session = self._generate_session_id()
20
- self.thread.start()
21
- return self.session
22
-
23
- def stop(self):
24
- self.stop_flag = True
25
- self.thread.join()
26
- self.session = None
27
- # python allows running threads only once, so we need to create a new thread
28
- # in case the user wants to start the debugger again
29
- self.thread = Thread(target=self._run)
30
-
31
- def get_session_id(self) -> str:
32
- return self.session
33
-
34
- def _run(self):
35
- request = RegisterDebuggerRequest(debuggerSessionId=self.session)
36
- with connect(
37
- self.url,
38
- additional_headers={
39
- 'Authorization': f'Bearer {self.project_api_key}'
40
- }
41
- ) as websocket:
42
- websocket.send(request.model_dump_json())
43
- print(self._format_session_id())
44
- while not self.stop_flag:
45
- try:
46
- # blocks the thread until a message is received or a timeout (3 seconds) occurs
47
- message = websocket.recv(3)
48
- except TimeoutError:
49
- continue
50
- try:
51
- tool_call = ToolCall.model_validate_json(message)
52
- except:
53
- raise SDKError(f'Invalid message received:\n{message}')
54
- matching_tools = [
55
- tool for tool in self.tools
56
- if tool.__name__ == tool_call.function.name
57
- ]
58
- if not matching_tools:
59
- raise SDKError(
60
- f'Tool {tool_call.function.name} not found.'
61
- ' Registered tools: '
62
- f'{", ".join([tool.__name__ for tool in self.tools])}'
63
- )
64
- tool = matching_tools[0]
65
- if tool.__name__ == tool_call.function.name:
66
- # default the arguments to an empty dictionary
67
- arguments = {}
68
- try:
69
- arguments = json.loads(tool_call.function.arguments)
70
- except:
71
- pass
72
- response = tool(**arguments)
73
- websocket.send(json.dumps(response))
74
- websocket.send(DeregisterDebuggerRequest(debuggerSessionId=self.session, deregister=True).model_dump_json())
75
-
76
- def _generate_session_id(self) -> str:
77
- return uuid.uuid4().urn[9:]
78
-
79
- def _format_session_id(self) -> str:
80
- return \
81
- f"""
82
- ========================================
83
- Debugger Session ID:
84
- {self.session}
85
- ========================================
86
- """
87
-
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes
File without changes