lmnr 0.2.5__tar.gz → 0.2.6__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {lmnr-0.2.5 → lmnr-0.2.6}/PKG-INFO +4 -11
- {lmnr-0.2.5 → lmnr-0.2.6}/README.md +3 -9
- {lmnr-0.2.5 → lmnr-0.2.6}/pyproject.toml +1 -2
- lmnr-0.2.6/src/lmnr/cli/parser/nodes/input.py +26 -0
- lmnr-0.2.6/src/lmnr/cli/parser/nodes/llm.py +51 -0
- lmnr-0.2.6/src/lmnr/cli/parser/nodes/output.py +27 -0
- lmnr-0.2.6/src/lmnr/cli/parser/nodes/semantic_search.py +81 -0
- lmnr-0.2.6/src/lmnr/cli/parser/nodes/types.py +91 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/engine.py +2 -1
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/nodes/functions.py +36 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/sdk/endpoint.py +31 -11
- lmnr-0.2.6/src/lmnr/sdk/remote_debugger.py +139 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/types.py +34 -11
- lmnr-0.2.5/src/lmnr/cli/parser/nodes/types.py +0 -157
- lmnr-0.2.5/src/lmnr/sdk/remote_debugger.py +0 -96
- {lmnr-0.2.5 → lmnr-0.2.6}/LICENSE +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/__main__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/cli.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/cookiecutter.json +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/parser/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/parser/nodes/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/parser/parser.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/parser/utils.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/action.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/state.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/task.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/__init__.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/{{cookiecutter.pipeline_dir_name}}.py +0 -0
- {lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/types.py +0 -0
{lmnr-0.2.5 → lmnr-0.2.6}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lmnr
-Version: 0.2.5
+Version: 0.2.6
 Summary: Python SDK for Laminar AI
 License: Apache-2.0
 Author: lmnr.ai
@@ -17,11 +17,12 @@ Requires-Dist: cookiecutter (>=2.6.0,<3.0.0)
 Requires-Dist: pydantic (>=2.7.4,<3.0.0)
 Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
-Requires-Dist: urllib3 (==1.26.6)
 Requires-Dist: websockets (>=12.0,<13.0)
 Description-Content-Type: text/markdown
 
-#
+# Laminar AI
+
+This reipo provides core for code generation, Laminar CLI, and Laminar SDK.
 
 ## Quickstart
 ```sh
@@ -139,14 +140,6 @@ Set up `DEBUGGER_SESSION_ID` environment variable in your pipeline.
 
 You can run as many sessions as you need, experimenting with your flows.
 
-#### 5. Stop the debugger
-
-In order to stop the session, do
-
-```python
-debugger.stop()
-```
-
 ## CLI for code generation
 
 ### Basic usage
{lmnr-0.2.5 → lmnr-0.2.6}/README.md
@@ -1,4 +1,6 @@
-#
+# Laminar AI
+
+This reipo provides core for code generation, Laminar CLI, and Laminar SDK.
 
 ## Quickstart
 ```sh
@@ -116,14 +118,6 @@ Set up `DEBUGGER_SESSION_ID` environment variable in your pipeline.
 
 You can run as many sessions as you need, experimenting with your flows.
 
-#### 5. Stop the debugger
-
-In order to stop the session, do
-
-```python
-debugger.stop()
-```
-
 ## CLI for code generation
 
 ### Basic usage
{lmnr-0.2.5 → lmnr-0.2.6}/pyproject.toml
@@ -11,7 +11,7 @@ license = "Apache-2.0"
 
 [tool.poetry]
 name = "lmnr"
-version = "0.2.5"
+version = "0.2.6"
 description = "Python SDK for Laminar AI"
 authors = ["lmnr.ai"]
 readme = "README.md"
@@ -23,7 +23,6 @@ black = "^24.4.2"
 pydantic = "^2.7.4"
 click = "^8.1.7"
 requests = "^2.32.3"
-urllib3 = "1.26.6"
 websockets = "^12.0"
 cookiecutter = "^2.6.0"
 python-dotenv = "^1.0.1"
lmnr-0.2.6/src/lmnr/cli/parser/nodes/input.py
@@ -0,0 +1,26 @@
+from dataclasses import dataclass
+from typing import Optional
+import uuid
+
+from lmnr.cli.parser.nodes import Handle, HandleType, NodeFunctions
+from lmnr.types import NodeInput
+
+
+@dataclass
+class InputNode(NodeFunctions):
+    id: uuid.UUID
+    name: str
+    outputs: list[Handle]
+    input: Optional[NodeInput]
+    input_type: HandleType
+
+    def handles_mapping(
+        self, output_handle_id_to_node_name: dict[str, str]
+    ) -> list[tuple[str, str]]:
+        return []
+
+    def node_type(self) -> str:
+        return "Input"
+
+    def config(self) -> dict:
+        return {}
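For orientation, a minimal sketch of instantiating this new dataclass directly; the values are hypothetical (in the package, `node_from_dict` in `types.py` builds these from JSON), and `"String"` is an assumed `HandleType` value:

```python
import uuid
from lmnr.cli.parser.nodes.input import InputNode

# Hypothetical values; in practice node_from_dict() builds this from JSON.
node = InputNode(
    id=uuid.uuid4(),
    name="query",
    outputs=[],            # Handle objects in a real graph
    input="What is Laminar?",
    input_type="String",   # assumed HandleType value
)
assert node.node_type() == "Input"
assert node.handles_mapping({}) == []  # input nodes consume nothing
```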
lmnr-0.2.6/src/lmnr/cli/parser/nodes/llm.py
@@ -0,0 +1,51 @@
+from dataclasses import dataclass
+from typing import Optional
+import uuid
+
+from lmnr.cli.parser.nodes import Handle, NodeFunctions
+from lmnr.cli.parser.utils import map_handles
+
+
+@dataclass
+class LLMNode(NodeFunctions):
+    id: uuid.UUID
+    name: str
+    inputs: list[Handle]
+    dynamic_inputs: list[Handle]
+    outputs: list[Handle]
+    inputs_mappings: dict[uuid.UUID, uuid.UUID]
+    prompt: str
+    model: str
+    model_params: Optional[str]
+    stream: bool
+    structured_output_enabled: bool
+    structured_output_max_retries: int
+    structured_output_schema: Optional[str]
+    structured_output_schema_target: Optional[str]
+
+    def handles_mapping(
+        self, output_handle_id_to_node_name: dict[str, str]
+    ) -> list[tuple[str, str]]:
+        combined_inputs = self.inputs + self.dynamic_inputs
+        return map_handles(
+            combined_inputs, self.inputs_mappings, output_handle_id_to_node_name
+        )
+
+    def node_type(self) -> str:
+        return "LLM"
+
+    def config(self) -> dict:
+        # For easier access in the template separate the provider and model here
+        provider, model = self.model.split(":", maxsplit=1)
+
+        return {
+            "prompt": self.prompt,
+            "provider": provider,
+            "model": model,
+            "model_params": self.model_params,
+            "stream": self.stream,
+            "structured_output_enabled": self.structured_output_enabled,
+            "structured_output_max_retries": self.structured_output_max_retries,
+            "structured_output_schema": self.structured_output_schema,
+            "structured_output_schema_target": self.structured_output_schema_target,
+        }
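`config()` above assumes `model` is stored as a single `provider:model` string; a quick, runnable illustration of the split (the model names are made up):

```python
# "provider:model" strings as LLMNode.config() expects them; names are illustrative.
provider, model = "openai:gpt-4o".split(":", maxsplit=1)
print(provider, model)  # openai gpt-4o

# maxsplit=1 keeps any further colons inside the model name intact.
provider, model = "custom:org:model-v1".split(":", maxsplit=1)
print(provider, model)  # custom org:model-v1
```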
lmnr-0.2.6/src/lmnr/cli/parser/nodes/output.py
@@ -0,0 +1,27 @@
+from dataclasses import dataclass
+import uuid
+
+from lmnr.cli.parser.nodes import Handle, NodeFunctions
+from lmnr.cli.parser.utils import map_handles
+
+
+@dataclass
+class OutputNode(NodeFunctions):
+    id: uuid.UUID
+    name: str
+    inputs: list[Handle]
+    outputs: list[Handle]
+    inputs_mappings: dict[uuid.UUID, uuid.UUID]
+
+    def handles_mapping(
+        self, output_handle_id_to_node_name: dict[str, str]
+    ) -> list[tuple[str, str]]:
+        return map_handles(
+            self.inputs, self.inputs_mappings, output_handle_id_to_node_name
+        )
+
+    def node_type(self) -> str:
+        return "Output"
+
+    def config(self) -> dict:
+        return {}
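`map_handles` comes from `lmnr.cli.parser.utils`, which is unchanged in this release and therefore not shown in the diff. From its call sites in the node classes, a plausible shape is the sketch below; this is an inference, not the packaged implementation:

```python
import uuid

# Hypothetical reimplementation inferred from the call sites above.
def map_handles(
    inputs: list,  # list[Handle], each assumed to carry a uuid .id and a .name
    inputs_mappings: dict[uuid.UUID, uuid.UUID],
    output_handle_id_to_node_name: dict[str, str],
) -> list[tuple[str, str]]:
    # Pair each input handle's name with the name of the node whose
    # output handle feeds it, following the input -> output handle mapping.
    return [
        (handle.name, output_handle_id_to_node_name[str(inputs_mappings[handle.id])])
        for handle in inputs
    ]
```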
lmnr-0.2.6/src/lmnr/cli/parser/nodes/semantic_search.py
@@ -0,0 +1,81 @@
+from dataclasses import dataclass
+from datetime import datetime
+
+import uuid
+
+from lmnr.cli.parser.nodes import Handle, NodeFunctions
+from lmnr.cli.parser.utils import map_handles
+
+
+@dataclass
+class FileMetadata:
+    id: uuid.UUID
+    created_at: datetime
+    project_id: uuid.UUID
+    filename: str
+
+
+@dataclass
+class Dataset:
+    id: uuid.UUID
+    created_at: datetime
+    project_id: uuid.UUID
+    name: str
+
+
+@dataclass
+class SemanticSearchDatasource:
+    type: str
+    id: uuid.UUID
+    # TODO: Paste other fields here, use Union[FileMetadata, Dataset]
+
+    @classmethod
+    def from_dict(cls, datasource_dict: dict) -> "SemanticSearchDatasource":
+        if datasource_dict["type"] == "File":
+            return cls(
+                type="File",
+                id=uuid.UUID(datasource_dict["id"]),
+            )
+        elif datasource_dict["type"] == "Dataset":
+            return cls(
+                type="Dataset",
+                id=uuid.UUID(datasource_dict["id"]),
+            )
+        else:
+            raise ValueError(
+                f"Invalid SemanticSearchDatasource type: {datasource_dict['type']}"
+            )
+
+
+@dataclass
+class SemanticSearchNode(NodeFunctions):
+    id: uuid.UUID
+    name: str
+    inputs: list[Handle]
+    outputs: list[Handle]
+    inputs_mappings: dict[uuid.UUID, uuid.UUID]
+    limit: int
+    threshold: float
+    template: str
+    datasources: list[SemanticSearchDatasource]
+
+    def handles_mapping(
+        self, output_handle_id_to_node_name: dict[str, str]
+    ) -> list[tuple[str, str]]:
+        return map_handles(
+            self.inputs, self.inputs_mappings, output_handle_id_to_node_name
+        )
+
+    def node_type(self) -> str:
+        return "SemanticSearch"
+
+    def config(self) -> dict:
+        return {
+            "limit": self.limit,
+            "threshold": self.threshold,
+            "template": self.template,
+            "datasource_ids": [str(datasource.id) for datasource in self.datasources],
+            "datasource_ids_list": str(
+                [str(datasource.id) for datasource in self.datasources]
+            ),
+        }
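Note the two datasource fields `config()` emits: `datasource_ids` is a Python list, while `datasource_ids_list` is its `str()` form, so it can be pasted verbatim into the generated code template (see the `functions.py` diff further down). A small illustration, with a made-up ID:

```python
# str() of a list of id strings is itself a valid Python list literal,
# which is what the code-generation template needs to splice in as text.
ids = ["6ba7b810-9dad-11d1-80b4-00c04fd430c8"]  # made-up datasource id
print(str(ids))  # ['6ba7b810-9dad-11d1-80b4-00c04fd430c8']
```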
lmnr-0.2.6/src/lmnr/cli/parser/nodes/types.py
@@ -0,0 +1,91 @@
+from typing import Any, Union
+import uuid
+
+from lmnr.cli.parser.nodes import Handle
+from lmnr.cli.parser.nodes.input import InputNode
+from lmnr.cli.parser.nodes.llm import LLMNode
+from lmnr.cli.parser.nodes.output import OutputNode
+from lmnr.cli.parser.nodes.semantic_search import (
+    SemanticSearchDatasource,
+    SemanticSearchNode,
+)
+from lmnr.types import NodeInput, ChatMessage
+
+
+def node_input_from_json(json_val: Any) -> NodeInput:
+    if isinstance(json_val, str):
+        return json_val
+    elif isinstance(json_val, list):
+        return [ChatMessage.model_validate(msg) for msg in json_val]
+    else:
+        raise ValueError(f"Invalid NodeInput value: {json_val}")
+
+
+Node = Union[InputNode, OutputNode, LLMNode, SemanticSearchNode]
+
+
+def node_from_dict(node_dict: dict) -> Node:
+    if node_dict["type"] == "Input":
+        return InputNode(
+            id=uuid.UUID(node_dict["id"]),
+            name=node_dict["name"],
+            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
+            input=node_input_from_json(node_dict["input"]),
+            input_type=node_dict["inputType"],
+        )
+    elif node_dict["type"] == "Output":
+        return OutputNode(
+            id=uuid.UUID(node_dict["id"]),
+            name=node_dict["name"],
+            inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
+            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
+            inputs_mappings={
+                uuid.UUID(k): uuid.UUID(v)
+                for k, v in node_dict["inputsMappings"].items()
+            },
+        )
+    elif node_dict["type"] == "LLM":
+        return LLMNode(
+            id=uuid.UUID(node_dict["id"]),
+            name=node_dict["name"],
+            inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
+            dynamic_inputs=[
+                Handle.from_dict(handle) for handle in node_dict["dynamicInputs"]
+            ],
+            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
+            inputs_mappings={
+                uuid.UUID(k): uuid.UUID(v)
+                for k, v in node_dict["inputsMappings"].items()
+            },
+            prompt=node_dict["prompt"],
+            model=node_dict["model"],
+            model_params=(
+                node_dict["modelParams"] if "modelParams" in node_dict else None
+            ),
+            stream=False,
+            # TODO: Implement structured output
+            structured_output_enabled=False,
+            structured_output_max_retries=3,
+            structured_output_schema=None,
+            structured_output_schema_target=None,
+        )
+    elif node_dict["type"] == "SemanticSearch":
+        return SemanticSearchNode(
+            id=uuid.UUID(node_dict["id"]),
+            name=node_dict["name"],
+            inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
+            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
+            inputs_mappings={
+                uuid.UUID(k): uuid.UUID(v)
+                for k, v in node_dict["inputsMappings"].items()
+            },
+            limit=node_dict["limit"],
+            threshold=node_dict["threshold"],
+            template=node_dict["template"],
+            datasources=[
+                SemanticSearchDatasource.from_dict(ds)
+                for ds in node_dict["datasources"]
+            ],
+        )
+    else:
+        raise ValueError(f"Node type {node_dict['type']} not supported")
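A sketch of the JSON shape `node_from_dict` expects for the simplest case; the UUID is arbitrary and `"String"` is an assumed `inputType` value:

```python
from lmnr.cli.parser.nodes.types import node_from_dict

node = node_from_dict({
    "type": "Input",
    "id": "6ba7b810-9dad-11d1-80b4-00c04fd430c8",  # any UUID string
    "name": "query",
    "outputs": [],                # entries would go through Handle.from_dict
    "input": "What is Laminar?",  # str, or a list of chat-message dicts
    "inputType": "String",        # assumed HandleType value
})
print(node.node_type())  # Input
```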
{lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/engine.py RENAMED
@@ -9,7 +9,8 @@ import queue
 from .task import Task
 from .action import NodeRunError, RunOutput
 from .state import State
-from
+from lmnr.types import NodeInput
+from lmnr_engine.types import Message
 
 
 logger = logging.getLogger(__name__)
{lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/nodes/functions.py
@@ -132,6 +132,42 @@ def {{task.function_name}}({{ task.handle_args }}, _env: dict[str, str]) -> RunOutput:
     return RunOutput(status="Success", output=completion_message)
 
 
+{% elif task.node_type == "SemanticSearch" %}
+def {{task.function_name}}(query: NodeInput, _env: dict[str, str]) -> RunOutput:
+    {% set datasources_length=task.config.datasource_ids|length %}
+    {% if datasources_length == 0 %}
+    raise NodeRunError("No datasources provided")
+    {% endif %}
+
+    headers = {
+        "Authorization": f"Bearer {_env['LMNR_PROJECT_API_KEY']}",
+    }
+    data = {
+        "query": query,
+        "limit": {{ task.config.limit }},
+        "threshold": {{ task.config.threshold }},
+        "datasourceIds": {{ task.config.datasource_ids_list }},
+    }
+    query_res = requests.post("https://api.lmnr.ai/v2/semantic-search", headers=headers, json=data)
+    if query_res.status_code != 200:
+        raise NodeRunError(f"Vector search request failed: {query_res.text}")
+
+    results = query_res.json()
+
+    def render_query_res_point(template: str, point: dict, relevance_index: int) -> str:
+        data = point["data"]
+        data["relevance_index"] = relevance_index
+        res = template
+        for key, value in data.items():
+            res = res.replace("{{'{{'}}" + key + "{{'}}'}}", str(value))
+        return res
+
+    rendered_res_points = [render_query_res_point("""{{task.config.template}}""", res_point, index + 1) for (index, res_point) in enumerate(results)]
+    output = "\n".join(rendered_res_points)
+
+    return RunOutput(status="Success", output=output)
+
+
 {% elif task.node_type == "Output" %}
 def {{task.function_name}}(output: NodeInput, _env: dict[str, str]) -> RunOutput:
     return RunOutput(status="Success", output=output)
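Since `functions.py` is itself a Jinja/cookiecutter template, the `{{'{{'}}` and `{{'}}'}}` escapes exist only to emit literal `{{` and `}}` into the generated file. A runnable approximation of what `render_query_res_point` looks like after rendering, with a hypothetical search hit and template:

```python
# Approximation of the rendered helper: the brace escapes become literal braces.
def render_query_res_point(template: str, point: dict, relevance_index: int) -> str:
    data = dict(point["data"])
    data["relevance_index"] = relevance_index
    res = template
    for key, value in data.items():
        res = res.replace("{{" + key + "}}", str(value))
    return res

# Hypothetical search hit and template.
hit = {"data": {"content": "Laminar is an LLM pipeline tool."}}
print(render_query_res_point("[{{relevance_index}}] {{content}}", hit, 1))
# -> [1] Laminar is an LLM pipeline tool.
```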
{lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/sdk/endpoint.py
@@ -4,7 +4,7 @@ import pydantic
 import requests
 from lmnr.types import (
     EndpointRunError, EndpointRunResponse, NodeInput, EndpointRunRequest,
-
+    ToolCallError, ToolCallRequest, ToolCallResponse, SDKError
 )
 from typing import Callable, Optional
 from websockets.sync.client import connect
@@ -126,31 +126,52 @@ class Laminar:
             }
         ) as websocket:
             websocket.send(request.model_dump_json())
+            req_id = None
 
             while True:
                 message = websocket.recv()
                 try:
-                    tool_call =
+                    tool_call = ToolCallRequest.model_validate_json(message)
+                    req_id = tool_call.req_id
                     matching_tools = [
                         tool for tool in tools
-                        if tool.__name__ == tool_call.function.name
+                        if tool.__name__ == tool_call.toolCall.function.name
                     ]
                     if not matching_tools:
                         raise SDKError(
-                            f'Tool {tool_call.function.name} not found.'
+                            f'Tool {tool_call.toolCall.function.name} not found.'
                             ' Registered tools: '
                             f'{", ".join([tool.__name__ for tool in tools])}'
                         )
                     tool = matching_tools[0]
-                    if tool.__name__ == tool_call.function.name:
                     # default the arguments to an empty dictionary
+                    if tool.__name__ == tool_call.toolCall.function.name:
                         arguments = {}
                         try:
-                            arguments = json.loads(tool_call.function.arguments)
+                            arguments = json.loads(tool_call.toolCall.function.arguments)
                        except:
                             pass
-
-
+                        try:
+                            response = tool(**arguments)
+                        except Exception as e:
+                            error_message = 'Error occurred while running tool' +\
+                                f'{tool.__name__}: {e}'
+                            e = ToolCallError(error=error_message, reqId=req_id)
+                            websocket.send(e.model_dump_json())
+                        formatted_response = None
+                        try:
+                            formatted_response = ToolCallResponse(
+                                reqId=tool_call.reqId,
+                                response=response
+                            )
+                        except pydantic.ValidationError as e:
+                            formatted_response = ToolCallResponse(
+                                reqId=tool_call.reqId,
+                                response=str(response)
+                            )
+                        websocket.send(
+                            formatted_response.model_dump_json()
+                        )
                 except pydantic.ValidationError as e:
                     message_json = json.loads(message)
                     keys = list(message_json.keys())
@@ -161,6 +182,5 @@ class Laminar:
             result = EndpointRunResponse.model_validate(message_json)
             websocket.close()
             return result
-        except Exception:
-            websocket
-            raise SDKError('Error communicating to backend through websocket')
+        except Exception as e:
+            raise SDKError(f'Error communicating to backend through websocket {e}')
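The wire format implied by these changes: the backend now wraps each tool call in an envelope carrying `reqId`, and responses echo it back. A sketch of validating such a message with the models from `lmnr.types` as defined in this release; the payload values are invented:

```python
import json
import uuid
from lmnr.types import ToolCallRequest, ToolCallResponse

# Invented payload matching the new reqId/toolCall envelope shape.
message = json.dumps({
    "reqId": str(uuid.uuid4()),
    "toolCall": {
        "id": None,
        "type": "function",
        "function": {"name": "get_weather", "arguments": '{"city": "London"}'},
    },
})
tool_call = ToolCallRequest.model_validate_json(message)
reply = ToolCallResponse(reqId=tool_call.reqId, response="sunny")
print(tool_call.toolCall.function.name, "->", reply.response)
```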
lmnr-0.2.6/src/lmnr/sdk/remote_debugger.py
@@ -0,0 +1,139 @@
+from typing import Callable, Optional
+from websockets.sync.client import connect
+import pydantic
+import websockets
+from lmnr.types import (
+    DeregisterDebuggerRequest, NodeInput, RegisterDebuggerRequest,
+    SDKError, ToolCallError, ToolCallRequest, ToolCallResponse
+)
+import uuid
+import json
+from threading import Thread
+
+class RemoteDebugger:
+    def __init__(
+        self,
+        project_api_key: str,
+        tools: list[Callable[..., NodeInput]] = []
+    ):
+        self.project_api_key = project_api_key
+        self.url = 'wss://api.lmnr.ai/v2/endpoint/ws'
+        self.tools = tools
+        self.thread = Thread(target=self._run)
+        self.stop_flag = False
+        self.session = None
+
+    def start(self) -> Optional[str]:
+        self.stop_flag = False
+        self.session = self._generate_session_id()
+        self.thread.start()
+        return self.session
+
+    def stop(self):
+        self.stop_flag = True
+        self.thread.join()
+        self.session = None
+        # python allows running threads only once, so we need to create
+        # a new thread
+        # in case the user wants to start the debugger again
+        self.thread = Thread(target=self._run)
+
+    def get_session_id(self) -> str:
+        return self.session
+
+    def _run(self):
+        request = RegisterDebuggerRequest(debuggerSessionId=self.session)
+        with connect(
+            self.url,
+            additional_headers={
+                'Authorization': f'Bearer {self.project_api_key}'
+            }
+        ) as websocket:
+            websocket.send(request.model_dump_json())
+            print(self._format_session_id_and_registerd_functions())
+            req_id = None
+
+            while not self.stop_flag:
+                try:
+                    # blocks the thread until a message
+                    # is received or a timeout (3 seconds) occurs
+                    message = websocket.recv(3)
+                except TimeoutError:
+                    continue
+                except websockets.exceptions.ConnectionClosedError:
+                    print("Connection closed. Please restart the debugger.")
+                    return
+                try:
+                    tool_call = ToolCallRequest.model_validate_json(message)
+                    req_id = tool_call.reqId
+                except:
+                    raise SDKError(f'Invalid message received:\n{message}')
+                matching_tools = [
+                    tool for tool in self.tools
+                    if tool.__name__ == tool_call.toolCall.function.name
+                ]
+                if not matching_tools:
+                    error_message = \
+                        f'Tool {tool_call.toolCall.function.name} not found' +\
+                        '. Registered tools: ' +\
+                        {", ".join([tool.__name__ for tool in self.tools])}
+                    e = ToolCallError(error=error_message, reqId=req_id)
+                    websocket.send(e.model_dump_json())
+                    continue
+                tool = matching_tools[0]
+                if tool.__name__ == tool_call.toolCall.function.name:
+                    # default the arguments to an empty dictionary
+                    arguments = {}
+                    try:
+                        arguments = json.loads(
+                            tool_call.toolCall.function.arguments)
+                    except:
+                        pass
+                    try:
+                        response = tool(**arguments)
+                    except Exception as e:
+                        error_message = 'Error occurred while running tool' +\
+                            f'{tool.__name__}: {e}'
+                        e = ToolCallError(error=error_message, reqId=req_id)
+                        websocket.send(e.model_dump_json())
+                    formatted_response = None
+                    try:
+                        formatted_response = ToolCallResponse(
+                            reqId=tool_call.reqId,
+                            response=response
+                        )
+                    except pydantic.ValidationError as e:
+                        formatted_response = ToolCallResponse(
+                            reqId=tool_call.reqId,
+                            response=str(response)
+                        )
+                    websocket.send(
+                        formatted_response.model_dump_json()
+                    )
+            websocket.send(
+                DeregisterDebuggerRequest(
+                    debuggerSessionId=self.session,
+                    deregister=True
+                ).model_dump_json()
+            )
+
+    def _generate_session_id(self) -> str:
+        return uuid.uuid4().urn[9:]
+
+    def _format_session_id_and_registerd_functions(self) -> str:
+        registered_functions = \
+            ',\n'.join(['- ' + tool.__name__ for tool in self.tools])
+        return \
+            f"""
+========================================
+Debugger Session ID:
+{self.session}
+========================================
+
+Registered functions:
+{registered_functions}
+
+========================================
+"""
+
+
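A minimal usage sketch of the reworked debugger, assuming a valid project API key; note that the README section on stopping the debugger was removed above, but `stop()` remains part of the class:

```python
from lmnr.sdk.remote_debugger import RemoteDebugger

def my_tool(city: str) -> str:  # example tool; must return a NodeInput
    return f"Weather in {city}: sunny"

debugger = RemoteDebugger("<LMNR_PROJECT_API_KEY>", tools=[my_tool])
session_id = debugger.start()  # also printed along with registered functions
# ... set DEBUGGER_SESSION_ID to session_id in your pipeline and run it ...
debugger.stop()
```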
{lmnr-0.2.5 → lmnr-0.2.6}/src/lmnr/types.py
@@ -1,13 +1,16 @@
-
 import requests
 import pydantic
+import uuid
 from typing import Union, Optional
 
+
 class ChatMessage(pydantic.BaseModel):
     role: str
     content: str
 
-
+
+NodeInput = Union[str, list[ChatMessage]] # TypeAlias
+
 
 class EndpointRunRequest(pydantic.BaseModel):
     inputs: dict[str, NodeInput]
@@ -15,10 +18,12 @@ class EndpointRunRequest(pydantic.BaseModel):
     env: dict[str, str] = pydantic.Field(default_factory=dict)
     metadata: dict[str, str] = pydantic.Field(default_factory=dict)
 
+
 class EndpointRunResponse(pydantic.BaseModel):
     outputs: dict[str, dict[str, NodeInput]]
     run_id: str
 
+
 class EndpointRunError(Exception):
     error_code: str
     error_message: str
@@ -26,39 +31,57 @@ class EndpointRunError(Exception):
     def __init__(self, response: requests.Response):
         try:
             resp_json = response.json()
-            self.error_code = resp_json[
-            self.error_message = resp_json[
+            self.error_code = resp_json["error_code"]
+            self.error_message = resp_json["error_message"]
             super().__init__(self.error_message)
-        except:
+        except Exception:
             super().__init__(response.text)
-
+
     def __str__(self) -> str:
         try:
-            return str(
-
+            return str(
+                {"error_code": self.error_code, "error_message": self.error_message}
+            )
+        except Exception:
             return super().__str__()
-
+
+
 class SDKError(Exception):
     def __init__(self, error_message: str):
         super().__init__(error_message)
+
 class ToolCallRequest(pydantic.BaseModel):
     name: str
     arguments: str
+
 class ToolCall(pydantic.BaseModel):
     id: Optional[str]
     type: Optional[str]
     function: ToolCallRequest
 
-
+
+# TODO: allow snake_case and manually convert to camelCase
+class ToolCallRequest(pydantic.BaseModel):
+    reqId: uuid.UUID
+    toolCall: ToolCall
+
+
+class ToolCallResponse(pydantic.BaseModel):
+    reqId: uuid.UUID
+    response: NodeInput
+
+
 class ToolCallError(pydantic.BaseModel):
+    reqId: uuid.UUID
     error: str
 
-
+
 class RegisterDebuggerRequest(pydantic.BaseModel):
     debuggerSessionId: str
 
+
 class DeregisterDebuggerRequest(pydantic.BaseModel):
     debuggerSessionId: str
     deregister: bool
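The newly exposed `NodeInput` alias is the union used throughout the SDK for node payloads; both forms below are valid (the content is arbitrary):

```python
from lmnr.types import ChatMessage, NodeInput

# NodeInput is either a plain string or a list of chat messages.
text_input: NodeInput = "plain string input"
chat_input: NodeInput = [ChatMessage(role="user", content="hello")]
```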
lmnr-0.2.5/src/lmnr/cli/parser/nodes/types.py
@@ -1,157 +0,0 @@
-from dataclasses import dataclass
-from typing import Any, Optional, Union
-import uuid
-from lmnr.cli.parser.nodes import Handle, HandleType, NodeFunctions
-from lmnr.cli.parser.utils import map_handles
-from lmnr.types import NodeInput, ChatMessage
-
-
-def node_input_from_json(json_val: Any) -> NodeInput:
-    if isinstance(json_val, str):
-        return json_val
-    elif isinstance(json_val, list):
-        return [ChatMessage.model_validate(msg) for msg in json_val]
-    else:
-        raise ValueError(f"Invalid NodeInput value: {json_val}")
-
-
-# TODO: Convert to Pydantic
-@dataclass
-class InputNode(NodeFunctions):
-    id: uuid.UUID
-    name: str
-    outputs: list[Handle]
-    input: Optional[NodeInput]
-    input_type: HandleType
-
-    def handles_mapping(
-        self, output_handle_id_to_node_name: dict[str, str]
-    ) -> list[tuple[str, str]]:
-        return []
-
-    def node_type(self) -> str:
-        return "Input"
-
-    def config(self) -> dict:
-        return {}
-
-
-# TODO: Convert to Pydantic
-@dataclass
-class LLMNode(NodeFunctions):
-    id: uuid.UUID
-    name: str
-    inputs: list[Handle]
-    dynamic_inputs: list[Handle]
-    outputs: list[Handle]
-    inputs_mappings: dict[uuid.UUID, uuid.UUID]
-    prompt: str
-    model: str
-    model_params: Optional[str]
-    stream: bool
-    structured_output_enabled: bool
-    structured_output_max_retries: int
-    structured_output_schema: Optional[str]
-    structured_output_schema_target: Optional[str]
-
-    def handles_mapping(
-        self, output_handle_id_to_node_name: dict[str, str]
-    ) -> list[tuple[str, str]]:
-        combined_inputs = self.inputs + self.dynamic_inputs
-        return map_handles(
-            combined_inputs, self.inputs_mappings, output_handle_id_to_node_name
-        )
-
-    def node_type(self) -> str:
-        return "LLM"
-
-    def config(self) -> dict:
-        # For easier access in the template separate the provider and model here
-        provider, model = self.model.split(":", maxsplit=1)
-
-        return {
-            "prompt": self.prompt,
-            "provider": provider,
-            "model": model,
-            "model_params": self.model_params,
-            "stream": self.stream,
-            "structured_output_enabled": self.structured_output_enabled,
-            "structured_output_max_retries": self.structured_output_max_retries,
-            "structured_output_schema": self.structured_output_schema,
-            "structured_output_schema_target": self.structured_output_schema_target,
-        }
-
-
-# TODO: Convert to Pydantic
-@dataclass
-class OutputNode(NodeFunctions):
-    id: uuid.UUID
-    name: str
-    inputs: list[Handle]
-    outputs: list[Handle]
-    inputs_mappings: dict[uuid.UUID, uuid.UUID]
-
-    def handles_mapping(
-        self, output_handle_id_to_node_name: dict[str, str]
-    ) -> list[tuple[str, str]]:
-        return map_handles(
-            self.inputs, self.inputs_mappings, output_handle_id_to_node_name
-        )
-
-    def node_type(self) -> str:
-        return "Output"
-
-    def config(self) -> dict:
-        return {}
-
-
-Node = Union[InputNode, OutputNode, LLMNode]
-
-
-def node_from_dict(node_dict: dict) -> Node:
-    if node_dict["type"] == "Input":
-        return InputNode(
-            id=uuid.UUID(node_dict["id"]),
-            name=node_dict["name"],
-            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
-            input=node_input_from_json(node_dict["input"]),
-            input_type=node_dict["inputType"],
-        )
-    elif node_dict["type"] == "Output":
-        return OutputNode(
-            id=uuid.UUID(node_dict["id"]),
-            name=node_dict["name"],
-            inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
-            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
-            inputs_mappings={
-                uuid.UUID(k): uuid.UUID(v)
-                for k, v in node_dict["inputsMappings"].items()
-            },
-        )
-    elif node_dict["type"] == "LLM":
-        return LLMNode(
-            id=uuid.UUID(node_dict["id"]),
-            name=node_dict["name"],
-            inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
-            dynamic_inputs=[
-                Handle.from_dict(handle) for handle in node_dict["dynamicInputs"]
-            ],
-            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
-            inputs_mappings={
-                uuid.UUID(k): uuid.UUID(v)
-                for k, v in node_dict["inputsMappings"].items()
-            },
-            prompt=node_dict["prompt"],
-            model=node_dict["model"],
-            model_params=(
-                node_dict["modelParams"] if "modelParams" in node_dict else None
-            ),
-            stream=False,
-            # TODO: Implement structured output
-            structured_output_enabled=False,
-            structured_output_max_retries=3,
-            structured_output_schema=None,
-            structured_output_schema_target=None,
-        )
-    else:
-        raise ValueError(f"Node type {node_dict['type']} not supported")
lmnr-0.2.5/src/lmnr/sdk/remote_debugger.py
@@ -1,96 +0,0 @@
-from typing import Callable, Optional
-from websockets.sync.client import connect
-import websockets
-from lmnr.types import DeregisterDebuggerRequest, NodeInput, RegisterDebuggerRequest, SDKError, ToolCall, ToolCallError
-import uuid
-import json
-from threading import Thread
-
-class RemoteDebugger:
-    def __init__(self, project_api_key: str, tools: list[Callable[..., NodeInput]] = []):
-        self.project_api_key = project_api_key
-        self.url = 'wss://api.lmnr.ai/v2/endpoint/ws'
-        self.tools = tools
-        self.thread = Thread(target=self._run)
-        self.stop_flag = False
-        self.session = None
-
-    def start(self) -> Optional[str]:
-        self.stop_flag = False
-        self.session = self._generate_session_id()
-        self.thread.start()
-        return self.session
-
-    def stop(self):
-        self.stop_flag = True
-        self.thread.join()
-        self.session = None
-        # python allows running threads only once, so we need to create a new thread
-        # in case the user wants to start the debugger again
-        self.thread = Thread(target=self._run)
-
-    def get_session_id(self) -> str:
-        return self.session
-
-    def _run(self):
-        request = RegisterDebuggerRequest(debuggerSessionId=self.session)
-        with connect(
-            self.url,
-            additional_headers={
-                'Authorization': f'Bearer {self.project_api_key}'
-            }
-        ) as websocket:
-            websocket.send(request.model_dump_json())
-            print(self._format_session_id())
-            while not self.stop_flag:
-                try:
-                    # blocks the thread until a message is received or a timeout (3 seconds) occurs
-                    message = websocket.recv(3)
-                except TimeoutError:
-                    continue
-                except websockets.exceptions.ConnectionClosedError:
-                    print("Connection closed. Please restart the debugger.")
-                    return
-                try:
-                    tool_call = ToolCall.model_validate_json(message)
-                except:
-                    raise SDKError(f'Invalid message received:\n{message}')
-                matching_tools = [
-                    tool for tool in self.tools
-                    if tool.__name__ == tool_call.function.name
-                ]
-                if not matching_tools:
-                    error_message = f'Tool {tool_call.function.name} not found.' +\
-                        f' Registered tools: {", ".join([tool.__name__ for tool in self.tools])}'
-                    e = ToolCallError(error=error_message)
-                    websocket.send(e.model_dump_json())
-                    continue
-                tool = matching_tools[0]
-                if tool.__name__ == tool_call.function.name:
-                    # default the arguments to an empty dictionary
-                    arguments = {}
-                    try:
-                        arguments = json.loads(tool_call.function.arguments)
-                    except:
-                        pass
-                    try:
-                        response = tool(**arguments) # of type NodeInput
-                        websocket.send(json.dumps(response))
-                    except Exception as e:
-                        error_message = f'Error occurred while running tool {tool.__name__}: {e}'
-                        e = ToolCallError(error=error_message)
-                        websocket.send(e.model_dump_json())
-            websocket.send(DeregisterDebuggerRequest(debuggerSessionId=self.session, deregister=True).model_dump_json())
-
-    def _generate_session_id(self) -> str:
-        return uuid.uuid4().urn[9:]
-
-    def _format_session_id(self) -> str:
-        return \
-            f"""
-========================================
-Debugger Session ID:
-{self.session}
-========================================
-"""
-
All remaining files, the ones marked +0 -0 in the list above (LICENSE, the various __init__.py files, cli.py, cookiecutter.json, parser.py, utils.py, the engine modules, and the pipeline templates), were renamed from the lmnr-0.2.5 prefix to lmnr-0.2.6 without content changes.
|