lmnr 0.2.7__py3-none-any.whl → 0.2.9__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. lmnr/cli/cli.py +1 -4
  2. lmnr/cli/parser/nodes/__init__.py +0 -5
  3. lmnr/cli/parser/nodes/code.py +10 -1
  4. lmnr/cli/parser/nodes/input.py +1 -2
  5. lmnr/cli/parser/nodes/json_extractor.py +29 -0
  6. lmnr/cli/parser/nodes/llm.py +7 -2
  7. lmnr/cli/parser/nodes/semantic_search.py +11 -39
  8. lmnr/cli/parser/nodes/types.py +26 -11
  9. lmnr/cli/parser/parser.py +4 -0
  10. lmnr/cli/parser/utils.py +24 -0
  11. lmnr/sdk/remote_debugger.py +1 -0
  12. {lmnr-0.2.7.dist-info → lmnr-0.2.9.dist-info}/METADATA +6 -4
  13. lmnr-0.2.9.dist-info/RECORD +25 -0
  14. lmnr/cli/cookiecutter.json +0 -9
  15. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/__init__.py +0 -0
  16. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/__init__.py +0 -1
  17. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/action.py +0 -14
  18. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/engine.py +0 -293
  19. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/state.py +0 -69
  20. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/task.py +0 -38
  21. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/__init__.py +0 -1
  22. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/nodes/functions.py +0 -221
  23. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/{{cookiecutter.pipeline_dir_name}}.py +0 -87
  24. lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/types.py +0 -35
  25. lmnr-0.2.7.dist-info/RECORD +0 -35
  26. {lmnr-0.2.7.dist-info → lmnr-0.2.9.dist-info}/LICENSE +0 -0
  27. {lmnr-0.2.7.dist-info → lmnr-0.2.9.dist-info}/WHEEL +0 -0
  28. {lmnr-0.2.7.dist-info → lmnr-0.2.9.dist-info}/entry_points.txt +0 -0
lmnr/cli/cli.py CHANGED
@@ -4,11 +4,9 @@ import os
 import click
 import logging
 from cookiecutter.main import cookiecutter
-from importlib import resources as importlib_resources
 from pydantic.alias_generators import to_pascal
 
 from .parser.parser import runnable_graph_to_template_vars
-import lmnr
 
 logger = logging.getLogger(__name__)
 
@@ -87,11 +85,10 @@ def pull(pipeline_name, pipeline_version_name, project_api_key, loglevel):
 
     logger.info(f"Context:\n{context}")
     cookiecutter(
-        str(importlib_resources.files(lmnr)),
+        "https://github.com/lmnr-ai/lmnr-python-engine.git",
         output_dir=".",
         config_file=None,
         extra_context=context,
-        directory="cli/",
         no_input=True,
         overwrite_if_exists=True,
     )
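Note: the cookiecutter template now comes from the remote lmnr-python-engine repository instead of files bundled inside the installed lmnr package. A minimal standalone sketch of the same call; the context dict here is invented, the CLI builds the real one via runnable_graph_to_template_vars:
```
from cookiecutter.main import cookiecutter

# Hypothetical context for illustration; the CLI derives this
# from the pulled pipeline graph.
context = {"pipeline_name": "My Pipeline"}

cookiecutter(
    "https://github.com/lmnr-ai/lmnr-python-engine.git",
    output_dir=".",
    extra_context=context,
    no_input=True,
    overwrite_if_exists=True,
)
```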
lmnr/cli/parser/nodes/__init__.py CHANGED
@@ -4,21 +4,16 @@ from typing import Optional
 import uuid
 
 
-HandleType = str  # "String" | "ChatMessageList" | "Any"
-
-
 @dataclass
 class Handle:
     id: uuid.UUID
     name: Optional[str]
-    type: HandleType
 
     @classmethod
     def from_dict(cls, dict: dict) -> "Handle":
         return cls(
             id=uuid.UUID(dict["id"]),
             name=(dict["name"] if "name" in dict else None),
-            type=dict["type"],
         )
 
 
lmnr/cli/parser/nodes/code.py CHANGED
@@ -12,6 +12,8 @@ class CodeNode(NodeFunctions):
     inputs: list[Handle]
     outputs: list[Handle]
     inputs_mappings: dict[uuid.UUID, uuid.UUID]
+    code: str
+    fn_name: str
 
     def handles_mapping(
         self, output_handle_id_to_node_name: dict[str, str]
@@ -24,4 +26,11 @@ class CodeNode(NodeFunctions):
         return "Code"
 
     def config(self) -> dict:
-        return {}
+        return {
+            "code": self.code,
+            "fn_name": self.fn_name,
+            "fn_inputs": ", ".join(
+                f"{handle.name}=input_to_code_node_arg({handle.name})"
+                for handle in self.inputs
+            ),
+        }
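Note: config() now also renders "fn_inputs", the keyword-argument string spliced into the generated code that calls the user's function. A standalone sketch of what that join produces; "query" and "context" are hypothetical handle names:
```
# Standalone sketch, not package code.
handle_names = ["query", "context"]
fn_inputs = ", ".join(
    f"{name}=input_to_code_node_arg({name})" for name in handle_names
)
print(fn_inputs)
# query=input_to_code_node_arg(query), context=input_to_code_node_arg(context)
```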
lmnr/cli/parser/nodes/input.py CHANGED
@@ -2,7 +2,7 @@ from dataclasses import dataclass
 from typing import Optional
 import uuid
 
-from lmnr.cli.parser.nodes import Handle, HandleType, NodeFunctions
+from lmnr.cli.parser.nodes import Handle, NodeFunctions
 from lmnr.types import NodeInput
 
 
@@ -12,7 +12,6 @@ class InputNode(NodeFunctions):
     name: str
     outputs: list[Handle]
     input: Optional[NodeInput]
-    input_type: HandleType
 
     def handles_mapping(
         self, output_handle_id_to_node_name: dict[str, str]
lmnr/cli/parser/nodes/json_extractor.py ADDED
@@ -0,0 +1,29 @@
+from dataclasses import dataclass
+
+import uuid
+
+from lmnr.cli.parser.nodes import Handle, NodeFunctions
+from lmnr.cli.parser.utils import map_handles
+
+
+@dataclass
+class JsonExtractorNode(NodeFunctions):
+    id: uuid.UUID
+    name: str
+    inputs: list[Handle]
+    outputs: list[Handle]
+    inputs_mappings: dict[uuid.UUID, uuid.UUID]
+    template: str
+
+    def handles_mapping(
+        self, output_handle_id_to_node_name: dict[str, str]
+    ) -> list[tuple[str, str]]:
+        return map_handles(
+            self.inputs, self.inputs_mappings, output_handle_id_to_node_name
+        )
+
+    def node_type(self) -> str:
+        return "JsonExtractor"
+
+    def config(self) -> dict:
+        return {"template": self.template}
lmnr/cli/parser/nodes/llm.py CHANGED
@@ -44,8 +44,13 @@ class LLMNode(NodeFunctions):
             "model": model,
             "model_params": self.model_params,
             "stream": self.stream,
-            "structured_output_enabled": self.structured_output_enabled,
+            "enable_structured_output": self.structured_output_enabled
+            and self.structured_output_schema is not None,
             "structured_output_max_retries": self.structured_output_max_retries,
             "structured_output_schema": self.structured_output_schema,
-            "structured_output_schema_target": self.structured_output_schema_target,
+            "structured_output_schema_target_str": (
+                "None"
+                if self.structured_output_schema_target is None
+                else f'"{self.structured_output_schema_target}"'
+            ),
         }
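Note: "enable_structured_output" is now true only when the feature is enabled and a schema is actually present, and the schema target is pre-rendered as a Python source literal for the code generator. The target rendering in isolation, as a sketch mirroring the conditional expression above:
```
def render_schema_target(target):
    # Mirrors the conditional expression in config() above.
    return "None" if target is None else f'"{target}"'

assert render_schema_target(None) == "None"
assert render_schema_target("answer") == '"answer"'
```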
lmnr/cli/parser/nodes/semantic_search.py CHANGED
@@ -1,5 +1,4 @@
 from dataclasses import dataclass
-from datetime import datetime
 
 import uuid
 
@@ -7,44 +6,19 @@ from lmnr.cli.parser.nodes import Handle, NodeFunctions
 from lmnr.cli.parser.utils import map_handles
 
 
-@dataclass
-class FileMetadata:
-    id: uuid.UUID
-    created_at: datetime
-    project_id: uuid.UUID
-    filename: str
-
-
 @dataclass
 class Dataset:
     id: uuid.UUID
-    created_at: datetime
-    project_id: uuid.UUID
-    name: str
-
-
-@dataclass
-class SemanticSearchDatasource:
-    type: str
-    id: uuid.UUID
-    # TODO: Paste other fields here, use Union[FileMetadata, Dataset]
+    # created_at: datetime
+    # project_id: uuid.UUID
+    # name: str
+    # indexed_on: Optional[str]
 
     @classmethod
-    def from_dict(cls, datasource_dict: dict) -> "SemanticSearchDatasource":
-        if datasource_dict["type"] == "File":
-            return cls(
-                type="File",
-                id=uuid.UUID(datasource_dict["id"]),
-            )
-        elif datasource_dict["type"] == "Dataset":
-            return cls(
-                type="Dataset",
-                id=uuid.UUID(datasource_dict["id"]),
-            )
-        else:
-            raise ValueError(
-                f"Invalid SemanticSearchDatasource type: {datasource_dict['type']}"
-            )
+    def from_dict(cls, dataset_dict: dict) -> "Dataset":
+        return cls(
+            id=uuid.UUID(dataset_dict["id"]),
+        )
 
 
 @dataclass
@@ -57,7 +31,7 @@ class SemanticSearchNode(NodeFunctions):
     limit: int
     threshold: float
     template: str
-    datasources: list[SemanticSearchDatasource]
+    datasets: list[Dataset]
 
     def handles_mapping(
         self, output_handle_id_to_node_name: dict[str, str]
@@ -74,8 +48,6 @@ class SemanticSearchNode(NodeFunctions):
             "limit": self.limit,
             "threshold": self.threshold,
             "template": self.template,
-            "datasource_ids": [str(datasource.id) for datasource in self.datasources],
-            "datasource_ids_list": str(
-                [str(datasource.id) for datasource in self.datasources]
-            ),
+            "datasource_ids": [str(dataset.id) for dataset in self.datasets],
+            "datasource_ids_list": str([str(dataset.id) for dataset in self.datasets]),
         }
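Note: FileMetadata and SemanticSearchDatasource are gone; only dataset ids survive parsing. A usage sketch of the slimmed-down Dataset; the UUID is made up:
```
import uuid

from lmnr.cli.parser.nodes.semantic_search import Dataset

ds = Dataset.from_dict({"id": "123e4567-e89b-12d3-a456-426614174000"})
assert ds.id == uuid.UUID("123e4567-e89b-12d3-a456-426614174000")
```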
lmnr/cli/parser/nodes/types.py CHANGED
@@ -5,11 +5,12 @@ from lmnr.cli.parser.nodes import Handle
 from lmnr.cli.parser.nodes.code import CodeNode
 from lmnr.cli.parser.nodes.condition import ConditionNode
 from lmnr.cli.parser.nodes.input import InputNode
+from lmnr.cli.parser.nodes.json_extractor import JsonExtractorNode
 from lmnr.cli.parser.nodes.llm import LLMNode
 from lmnr.cli.parser.nodes.output import OutputNode
 from lmnr.cli.parser.nodes.router import Route, RouterNode
 from lmnr.cli.parser.nodes.semantic_search import (
-    SemanticSearchDatasource,
+    Dataset,
     SemanticSearchNode,
 )
 from lmnr.types import NodeInput, ChatMessage
@@ -32,6 +33,7 @@ Node = Union[
     RouterNode,
     SemanticSearchNode,
     CodeNode,
+    JsonExtractorNode,
 ]
 
 
@@ -42,7 +44,6 @@ def node_from_dict(node_dict: dict) -> Node:
             name=node_dict["name"],
             outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
             input=node_input_from_json(node_dict["input"]),
-            input_type=node_dict["inputType"],
         )
     elif node_dict["type"] == "Output":
         return OutputNode(
@@ -86,11 +87,14 @@ def node_from_dict(node_dict: dict) -> Node:
                 node_dict["modelParams"] if "modelParams" in node_dict else None
             ),
             stream=False,
-            # TODO: Implement structured output
-            structured_output_enabled=False,
-            structured_output_max_retries=3,
-            structured_output_schema=None,
-            structured_output_schema_target=None,
+            structured_output_enabled=node_dict.get("structuredOutputEnabled", False),
+            structured_output_max_retries=node_dict.get(
+                "structuredOutputMaxRetries", 0
+            ),
+            structured_output_schema=node_dict.get("structuredOutputSchema", None),
+            structured_output_schema_target=node_dict.get(
+                "structuredOutputSchemaTarget", None
+            ),
         )
     elif node_dict["type"] == "Router":
         return RouterNode(
@@ -118,10 +122,7 @@ def node_from_dict(node_dict: dict) -> Node:
             limit=node_dict["limit"],
             threshold=node_dict["threshold"],
             template=node_dict["template"],
-            datasources=[
-                SemanticSearchDatasource.from_dict(ds)
-                for ds in node_dict["datasources"]
-            ],
+            datasets=[Dataset.from_dict(ds) for ds in node_dict["datasets"]],
         )
     elif node_dict["type"] == "Code":
         return CodeNode(
@@ -133,6 +134,20 @@
                 uuid.UUID(k): uuid.UUID(v)
                 for k, v in node_dict["inputsMappings"].items()
             },
+            code=node_dict["code"],
+            fn_name=node_dict["fnName"],
+        )
+    elif node_dict["type"] == "JsonExtractor":
+        return JsonExtractorNode(
+            id=uuid.UUID(node_dict["id"]),
+            name=node_dict["name"],
+            inputs=[Handle.from_dict(handle) for handle in node_dict["inputs"]],
+            outputs=[Handle.from_dict(handle) for handle in node_dict["outputs"]],
+            inputs_mappings={
+                uuid.UUID(k): uuid.UUID(v)
+                for k, v in node_dict["inputsMappings"].items()
+            },
+            template=node_dict["template"],
         )
     else:
        raise ValueError(f"Node type {node_dict['type']} not supported")
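Note: judging from the keys read in the new JsonExtractor branch, a serialized node of that type should round-trip as below; the ids, names, and template are invented for illustration:
```
import uuid

from lmnr.cli.parser.nodes.types import node_from_dict

in_handle, out_handle = str(uuid.uuid4()), str(uuid.uuid4())
node = node_from_dict(
    {
        "type": "JsonExtractor",
        "id": str(uuid.uuid4()),
        "name": "extract_answer",
        "inputs": [{"id": in_handle, "name": "input"}],
        "outputs": [{"id": out_handle, "name": "output"}],
        "inputsMappings": {in_handle: str(uuid.uuid4())},
        "template": "{{answer}}",
    }
)
assert node.node_type() == "JsonExtractor"
assert node.config() == {"template": "{{answer}}"}
```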
lmnr/cli/parser/parser.py CHANGED
@@ -1,3 +1,4 @@
+from lmnr.cli.parser.utils import replace_spaces_with_underscores
 from .nodes.types import node_from_dict
 
 
@@ -8,6 +9,9 @@ def runnable_graph_to_template_vars(graph: dict) -> dict:
     node_id_to_node_name = {}
     output_handle_id_to_node_name: dict[str, str] = {}
     for node in graph["nodes"].values():
+        # override node names in the graph itself to be safe
+        node["name"] = replace_spaces_with_underscores(node["name"])
+
         node_id_to_node_name[node["id"]] = node["name"]
         for handle in node["outputs"]:
             output_handle_id_to_node_name[handle["id"]] = node["name"]
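Note: because the name is rewritten in the graph dict itself, both lookup tables above are built from the normalized name, so generated identifiers stay consistent. A sketch with a hypothetical node entry:
```
from lmnr.cli.parser.utils import replace_spaces_with_underscores

node = {"id": "n1", "name": "Semantic Search v2", "outputs": []}
node["name"] = replace_spaces_with_underscores(node["name"])
assert node["name"] == "Semantic_Search_v2"
```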
lmnr/cli/parser/utils.py CHANGED
@@ -23,3 +23,27 @@ def map_handles(
         (input_name, output_handle_id_to_node_name[str(output_id)])
         for input_name, output_id in mapping
     ]
+
+
+def replace_spaces_with_underscores(s: str):
+    spaces = [
+        "\u0020",
+        "\u00A0",
+        "\u1680",
+        "\u2000",
+        "\u2001",
+        "\u2002",
+        "\u2003",
+        "\u2004",
+        "\u2005",
+        "\u2006",
+        "\u2007",
+        "\u2008",
+        "\u2009",
+        "\u200A",
+        "\u200B",
+        "\u202F",
+        "\u205F",
+        "\u3000",
+    ]
+    return s.translate({ord(space): "_" for space in spaces})
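Note: the translation table covers the ASCII space plus the common Unicode space and zero-width characters, so any of them in a node name becomes an underscore. A quick check; the input string is illustrative:
```
from lmnr.cli.parser.utils import replace_spaces_with_underscores

# "\u00a0" is a no-break space; both it and the plain space map to "_".
assert replace_spaces_with_underscores("My Node\u00a0v2") == "My_Node_v2"
```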
lmnr/sdk/remote_debugger.py CHANGED
@@ -96,6 +96,7 @@ class RemoteDebugger:
                     f'{tool.__name__}: {e}'
                 e = ToolCallError(error=error_message, reqId=req_id)
                 websocket.send(e.model_dump_json())
+                continue
             formatted_response = None
             try:
                 formatted_response = ToolCallResponse(
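Note: the added continue ends the loop iteration once a ToolCallError has been sent, so the response-formatting code below it no longer runs for a call that raised. A self-contained sketch of the pattern, not the SDK code:
```
def run_calls(calls):
    results = []
    for call in calls:
        try:
            value = call()
        except Exception as exc:
            results.append(("error", str(exc)))
            continue  # mirrors the fix: skip response formatting on failure
        results.append(("ok", value))
    return results

print(run_calls([lambda: 42, lambda: 1 / 0]))
# [('ok', 42), ('error', 'division by zero')]
```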
{lmnr-0.2.7.dist-info → lmnr-0.2.9.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: lmnr
-Version: 0.2.7
+Version: 0.2.9
 Summary: Python SDK for Laminar AI
 License: Apache-2.0
 Author: lmnr.ai
@@ -14,7 +14,9 @@ Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: black (>=24.4.2,<25.0.0)
 Requires-Dist: click (>=8.1.7,<9.0.0)
 Requires-Dist: cookiecutter (>=2.6.0,<3.0.0)
+Requires-Dist: lmnr-baml (>=0.40.0,<0.41.0)
 Requires-Dist: pydantic (>=2.7.4,<3.0.0)
+Requires-Dist: pystache (>=0.6.5,<0.7.0)
 Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
 Requires-Dist: websockets (>=12.0,<13.0)
@@ -166,12 +168,12 @@ res = pipeline.run(
         "OPENAI_API_KEY": <OPENAI_API_KEY>,
     }
 )
-print(f"RESULT:\n{res}")
+print(f"Pipeline run result:\n{res}")
 ```
 
 ### Current functionality
-- Supports graph generation for graphs with Input, Output, and LLM nodes only
-- For LLM nodes, it only supports OpenAI and Anthropic models and doesn't support structured output
+- Supports graph generation for graphs with the following nodes: Input, Output, LLM, Router, Code.
+- For LLM nodes, it only supports OpenAI and Anthropic models. Structured output in LLM nodes will be supported soon.
 
 ## PROJECT_API_KEY
 
lmnr-0.2.9.dist-info/RECORD ADDED
@@ -0,0 +1,25 @@
+lmnr/__init__.py,sha256=NWHDZ-KAl3pQGDnKUbBbZaW4es-0-9Xt6gP-UrjyuEQ,207
+lmnr/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lmnr/cli/__main__.py,sha256=8hDtWlaFZK24KhfNq_ZKgtXqYHsDQDetukOCMlsbW0Q,59
+lmnr/cli/cli.py,sha256=0Qw_dxE_N9F38asUB7pMbILJGVi-pPtqiao4aTjQQGM,2769
+lmnr/cli/parser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+lmnr/cli/parser/nodes/__init__.py,sha256=2MkPdKulb1kuNe6aT71CaqBA8iBrXyb5pq5bu_EvCb8,1052
+lmnr/cli/parser/nodes/code.py,sha256=8lTPBibUzaw_t-9QoPljhxH3KA4CLn9DJjA-iWpprOA,933
+lmnr/cli/parser/nodes/condition.py,sha256=AJny0ILXbSy1hTwsRvZvDUqts9INNx63yQSkD7Dp7KU,740
+lmnr/cli/parser/nodes/input.py,sha256=o8EfCmBbNyQL8FzmAtgnNDFlWqZmRAgkbw4HzKXZepU,539
+lmnr/cli/parser/nodes/json_extractor.py,sha256=CnVwZ-wU_Ro4WkJLw9Uk_SS3yvZ66UPa5mK4JdkM8w4,723
+lmnr/cli/parser/nodes/llm.py,sha256=Wpmo9cfNiYN9DRbj7oBS6RYcKXLwlGtF6RdF4jFQm5I,1866
+lmnr/cli/parser/nodes/output.py,sha256=1XBppSscxM01kfZhE9oOh2GgdCVzyPVe2RAxLI5HmUc,665
+lmnr/cli/parser/nodes/router.py,sha256=dmCx4ho8_GdFJXQa8UevMf_uEP7AKBv_MJ2zpLC6Vck,894
+lmnr/cli/parser/nodes/semantic_search.py,sha256=DWDPpV78XZ7vPIaPd86FbeDFAnKah4e61M1TOzwnt84,1352
+lmnr/cli/parser/nodes/types.py,sha256=OVXj-iMEDY9nPKCX1-zddtoszZcUL3CXYYryI7O3et0,6094
+lmnr/cli/parser/parser.py,sha256=yDa-ysAkh6si_hHU8Gw8EdtNWc4pFc5RbvgWEXGEPys,2370
+lmnr/cli/parser/utils.py,sha256=1oy6BApHXOF7BTXbP8v3Oi9bwOdWZjoxDlRIOfXVxro,1169
+lmnr/sdk/endpoint.py,sha256=0HjcxMUcJz-klFZO2f5xtTaoLjcaEb8vrJ_YldTWUc8,7467
+lmnr/sdk/remote_debugger.py,sha256=tC8OywEkxzeUCAHrpfhlL-iMP_0Rqh9qC2DtoYEGJjU,5174
+lmnr/types.py,sha256=OR9xRAQ5uTTwpJTDL_e3jZqxYJWvyX96CCoxr3oo94g,2112
+lmnr-0.2.9.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+lmnr-0.2.9.dist-info/METADATA,sha256=80S5dQujRPHbBr-zlV4W1GouTyFybBR7z2JanLJSuo4,5564
+lmnr-0.2.9.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+lmnr-0.2.9.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
+lmnr-0.2.9.dist-info/RECORD,,
lmnr/cli/cookiecutter.json DELETED
@@ -1,9 +0,0 @@
-{
-    "lmnr_pipelines_dir_name": "lmnr_engine",
-    "pipeline_name": "Laminar Pipeline",
-    "pipeline_dir_name": "{{ cookiecutter['pipeline_name'].lower().replace('-', '_').replace(' ', '_') }}",
-    "class_name": "LaminarPipeline",
-    "pipeline_version_name": "main",
-    "_tasks": {},
-    "_jinja2_env_vars": {"lstrip_blocks": true, "trim_blocks": true}
-}
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/__init__.py DELETED
@@ -1 +0,0 @@
-from .engine import Engine
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/action.py DELETED
@@ -1,14 +0,0 @@
-from dataclasses import dataclass
-from typing import Union
-
-from lmnr_engine.types import NodeInput
-
-
-@dataclass
-class RunOutput:
-    status: str  # "Success" | "Termination" TODO: Turn into Enum
-    output: Union[NodeInput, None]
-
-
-class NodeRunError(Exception):
-    pass
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/engine.py DELETED
@@ -1,293 +0,0 @@
-from concurrent.futures import ThreadPoolExecutor
-import datetime
-import logging
-from typing import Optional
-import uuid
-from dataclasses import dataclass
-import queue
-
-from .task import Task
-from .action import NodeRunError, RunOutput
-from .state import State
-from lmnr.types import NodeInput
-from lmnr_engine.types import Message
-
-
-logger = logging.getLogger(__name__)
-
-
-@dataclass
-class ScheduledTask:
-    status: str  # "Task" | "Err" TODO: Use an enum
-    task_name: Optional[str]
-
-
-class RunError(Exception):
-    outputs: dict[str, Message]
-
-
-@dataclass
-class Engine:
-    tasks: dict[str, Task]
-    active_tasks: set[str]
-    depths: dict[str, int]
-    outputs: dict[str, Message]
-    env: dict[str, str]
-    thread_pool_executor: ThreadPoolExecutor
-    # TODO: Store thread pool executor's Futures here to have control
-    # over them (e.g. cancel them)
-
-    @classmethod
-    def new(
-        cls, thread_pool_executor: ThreadPoolExecutor, env: dict[str, str] = {}
-    ) -> "Engine":
-        return cls(
-            tasks={},
-            active_tasks=set(),
-            depths={},
-            outputs={},
-            env=env,
-            thread_pool_executor=thread_pool_executor,
-        )
-
-    @classmethod
-    def with_tasks(
-        cls,
-        tasks: list[Task],
-        thread_pool_executor: ThreadPoolExecutor,
-        env: dict[str, str] = {},
-    ) -> "Engine":
-        dag = cls.new(thread_pool_executor, env=env)
-
-        for task in tasks:
-            dag.tasks[task.name] = task
-            dag.depths[task.name] = 0
-
-        return dag
-
-    def override_inputs(self, inputs: dict[str, NodeInput]) -> None:
-        for task in self.tasks.values():
-            # TODO: Check that it's the Input type task
-            if not task.prev:
-                task.value = inputs[task.name]
-
-    def run(self, inputs: dict[str, NodeInput]) -> dict[str, Message]:
-        self.override_inputs(inputs)
-
-        q = queue.Queue()
-
-        input_tasks = []
-        for task in self.tasks.values():
-            if len(task.prev) == 0:
-                input_tasks.append(task.name)
-
-        for task_id in input_tasks:
-            q.put(ScheduledTask(status="Task", task_name=task_id))
-
-        while True:
-            logger.info("Waiting for task from queue")
-            scheduled_task: ScheduledTask = q.get()
-            logger.info(f"Got task from queue: {scheduled_task}")
-            if scheduled_task.status == "Err":
-                # TODO: Abort all other threads
-                raise RunError(self.outputs)
-
-            task: Task = self.tasks[scheduled_task.task_name]  # type: ignore
-            logger.info(f"Task next: {task.next}")
-
-            if not task.next:
-                try:
-                    fut = self.execute_task(task, q)
-                    fut.result()
-                    if not self.active_tasks:
-                        return self.outputs
-                except Exception:
-                    raise RunError(self.outputs)
-            else:
-                self.execute_task(task, q)
-
-    def execute_task_inner(
-        self,
-        task: Task,
-        queue: queue.Queue,
-    ) -> None:
-        task_id = task.name
-        next = task.next
-        input_states = task.input_states
-        active_tasks = self.active_tasks
-        tasks = self.tasks
-        depths = self.depths
-        depth = depths[task.name]
-        outputs = self.outputs
-
-        inputs: dict[str, NodeInput] = {}
-        input_messages = []
-
-        # Wait for inputs for this task to be set
-        for handle_name, input_state in input_states.items():
-            logger.info(f"Task {task_id} waiting for semaphore for {handle_name}")
-            input_state.semaphore.acquire()
-            logger.info(f"Task {task_id} acquired semaphore for {handle_name}")
-
-            # Set the outputs of predecessors as inputs of the current
-            output = input_state.get_state()
-            # If at least one of the inputs is termination,
-            # also terminate this task early and set its state to termination
-            if output.status == "Termination":
-                return
-            message = output.get_out()
-
-            inputs[handle_name] = message.value
-            input_messages.append(message)
-
-        start_time = datetime.datetime.now()
-
-        try:
-            if callable(task.value):
-                res = task.value(**inputs, _env=self.env)
-            else:
-                res = RunOutput(status="Success", output=task.value)
-
-            if res.status == "Success":
-                id = uuid.uuid4()
-                state = State.new(
-                    Message(
-                        id=id,
-                        value=res.output,  # type: ignore
-                        start_time=start_time,
-                        end_time=datetime.datetime.now(),
-                    )
-                )
-            else:
-                assert res.status == "Termination"
-                state = State.termination()
-
-            is_termination = state.is_termination()
-            logger.info(f"Task {task_id} executed")
-
-            # remove the task from active tasks once it's done
-            if task_id in active_tasks:
-                active_tasks.remove(task_id)
-
-            if depth > 0:
-                self.propagate_reset(task_id, task_id, tasks)
-
-            # terminate graph on recursion depth exceeding 10
-            if depth == 10:
-                logging.error("Max recursion depth exceeded, terminating graph")
-                error = Message(
-                    id=uuid.uuid4(),
-                    value="Max recursion depth exceeded",
-                    start_time=start_time,
-                    end_time=datetime.datetime.now(),
-                )
-
-            if not next:
-                # if there are no next tasks, we can terminate the graph
-                outputs[task.name] = state.get_out()
-
-            # push next tasks to the channel only if
-            # the current task is not a termination
-            for next_task_name in next:
-                # we set the inputs of the next tasks
-                # to the outputs of the current task
-                next_task = tasks[next_task_name]
-
-                # in majority of cases there will be only one handle name
-                # however we need to handle the case when single output
-                # is mapped to multiple inputs on the next node
-                handle_names = []
-                for k, v in next_task.handles_mapping:
-                    if v == task.name:
-                        handle_names.append(k)
-
-                for handle_name in handle_names:
-                    next_state = next_task.input_states[handle_name]
-                    next_state.set_state_and_permits(state, 1)
-
-                # push next tasks to the channel only if the task is not active
-                # and current task is not a termination
-                if not (next_task_name in active_tasks) and not is_termination:
-                    active_tasks.add(next_task_name)
-                    queue.put(
-                        ScheduledTask(
-                            status="Task",
-                            task_name=next_task_name,
-                        )
-                    )
-
-            # increment depth of the finished task
-            depths[task_id] = depth + 1
-        except NodeRunError as e:
-            logger.exception(f"Execution failed [id: {task_id}]")
-
-            error = Message(
-                id=uuid.uuid4(),
-                value=str(e),
-                start_time=start_time,
-                end_time=datetime.datetime.now(),
-            )
-
-            outputs[task.name] = error
-
-            # terminate entire graph by sending err task
-            queue.put(
-                ScheduledTask(
-                    status="Err",
-                    task_name=None,
-                )
-            )
-
-        except Exception:
-            logger.exception(f"Execution failed [id: {task_id}]")
-            error = Message(
-                id=uuid.uuid4(),
-                value="Unexpected server error",
-                start_time=start_time,
-                end_time=datetime.datetime.now(),
-            )
-            outputs[task.name] = error
-            queue.put(
-                ScheduledTask(
-                    status="Err",
-                    task_name=None,
-                )
-            )
-
-    def execute_task(
-        self,
-        task: Task,
-        queue: queue.Queue,
-    ):
-        return self.thread_pool_executor.submit(
-            self.execute_task_inner,
-            task,
-            queue,
-        )
-
-    def propagate_reset(
-        self, current_task_name: str, start_task_name: str, tasks: dict[str, Task]
-    ) -> None:
-        task = tasks[current_task_name]
-
-        for next_task_name in task.next:
-            if next_task_name == start_task_name:
-                return
-
-            next_task = tasks[next_task_name]
-
-            # in majority of cases there will be only one handle name
-            # however we need to handle the case when single output is mapped
-            # to multiple inputs on the next node
-            handle_names = []
-            for k, v in next_task.handles_mapping:
-                if v == task.name:
-                    handle_names.append(k)
-
-            for handle_name in handle_names:
-                next_state = next_task.input_states[handle_name]
-
-                if next_state.get_state().is_success():
-                    next_state.set_state(State.empty())
-                    next_state.semaphore.release()
-                    self.propagate_reset(next_task_name, start_task_name, tasks)
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/state.py DELETED
@@ -1,69 +0,0 @@
-import threading
-from dataclasses import dataclass
-from typing import Union
-
-from lmnr_engine.types import Message
-
-
-@dataclass
-class State:
-    status: str  # "Success", "Empty", "Termination" # TODO: Turn into Enum
-    message: Union[Message, None]
-
-    @classmethod
-    def new(cls, val: Message) -> "State":
-        return cls(
-            status="Success",
-            message=val,
-        )
-
-    @classmethod
-    def empty(cls) -> "State":
-        return cls(
-            status="Empty",
-            message=Message.empty(),
-        )
-
-    @classmethod
-    def termination(cls) -> "State":
-        return cls(
-            status="Termination",
-            message=None,
-        )
-
-    def is_success(self) -> bool:
-        return self.status == "Success"
-
-    def is_termination(self) -> bool:
-        return self.status == "Termination"
-
-    def get_out(self) -> Message:
-        if self.message is None:
-            raise ValueError("Cannot get message from a termination state")
-
-        return self.message
-
-
-@dataclass
-class ExecState:
-    output: State
-    semaphore: threading.Semaphore
-
-    @classmethod
-    def new(cls) -> "ExecState":
-        return cls(
-            output=State.empty(),
-            semaphore=threading.Semaphore(0),
-        )
-
-    # Assume this is called by the caller who doesn't need to acquire semaphore
-    def set_state(self, output: State):
-        self.output = output
-
-    # Assume the caller is smart to call this after acquiring the semaphore
-    def get_state(self) -> State:
-        return self.output
-
-    def set_state_and_permits(self, output: State, permits: int):
-        self.output = output
-        self.semaphore.release(permits)
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/task.py DELETED
@@ -1,38 +0,0 @@
-from typing import Callable, Union
-
-from .action import RunOutput
-from .state import ExecState
-from lmnr_engine.types import NodeInput
-
-
-class Task:
-    # unique identifier
-    name: str
-    # mapping from current node's handle name to previous node's unique name
-    # assumes nodes have only one output
-    handles_mapping: list[tuple[str, str]]
-    # Value or a function that returns a value
-    # Usually a function which waits for inputs from previous nodes
-    value: Union[NodeInput, Callable[..., RunOutput]]  # TODO: Type this fully
-    # unique node names of previous nodes
-    prev: list[str]
-    # unique node names of next nodes
-    next: list[str]
-    input_states: dict[str, ExecState]
-
-    def __init__(
-        self,
-        name: str,
-        handles_mapping: list[tuple[str, str]],
-        value: Union[NodeInput, Callable[..., RunOutput]],
-        prev: list[str],
-        next: list[str],
-    ) -> None:
-        self.name = name
-        self.handles_mapping = handles_mapping
-        self.value = value
-        self.prev = prev
-        self.next = next
-        self.input_states = {
-            handle_name: ExecState.new() for (handle_name, _) in self.handles_mapping
-        }
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/__init__.py DELETED
@@ -1 +0,0 @@
-from .{{ cookiecutter.pipeline_dir_name }} import {{ cookiecutter.class_name }}
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/nodes/functions.py DELETED
@@ -1,221 +0,0 @@
-import requests
-import json
-
-from lmnr.types import ConditionedValue
-from lmnr_engine.engine.action import NodeRunError, RunOutput
-from lmnr_engine.types import ChatMessage, NodeInput
-
-
-{% for task in cookiecutter._tasks.values() %}
-{% if task.node_type == "LLM" %}
-def {{task.function_name}}({{ task.handle_args }}, _env: dict[str, str]) -> RunOutput:
-    {% set chat_messages_found = false %}
-    {% for input_handle_name in task.input_handle_names %}
-    {% if input_handle_name == 'chat_messages' %}
-    {% set chat_messages_found = true %}
-    {% endif %}
-    {% endfor %}
-
-    {% if chat_messages_found %}
-    input_chat_messages = chat_messages
-    {% else %}
-    input_chat_messages = []
-    {% endif %}
-
-    rendered_prompt = """{{task.config.prompt}}"""
-    {% set prompt_variables = task.input_handle_names|reject("equalto", "chat_messages") %}
-    {% for prompt_variable in prompt_variables %}
-    {# TODO: Fix this. Using double curly braces in quotes because normal double curly braces
-    # get replaced during rendering by Cookiecutter. This is a hacky solution.#}
-    rendered_prompt = rendered_prompt.replace("{{'{{'}}{{prompt_variable}}{{'}}'}}", {{prompt_variable}})  # type: ignore
-    {% endfor %}
-
-    {% if task.config.model_params == none %}
-    params = {}
-    {% else %}
-    params = json.loads(
-        """{{task.config.model_params}}"""
-    )
-    {% endif %}
-
-    messages = [ChatMessage(role="system", content=rendered_prompt)]
-    messages.extend(input_chat_messages)
-
-    {% if task.config.provider == "openai" %}
-    message_jsons = [
-        {"role": message.role, "content": message.content} for message in messages
-    ]
-
-    data = {
-        "model": "{{task.config.model}}",
-        "messages": message_jsons,
-    }
-    data.update(params)
-
-    headers = {
-        "Content-Type": "application/json",
-        "Authorization": f"Bearer {_env['OPENAI_API_KEY']}",
-    }
-    res = requests.post(
-        "https://api.openai.com/v1/chat/completions", json=data, headers=headers
-    )
-
-    if res.status_code != 200:
-        res_json = res.json()
-        raise NodeRunError(f'OpenAI completions request failed: {res_json["error"]["message"]}')
-
-    chat_completion = res.json()
-
-    completion_message = chat_completion["choices"][0]["message"]["content"]
-
-    meta_log = {}
-    {# TODO: Add node chunk id #}
-    meta_log["node_chunk_id"] = None
-    meta_log["model"] = "{{task.config.model}}"
-    meta_log["prompt"] = rendered_prompt
-    meta_log["input_message_count"] = len(messages)
-    meta_log["input_token_count"] = chat_completion["usage"]["prompt_tokens"]
-    meta_log["output_token_count"] = chat_completion["usage"]["completion_tokens"]
-    meta_log["total_token_count"] = (
-        chat_completion["usage"]["prompt_tokens"] + chat_completion["usage"]["completion_tokens"]
-    )
-    {# TODO: Add approximate cost #}
-    meta_log["approximate_cost"] = None
-    {% elif task.config.provider == "anthropic" %}
-    data = {
-        "model": "{{task.config.model}}",
-        "max_tokens": 4096,
-    }
-    data.update(params)
-
-    {# TODO: Generate appropriate code based on this if-else block #}
-    if len(messages) == 1 and messages[0].role == "system":
-        messages[0].role = "user"
-        message_jsons = [
-            {"role": message.role, "content": message.content} for message in messages
-        ]
-        data["messages"] = message_jsons
-    else:
-        data["system"] = messages[0].content
-        message_jsons = [
-            {"role": message.role, "content": message.content} for message in messages[1:]
-        ]
-        data["messages"] = message_jsons
-
-    headers = {
-        "Content-Type": "application/json",
-        "X-Api-Key": _env['ANTHROPIC_API_KEY'],
-        "Anthropic-Version": "2023-06-01",
-    }
-    res = requests.post(
-        "https://api.anthropic.com/v1/messages", json=data, headers=headers
-    )
-
-    if res.status_code != 200:
-        raise NodeRunError(f"Anthropic message request failed: {res.text}")
-
-    chat_completion = res.json()
-
-    completion_message = chat_completion["content"][0]["text"]
-
-    meta_log = {}
-    {# TODO: Add node chunk id#}
-    meta_log["node_chunk_id"] = None
-    meta_log["model"] = "{{task.config.model}}"
-    meta_log["prompt"] = rendered_prompt
-    meta_log["input_message_count"] = len(messages)
-    meta_log["input_token_count"] = chat_completion["usage"]["input_tokens"]
-    meta_log["output_token_count"] = chat_completion["usage"]["output_tokens"]
-    meta_log["total_token_count"] = (
-        chat_completion["usage"]["input_tokens"] + chat_completion["usage"]["output_tokens"]
-    )
-    {# TODO: Add approximate cost#}
-    meta_log["approximate_cost"] = None
-    {% else %}
-    {% endif %}
-
-    return RunOutput(status="Success", output=completion_message)
-
-
-{% elif task.node_type == "SemanticSearch" %}
-def {{task.function_name}}(query: NodeInput, _env: dict[str, str]) -> RunOutput:
-    {% set datasources_length=task.config.datasource_ids|length %}
-    {% if datasources_length == 0 %}
-    raise NodeRunError("No datasources provided")
-    {% endif %}
-
-    headers = {
-        "Authorization": f"Bearer {_env['LMNR_PROJECT_API_KEY']}",
-    }
-    data = {
-        "query": query,
-        "limit": {{ task.config.limit }},
-        "threshold": {{ task.config.threshold }},
-        "datasourceIds": {{ task.config.datasource_ids_list }},
-    }
-    query_res = requests.post("https://api.lmnr.ai/v2/semantic-search", headers=headers, json=data)
-    if query_res.status_code != 200:
-        raise NodeRunError(f"Vector search request failed:{query_res.status_code}\n{query_res.text}")
-
-    results = query_res.json()
-
-    def render_query_res_point(template: str, point: dict, relevance_index: int) -> str:
-        data = point["data"]
-        data["relevance_index"] = relevance_index
-        res = template
-        for key, value in data.items():
-            res = res.replace("{{'{{'}}" + key + "{{'}}'}}", str(value))
-        return res
-
-    rendered_res_points = [render_query_res_point("""{{task.config.template}}""", res_point, index + 1) for (index, res_point) in enumerate(results)]
-    output = "\n".join(rendered_res_points)
-
-    return RunOutput(status="Success", output=output)
-
-
-{% elif task.node_type == "Router" %}
-def {{task.function_name}}(condition: NodeInput, input: NodeInput, _env: dict[str, str]) -> RunOutput:
-    routes = {{ task.config.routes }}
-    has_default_route = {{ task.config.has_default_route }}
-
-    for route in routes:
-        if route == condition:
-            return RunOutput(status="Success", output=ConditionedValue(condition=route, value=input))
-
-    if has_default_route:
-        return RunOutput(status="Success", output=ConditionedValue(condition=routes[-1], value=input))
-
-    raise NodeRunError(f"No route found for condition {condition}")
-
-
-{% elif task.node_type == "Condition" %}
-def {{task.function_name}}(input: NodeInput, _env: dict[str, str]) -> RunOutput:
-    condition = "{{task.config.condition}}"
-
-    if input.condition == condition:
-        return RunOutput(status="Success", output=input.value)
-    else:
-        return RunOutput(status="Termination", output=None)
-
-
-{% elif task.node_type == "Code" %}
-def {{task.function_name}}({{ task.handle_args }}, _env: dict[str, str]) -> RunOutput:
-    # Implement any functionality you want here
-    raise NodeRunError("Implement your code here")
-
-
-{% elif task.node_type == "Output" %}
-def {{task.function_name}}(output: NodeInput, _env: dict[str, str]) -> RunOutput:
-    return RunOutput(status="Success", output=output)
-
-
-{% elif task.node_type == "Input" %}
-{# Do nothing for Input tasks #}
-{% else %}
-def {{task.function_name}}(output: NodeInput, _env: dict[str, str]) -> RunOutput:
-    return RunOutput(status="Success", output=output)
-
-
-{% endif %}
-{% endfor %}
-# Other functions can be added here
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/{{cookiecutter.pipeline_dir_name}}.py DELETED
@@ -1,87 +0,0 @@
-from concurrent.futures import ThreadPoolExecutor
-from dataclasses import dataclass
-import logging
-from typing import Optional, Union
-
-from lmnr.types import ChatMessage
-from lmnr_engine.engine import Engine
-{% set function_names = cookiecutter._tasks.values() | selectattr('node_type', '!=', 'Input') | map(attribute='function_name') | join(', ') %}
-from .nodes.functions import {{ function_names }}
-from lmnr_engine.engine.task import Task
-
-
-logger = logging.getLogger(__name__)
-
-
-class PipelineRunnerError(Exception):
-    pass
-
-
-@dataclass
-class PipelineRunOutput:
-    value: Union[str, list[ChatMessage]]
-
-
-# This class is not imported in other files and can be renamed to desired name
-class {{ cookiecutter.class_name }}:
-    thread_pool_executor: ThreadPoolExecutor
-
-    def __init__(
-        self, thread_pool_executor: Optional[ThreadPoolExecutor] = None
-    ) -> None:
-        # Set max workers to hard-coded value for now
-        self.thread_pool_executor = (
-            ThreadPoolExecutor(max_workers=10)
-            if thread_pool_executor is None
-            else thread_pool_executor
-        )
-
-    def run(
-        self,
-        inputs: dict[str, Union[str, list]],
-        env: dict[str, str] = {},
-    ) -> dict[str, PipelineRunOutput]:
-        """
-        Run the pipeline with the given graph
-
-        Raises:
-            PipelineRunnerError: if there is an error running the pipeline
-        """
-        logger.info("Running pipeline {{ cookiecutter.pipeline_name }}, pipeline_version: {{ cookiecutter.pipeline_version_name }}")
-
-        run_inputs = {}
-        for inp_name, inp in inputs.items():
-            if isinstance(inp, str):
-                run_inputs[inp_name] = inp
-            else:
-                assert isinstance(inp, list), f"Invalid input type: {type(inp)}"
-                run_inputs[inp_name] = [ChatMessage.model_validate(msg) for msg in inp]
-
-        tasks = []
-        {% for task in cookiecutter._tasks.values() %}
-        tasks.append(
-            Task(
-                name="{{ task.name }}",
-                value={{ "''" if task.node_type == "Input" else task.function_name }},
-                handles_mapping={{ task.handles_mapping }},
-                prev=[
-                    {% for prev in task.prev %}
-                    "{{ prev }}",
-                    {% endfor %}
-                ],
-                next=[
-                    {% for next in task.next %}
-                    "{{ next }}",
-                    {% endfor %}
-                ],
-            )
-        )
-        {% endfor %}
-        engine = Engine.with_tasks(tasks, self.thread_pool_executor, env=env)
-
-        # TODO: Raise PipelineRunnerError with node_errors
-        run_res = engine.run(run_inputs)
-        return {
-            name: PipelineRunOutput(value=output.value)
-            for name, output in run_res.items()
-        }
lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/types.py DELETED
@@ -1,35 +0,0 @@
-from dataclasses import dataclass
-from typing import Union
-import uuid
-import datetime
-from lmnr.types import NodeInput, ChatMessage
-
-
-@dataclass
-class Message:
-    id: uuid.UUID
-    # output value of producing node in form of NodeInput
-    # for the following consumer
-    value: NodeInput
-    # all input messages to this node; accumulates previous messages too
-    # input_messages: list["Message"]
-    start_time: datetime.datetime
-    end_time: datetime.datetime
-    # node_id: uuid.UUID
-    # node_name: str
-    # node_type: str
-    # all node per-run metadata that needs to be logged at the end of execution
-    # meta_log: MetaLog | None
-
-    @classmethod
-    def empty(cls) -> "Message":
-        return cls(
-            id=uuid.uuid4(),
-            value="",
-            # input_messages=[],
-            start_time=datetime.datetime.now(),
-            end_time=datetime.datetime.now(),
-            # node_id=uuid.uuid4(),
-            # node_name="",
-            # node_type="",
-        )
lmnr-0.2.7.dist-info/RECORD DELETED
@@ -1,35 +0,0 @@
-lmnr/__init__.py,sha256=NWHDZ-KAl3pQGDnKUbBbZaW4es-0-9Xt6gP-UrjyuEQ,207
-lmnr/cli/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lmnr/cli/__main__.py,sha256=8hDtWlaFZK24KhfNq_ZKgtXqYHsDQDetukOCMlsbW0Q,59
-lmnr/cli/cli.py,sha256=pzr5LUILi7TcaJIkC-CzmT7RG7-HWApQmUpgK9bc7mI,2847
-lmnr/cli/cookiecutter.json,sha256=PeiMMzCPzDhsapqYoAceYGPI5lOUNimvFzh5KeQv5QE,359
-lmnr/cli/parser/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lmnr/cli/parser/nodes/__init__.py,sha256=BNbbfn0WwbFDA6TNhLOaT_Ji69rCL5voUibqMD7Knng,1163
-lmnr/cli/parser/nodes/code.py,sha256=GXqOxN6tdiStZGWLbN3WZCmDfzwYIgSRmZ5t72AOIXc,661
-lmnr/cli/parser/nodes/condition.py,sha256=AJny0ILXbSy1hTwsRvZvDUqts9INNx63yQSkD7Dp7KU,740
-lmnr/cli/parser/nodes/input.py,sha256=Xwktcih7Mezqv4cEejfPkpG8uJxDsbqRytBvKmlJDYE,578
-lmnr/cli/parser/nodes/llm.py,sha256=iQWYFnQi5PcQD9WJpTSHbSzClM6s0wBOoEqyN5c4yQo,1674
-lmnr/cli/parser/nodes/output.py,sha256=1XBppSscxM01kfZhE9oOh2GgdCVzyPVe2RAxLI5HmUc,665
-lmnr/cli/parser/nodes/router.py,sha256=dmCx4ho8_GdFJXQa8UevMf_uEP7AKBv_MJ2zpLC6Vck,894
-lmnr/cli/parser/nodes/semantic_search.py,sha256=o_XCR7BShAq8VGeKjPTwL6MxLdB07XHSd5CE71sFFiY,2105
-lmnr/cli/parser/nodes/types.py,sha256=NRhlgI3WGd86AToo-tU974DEZzbLaH4iDdP-hEEQiIo,5343
-lmnr/cli/parser/parser.py,sha256=kAZEeg358lyj_Q1IIhQB_bA7LW3Aw6RduShIfUSmLqQ,2173
-lmnr/cli/parser/utils.py,sha256=wVaqHVOR9VXl8Og9nkVyCVgHIcgbtYGkDOEGPtmjZ8g,715
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/__init__.py,sha256=pLVZqvDnNf9foGR-HXnX2F7WC2TWmyCTNcUctG8SXAI,27
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/action.py,sha256=mZMQwwPV5LtSfwdwQ7HefI3ttvwuokp4mhVI_Xn1Zck,274
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/engine.py,sha256=kCY6J7oQpm3f9YCYY2ZBzM_9bUv_XYTCRD_uFa6PLWQ,9640
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/state.py,sha256=wTx7jAv7b63-8k34cYfQp_DJxhCCOYT_qRHkmnZfnc0,1686
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/engine/task.py,sha256=ware5VIrZvluHH3mpH6h7G0YDk5L0buSQ7s09za4Fro,1200
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/__init__.py,sha256=bsfbNUBYtKv37qzc_GLhSAzBam2lnowP_dlr8pubhcg,80
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/nodes/functions.py,sha256=Bwu8p7m16NAyt9wC0DTQL0MrHbM44WylLs5wTLwSxBM,7845
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/pipelines/{{cookiecutter.pipeline_dir_name}}/{{cookiecutter.pipeline_dir_name}}.py,sha256=WG-ZMofPpGXCx5jdWVry3_XBzcKjqn8ZycFSiWEOBPg,2858
-lmnr/cli/{{cookiecutter.lmnr_pipelines_dir_name}}/types.py,sha256=iWuflMV7TiaBPs6-B-BlrovvWpZgHGGHK0v8rSqER7A,997
-lmnr/sdk/endpoint.py,sha256=0HjcxMUcJz-klFZO2f5xtTaoLjcaEb8vrJ_YldTWUc8,7467
-lmnr/sdk/remote_debugger.py,sha256=vCpMz7y3uboOi81qEwr8z3fhQ2H1y2YtJAxXVtb6uCA,5141
-lmnr/types.py,sha256=OR9xRAQ5uTTwpJTDL_e3jZqxYJWvyX96CCoxr3oo94g,2112
-lmnr-0.2.7.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
-lmnr-0.2.7.dist-info/METADATA,sha256=Ya1KVPAiyGxAZybuXSum8wmy4l-SnyYvaTTrvQ7uZRU,5427
-lmnr-0.2.7.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
-lmnr-0.2.7.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
-lmnr-0.2.7.dist-info/RECORD,,