lmnr 0.2.14__py3-none-any.whl → 0.3.0b1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. lmnr/__init__.py +4 -4
  2. lmnr/sdk/client.py +156 -0
  3. lmnr/sdk/collector.py +177 -0
  4. lmnr/sdk/constants.py +1 -0
  5. lmnr/sdk/context.py +456 -0
  6. lmnr/sdk/decorators.py +277 -0
  7. lmnr/sdk/interface.py +339 -0
  8. lmnr/sdk/providers/__init__.py +2 -0
  9. lmnr/sdk/providers/base.py +28 -0
  10. lmnr/sdk/providers/fallback.py +131 -0
  11. lmnr/sdk/providers/openai.py +140 -0
  12. lmnr/sdk/providers/utils.py +33 -0
  13. lmnr/sdk/tracing_types.py +197 -0
  14. lmnr/sdk/types.py +69 -0
  15. lmnr/sdk/utils.py +102 -0
  16. lmnr-0.3.0b1.dist-info/METADATA +186 -0
  17. lmnr-0.3.0b1.dist-info/RECORD +21 -0
  18. lmnr/cli/__init__.py +0 -0
  19. lmnr/cli/__main__.py +0 -4
  20. lmnr/cli/cli.py +0 -232
  21. lmnr/cli/parser/__init__.py +0 -0
  22. lmnr/cli/parser/nodes/__init__.py +0 -45
  23. lmnr/cli/parser/nodes/code.py +0 -36
  24. lmnr/cli/parser/nodes/condition.py +0 -30
  25. lmnr/cli/parser/nodes/input.py +0 -25
  26. lmnr/cli/parser/nodes/json_extractor.py +0 -29
  27. lmnr/cli/parser/nodes/llm.py +0 -56
  28. lmnr/cli/parser/nodes/output.py +0 -27
  29. lmnr/cli/parser/nodes/router.py +0 -37
  30. lmnr/cli/parser/nodes/semantic_search.py +0 -53
  31. lmnr/cli/parser/nodes/types.py +0 -153
  32. lmnr/cli/parser/parser.py +0 -62
  33. lmnr/cli/parser/utils.py +0 -49
  34. lmnr/cli/zip.py +0 -16
  35. lmnr/sdk/endpoint.py +0 -186
  36. lmnr/sdk/registry.py +0 -29
  37. lmnr/sdk/remote_debugger.py +0 -148
  38. lmnr/types.py +0 -101
  39. lmnr-0.2.14.dist-info/METADATA +0 -187
  40. lmnr-0.2.14.dist-info/RECORD +0 -28
  41. {lmnr-0.2.14.dist-info → lmnr-0.3.0b1.dist-info}/LICENSE +0 -0
  42. {lmnr-0.2.14.dist-info → lmnr-0.3.0b1.dist-info}/WHEEL +0 -0
  43. {lmnr-0.2.14.dist-info → lmnr-0.3.0b1.dist-info}/entry_points.txt +0 -0
lmnr-0.3.0b1.dist-info/METADATA ADDED
@@ -0,0 +1,186 @@
+ Metadata-Version: 2.1
+ Name: lmnr
+ Version: 0.3.0b1
+ Summary: Python SDK for Laminar AI
+ License: Apache-2.0
+ Author: lmnr.ai
+ Requires-Python: >=3.9,<4.0
+ Classifier: License :: OSI Approved :: Apache Software License
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Requires-Dist: backoff (>=2.2.1,<3.0.0)
+ Requires-Dist: black (>=24.4.2,<25.0.0)
+ Requires-Dist: openai (>=1.41.1,<2.0.0)
+ Requires-Dist: pydantic (>=2.7.4,<3.0.0)
+ Requires-Dist: python-dotenv (>=1.0.1,<2.0.0)
+ Requires-Dist: requests (>=2.32.3,<3.0.0)
+ Description-Content-Type: text/markdown
+
+ # Laminar AI
+
+ This repo provides the core for code generation, the Laminar CLI, and the Laminar SDK.
+
+ ## Quickstart
+ ```sh
+ python3 -m venv .myenv
+ source .myenv/bin/activate  # or use your favorite env management tool
+
+ pip install lmnr
+ ```
+
+
+ ## Decorator instrumentation example
+
+ For easy automatic instrumentation, we provide two simple primitives:
+
+ - `observe` - a multi-purpose automatic decorator that starts traces and spans when functions are entered, and finishes them when functions return
+ - `wrap_llm_call` - a function that takes in your LLM call and returns a "decorated" version of it. This does all the same things as `observe`, plus
+ a few utilities around LLM-specific things, such as counting tokens and recording model params.
+
+ You can also import `lmnr_context` to interact with, and have more control over, the context of the current span.
+
+ ```python
+ import os
+ from openai import OpenAI
+
+ from lmnr import observe, wrap_llm_call, lmnr_context
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+ @observe()  # annotate all functions you want to trace
+ def poem_writer(topic="turbulence"):
+     prompt = f"write a poem about {topic}"
+
+     # wrap the actual final call to LLM with `wrap_llm_call`
+     response = wrap_llm_call(client.chat.completions.create)(
+         model="gpt-4o",
+         messages=[
+             {"role": "system", "content": "You are a helpful assistant."},
+             {"role": "user", "content": prompt},
+         ],
+     )
+
+     poem = response.choices[0].message.content
+
+     if topic in poem:
+         lmnr_context.event("topic_alignment")  # send an event with a pre-defined name
+
+     # to trigger an automatic check for a possible event do:
+     lmnr_context.check_span_event("excessive_wordiness")
+
+     return poem
+
+ if __name__ == "__main__":
+     print(poem_writer(topic="laminar flow"))
+ ```
+
+ This gives the advantage of quick instrumentation, but it is somewhat limited in flexibility and does not work as expected with threading.
+ This is because we use `contextvars.ContextVar` under the hood, and Python does not automatically propagate context variables to new threads.
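+
+ As a minimal standard-library illustration (not Laminar-specific), a value set in a `ContextVar` on the main thread is not visible from a worker thread unless you copy the context explicitly:
+
+ ```python
+ import contextvars
+ from concurrent.futures import ThreadPoolExecutor
+
+ current_span = contextvars.ContextVar("current_span", default=None)
+
+ def read_span():
+     return current_span.get()
+
+ current_span.set("root-span")
+
+ with ThreadPoolExecutor(max_workers=1) as pool:
+     # worker threads start with their own context, so the value is lost
+     print(pool.submit(read_span).result())  # None
+     # copying the context explicitly carries the value over
+     ctx = contextvars.copy_context()
+     print(pool.submit(ctx.run, read_span).result())  # 'root-span'
+ ```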
+
+ If you want to instrument your code manually, read on to the next section.
+
+ ## Manual instrumentation example
+
+ For manual instrumentation you will need to import the following:
+ - `trace` - a function to start a trace. It returns a `TraceContext`
+ - `TraceContext` - a pointer to the current trace that you can pass between functions as you want
+ - `SpanContext` - a pointer to the current span that you can pass between functions as you want
+
+ Both `TraceContext` and `SpanContext` expose the following interface:
+ - `span(name: str, **kwargs)` - create a child span within the current context. Returns `SpanContext`
+ - `update(**kwargs)` - update the current trace or span and return it. Returns `TraceContext` or `SpanContext`. Useful when some metadata becomes known later during program execution
+ - `end(**kwargs)` - update the current span, and terminate it
+
+ In addition, `SpanContext` allows you to:
+ - `event(name: str, value: str | int = None)` - emit a custom event at any point
+ - `evaluate_event(name: str, data: str)` - register a possible event for automatic checking by Laminar
+
+ Example:
+
+ ```python
+ import os
+ from openai import OpenAI
+
+ from lmnr import trace, TraceContext, SpanContext
+ client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+ def poem_writer(t: TraceContext, topic="turbulence"):
+     span: SpanContext = t.span(name="poem_writer", input=None)
+
+     prompt = f"write a poem about {topic}"
+     messages = [
+         {"role": "system", "content": "You are a helpful assistant."},
+         {"role": "user", "content": prompt},
+     ]
+     # create a child span within the current `poem_writer` span.
+     llm_span = span.span(name="OpenAI completion", input=messages, span_type="LLM")
+
+     response = client.chat.completions.create(
+         model="gpt-4o-mini",
+         messages=messages,
+     )
+     poem = response.choices[0].message.content
+     if topic in poem:
+         llm_span.event("topic_alignment")  # send an event with a pre-defined name
+
+     # note that you can register possible events here as well, not only via `llm_span.evaluate_event()`
+     llm_span.end(output=poem, check_event_names=["excessive_wordiness"])
+     span.end(output=poem)
+     return poem
+
+
+ t: TraceContext = trace(user_id="user", session_id="session", release="release")
+ poem_writer(t, topic="laminar flow")
+ t.end(success=True)
+ ```
+
+ ## Features
+
+ - Make Laminar endpoint calls from your Python code
+ - Make Laminar endpoint calls that can run your own functions as tools
+ - CLI to generate code from pipelines you build on Laminar, or to execute your own functions while you test your flows in the workshop
+
+ ## Making Laminar pipeline calls
+
+ When you are ready to use your pipeline in your code, deploy it in Laminar by selecting the target version for the pipeline.
+
+ Once your pipeline target is set, you can call it from Python in just a few lines.
+
+ Example use:
+
+ ```python
+ from lmnr import Laminar
+
+ # for decorator instrumentation, do: `from lmnr import lmnr_context`
+
+ l = Laminar('<YOUR_PROJECT_API_KEY>')
+ result = l.run(  # lmnr_context.run( for decorator instrumentation
+     pipeline='my_pipeline_name',
+     inputs={'input_node_name': 'some_value'},
+     # all environment variables
+     env={'OPENAI_API_KEY': 'sk-some-key'},
+     # any metadata to attach to this run's trace
+     metadata={'session_id': 'your_custom_session_id'},
+ )
+ ```
+
+ Resulting in:
+
+ ```python
+ >>> result
+ PipelineRunResponse(
+     outputs={'output': {'value': [ChatMessage(role='user', content='hello')]}},
+     # useful to locate your trace
+     run_id='53b012d5-5759-48a6-a9c5-0011610e3669'
+ )
+ ```
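+
+ As a quick usage note (assuming the response shape shown above), the fields can be read directly:
+
+ ```python
+ # `outputs` is keyed by output node name; `run_id` helps locate the trace
+ chat_messages = result.outputs['output']['value']
+ print(chat_messages[0].content)  # 'hello'
+ print(result.run_id)
+ ```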
+
+ ## PROJECT_API_KEY
+
+ Read more [here](https://docs.lmnr.ai/api-reference/introduction#authentication) on how to get `PROJECT_API_KEY`.
+
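+ Rather than hard-coding the key, one minimal sketch (an assumption, not prescribed by this README) is to load it from the environment; `python-dotenv` is already a declared dependency, and `LMNR_PROJECT_API_KEY` is the variable the CLI reads:
+
+ ```python
+ import os
+ from dotenv import load_dotenv
+ from lmnr import Laminar
+
+ load_dotenv()  # loads variables (e.g. LMNR_PROJECT_API_KEY) from a local .env file into os.environ
+ l = Laminar(os.environ['LMNR_PROJECT_API_KEY'])
+ ```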
lmnr-0.3.0b1.dist-info/RECORD ADDED
@@ -0,0 +1,21 @@
+ lmnr/__init__.py,sha256=U3sQyxCHM9ojzfo05XYxM0T_Bh1StZFSp5K82NjATxc,242
+ lmnr/sdk/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lmnr/sdk/client.py,sha256=6mp4sQF1IEESPYe6ABFgchMBQBKr2AT7eqP-mIC5cEA,5482
+ lmnr/sdk/collector.py,sha256=6LRmPhOcmGplUDWm_sJh0dVrLTHknd_kmq7POGuAvoQ,5338
+ lmnr/sdk/constants.py,sha256=USCfwuUqRx6_0xC8WUxqGj766dInqQkWJcf8U5vPK7s,34
+ lmnr/sdk/context.py,sha256=jfu2HGyZEJYSDf-LQAmmK8MKFnNhYfR66k_baQWx99s,15271
+ lmnr/sdk/decorators.py,sha256=B2wdhs45-ZM0niotZBOx5FSSCu_vGQ32pntG9o3fKKU,11860
+ lmnr/sdk/interface.py,sha256=BucPNopp_Xvb1Tvn6We4ETvqqQiWtwjbCksAtt4qmvU,13717
+ lmnr/sdk/providers/__init__.py,sha256=wNCgQnt9-bnTNXLQWdPgyKhqA1ajiaEd1Rr2KPOpazM,54
+ lmnr/sdk/providers/base.py,sha256=xc6iA8yY_VK6tbzswt-3naZ53aAXtOLur9j8eimC_ZA,1054
+ lmnr/sdk/providers/fallback.py,sha256=DXnxBX1vxusGSUC76d0AjouR4NSoajQMdMeG37TRf4k,4741
+ lmnr/sdk/providers/openai.py,sha256=EygnBniKlcic6eIOfS5zORpytLqUYZxnDRB5Z4MnXZY,5193
+ lmnr/sdk/providers/utils.py,sha256=ROt82VrvezExYOxionAynD3dp6oX5JoPW6F1ayTm7q8,946
+ lmnr/sdk/tracing_types.py,sha256=RvVb8yCLjCu9DT59OX_tvUxaOTCtE6fcsDH4nMddzHA,6399
+ lmnr/sdk/types.py,sha256=hVxOsa3oCQQ-8aS_WkOtErg4nHJRkBVySfYlTgDlDyk,2084
+ lmnr/sdk/utils.py,sha256=1yhXtdGmVXfnc8SOQiTH_zAZGbZrzO8oaFd7q5nE7eY,3349
+ lmnr-0.3.0b1.dist-info/LICENSE,sha256=67b_wJHVV1CBaWkrKFWU1wyqTPSdzH77Ls-59631COg,10411
+ lmnr-0.3.0b1.dist-info/METADATA,sha256=U5UBpCkOSbDzsoQw4b4GkJxGKgMlxv8qoVRLzAVupiw,6700
+ lmnr-0.3.0b1.dist-info/WHEEL,sha256=sP946D7jFCHeNz5Iq4fL4Lu-PrWrFsgfLXbbkciIZwg,88
+ lmnr-0.3.0b1.dist-info/entry_points.txt,sha256=Qg7ZRax4k-rcQsZ26XRYQ8YFSBiyY2PNxYfq4a6PYXI,41
+ lmnr-0.3.0b1.dist-info/RECORD,,
lmnr/cli/__init__.py DELETED
File without changes
lmnr/cli/__main__.py DELETED
@@ -1,4 +0,0 @@
- from .cli import cli
-
- if __name__ == "__main__":
-     cli()
lmnr/cli/cli.py DELETED
@@ -1,232 +0,0 @@
- from pathlib import Path
- import sys
- import requests
- from dotenv import load_dotenv, find_dotenv
- import importlib
- import os
- import click
- import logging
- from cookiecutter.main import cookiecutter
- from pydantic.alias_generators import to_pascal
-
- from lmnr.cli.zip import zip_directory
- from lmnr.sdk.registry import Registry as Pipeline
- from lmnr.sdk.remote_debugger import RemoteDebugger
- from lmnr.types import NodeFunction
-
- from .parser.parser import runnable_graph_to_template_vars
-
- logger = logging.getLogger(__name__)
-
-
- @click.group()
- @click.version_option()
- def cli():
-     "CLI for Laminar AI Engine"
-
-
- @cli.command(name="pull")
- @click.argument("pipeline_name")
- @click.argument("pipeline_version_name")
- @click.option(
-     "-p",
-     "--project-api-key",
-     help="Project API key",
- )
- @click.option(
-     "-l",
-     "--loglevel",
-     help="Sets logging level",
- )
- def pull(pipeline_name, pipeline_version_name, project_api_key, loglevel):
-     loglevel_str_to_val = {
-         "DEBUG": logging.DEBUG,
-         "INFO": logging.INFO,
-         "WARNING": logging.WARNING,
-         "ERROR": logging.ERROR,
-         "CRITICAL": logging.CRITICAL,
-     }
-     logging.basicConfig()
-     logging.getLogger().setLevel(loglevel_str_to_val.get(loglevel, logging.WARNING))
-
-     project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
-     if not project_api_key:
-         load_dotenv()
-         project_api_key = os.environ.get("LMNR_PROJECT_API_KEY")
-     if not project_api_key:
-         raise ValueError("LMNR_PROJECT_API_KEY is not set")
-
-     headers = {"Authorization": f"Bearer {project_api_key}"}
-     params = {
-         "pipelineName": pipeline_name,
-         "pipelineVersionName": pipeline_version_name,
-     }
-     res = requests.get(
-         "https://api.lmnr.ai/v2/pipeline-version-by-name",
-         headers=headers,
-         params=params,
-     )
-     if res.status_code != 200:
-         try:
-             res_json = res.json()
-         except Exception:
-             raise ValueError(
-                 f"Error in fetching pipeline version: {res.status_code}\n{res.text}"
-             )
-         raise ValueError(
-             f"Error in fetching pipeline version: {res.status_code}\n{res_json}"
-         )
-
-     pipeline_version = res.json()
-
-     class_name = to_pascal(pipeline_name.replace(" ", "_").replace("-", "_"))
-
-     context = {
-         "pipeline_name": pipeline_name,
-         "pipeline_version_name": pipeline_version_name,
-         "class_name": class_name,
-         # _tasks starts from underscore because we don't want it to be templated
-         # some tasks contains LLM nodes which have prompts
-         # which we don't want to be rendered by cookiecutter
-         "_tasks": runnable_graph_to_template_vars(pipeline_version["runnableGraph"]),
-     }
-
-     logger.info(f"Context:\n{context}")
-     cookiecutter(
-         "https://github.com/lmnr-ai/lmnr-python-engine.git",
-         output_dir=".",
-         config_file=None,
-         extra_context=context,
-         no_input=True,
-         overwrite_if_exists=True,
-     )
-
-
- @cli.command(name="deploy")
- @click.argument("endpoint_id")
- @click.option(
-     "-p",
-     "--project-api-key",
-     help="Project API key",
- )
- def deploy(endpoint_id, project_api_key):
-     project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
-     if not project_api_key:
-         load_dotenv()
-         project_api_key = os.environ.get("LMNR_PROJECT_API_KEY")
-     if not project_api_key:
-         raise ValueError("LMNR_PROJECT_API_KEY is not set")
-
-     current_directory = Path.cwd()
-     zip_file_path = current_directory / "archive.zip"
-
-     zip_directory(current_directory, zip_file_path)
-
-     try:
-         url = f"https://api.lmnr.ai/v2/endpoints/{endpoint_id}/deploy-code"
-         with open(zip_file_path, "rb") as f:
-             headers = {
-                 "Authorization": f"Bearer {project_api_key}",
-             }
-             files = {"file": f}
-             response = requests.post(url, headers=headers, files=files)
-
-             if response.status_code != 200:
-                 raise ValueError(
-                     f"Error in deploying code: {response.status_code}\n{response.text}"
-                 )
-     except Exception:
-         logging.exception("Error in deploying code")
-     finally:
-         Path.unlink(zip_file_path, missing_ok=True)
-
-
- def _load_functions(cur_dir: str) -> dict[str, NodeFunction]:
-     parent_dir, name = os.path.split(cur_dir)  # e.g. /Users/username, project_name
-
-     # Needed to __import__ pipeline.py
-     if sys.path[0] != parent_dir:
-         sys.path.insert(0, parent_dir)
-     # Needed to import src in pipeline.py and other files
-     if cur_dir not in sys.path:
-         sys.path.insert(0, cur_dir)
-
-     module_name = f"{name}.pipeline"
-     if module_name in sys.modules:
-         # Reload the module to get the updated version
-         importlib.reload(sys.modules[module_name])
-     else:
-         # Import the module for the first time
-         __import__(module_name)
-
-     module = sys.modules[module_name]
-
-     matches = [v for v in module.__dict__.values() if isinstance(v, Pipeline)]
-     if not matches:
-         raise ValueError("No Pipeline found in the module")
-     if len(matches) > 1:
-         raise ValueError("Multiple Pipelines found in the module")
-     pipeline = matches[0]
-
-     return pipeline.functions
-
- from watchdog.observers import Observer
- from watchdog.events import PatternMatchingEventHandler
- import time
-
- class SimpleEventHandler(PatternMatchingEventHandler):
-     def __init__(self, project_api_key: str, session_id: str, functions: dict[str, NodeFunction]):
-         super().__init__(ignore_patterns=["*.pyc*", "*.pyo", "**/__pycache__"])
-         self.project_api_key = project_api_key
-         self.session_id = session_id
-         self.functions = functions
-         self.debugger = RemoteDebugger(project_api_key, session_id, functions)
-         self.debugger.start()
-
-     def on_any_event(self, event):
-         print(f"Files at {event.src_path} updated. Restarting debugger...")
-         self.debugger.stop()
-         self.functions = _load_functions(os.getcwd())
-         self.debugger = RemoteDebugger(self.project_api_key, self.session_id, self.functions)
-         self.debugger.start()
-
- @cli.command(name="dev")
- @click.option(
-     "-p",
-     "--project-api-key",
-     help="Project API key. If not provided, LMNR_PROJECT_API_KEY from os.environ or .env is used",
- )
- @click.option(
-     "-s",
-     "--dev-session-id",
-     help="Dev session ID. If not provided, LMNR_DEV_SESSION_ID from os.environ or .env is used",
- )
- def dev(project_api_key, dev_session_id):
-     env_path = find_dotenv(usecwd=True)
-     project_api_key = project_api_key or os.environ.get("LMNR_PROJECT_API_KEY")
-     if not project_api_key:
-         load_dotenv(env_path=env_path)
-         project_api_key = os.environ.get("LMNR_PROJECT_API_KEY")
-     if not project_api_key:
-         raise ValueError("LMNR_PROJECT_API_KEY is not set")
-
-     session_id = dev_session_id or os.environ.get("LMNR_DEV_SESSION_ID")
-     if not session_id:
-         load_dotenv(env_path=env_path)
-         session_id = os.environ.get("LMNR_DEV_SESSION_ID")
-     if not session_id:
-         raise ValueError("LMNR_DEV_SESSION_ID is not set")
-     cur_dir = os.getcwd()  # e.g. /Users/username/project_name
-     functions = _load_functions(cur_dir)
-
-     observer = Observer()
-     handler = SimpleEventHandler(project_api_key, session_id, functions)
-     observer.schedule(handler, cur_dir, recursive=True)
-     observer.start()
-     try:
-         while True:
-             time.sleep(1)
-     except KeyboardInterrupt:
-         handler.debugger.stop()
-         observer.stop()
-         observer.join()
lmnr/cli/parser/__init__.py DELETED
File without changes
lmnr/cli/parser/nodes/__init__.py DELETED
@@ -1,45 +0,0 @@
- from abc import ABCMeta, abstractmethod
- from dataclasses import dataclass
- from typing import Optional
- import uuid
-
-
- @dataclass
- class Handle:
-     id: uuid.UUID
-     name: Optional[str]
-
-     @classmethod
-     def from_dict(cls, dict: dict) -> "Handle":
-         return cls(
-             id=uuid.UUID(dict["id"]),
-             name=(dict["name"] if "name" in dict else None),
-         )
-
-
- @abstractmethod
- class NodeFunctions(metaclass=ABCMeta):
-     @abstractmethod
-     def handles_mapping(
-         self, output_handle_id_to_node_name: dict[str, str]
-     ) -> list[tuple[str, str]]:
-         """
-         Returns a list of tuples mapping from this node's input
-         handle name to the unique name of the previous node.
-
-         Assumes previous node has only one output.
-         """
-         pass
-
-     @abstractmethod
-     def node_type(self) -> str:
-         pass
-
-     @abstractmethod
-     def config(self) -> dict:
-         """
-         Returns a dictionary of node-specific configuration.
-
-         E.g. prompt and model name for LLM node.
-         """
-         pass
lmnr/cli/parser/nodes/code.py DELETED
@@ -1,36 +0,0 @@
- from dataclasses import dataclass
- import uuid
-
- from lmnr.cli.parser.nodes import Handle, NodeFunctions
- from lmnr.cli.parser.utils import map_handles
-
-
- @dataclass
- class CodeNode(NodeFunctions):
-     id: uuid.UUID
-     name: str
-     inputs: list[Handle]
-     outputs: list[Handle]
-     inputs_mappings: dict[uuid.UUID, uuid.UUID]
-     code: str
-     fn_name: str
-
-     def handles_mapping(
-         self, output_handle_id_to_node_name: dict[str, str]
-     ) -> list[tuple[str, str]]:
-         return map_handles(
-             self.inputs, self.inputs_mappings, output_handle_id_to_node_name
-         )
-
-     def node_type(self) -> str:
-         return "Code"
-
-     def config(self) -> dict:
-         return {
-             "code": self.code,
-             "fn_name": self.fn_name,
-             "fn_inputs": ", ".join(
-                 f"{handle.name}=input_to_code_node_arg({handle.name})"
-                 for handle in self.inputs
-             ),
-         }
lmnr/cli/parser/nodes/condition.py DELETED
@@ -1,30 +0,0 @@
- from dataclasses import dataclass
- import uuid
-
- from lmnr.cli.parser.nodes import Handle, NodeFunctions
- from lmnr.cli.parser.utils import map_handles
-
-
- @dataclass
- class ConditionNode(NodeFunctions):
-     id: uuid.UUID
-     name: str
-     inputs: list[Handle]
-     outputs: list[Handle]
-     inputs_mappings: dict[uuid.UUID, uuid.UUID]
-     condition: str
-
-     def handles_mapping(
-         self, output_handle_id_to_node_name: dict[str, str]
-     ) -> list[tuple[str, str]]:
-         return map_handles(
-             self.inputs, self.inputs_mappings, output_handle_id_to_node_name
-         )
-
-     def node_type(self) -> str:
-         return "Condition"
-
-     def config(self) -> dict:
-         return {
-             "condition": self.condition,
-         }
lmnr/cli/parser/nodes/input.py DELETED
@@ -1,25 +0,0 @@
- from dataclasses import dataclass
- from typing import Optional
- import uuid
-
- from lmnr.cli.parser.nodes import Handle, NodeFunctions
- from lmnr.types import NodeInput
-
-
- @dataclass
- class InputNode(NodeFunctions):
-     id: uuid.UUID
-     name: str
-     outputs: list[Handle]
-     input: Optional[NodeInput]
-
-     def handles_mapping(
-         self, output_handle_id_to_node_name: dict[str, str]
-     ) -> list[tuple[str, str]]:
-         return []
-
-     def node_type(self) -> str:
-         return "Input"
-
-     def config(self) -> dict:
-         return {}
lmnr/cli/parser/nodes/json_extractor.py DELETED
@@ -1,29 +0,0 @@
- from dataclasses import dataclass
-
- import uuid
-
- from lmnr.cli.parser.nodes import Handle, NodeFunctions
- from lmnr.cli.parser.utils import map_handles
-
-
- @dataclass
- class JsonExtractorNode(NodeFunctions):
-     id: uuid.UUID
-     name: str
-     inputs: list[Handle]
-     outputs: list[Handle]
-     inputs_mappings: dict[uuid.UUID, uuid.UUID]
-     template: str
-
-     def handles_mapping(
-         self, output_handle_id_to_node_name: dict[str, str]
-     ) -> list[tuple[str, str]]:
-         return map_handles(
-             self.inputs, self.inputs_mappings, output_handle_id_to_node_name
-         )
-
-     def node_type(self) -> str:
-         return "JsonExtractor"
-
-     def config(self) -> dict:
-         return {"template": self.template}
lmnr/cli/parser/nodes/llm.py DELETED
@@ -1,56 +0,0 @@
- from dataclasses import dataclass
- from typing import Optional
- import uuid
-
- from lmnr.cli.parser.nodes import Handle, NodeFunctions
- from lmnr.cli.parser.utils import map_handles
-
-
- @dataclass
- class LLMNode(NodeFunctions):
-     id: uuid.UUID
-     name: str
-     inputs: list[Handle]
-     dynamic_inputs: list[Handle]
-     outputs: list[Handle]
-     inputs_mappings: dict[uuid.UUID, uuid.UUID]
-     prompt: str
-     model: str
-     model_params: Optional[str]
-     stream: bool
-     structured_output_enabled: bool
-     structured_output_max_retries: int
-     structured_output_schema: Optional[str]
-     structured_output_schema_target: Optional[str]
-
-     def handles_mapping(
-         self, output_handle_id_to_node_name: dict[str, str]
-     ) -> list[tuple[str, str]]:
-         combined_inputs = self.inputs + self.dynamic_inputs
-         return map_handles(
-             combined_inputs, self.inputs_mappings, output_handle_id_to_node_name
-         )
-
-     def node_type(self) -> str:
-         return "LLM"
-
-     def config(self) -> dict:
-         # For easier access in the template separate the provider and model here
-         provider, model = self.model.split(":", maxsplit=1)
-
-         return {
-             "prompt": self.prompt,
-             "provider": provider,
-             "model": model,
-             "model_params": self.model_params,
-             "stream": self.stream,
-             "enable_structured_output": self.structured_output_enabled
-             and self.structured_output_schema is not None,
-             "structured_output_max_retries": self.structured_output_max_retries,
-             "structured_output_schema": self.structured_output_schema,
-             "structured_output_schema_target_str": (
-                 "None"
-                 if self.structured_output_schema_target is None
-                 else f'"{self.structured_output_schema_target}"'
-             ),
-         }