namel3ss 0.1.0a0__py3-none-any.whl
This diff shows the contents of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package exactly as it appears in its public registry.
- namel3ss/__init__.py +4 -0
- namel3ss/ast/__init__.py +5 -0
- namel3ss/ast/agents.py +13 -0
- namel3ss/ast/ai.py +23 -0
- namel3ss/ast/base.py +10 -0
- namel3ss/ast/expressions.py +55 -0
- namel3ss/ast/nodes.py +86 -0
- namel3ss/ast/pages.py +43 -0
- namel3ss/ast/program.py +22 -0
- namel3ss/ast/records.py +27 -0
- namel3ss/ast/statements.py +107 -0
- namel3ss/ast/tool.py +11 -0
- namel3ss/cli/__init__.py +2 -0
- namel3ss/cli/actions_mode.py +39 -0
- namel3ss/cli/app_loader.py +22 -0
- namel3ss/cli/commands/action.py +27 -0
- namel3ss/cli/commands/run.py +43 -0
- namel3ss/cli/commands/ui.py +26 -0
- namel3ss/cli/commands/validate.py +23 -0
- namel3ss/cli/format_mode.py +30 -0
- namel3ss/cli/io/json_io.py +19 -0
- namel3ss/cli/io/read_source.py +16 -0
- namel3ss/cli/json_io.py +21 -0
- namel3ss/cli/lint_mode.py +29 -0
- namel3ss/cli/main.py +135 -0
- namel3ss/cli/new_mode.py +146 -0
- namel3ss/cli/runner.py +28 -0
- namel3ss/cli/studio_mode.py +22 -0
- namel3ss/cli/ui_mode.py +14 -0
- namel3ss/config/__init__.py +4 -0
- namel3ss/config/dotenv.py +33 -0
- namel3ss/config/loader.py +83 -0
- namel3ss/config/model.py +49 -0
- namel3ss/errors/__init__.py +2 -0
- namel3ss/errors/base.py +34 -0
- namel3ss/errors/render.py +22 -0
- namel3ss/format/__init__.py +3 -0
- namel3ss/format/formatter.py +18 -0
- namel3ss/format/rules.py +97 -0
- namel3ss/ir/__init__.py +3 -0
- namel3ss/ir/lowering/__init__.py +4 -0
- namel3ss/ir/lowering/agents.py +42 -0
- namel3ss/ir/lowering/ai.py +45 -0
- namel3ss/ir/lowering/expressions.py +49 -0
- namel3ss/ir/lowering/flow.py +21 -0
- namel3ss/ir/lowering/pages.py +48 -0
- namel3ss/ir/lowering/program.py +34 -0
- namel3ss/ir/lowering/records.py +25 -0
- namel3ss/ir/lowering/statements.py +122 -0
- namel3ss/ir/lowering/tools.py +16 -0
- namel3ss/ir/model/__init__.py +50 -0
- namel3ss/ir/model/agents.py +33 -0
- namel3ss/ir/model/ai.py +31 -0
- namel3ss/ir/model/base.py +20 -0
- namel3ss/ir/model/expressions.py +50 -0
- namel3ss/ir/model/pages.py +43 -0
- namel3ss/ir/model/program.py +28 -0
- namel3ss/ir/model/statements.py +76 -0
- namel3ss/ir/model/tools.py +11 -0
- namel3ss/ir/nodes.py +88 -0
- namel3ss/lexer/__init__.py +2 -0
- namel3ss/lexer/lexer.py +152 -0
- namel3ss/lexer/tokens.py +98 -0
- namel3ss/lint/__init__.py +4 -0
- namel3ss/lint/engine.py +125 -0
- namel3ss/lint/semantic.py +45 -0
- namel3ss/lint/text_scan.py +70 -0
- namel3ss/lint/types.py +22 -0
- namel3ss/parser/__init__.py +3 -0
- namel3ss/parser/agent.py +78 -0
- namel3ss/parser/ai.py +113 -0
- namel3ss/parser/constraints.py +37 -0
- namel3ss/parser/core.py +166 -0
- namel3ss/parser/expressions.py +105 -0
- namel3ss/parser/flow.py +37 -0
- namel3ss/parser/pages.py +76 -0
- namel3ss/parser/program.py +45 -0
- namel3ss/parser/records.py +66 -0
- namel3ss/parser/statements/__init__.py +27 -0
- namel3ss/parser/statements/control_flow.py +116 -0
- namel3ss/parser/statements/core.py +66 -0
- namel3ss/parser/statements/data.py +17 -0
- namel3ss/parser/statements/letset.py +22 -0
- namel3ss/parser/statements.py +1 -0
- namel3ss/parser/tokens.py +35 -0
- namel3ss/parser/tool.py +29 -0
- namel3ss/runtime/__init__.py +3 -0
- namel3ss/runtime/ai/http/client.py +24 -0
- namel3ss/runtime/ai/mock_provider.py +5 -0
- namel3ss/runtime/ai/provider.py +29 -0
- namel3ss/runtime/ai/providers/__init__.py +18 -0
- namel3ss/runtime/ai/providers/_shared/errors.py +20 -0
- namel3ss/runtime/ai/providers/_shared/parse.py +18 -0
- namel3ss/runtime/ai/providers/anthropic.py +55 -0
- namel3ss/runtime/ai/providers/gemini.py +50 -0
- namel3ss/runtime/ai/providers/mistral.py +51 -0
- namel3ss/runtime/ai/providers/mock.py +23 -0
- namel3ss/runtime/ai/providers/ollama.py +39 -0
- namel3ss/runtime/ai/providers/openai.py +55 -0
- namel3ss/runtime/ai/providers/registry.py +38 -0
- namel3ss/runtime/ai/trace.py +18 -0
- namel3ss/runtime/executor/__init__.py +3 -0
- namel3ss/runtime/executor/agents.py +91 -0
- namel3ss/runtime/executor/ai_runner.py +90 -0
- namel3ss/runtime/executor/api.py +54 -0
- namel3ss/runtime/executor/assign.py +40 -0
- namel3ss/runtime/executor/context.py +31 -0
- namel3ss/runtime/executor/executor.py +77 -0
- namel3ss/runtime/executor/expr_eval.py +110 -0
- namel3ss/runtime/executor/records_ops.py +64 -0
- namel3ss/runtime/executor/result.py +13 -0
- namel3ss/runtime/executor/signals.py +6 -0
- namel3ss/runtime/executor/statements.py +99 -0
- namel3ss/runtime/memory/manager.py +52 -0
- namel3ss/runtime/memory/profile.py +17 -0
- namel3ss/runtime/memory/semantic.py +20 -0
- namel3ss/runtime/memory/short_term.py +18 -0
- namel3ss/runtime/records/service.py +105 -0
- namel3ss/runtime/store/__init__.py +2 -0
- namel3ss/runtime/store/memory_store.py +62 -0
- namel3ss/runtime/tools/registry.py +13 -0
- namel3ss/runtime/ui/__init__.py +2 -0
- namel3ss/runtime/ui/actions.py +124 -0
- namel3ss/runtime/validators/__init__.py +2 -0
- namel3ss/runtime/validators/constraints.py +126 -0
- namel3ss/schema/__init__.py +2 -0
- namel3ss/schema/records.py +52 -0
- namel3ss/studio/__init__.py +4 -0
- namel3ss/studio/api.py +115 -0
- namel3ss/studio/edit/__init__.py +3 -0
- namel3ss/studio/edit/ops.py +80 -0
- namel3ss/studio/edit/selectors.py +74 -0
- namel3ss/studio/edit/transform.py +39 -0
- namel3ss/studio/server.py +175 -0
- namel3ss/studio/session.py +11 -0
- namel3ss/studio/web/app.js +248 -0
- namel3ss/studio/web/index.html +44 -0
- namel3ss/studio/web/styles.css +42 -0
- namel3ss/templates/__init__.py +3 -0
- namel3ss/templates/__pycache__/__init__.cpython-312.pyc +0 -0
- namel3ss/templates/ai_assistant/.gitignore +1 -0
- namel3ss/templates/ai_assistant/README.md +10 -0
- namel3ss/templates/ai_assistant/app.ai +30 -0
- namel3ss/templates/crud/.gitignore +1 -0
- namel3ss/templates/crud/README.md +10 -0
- namel3ss/templates/crud/app.ai +26 -0
- namel3ss/templates/multi_agent/.gitignore +1 -0
- namel3ss/templates/multi_agent/README.md +10 -0
- namel3ss/templates/multi_agent/app.ai +43 -0
- namel3ss/ui/__init__.py +2 -0
- namel3ss/ui/manifest.py +220 -0
- namel3ss/utils/__init__.py +2 -0
- namel3ss-0.1.0a0.dist-info/METADATA +123 -0
- namel3ss-0.1.0a0.dist-info/RECORD +157 -0
- namel3ss-0.1.0a0.dist-info/WHEEL +5 -0
- namel3ss-0.1.0a0.dist-info/entry_points.txt +2 -0
- namel3ss-0.1.0a0.dist-info/top_level.txt +1 -0

namel3ss/runtime/ai/providers/_shared/parse.py

@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+import json
+
+from namel3ss.errors.base import Namel3ssError
+
+
+def ensure_text_output(provider_name: str, text: object) -> str:
+    if isinstance(text, str) and text.strip() != "":
+        return text
+    raise Namel3ssError(f"Provider '{provider_name}' returned an invalid response")
+
+
+def json_loads_or_error(provider_name: str, raw: bytes) -> dict:
+    try:
+        return json.loads(raw.decode("utf-8"))
+    except Exception as err:  # json.JSONDecodeError or UnicodeError
+        raise Namel3ssError(f"Provider '{provider_name}' returned an invalid response") from err
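
Both helpers funnel malformed provider payloads into a single Namel3ssError, so callers never see raw JSON or decode exceptions. A minimal usage sketch against the signatures above (the inputs are invented):

    from namel3ss.errors.base import Namel3ssError
    from namel3ss.runtime.ai.providers._shared.parse import ensure_text_output, json_loads_or_error

    print(ensure_text_output("openai", "hello"))           # -> "hello"
    print(json_loads_or_error("openai", b'{"ok": true}'))  # -> {'ok': True}

    try:
        ensure_text_output("openai", "   ")  # whitespace-only output is rejected
    except Namel3ssError as err:
        print(err)  # Provider 'openai' returned an invalid response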
namel3ss/runtime/ai/providers/anthropic.py

@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+from namel3ss.config.model import AnthropicConfig
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.runtime.ai.http.client import post_json
+from namel3ss.runtime.ai.provider import AIProvider, AIResponse
+from namel3ss.runtime.ai.providers._shared.errors import require_env
+from namel3ss.runtime.ai.providers._shared.parse import ensure_text_output
+
+ANTHROPIC_VERSION = "2023-06-01"
+
+
+class AnthropicProvider(AIProvider):
+    def __init__(self, *, api_key: str | None, timeout_seconds: int = 30):
+        self.api_key = api_key
+        self.timeout_seconds = timeout_seconds
+
+    @classmethod
+    def from_config(cls, config: AnthropicConfig) -> "AnthropicProvider":
+        return cls(api_key=config.api_key)
+
+    def ask(self, *, model: str, system_prompt: str | None, user_input: str, tools=None, memory=None, tool_results=None):
+        key = require_env("anthropic", "NAMEL3SS_ANTHROPIC_API_KEY", self.api_key)
+        url = "https://api.anthropic.com/v1/messages"
+        payload = {"model": model, "messages": [{"role": "user", "content": user_input}]}
+        if system_prompt:
+            payload["system"] = system_prompt
+        headers = {
+            "x-api-key": key,
+            "anthropic-version": ANTHROPIC_VERSION,
+            "Content-Type": "application/json",
+        }
+        try:
+            result = post_json(
+                url=url,
+                headers=headers,
+                payload=payload,
+                timeout_seconds=self.timeout_seconds,
+                provider_name="anthropic",
+            )
+        except Namel3ssError:
+            raise
+        text = _extract_text(result)
+        return AIResponse(output=ensure_text_output("anthropic", text))
+
+
+def _extract_text(result: dict) -> str | None:
+    content = result.get("content")
+    if isinstance(content, list) and content:
+        first = content[0]
+        if isinstance(first, dict):
+            text = first.get("text")
+            if isinstance(text, str):
+                return text
+    return None
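
The payload and headers follow the Anthropic Messages API (the x-api-key header plus the pinned anthropic-version above), and _extract_text reads the first content block of the reply. A small parsing sketch; the reply dict is illustrative, not a captured API response:

    from namel3ss.runtime.ai.providers.anthropic import _extract_text

    reply = {"content": [{"type": "text", "text": "Hello!"}]}
    assert _extract_text(reply) == "Hello!"
    assert _extract_text({"content": []}) is None  # any other shape falls back to None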
namel3ss/runtime/ai/providers/gemini.py

@@ -0,0 +1,50 @@
+from __future__ import annotations
+
+from namel3ss.config.model import GeminiConfig
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.runtime.ai.http.client import post_json
+from namel3ss.runtime.ai.provider import AIProvider, AIResponse
+from namel3ss.runtime.ai.providers._shared.errors import require_env
+from namel3ss.runtime.ai.providers._shared.parse import ensure_text_output
+
+
+class GeminiProvider(AIProvider):
+    def __init__(self, *, api_key: str | None, timeout_seconds: int = 30):
+        self.api_key = api_key
+        self.timeout_seconds = timeout_seconds
+
+    @classmethod
+    def from_config(cls, config: GeminiConfig) -> "GeminiProvider":
+        return cls(api_key=config.api_key)
+
+    def ask(self, *, model: str, system_prompt: str | None, user_input: str, tools=None, memory=None, tool_results=None):
+        key = require_env("gemini", "NAMEL3SS_GEMINI_API_KEY", self.api_key)
+        url = f"https://generativelanguage.googleapis.com/v1beta/models/{model}:generateContent?key={key}"
+        text = user_input if not system_prompt else f"{system_prompt}\n{user_input}"
+        payload = {"contents": [{"role": "user", "parts": [{"text": text}]}]}
+        headers = {"Content-Type": "application/json"}
+        try:
+            result = post_json(
+                url=url,
+                headers=headers,
+                payload=payload,
+                timeout_seconds=self.timeout_seconds,
+                provider_name="gemini",
+            )
+        except Namel3ssError:
+            raise
+        text_out = _extract_text(result)
+        return AIResponse(output=ensure_text_output("gemini", text_out))
+
+
+def _extract_text(result: dict) -> str | None:
+    candidates = result.get("candidates")
+    if isinstance(candidates, list) and candidates:
+        content = candidates[0].get("content") if isinstance(candidates[0], dict) else None
+        if isinstance(content, dict):
+            parts = content.get("parts")
+            if isinstance(parts, list):
+                texts = [part.get("text") for part in parts if isinstance(part, dict) and isinstance(part.get("text"), str)]
+                if texts:
+                    return "\n".join(texts)
+    return None
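
Unlike the other providers, the system prompt is not sent in a dedicated field here: it is prepended to the user text before the request goes out. On the way back, _extract_text joins every text part of the first candidate with newlines. A parsing sketch with an invented reply:

    from namel3ss.runtime.ai.providers.gemini import _extract_text

    reply = {"candidates": [{"content": {"parts": [{"text": "Hello"}, {"text": "world"}]}}]}
    assert _extract_text(reply) == "Hello\nworld"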
namel3ss/runtime/ai/providers/mistral.py

@@ -0,0 +1,51 @@
+from __future__ import annotations
+
+from namel3ss.config.model import MistralConfig
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.runtime.ai.http.client import post_json
+from namel3ss.runtime.ai.provider import AIProvider, AIResponse
+from namel3ss.runtime.ai.providers._shared.errors import require_env
+from namel3ss.runtime.ai.providers._shared.parse import ensure_text_output
+
+
+class MistralProvider(AIProvider):
+    def __init__(self, *, api_key: str | None, timeout_seconds: int = 30):
+        self.api_key = api_key
+        self.timeout_seconds = timeout_seconds
+
+    @classmethod
+    def from_config(cls, config: MistralConfig) -> "MistralProvider":
+        return cls(api_key=config.api_key)
+
+    def ask(self, *, model: str, system_prompt: str | None, user_input: str, tools=None, memory=None, tool_results=None):
+        key = require_env("mistral", "NAMEL3SS_MISTRAL_API_KEY", self.api_key)
+        url = "https://api.mistral.ai/v1/chat/completions"
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": user_input})
+        payload = {"model": model, "messages": messages}
+        headers = {"Authorization": f"Bearer {key}", "Content-Type": "application/json"}
+        try:
+            result = post_json(
+                url=url,
+                headers=headers,
+                payload=payload,
+                timeout_seconds=self.timeout_seconds,
+                provider_name="mistral",
+            )
+        except Namel3ssError:
+            raise
+        text = _extract_text(result)
+        return AIResponse(output=ensure_text_output("mistral", text))
+
+
+def _extract_text(result: dict) -> str | None:
+    choices = result.get("choices")
+    if isinstance(choices, list) and choices:
+        message = choices[0].get("message") if isinstance(choices[0], dict) else None
+        if isinstance(message, dict):
+            content = message.get("content")
+            if isinstance(content, str):
+                return content
+    return None
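
The request uses the familiar chat-completions message list, and _extract_text takes the content string of the first choice. A parsing sketch with an invented reply:

    from namel3ss.runtime.ai.providers.mistral import _extract_text

    reply = {"choices": [{"message": {"role": "assistant", "content": "Bonjour"}}]}
    assert _extract_text(reply) == "Bonjour"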
namel3ss/runtime/ai/providers/mock.py

@@ -0,0 +1,23 @@
+from __future__ import annotations
+
+from namel3ss.runtime.ai.provider import AIProvider, AIResponse, AIToolCallResponse
+
+
+class MockProvider(AIProvider):
+    def __init__(self, tool_call_sequence=None):
+        self.tool_call_sequence = tool_call_sequence or []
+        self.call_index = 0
+
+    def ask(self, *, model: str, system_prompt: str | None, user_input: str, tools=None, memory=None, tool_results=None):
+        if self.call_index < len(self.tool_call_sequence):
+            resp = self.tool_call_sequence[self.call_index]
+            self.call_index += 1
+            if isinstance(resp, AIToolCallResponse):
+                return resp
+        prefix = f"[{model}]"
+        mem_note = ""
+        if memory:
+            mem_note = f" | mem:st={len(memory.get('short_term', []))}"
+        if system_prompt:
+            return AIResponse(output=str(f"{prefix} {system_prompt} :: {user_input}{mem_note}"))
+        return AIResponse(output=str(f"{prefix} {user_input}{mem_note}"))
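
The mock provider is deterministic, which keeps tests offline: without a scripted tool_call_sequence it simply echoes the model, prompts, and a short-term-memory note. For example:

    from namel3ss.runtime.ai.providers.mock import MockProvider

    provider = MockProvider()
    resp = provider.ask(model="test-model", system_prompt="Be terse", user_input="hi")
    print(resp.output)  # [test-model] Be terse :: hi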
namel3ss/runtime/ai/providers/ollama.py

@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.runtime.ai.http.client import post_json
+from namel3ss.runtime.ai.provider import AIProvider, AIResponse
+from namel3ss.runtime.ai.providers._shared.errors import map_http_error
+from namel3ss.runtime.ai.providers._shared.parse import ensure_text_output
+
+
+class OllamaProvider(AIProvider):
+    def __init__(self, *, host: str, timeout_seconds: int = 30):
+        self.host = host.rstrip("/")
+        self.timeout_seconds = timeout_seconds
+
+    def ask(self, *, model: str, system_prompt: str | None, user_input: str, tools=None, memory=None, tool_results=None):
+        url = f"{self.host}/api/chat"
+        messages = []
+        if system_prompt:
+            messages.append({"role": "system", "content": system_prompt})
+        messages.append({"role": "user", "content": user_input})
+        payload = {"model": model, "messages": messages}
+        try:
+            result = post_json(url=url, headers={"Content-Type": "application/json"}, payload=payload, timeout_seconds=self.timeout_seconds, provider_name="ollama")
+        except Namel3ssError:
+            raise
+        except Exception as err:
+            raise map_http_error("ollama", err) from err
+        content = _extract_content(result)
+        return AIResponse(output=ensure_text_output("ollama", content))
+
+
+def _extract_content(payload: dict) -> str | None:
+    if "message" in payload and isinstance(payload["message"], dict):
+        content = payload["message"].get("content")
+        if isinstance(content, str):
+            return content
+    if "response" in payload and isinstance(payload["response"], str):
+        return payload["response"]
+    return None
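
_extract_content accepts two payload shapes: the /api/chat reply (message.content) and a generate-style reply carrying a top-level response string. A quick sketch:

    from namel3ss.runtime.ai.providers.ollama import _extract_content

    assert _extract_content({"message": {"content": "hi"}}) == "hi"  # /api/chat shape
    assert _extract_content({"response": "hi"}) == "hi"              # generate-style shape
    assert _extract_content({}) is None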
namel3ss/runtime/ai/providers/openai.py

@@ -0,0 +1,55 @@
+from __future__ import annotations
+
+from namel3ss.config.model import OpenAIConfig
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.runtime.ai.http.client import post_json
+from namel3ss.runtime.ai.provider import AIProvider, AIResponse
+from namel3ss.runtime.ai.providers._shared.errors import require_env
+from namel3ss.runtime.ai.providers._shared.parse import ensure_text_output
+
+
+class OpenAIProvider(AIProvider):
+    def __init__(self, *, api_key: str | None, base_url: str = "https://api.openai.com", timeout_seconds: int = 30):
+        self.api_key = api_key
+        self.base_url = base_url.rstrip("/")
+        self.timeout_seconds = timeout_seconds
+
+    @classmethod
+    def from_config(cls, config: OpenAIConfig) -> "OpenAIProvider":
+        return cls(api_key=config.api_key, base_url=config.base_url)
+
+    def ask(self, *, model: str, system_prompt: str | None, user_input: str, tools=None, memory=None, tool_results=None):
+        key = require_env("openai", "NAMEL3SS_OPENAI_API_KEY", self.api_key)
+        url = f"{self.base_url}/v1/responses"
+        payload = {"model": model, "input": user_input}
+        if system_prompt:
+            payload["system"] = system_prompt
+        headers = {"Authorization": f"Bearer {key}", "Content-Type": "application/json"}
+        try:
+            result = post_json(
+                url=url,
+                headers=headers,
+                payload=payload,
+                timeout_seconds=self.timeout_seconds,
+                provider_name="openai",
+            )
+        except Namel3ssError:
+            raise
+        text = _extract_text(result)
+        return AIResponse(output=ensure_text_output("openai", text))
+
+
+def _extract_text(result: dict) -> str | None:
+    if isinstance(result.get("output_text"), str):
+        return result["output_text"]
+    output = result.get("output")
+    if isinstance(output, list) and output:
+        content = output[0].get("content") if isinstance(output[0], dict) else None
+        if isinstance(content, list) and content:
+            text = content[0].get("text")
+            if isinstance(text, str):
+                return text
+    message = result.get("message")
+    if isinstance(message, dict) and isinstance(message.get("content"), str):
+        return message["content"]
+    return None
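
_extract_text probes three reply shapes in order: a top-level output_text string, the Responses-style output[0].content[0].text nesting, and finally a chat-style message.content. A parsing sketch with invented replies:

    from namel3ss.runtime.ai.providers.openai import _extract_text

    assert _extract_text({"output_text": "hi"}) == "hi"
    assert _extract_text({"output": [{"content": [{"text": "hi"}]}]}) == "hi"
    assert _extract_text({"message": {"content": "hi"}}) == "hi"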
namel3ss/runtime/ai/providers/registry.py

@@ -0,0 +1,38 @@
+from __future__ import annotations
+
+from namel3ss.config import load_config
+from namel3ss.config.model import AppConfig
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.runtime.ai.provider import AIProvider
+from namel3ss.runtime.ai.providers.mock import MockProvider
+from namel3ss.runtime.ai.providers.ollama import OllamaProvider
+from namel3ss.runtime.ai.providers.openai import OpenAIProvider
+from namel3ss.runtime.ai.providers.anthropic import AnthropicProvider
+from namel3ss.runtime.ai.providers.gemini import GeminiProvider
+from namel3ss.runtime.ai.providers.mistral import MistralProvider
+
+
+_FACTORIES = {
+    "mock": lambda config: MockProvider(),
+    "ollama": lambda config: OllamaProvider(
+        host=config.ollama.host,
+        timeout_seconds=config.ollama.timeout_seconds,
+    ),
+    "openai": lambda config: OpenAIProvider.from_config(config.openai),
+    "anthropic": lambda config: AnthropicProvider.from_config(config.anthropic),
+    "gemini": lambda config: GeminiProvider.from_config(config.gemini),
+    "mistral": lambda config: MistralProvider.from_config(config.mistral),
+}
+
+
+def is_supported_provider(name: str) -> bool:
+    return name.lower() in _FACTORIES
+
+
+def get_provider(name: str, config: AppConfig | None = None) -> AIProvider:
+    normalized = name.lower()
+    if normalized not in _FACTORIES:
+        available = ", ".join(sorted(_FACTORIES))
+        raise Namel3ssError(f"Unknown AI provider '{name}'. Available: {available}")
+    cfg = config or load_config()
+    return _FACTORIES[normalized](cfg)
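
Lookups are case-insensitive, and configuration is only loaded when the caller does not supply one. A usage sketch (the unknown name is invented):

    from namel3ss.errors.base import Namel3ssError
    from namel3ss.runtime.ai.providers.registry import get_provider, is_supported_provider

    assert is_supported_provider("Mock")  # normalized with .lower()
    provider = get_provider("mock")       # falls back to load_config() when no config is passed

    try:
        get_provider("nope")
    except Namel3ssError as err:
        print(err)  # Unknown AI provider 'nope'. Available: anthropic, gemini, mistral, mock, ollama, openai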
namel3ss/runtime/ai/trace.py

@@ -0,0 +1,18 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Optional
+
+
+@dataclass
+class AITrace:
+    ai_name: str
+    agent_name: Optional[str]
+    ai_profile_name: Optional[str]
+    model: str
+    system_prompt: Optional[str]
+    input: str
+    output: str
+    memory: dict
+    tool_calls: list
+    tool_results: list
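
Since AITrace is a plain dataclass, dataclasses.asdict yields the same mapping that executor/agents.py builds by hand in _trace_to_dict. For example:

    from dataclasses import asdict
    from namel3ss.runtime.ai.trace import AITrace

    trace = AITrace(
        ai_name="assistant", agent_name=None, ai_profile_name="assistant",
        model="test-model", system_prompt=None, input="hi", output="hello",
        memory={}, tool_calls=[], tool_results=[],
    )
    print(asdict(trace)["output"])  # hello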
namel3ss/runtime/executor/agents.py

@@ -0,0 +1,91 @@
+from __future__ import annotations
+
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.ir import nodes as ir
+from namel3ss.runtime.ai.trace import AITrace
+from namel3ss.runtime.executor.ai_runner import run_ai_with_tools
+from namel3ss.runtime.executor.context import ExecutionContext
+from namel3ss.runtime.executor.expr_eval import evaluate_expression
+
+
+def execute_run_agent(ctx: ExecutionContext, stmt: ir.RunAgentStmt) -> None:
+    output, trace = run_agent_call(ctx, stmt.agent_name, stmt.input_expr, stmt.line, stmt.column)
+    ctx.traces.append(trace)
+    if stmt.target in ctx.constants:
+        raise Namel3ssError(f"Cannot assign to constant '{stmt.target}'", line=stmt.line, column=stmt.column)
+    ctx.locals[stmt.target] = output
+    ctx.last_value = output
+
+
+def execute_run_agents_parallel(ctx: ExecutionContext, stmt: ir.RunAgentsParallelStmt) -> None:
+    if len(stmt.entries) > 3:
+        raise Namel3ssError("Parallel agent limit exceeded")
+    results: list[str] = []
+    child_traces: list[dict] = []
+    for entry in stmt.entries:
+        try:
+            output, trace = run_agent_call(ctx, entry.agent_name, entry.input_expr, entry.line, entry.column)
+        except Namel3ssError as err:
+            raise Namel3ssError(f"Agent '{entry.agent_name}' failed: {err}", line=entry.line, column=entry.column) from err
+        results.append(output)
+        child_traces.append(_trace_to_dict(trace))
+    ctx.locals[stmt.target] = results
+    ctx.last_value = results
+    ctx.traces.append({"type": "parallel_agents", "target": stmt.target, "agents": child_traces})
+
+
+def run_agent_call(ctx: ExecutionContext, agent_name: str, input_expr, line: int | None, column: int | None):
+    ctx.agent_calls += 1
+    if ctx.agent_calls > 5:
+        raise Namel3ssError("Agent call limit exceeded in flow")
+    if agent_name not in ctx.agents:
+        raise Namel3ssError(f"Unknown agent '{agent_name}'", line=line, column=column)
+    agent = ctx.agents[agent_name]
+    ai_profile = ctx.ai_profiles.get(agent.ai_name)
+    if ai_profile is None:
+        raise Namel3ssError(f"Agent '{agent.name}' references unknown AI '{agent.ai_name}'", line=line, column=column)
+    user_input = evaluate_expression(ctx, input_expr)
+    if not isinstance(user_input, str):
+        raise Namel3ssError("Agent input must be a string", line=line, column=column)
+    profile_override = ir.AIDecl(
+        name=ai_profile.name,
+        model=ai_profile.model,
+        provider=ai_profile.provider,
+        system_prompt=agent.system_prompt or ai_profile.system_prompt,
+        exposed_tools=list(ai_profile.exposed_tools),
+        memory=ai_profile.memory,
+        line=ai_profile.line,
+        column=ai_profile.column,
+    )
+    memory_context = ctx.memory_manager.recall_context(profile_override, user_input, ctx.state)
+    tool_events: list[dict] = []
+    response_output = run_ai_with_tools(ctx, profile_override, user_input, memory_context, tool_events)
+    trace = AITrace(
+        ai_name=profile_override.name,
+        ai_profile_name=profile_override.name,
+        agent_name=agent.name,
+        model=profile_override.model,
+        system_prompt=profile_override.system_prompt,
+        input=user_input,
+        output=response_output,
+        memory=memory_context,
+        tool_calls=[e for e in tool_events if e.get("type") == "call"],
+        tool_results=[e for e in tool_events if e.get("type") == "result"],
+    )
+    ctx.memory_manager.record_interaction(profile_override, ctx.state, user_input, response_output, tool_events)
+    return response_output, trace
+
+
+def _trace_to_dict(trace: AITrace) -> dict:
+    return {
+        "ai_name": trace.ai_name,
+        "ai_profile_name": trace.ai_profile_name,
+        "agent_name": trace.agent_name,
+        "model": trace.model,
+        "system_prompt": trace.system_prompt,
+        "input": trace.input,
+        "output": trace.output,
+        "memory": trace.memory,
+        "tool_calls": trace.tool_calls,
+        "tool_results": trace.tool_results,
+    }
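
Two hard limits live in this module: a parallel block may run at most 3 agents, and a single flow may make at most 5 agent calls in total. The profile_override built above also fixes the prompt precedence: the agent's own system prompt, when set, wins over the AI profile's. A tiny illustration of that rule (the prompt strings are invented):

    # Same expression as in run_agent_call: agent.system_prompt or ai_profile.system_prompt
    agent_prompt = "You are the researcher agent."
    profile_prompt = "You are a helpful assistant."
    assert (agent_prompt or profile_prompt) == "You are the researcher agent."
    assert (None or profile_prompt) == "You are a helpful assistant."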
namel3ss/runtime/executor/ai_runner.py

@@ -0,0 +1,90 @@
+from __future__ import annotations
+
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.ir import nodes as ir
+from namel3ss.runtime.ai.provider import AIToolCallResponse
+from namel3ss.runtime.ai.providers.registry import get_provider
+from namel3ss.runtime.ai.trace import AITrace
+from namel3ss.runtime.executor.context import ExecutionContext
+from namel3ss.runtime.executor.expr_eval import evaluate_expression
+from namel3ss.runtime.tools.registry import execute_tool
+
+
+def execute_ask_ai(ctx: ExecutionContext, expr: ir.AskAIStmt) -> str:
+    if expr.ai_name not in ctx.ai_profiles:
+        raise Namel3ssError(
+            f"Unknown AI '{expr.ai_name}'",
+            line=expr.line,
+            column=expr.column,
+        )
+    profile = ctx.ai_profiles[expr.ai_name]
+    user_input = evaluate_expression(ctx, expr.input_expr)
+    if not isinstance(user_input, str):
+        raise Namel3ssError("AI input must be a string", line=expr.line, column=expr.column)
+    memory_context = ctx.memory_manager.recall_context(profile, user_input, ctx.state)
+    tool_events: list[dict] = []
+    response_output = run_ai_with_tools(ctx, profile, user_input, memory_context, tool_events)
+    trace = AITrace(
+        ai_name=expr.ai_name,
+        ai_profile_name=expr.ai_name,
+        agent_name=None,
+        model=profile.model,
+        system_prompt=profile.system_prompt,
+        input=user_input,
+        output=response_output,
+        memory=memory_context,
+        tool_calls=[e for e in tool_events if e.get("type") == "call"],
+        tool_results=[e for e in tool_events if e.get("type") == "result"],
+    )
+    ctx.traces.append(trace)
+    if expr.target in ctx.constants:
+        raise Namel3ssError(f"Cannot assign to constant '{expr.target}'", line=expr.line, column=expr.column)
+    ctx.locals[expr.target] = response_output
+    ctx.last_value = response_output
+    ctx.memory_manager.record_interaction(profile, ctx.state, user_input, response_output, tool_events)
+    return response_output
+
+
+def run_ai_with_tools(
+    ctx: ExecutionContext,
+    profile: ir.AIDecl,
+    user_input: str,
+    memory_context: dict,
+    tool_events: list[dict],
+) -> str:
+    max_calls = 3
+    tool_results: list[dict] = []
+    provider_name = getattr(profile, "provider", "mock") or "mock"
+    for _ in range(max_calls + 1):
+        provider = _resolve_provider(ctx, provider_name)
+        response = provider.ask(
+            model=profile.model,
+            system_prompt=profile.system_prompt,
+            user_input=user_input,
+            tools=[{"name": name} for name in profile.exposed_tools],
+            memory=memory_context,
+            tool_results=tool_results,
+        )
+        if isinstance(response, AIToolCallResponse):
+            if response.tool_name not in profile.exposed_tools:
+                raise Namel3ssError(f"AI requested unexposed tool '{response.tool_name}'")
+            if not isinstance(response.args, dict):
+                raise Namel3ssError("Tool call args must be a dictionary")
+            tool_events.append({"type": "call", "name": response.tool_name, "args": response.args})
+            result = execute_tool(response.tool_name, response.args)
+            tool_events.append({"type": "result", "name": response.tool_name, "result": result})
+            tool_results.append({"name": response.tool_name, "result": result})
+            continue
+        if not isinstance(response.output, str):
+            raise Namel3ssError("AI response must be a string")
+        return response.output
+    raise Namel3ssError("AI exceeded maximum tool calls")
+
+
+def _resolve_provider(ctx: ExecutionContext, provider_name: str):
+    key = provider_name.lower()
+    if key in ctx.provider_cache:
+        return ctx.provider_cache[key]
+    provider = get_provider(key, ctx.config)
+    ctx.provider_cache[key] = provider
+    return provider
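
The loop budget is worth spelling out: with max_calls = 3, range(max_calls + 1) grants four provider turns, so the model may issue up to three tool calls before it must return plain text; a fourth tool request exhausts the loop and raises. MockProvider's tool_call_sequence scripts exactly this loop in tests. A hypothetical pairing, assuming AIToolCallResponse is constructed with the tool_name and args attributes read above:

    from namel3ss.runtime.ai.provider import AIToolCallResponse
    from namel3ss.runtime.ai.providers.mock import MockProvider

    provider = MockProvider(tool_call_sequence=[
        AIToolCallResponse(tool_name="search", args={"q": "weather"}),  # hypothetical tool
    ])
    first = provider.ask(model="m", system_prompt=None, user_input="hi")
    second = provider.ask(model="m", system_prompt=None, user_input="hi")
    assert isinstance(first, AIToolCallResponse)       # scripted tool turn
    assert not isinstance(second, AIToolCallResponse)  # sequence exhausted, echoes text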
namel3ss/runtime/executor/api.py

@@ -0,0 +1,54 @@
+from __future__ import annotations
+
+from typing import Dict, Optional
+
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.ir import nodes as ir
+from namel3ss.runtime.ai.provider import AIProvider
+from namel3ss.runtime.executor.executor import Executor
+from namel3ss.runtime.executor.result import ExecutionResult
+from namel3ss.runtime.store.memory_store import MemoryStore
+from namel3ss.schema.records import RecordSchema
+
+
+def execute_flow(
+    flow: ir.Flow,
+    schemas: Optional[Dict[str, RecordSchema]] = None,
+    initial_state: Optional[Dict[str, object]] = None,
+    input_data: Optional[Dict[str, object]] = None,
+    ai_provider: Optional[AIProvider] = None,
+    ai_profiles: Optional[Dict[str, ir.AIDecl]] = None,
+) -> ExecutionResult:
+    return Executor(
+        flow,
+        schemas=schemas,
+        initial_state=initial_state,
+        input_data=input_data,
+        ai_provider=ai_provider,
+        ai_profiles=ai_profiles,
+    ).run()
+
+
+def execute_program_flow(
+    program: ir.Program,
+    flow_name: str,
+    *,
+    state: Optional[Dict[str, object]] = None,
+    input: Optional[Dict[str, object]] = None,
+    store: Optional[MemoryStore] = None,
+    ai_provider: Optional[AIProvider] = None,
+) -> ExecutionResult:
+    flow = next((f for f in program.flows if f.name == flow_name), None)
+    if flow is None:
+        raise Namel3ssError(f"Unknown flow '{flow_name}'")
+    schemas = {schema.name: schema for schema in program.records}
+    return Executor(
+        flow,
+        schemas=schemas,
+        initial_state=state,
+        input_data=input,
+        store=store,
+        ai_provider=ai_provider,
+        ai_profiles=program.ais,
+        agents=program.agents,
+    ).run()
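
execute_program_flow is the convenience entry point: it resolves the flow by name, indexes the record schemas, and threads the program's AI profiles and agents into the Executor. A minimal sketch of calling it with the mock provider; the flow name and input are invented, and the program is assumed to come from the parser and lowering pipeline shipped in this release:

    from namel3ss.ir import nodes as ir
    from namel3ss.runtime.ai.providers.mock import MockProvider
    from namel3ss.runtime.executor.api import execute_program_flow
    from namel3ss.runtime.executor.result import ExecutionResult

    def run_greet(program: ir.Program) -> ExecutionResult:
        return execute_program_flow(
            program,
            "greet_user",                # hypothetical flow name
            input={"name": "Ada"},       # hypothetical flow input
            ai_provider=MockProvider(),  # keeps the run offline
        )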
namel3ss/runtime/executor/assign.py

@@ -0,0 +1,40 @@
+from __future__ import annotations
+
+from typing import Dict
+
+from namel3ss.errors.base import Namel3ssError
+from namel3ss.ir import nodes as ir
+from namel3ss.runtime.executor.context import ExecutionContext
+
+
+def assign(ctx: ExecutionContext, target: ir.Assignable, value: object, origin: ir.Statement) -> None:
+    if isinstance(target, ir.VarReference):
+        if target.name not in ctx.locals:
+            raise Namel3ssError(
+                f"Cannot set undeclared variable '{target.name}'",
+                line=origin.line,
+                column=origin.column,
+            )
+        if target.name in ctx.constants:
+            raise Namel3ssError(
+                f"Cannot set constant '{target.name}'",
+                line=origin.line,
+                column=origin.column,
+            )
+        ctx.locals[target.name] = value
+        return
+
+    if isinstance(target, ir.StatePath):
+        assign_state_path(ctx.state, target, value)
+        return
+
+    raise Namel3ssError(f"Unsupported assignment target: {type(target)}", line=origin.line, column=origin.column)
+
+
+def assign_state_path(state: Dict[str, object], target: ir.StatePath, value: object) -> None:
+    cursor: Dict[str, object] = state
+    for segment in target.path[:-1]:
+        if segment not in cursor or not isinstance(cursor[segment], dict):
+            cursor[segment] = {}
+        cursor = cursor[segment]  # type: ignore[assignment]
+    cursor[target.path[-1]] = value
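
assign_state_path autovivifies intermediate dictionaries, overwriting any non-dict value it meets along the way. A sketch, assuming ir.StatePath can be built from its path segments (the exact constructor may differ):

    from namel3ss.ir import nodes as ir
    from namel3ss.runtime.executor.assign import assign_state_path

    state: dict = {}
    assign_state_path(state, ir.StatePath(path=["user", "name"]), "Ada")
    print(state)  # {'user': {'name': 'Ada'}}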
namel3ss/runtime/executor/context.py

@@ -0,0 +1,31 @@
+from __future__ import annotations
+
+from dataclasses import dataclass
+from typing import Dict, Optional
+
+from namel3ss.config.model import AppConfig
+from namel3ss.ir import nodes as ir
+from namel3ss.runtime.ai.provider import AIProvider
+from namel3ss.runtime.ai.trace import AITrace
+from namel3ss.runtime.memory.manager import MemoryManager
+from namel3ss.runtime.store.memory_store import MemoryStore
+from namel3ss.schema.records import RecordSchema
+
+
+@dataclass
+class ExecutionContext:
+    flow: ir.Flow
+    schemas: Dict[str, RecordSchema]
+    state: Dict[str, object]
+    locals: Dict[str, object]
+    constants: set[str]
+    last_value: Optional[object]
+    store: MemoryStore
+    ai_provider: AIProvider
+    ai_profiles: Dict[str, ir.AIDecl]
+    agents: Dict[str, ir.AgentDecl]
+    traces: list[AITrace]
+    memory_manager: MemoryManager
+    agent_calls: int
+    config: AppConfig
+    provider_cache: Dict[str, AIProvider]