yera 0.1.1-py3-none-any.whl → 0.2.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- infra_mvp/base_client.py +29 -0
- infra_mvp/base_server.py +68 -0
- infra_mvp/monitoring/__init__.py +15 -0
- infra_mvp/monitoring/metrics.py +185 -0
- infra_mvp/stream/README.md +56 -0
- infra_mvp/stream/__init__.py +14 -0
- infra_mvp/stream/__main__.py +101 -0
- infra_mvp/stream/agents/demos/financial/chart_additions_plan.md +170 -0
- infra_mvp/stream/agents/demos/financial/portfolio_assistant_stream.json +1571 -0
- infra_mvp/stream/agents/reference/blocks/action.json +170 -0
- infra_mvp/stream/agents/reference/blocks/button.json +66 -0
- infra_mvp/stream/agents/reference/blocks/date.json +65 -0
- infra_mvp/stream/agents/reference/blocks/input_prompt.json +94 -0
- infra_mvp/stream/agents/reference/blocks/layout.json +288 -0
- infra_mvp/stream/agents/reference/blocks/markdown.json +344 -0
- infra_mvp/stream/agents/reference/blocks/slider.json +67 -0
- infra_mvp/stream/agents/reference/blocks/spinner.json +110 -0
- infra_mvp/stream/agents/reference/blocks/table.json +56 -0
- infra_mvp/stream/agents/reference/chat_dynamics/branching_test_stream.json +145 -0
- infra_mvp/stream/app.py +49 -0
- infra_mvp/stream/container.py +112 -0
- infra_mvp/stream/schemas/__init__.py +16 -0
- infra_mvp/stream/schemas/agent.py +24 -0
- infra_mvp/stream/schemas/interaction.py +28 -0
- infra_mvp/stream/schemas/session.py +30 -0
- infra_mvp/stream/server.py +321 -0
- infra_mvp/stream/services/__init__.py +12 -0
- infra_mvp/stream/services/agent_service.py +40 -0
- infra_mvp/stream/services/event_converter.py +83 -0
- infra_mvp/stream/services/session_service.py +247 -0
- yera/__init__.py +50 -1
- yera/agents/__init__.py +2 -0
- yera/agents/context.py +41 -0
- yera/agents/dataclasses.py +69 -0
- yera/agents/decorator.py +207 -0
- yera/agents/discovery.py +124 -0
- yera/agents/typing/__init__.py +0 -0
- yera/agents/typing/coerce.py +408 -0
- yera/agents/typing/utils.py +19 -0
- yera/agents/typing/validate.py +206 -0
- yera/cli.py +377 -0
- yera/config/__init__.py +1 -0
- yera/config/config_utils.py +164 -0
- yera/config/function_config.py +55 -0
- yera/config/logging.py +18 -0
- yera/config/tool_config.py +8 -0
- yera/config2/__init__.py +8 -0
- yera/config2/dataclasses.py +534 -0
- yera/config2/keyring.py +270 -0
- yera/config2/paths.py +28 -0
- yera/config2/read.py +113 -0
- yera/config2/setup.py +109 -0
- yera/config2/setup_handlers/__init__.py +1 -0
- yera/config2/setup_handlers/anthropic.py +126 -0
- yera/config2/setup_handlers/azure.py +236 -0
- yera/config2/setup_handlers/base.py +125 -0
- yera/config2/setup_handlers/llama_cpp.py +205 -0
- yera/config2/setup_handlers/ollama.py +157 -0
- yera/config2/setup_handlers/openai.py +137 -0
- yera/config2/write.py +87 -0
- yera/dsl/__init__.py +0 -0
- yera/dsl/functions.py +94 -0
- yera/dsl/struct.py +20 -0
- yera/dsl/workspace.py +79 -0
- yera/events/__init__.py +57 -0
- yera/events/blocks/__init__.py +68 -0
- yera/events/blocks/action.py +57 -0
- yera/events/blocks/bar_chart.py +92 -0
- yera/events/blocks/base/__init__.py +20 -0
- yera/events/blocks/base/base.py +166 -0
- yera/events/blocks/base/chart.py +288 -0
- yera/events/blocks/base/layout.py +111 -0
- yera/events/blocks/buttons.py +37 -0
- yera/events/blocks/columns.py +26 -0
- yera/events/blocks/container.py +24 -0
- yera/events/blocks/date_picker.py +50 -0
- yera/events/blocks/exit.py +39 -0
- yera/events/blocks/form.py +24 -0
- yera/events/blocks/input_echo.py +22 -0
- yera/events/blocks/input_request.py +31 -0
- yera/events/blocks/line_chart.py +97 -0
- yera/events/blocks/markdown.py +67 -0
- yera/events/blocks/slider.py +54 -0
- yera/events/blocks/spinner.py +55 -0
- yera/events/blocks/system_prompt.py +22 -0
- yera/events/blocks/table.py +291 -0
- yera/events/models/__init__.py +39 -0
- yera/events/models/block_data.py +112 -0
- yera/events/models/in_event.py +7 -0
- yera/events/models/out_event.py +75 -0
- yera/events/runtime.py +187 -0
- yera/events/stream.py +91 -0
- yera/models/__init__.py +0 -0
- yera/models/data_classes.py +20 -0
- yera/models/llm_atlas_proxy.py +44 -0
- yera/models/llm_context.py +99 -0
- yera/models/llm_interfaces/__init__.py +0 -0
- yera/models/llm_interfaces/anthropic.py +153 -0
- yera/models/llm_interfaces/aws_bedrock.py +14 -0
- yera/models/llm_interfaces/azure_openai.py +143 -0
- yera/models/llm_interfaces/base.py +26 -0
- yera/models/llm_interfaces/interface_registry.py +74 -0
- yera/models/llm_interfaces/llama_cpp.py +136 -0
- yera/models/llm_interfaces/mock.py +29 -0
- yera/models/llm_interfaces/ollama_interface.py +118 -0
- yera/models/llm_interfaces/open_ai.py +150 -0
- yera/models/llm_workspace.py +19 -0
- yera/models/model_atlas.py +139 -0
- yera/models/model_definition.py +38 -0
- yera/models/model_factory.py +33 -0
- yera/opaque/__init__.py +9 -0
- yera/opaque/base.py +20 -0
- yera/opaque/decorator.py +8 -0
- yera/opaque/markdown.py +57 -0
- yera/opaque/opaque_function.py +25 -0
- yera/tools/__init__.py +29 -0
- yera/tools/atlas_tool.py +20 -0
- yera/tools/base.py +24 -0
- yera/tools/decorated_tool.py +18 -0
- yera/tools/decorator.py +35 -0
- yera/tools/tool_atlas.py +51 -0
- yera/tools/tool_utils.py +361 -0
- yera/ui/dist/404.html +1 -0
- yera/ui/dist/__next.__PAGE__.txt +10 -0
- yera/ui/dist/__next._full.txt +23 -0
- yera/ui/dist/__next._head.txt +6 -0
- yera/ui/dist/__next._index.txt +5 -0
- yera/ui/dist/__next._tree.txt +7 -0
- yera/ui/dist/_next/static/T8WGYqDMoHDKKoHj0O3HK/_buildManifest.js +11 -0
- yera/ui/dist/_next/static/T8WGYqDMoHDKKoHj0O3HK/_clientMiddlewareManifest.json +1 -0
- yera/ui/dist/_next/static/T8WGYqDMoHDKKoHj0O3HK/_ssgManifest.js +1 -0
- yera/ui/dist/_next/static/chunks/4c4688e1ff21ad98.js +1 -0
- yera/ui/dist/_next/static/chunks/652cd53c27924d50.js +4 -0
- yera/ui/dist/_next/static/chunks/786d2107b51e8499.css +1 -0
- yera/ui/dist/_next/static/chunks/7de9141b1af425c3.js +1 -0
- yera/ui/dist/_next/static/chunks/87ef65064d3524c1.js +2 -0
- yera/ui/dist/_next/static/chunks/a6dad97d9634a72d.js +1 -0
- yera/ui/dist/_next/static/chunks/a6dad97d9634a72d.js.map +1 -0
- yera/ui/dist/_next/static/chunks/c4c79d5d0b280aeb.js +1 -0
- yera/ui/dist/_next/static/chunks/dc2d2a247505d66f.css +5 -0
- yera/ui/dist/_next/static/chunks/f773f714b55ec620.js +37 -0
- yera/ui/dist/_next/static/chunks/turbopack-98b3031e1b1dbc33.js +4 -0
- yera/ui/dist/_next/static/media/14e23f9b59180572-s.9c448f3c.woff2 +0 -0
- yera/ui/dist/_next/static/media/2a65768255d6b625-s.p.d19752fb.woff2 +0 -0
- yera/ui/dist/_next/static/media/2b2eb4836d2dad95-s.f36de3af.woff2 +0 -0
- yera/ui/dist/_next/static/media/31183d9fd602dc89-s.c4ff9b73.woff2 +0 -0
- yera/ui/dist/_next/static/media/3fcb63a1ac6a562e-s.2f77a576.woff2 +0 -0
- yera/ui/dist/_next/static/media/45ec8de98929b0f6-s.81056204.woff2 +0 -0
- yera/ui/dist/_next/static/media/4fa387ec64143e14-s.c1fdd6c2.woff2 +0 -0
- yera/ui/dist/_next/static/media/65c558afe41e89d6-s.e2c8389a.woff2 +0 -0
- yera/ui/dist/_next/static/media/67add6cc0f54b8cf-s.8ce53448.woff2 +0 -0
- yera/ui/dist/_next/static/media/7178b3e590c64307-s.b97b3418.woff2 +0 -0
- yera/ui/dist/_next/static/media/797e433ab948586e-s.p.dbea232f.woff2 +0 -0
- yera/ui/dist/_next/static/media/8a480f0b521d4e75-s.8e0177b5.woff2 +0 -0
- yera/ui/dist/_next/static/media/a8ff2d5d0ccb0d12-s.fc5b72a7.woff2 +0 -0
- yera/ui/dist/_next/static/media/aae5f0be330e13db-s.p.853e26d6.woff2 +0 -0
- yera/ui/dist/_next/static/media/b11a6ccf4a3edec7-s.2113d282.woff2 +0 -0
- yera/ui/dist/_next/static/media/b49b0d9b851e4899-s.4f3fa681.woff2 +0 -0
- yera/ui/dist/_next/static/media/bbc41e54d2fcbd21-s.799d8ef8.woff2 +0 -0
- yera/ui/dist/_next/static/media/caa3a2e1cccd8315-s.p.853070df.woff2 +0 -0
- yera/ui/dist/_next/static/media/favicon.0b3bf435.ico +0 -0
- yera/ui/dist/_not-found/__next._full.txt +14 -0
- yera/ui/dist/_not-found/__next._head.txt +6 -0
- yera/ui/dist/_not-found/__next._index.txt +5 -0
- yera/ui/dist/_not-found/__next._not-found.__PAGE__.txt +5 -0
- yera/ui/dist/_not-found/__next._not-found.txt +4 -0
- yera/ui/dist/_not-found/__next._tree.txt +2 -0
- yera/ui/dist/_not-found.html +1 -0
- yera/ui/dist/_not-found.txt +14 -0
- yera/ui/dist/agent-icon.svg +3 -0
- yera/ui/dist/favicon.ico +0 -0
- yera/ui/dist/file.svg +1 -0
- yera/ui/dist/globe.svg +1 -0
- yera/ui/dist/index.html +1 -0
- yera/ui/dist/index.txt +23 -0
- yera/ui/dist/logo/full_logo.png +0 -0
- yera/ui/dist/logo/rune_logo.png +0 -0
- yera/ui/dist/logo/rune_logo_borderless.png +0 -0
- yera/ui/dist/logo/text_logo.png +0 -0
- yera/ui/dist/next.svg +1 -0
- yera/ui/dist/send.png +0 -0
- yera/ui/dist/send_single.png +0 -0
- yera/ui/dist/vercel.svg +1 -0
- yera/ui/dist/window.svg +1 -0
- yera/utils/__init__.py +1 -0
- yera/utils/path_utils.py +38 -0
- yera-0.2.1.dist-info/METADATA +65 -0
- yera-0.2.1.dist-info/RECORD +190 -0
- {yera-0.1.1.dist-info → yera-0.2.1.dist-info}/WHEEL +1 -1
- yera-0.2.1.dist-info/entry_points.txt +2 -0
- yera-0.1.1.dist-info/METADATA +0 -11
- yera-0.1.1.dist-info/RECORD +0 -4
@@ -0,0 +1,157 @@
+# ruff: noqa: T201
+"""Configuration import handler for Ollama."""
+
+import os
+from typing import Any
+
+import requests as r
+
+from yera.config2.dataclasses import (
+    CredentialsMap,
+    LLMConfig,
+    ModelRegistry,
+)
+from yera.config2.keyring import DevKeyring
+from yera.config2.setup_handlers.base import BaseProviderSetup
+
+
+class OllamaSetup(BaseProviderSetup):
+    """Setup handler for Ollama provider credentials and models.
+
+    Manages connection setup and model fetching for local Ollama.
+    Supports both automatic detection of running Ollama servers and interactive
+    configuration of custom base URLs.
+    """
+
+    def __init__(self):
+        """Initialise the Ollama setup handler."""
+        super().__init__("ollama")
+
+    @staticmethod
+    def _is_ollama_running(base_url: str) -> bool:
+        try:
+            response = r.get(f"{base_url}/api/tags", timeout=2)
+            return response.status_code == 200
+        except Exception:
+            return False
+
+    def detect_creds(self) -> dict[str, str] | None:
+        """Detect Ollama connection from environment or default location.
+
+        Checks OLLAMA_BASE_URL environment variable or defaults to
+        http://localhost:11434. Verifies that Ollama is actually running.
+
+        Returns:
+            Dictionary with "base_url" if Ollama is running, otherwise None.
+        """
+        base_url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
+
+        if self._is_ollama_running(base_url):
+            return {"base_url": base_url}
+        return None
+
+    def ask_for_creds(self) -> dict[str, str] | None:
+        """Prompt user to configure Ollama connection interactively.
+
+        Allows user to specify a custom base URL or use the default. Verifies
+        that Ollama is running at the specified URL before accepting.
+
+        Returns:
+            Dictionary with "base_url" if Ollama is running at the specified
+            URL, otherwise None if user declines or connection fails.
+        """
+        ask = self._confirm_ask_for_creds()
+        if not ask:
+            return None
+
+        print(" Is Ollama running on a custom URL? (default: http://localhost:11434)")
+        print(" Press Enter to use default, or enter custom URL:")
+        base_url = input(" > ").strip()
+
+        if not base_url:
+            base_url = "http://localhost:11434"
+
+        if not self._is_ollama_running(base_url):
+            print(f" ⚠️ Cannot connect to Ollama at {base_url}")
+            print(" Make sure Ollama is installed and running: https://ollama.ai")
+            return None
+
+        return {"base_url": base_url}
+
+    def validate_creds(self, creds: dict[str, str]) -> None:
+        """Validate that Ollama is running at the configured URL.
+
+        Args:
+            creds: Dictionary containing "base_url" to validate.
+
+        Raises:
+            ValueError: If Ollama is not running at the specified URL.
+        """
+        base_url = creds.get("base_url", "http://localhost:11434")
+
+        if not self._is_ollama_running(base_url):
+            raise ValueError(
+                f"Cannot connect to Ollama at {base_url}\n"
+                "Make sure Ollama is installed and running.\n"
+                "Install from: https://ollama.ai"
+            )
+
+    def fetch_models(self, creds_map: CredentialsMap) -> ModelRegistry:
+        """Fetch available models from local Ollama instance.
+
+        Retrieves the list of models currently installed in Ollama.
+
+        Args:
+            creds_map: Credentials map containing Ollama provider configuration.
+
+        Returns:
+            ModelRegistry containing LLMConfig entries for all installed
+            Ollama models, or empty registry if none found or on error.
+        """
+        creds = {
+            k: DevKeyring.get(f"providers.{v}")
+            for k, v in creds_map.providers["ollama"].items()
+        }
+
+        base_url = creds.get("base_url", "http://localhost:11434")
+
+        try:
+            response = r.get(f"{base_url}/api/tags", timeout=5)
+            response.raise_for_status()
+            data = response.json()
+
+            models = data.get("models", [])
+
+            if not models:
+                print(
+                    " ⚠️ No models found. Pull models with: ollama pull <model-name>"
+                )
+                print(" Popular models: llama3.1, mistral, codellama, phi3")
+                return ModelRegistry()
+
+            model_configs = [self._make_model_cfg(m) for m in models]
+
+            names = " \n * ".join(m.display_name for m in model_configs)
+            print(f" Adding {len(model_configs)} LLMs from Ollama:\n * {names}")
+
+            return ModelRegistry(
+                llm={m.id: m for m in model_configs},
+            )
+
+        except Exception as e:
+            print(f" ⚠️ Error fetching Ollama models: {e}")
+            return ModelRegistry()
+
+    def _make_model_cfg(self, model: dict[str, Any]) -> LLMConfig:
+        model_name = model["name"]
+
+        display_name = model_name.replace(":", " ")
+
+        return LLMConfig(
+            id=f"ollama.{model_name}".lower(),
+            display_name=display_name,
+            credentials="ollama",
+            provider="ollama",
+            interface="ollama", # Ollama uses OpenAI-compatible API
+            model_id=model_name,
+        )
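For context (not part of the diff): the handler above decides whether Ollama is reachable by probing the `/api/tags` endpoint. A minimal standalone sketch of that check, assuming only the `requests` package; the `ollama_reachable` name is illustrative, not part of the yera package:

```python
# Illustrative sketch mirroring OllamaSetup._is_ollama_running / detect_creds above.
import os

import requests


def ollama_reachable(base_url: str) -> bool:
    """Return True if an Ollama server answers on /api/tags."""
    try:
        return requests.get(f"{base_url}/api/tags", timeout=2).status_code == 200
    except requests.RequestException:
        return False


if __name__ == "__main__":
    # Same default the handler uses when OLLAMA_BASE_URL is not set.
    url = os.environ.get("OLLAMA_BASE_URL", "http://localhost:11434")
    print(f"Ollama running at {url}: {ollama_reachable(url)}")
```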
yera/config2/setup_handlers/openai.py
ADDED
@@ -0,0 +1,137 @@
+# ruff: noqa: T201
+"""Configuration and credentials import handler for OpenAI."""
+
+import os
+import re
+
+from openai import OpenAI
+from openai.types import Model
+
+from yera.config2.dataclasses import CredentialsMap, LLMConfig, ModelRegistry
+from yera.config2.keyring import DevKeyring
+from yera.config2.setup_handlers.base import BaseProviderSetup
+
+
+class OpenAISetup(BaseProviderSetup):
+    """Setup handler for OpenAI provider credentials and models.
+
+    Manages credential detection, validation, and model fetching for the
+    OpenAI API. Supports both automatic import from environment variables
+    and interactive credential entry.
+    """
+
+    def __init__(self):
+        """Initialise the OpenAI setup handler."""
+        super().__init__("openai")
+
+    def detect_creds(self) -> dict[str, str] | None:
+        """Detect OpenAI credentials from environment variables.
+
+        Returns:
+            Dictionary with "api_key" if OPENAI_API_KEY environment variable
+            is set, otherwise None.
+        """
+        api_key = os.environ.get("OPENAI_API_KEY")
+        if api_key:
+            return {"api_key": api_key}
+        return None
+
+    def ask_for_creds(self) -> dict[str, str] | None:
+        """Prompt user to enter OpenAI API credentials interactively.
+
+        Returns:
+            Dictionary with "api_key" if user provides credentials, otherwise
+            None if user declines to enter credentials.
+        """
+        ask = self._confirm_ask_for_creds()
+        if ask:
+            print(" Please enter your OpenAI API Key:")
+            api_key = input(" > ")
+            return {"api_key": api_key}
+        return None
+
+    def validate_creds(self, creds: dict[str, str]) -> None:
+        """Validate OpenAI API key format.
+
+        Args:
+            creds: Dictionary containing "api_key" to validate.
+
+        Raises:
+            ValueError: If the API key format is invalid (wrong prefix, invalid
+                characters, appears incomplete, or is an Anthropic key).
+        """
+        api_key = creds["api_key"]
+
+        pattern = r"^sk-[A-Za-z0-9_-]+$"
+
+        if not api_key.startswith("sk-"):
+            raise ValueError(
+                "Invalid API key format. OpenAI API keys should start with 'sk-'\n"
+                "Please ensure you've copied the complete API key from the OpenAI Platform."
+            )
+
+        if api_key.startswith("sk-ant-"):
+            raise ValueError(
+                "This appears to be an Anthropic API key. OpenAI keys start with 'sk-' but not 'sk-ant-'\n"
+                "Please ensure you're using an OpenAI API key from platform.openai.com."
+            )
+
+        if not re.match(pattern, api_key):
+            raise ValueError(
+                "Invalid API key format. Key contains invalid characters.\n"
+                "Please ensure you've copied the complete API key from the OpenAI Platform."
+            )
+
+        if len(api_key) < 40:
+            raise ValueError(
+                "API key appears too short. Please ensure you've copied the complete key."
+            )
+
+    def fetch_models(self, creds_map: CredentialsMap) -> ModelRegistry:
+        """Fetch available chat models from OpenAI API.
+
+        Retrieves the list of available models and filters for just language models.
+
+        Args:
+            creds_map: Credentials map containing OpenAI provider credentials.
+
+        Returns:
+            ModelRegistry containing LLMConfig entries for all available
+            OpenAI chat models.
+        """
+        creds = {
+            k: DevKeyring.get(f"providers.{v}")
+            for k, v in creds_map.providers["openai"].items()
+        }
+
+        client = OpenAI(**creds)
+
+        # OpenAI returns all model types - filter to chat models only
+        model_list = client.models.list()
+
+        # Filter to GPT chat models and reasoning models
+        chat_models = [
+            m
+            for m in model_list.data
+            if m.id.startswith(("gpt-5", "gpt-4", "gpt-3", "o1-", "o3-"))
+        ]
+
+        def _make_model_cfg(model: Model) -> LLMConfig:
+            # OpenAI uses the model ID as display name
+            model_id = model.id.lower().replace(" ", "-").replace(".", "-")
+            return LLMConfig(
+                id=f"openai.{model_id}".lower(),
+                display_name=model.id,
+                credentials="openai",
+                provider="openai",
+                interface="openai-sdk",
+                model_id=model.id,
+            )
+
+        model_configs = [_make_model_cfg(model) for model in chat_models]
+        names = " \n * ".join(m.display_name for m in model_configs)
+        print(f" Adding {len(model_configs)} LLMs from OpenAI:\n * {names}")
+
+        return ModelRegistry(
+            llm={m.id: m for m in model_configs},
+        )
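As a side note (not part of the package), the validation in `validate_creds` reduces to a handful of string checks. A minimal sketch of those rules with made-up dummy keys; the `looks_like_openai_key` helper is illustrative only:

```python
# Illustrative sketch of the key-format rules used by OpenAISetup.validate_creds
# above; the keys below are fabricated placeholders, not real credentials.
import re

KEY_PATTERN = re.compile(r"^sk-[A-Za-z0-9_-]+$")


def looks_like_openai_key(api_key: str) -> tuple[bool, str]:
    """Return (ok, reason) according to the checks in the diff above."""
    if not api_key.startswith("sk-"):
        return False, "must start with 'sk-'"
    if api_key.startswith("sk-ant-"):
        return False, "looks like an Anthropic key"
    if not KEY_PATTERN.match(api_key):
        return False, "contains invalid characters"
    if len(api_key) < 40:
        return False, "appears too short"
    return True, "format ok"


print(looks_like_openai_key("sk-" + "a" * 40))      # (True, 'format ok')
print(looks_like_openai_key("sk-ant-" + "a" * 40))  # (False, 'looks like an Anthropic key')
```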
yera/config2/write.py
ADDED
@@ -0,0 +1,87 @@
+"""Module for writing yera config toml."""
+
+from pathlib import Path
+from typing import Literal
+
+import tomli_w
+
+from yera.config2.dataclasses import YeraConfig
+
+
+def _dump_config_object_to_dict(obj: YeraConfig) -> dict:
+    """Un-flatten a YeraConfig object ready for serialisation.
+
+    Args:
+        obj: YeraConfig object to un-flatten
+
+    Returns:
+        Dictionary with nested model structure suitable for TOML
+
+    """
+    yera_config_dict = obj.model_dump(exclude_unset=True, exclude_none=True)
+
+    models_categories = {}
+
+    for model_type, models in yera_config_dict["models"].items():
+        nested = {}
+
+        for model_config in models.values():
+            *parts, name = model_config.pop("id").split(".")
+            head = nested
+            for part in parts:
+                if part not in head:
+                    head[part] = {}
+                head = head[part]
+            head[name] = model_config
+
+        models_categories[model_type] = nested
+
+    yera_config_dict["models"] = models_categories
+
+    return yera_config_dict
+
+
+def write_config(
+    yera_config: YeraConfig,
+    location: Literal["global", "local"],
+    exists_ok: bool,
+) -> None:
+    """Write a YeraConfig object to a yera.toml file.
+
+    Args:
+        yera_config: The configuration object to serialize and write.
+        location: Where to write the config file. 'global' writes to
+            ~/.config/yera/yera.toml, 'local' writes to ./yera.toml
+            in the current working directory.
+        exists_ok: If False, raises FileExistsError when a yera.toml
+            already exists at the target location. If True, overwrites
+            existing files.
+
+    Raises:
+        ValueError: If location is not 'global' or 'local'.
+        FileExistsError: If a yera.toml already exists at the target
+            path and exists_ok is False.
+
+    Example:
+        >>> config = YeraConfig(...)
+        >>> write_config(config, location="local", exists_ok=False)
+        # Creates ./yera.toml in current directory
+
+    """
+    if location == "global":
+        path = Path.home() / ".config" / "yera"
+    elif location == "local":
+        path = Path.cwd()
+    else:
+        raise ValueError(f"Invalid location: {location}")
+
+    path = path / "yera.toml"
+
+    if path.exists() and not exists_ok:
+        raise FileExistsError(f"A yera.toml file already exists at {path}")
+
+    path.parent.mkdir(parents=True, exist_ok=True)
+
+    config_dict = _dump_config_object_to_dict(yera_config)
+    toml_content = tomli_w.dumps(config_dict)
+    path.write_text(toml_content, encoding="utf-8")
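For illustration only: `_dump_config_object_to_dict` turns flat, dot-separated model ids into nested tables before TOML serialisation. A standalone sketch of that un-flattening step with made-up model entries (the real input comes from `YeraConfig.model_dump`):

```python
# Illustrative sketch of the id un-flattening done in _dump_config_object_to_dict;
# the model entries here are fabricated for the example.
import tomli_w

flat_models = {
    "llm": {
        "openai.gpt-4o": {"id": "openai.gpt-4o", "provider": "openai"},
        "ollama.mistral": {"id": "ollama.mistral", "provider": "ollama"},
    }
}

nested_by_type: dict = {}
for model_type, models in flat_models.items():
    nested: dict = {}
    for cfg in models.values():
        *parts, name = cfg.pop("id").split(".")  # "openai.gpt-4o" -> ["openai"], "gpt-4o"
        head = nested
        for part in parts:
            head = head.setdefault(part, {})
        head[name] = cfg
    nested_by_type[model_type] = nested

print(tomli_w.dumps({"models": nested_by_type}))
# Emits nested tables such as [models.llm.openai.gpt-4o] and [models.llm.ollama.mistral]
```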
yera/dsl/__init__.py
ADDED
File without changes
yera/dsl/functions.py
ADDED
@@ -0,0 +1,94 @@
+from datetime import date
+
+from yera.events import (
+    request_input_buttons,
+    request_input_date_picker,
+    request_input_slider,
+    request_input_text,
+)
+from yera.events.blocks.base.layout import _current_layout
+from yera.events.stream import await_input
+from yera.models.llm_context import llm_context
+from yera.models.llm_interfaces.base import TBaseModel
+from yera.opaque import opaque
+
+
+@opaque
+def sys_prompt(prompt: str):
+    llm = llm_context()
+    llm.add_sys_line(prompt)
+
+
+@opaque
+def chat(prompt: str, **kwargs) -> str:
+    llm = llm_context()
+    return llm.prompt_chat(prompt, **kwargs)
+
+
+@opaque
+def struct(prompt: str, cls: type[TBaseModel], **kwargs) -> TBaseModel:
+    llm = llm_context()
+    return llm.prompt_struct(prompt, cls, **kwargs)
+
+
+@opaque
+def text_input(message: str | None = None) -> str:
+    layout_ctx = _current_layout.get()
+    if layout_ctx is not None:
+        request_input_text(message)
+        layout_ctx.input_count += 1
+        layout_ctx.result_parsers.append(lambda x: x)
+        return ""
+    request_input_text(message)
+    input_event = await_input()
+    return input_event.data
+
+
+@opaque
+def buttons(options: list[str], label: str | None = None) -> str:
+    layout_ctx = _current_layout.get()
+    if layout_ctx is not None:
+        request_input_buttons(options, label)
+        layout_ctx.input_count += 1
+        layout_ctx.result_parsers.append(lambda x: x)
+        return ""
+    request_input_buttons(options, label)
+    input_event = await_input()
+    return input_event.data
+
+
+@opaque
+def date_picker(
+    label: str,
+    default_date: date | str | None = None,
+) -> date:
+    layout_ctx = _current_layout.get()
+    if layout_ctx is not None:
+        request_input_date_picker(label, default_date)
+        layout_ctx.input_count += 1
+        layout_ctx.result_parsers.append(lambda x: date.fromisoformat(x))
+        return date.today()
+    request_input_date_picker(label, default_date)
+    input_event = await_input()
+    # Parse the string date from the input event
+    date_str = input_event.data
+    return date.fromisoformat(date_str)
+
+
+@opaque
+def slider(
+    min_value: float,
+    max_value: float,
+    label: str,
+    default_value: float | None = None,
+) -> float:
+    layout_ctx = _current_layout.get()
+    if layout_ctx is not None:
+        request_input_slider(min_value, max_value, label, default_value)
+        layout_ctx.input_count += 1
+        layout_ctx.result_parsers.append(lambda x: float(x))
+        return 0.0
+    request_input_slider(min_value, max_value, label, default_value)
+    input_event = await_input()
+    # Parse the float from the input event
+    return float(input_event.data)
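Aside (not part of the diff): each DSL input helper above has two paths. When a layout context is active it only registers the request plus a result parser and returns a placeholder; the real values are parsed later, once the whole layout's input arrives. A minimal generic sketch of that collect-then-parse pattern, with made-up names and no dependency on yera's runtime:

```python
# Generic sketch of the layout pattern used by text_input/slider/date_picker above:
# while declaring a form, only count inputs and remember how to parse each answer,
# then apply the parsers when the raw submissions come back. Names are illustrative.
from datetime import date


class LayoutSketch:
    def __init__(self) -> None:
        self.input_count = 0
        self.result_parsers = []

    def declare(self, parser) -> None:
        self.input_count += 1
        self.result_parsers.append(parser)

    def parse_results(self, raw_values: list[str]) -> list:
        assert len(raw_values) == self.input_count
        return [parse(raw) for parse, raw in zip(self.result_parsers, raw_values)]


layout = LayoutSketch()
layout.declare(lambda x: x)                      # text_input
layout.declare(lambda x: float(x))               # slider
layout.declare(lambda x: date.fromisoformat(x))  # date_picker

print(layout.parse_results(["Alice", "0.75", "2025-01-31"]))
# ['Alice', 0.75, datetime.date(2025, 1, 31)]
```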
yera/dsl/struct.py
ADDED
@@ -0,0 +1,20 @@
+from typing import Self, TypeVar
+
+from pydantic import BaseModel as PydanticBaseModel
+from pydantic import ConfigDict
+
+
+class Struct(PydanticBaseModel):
+    model_config = ConfigDict(extra="forbid")
+
+    @classmethod
+    def fill(cls, prompt: str, **kwargs) -> Self:
+        from .functions import struct
+
+        return struct(prompt, cls, **kwargs)
+
+    def __hash__(self) -> int:
+        return hash(repr(self))
+
+
+TBaseModel = TypeVar("TBaseModel", bound=Struct)
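For orientation only: `Struct` is a thin pydantic base whose `extra="forbid"` config rejects unexpected fields, and whose `fill` classmethod delegates to the `struct` DSL function. A hedged sketch of what a subclass might look like; `Holding` is a made-up model, and the `fill` call needs an active LLM context, so it is shown only as a comment:

```python
# Illustrative only: "Holding" is a made-up Struct subclass.
from yera.dsl.struct import Struct


class Holding(Struct):
    ticker: str
    shares: float


# extra="forbid" on Struct means unknown keys are rejected at validation time.
print(Holding(ticker="ACME", shares=10).shares)   # 10.0
# Holding(ticker="ACME", shares=10, price=1.0)    # would raise a ValidationError
# Inside an agent (with an LLM context active) the model can be filled from text:
# position = Holding.fill("Extract the holding from: '10 shares of ACME'")
```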
yera/dsl/workspace.py
ADDED
@@ -0,0 +1,79 @@
+from typing import Any
+
+from yera.models.llm_context import llm_context
+
+
+class Workspace:
+    """Workspace for sharing data between agents within the same LLM context.
+
+    Provides dict-like API for accessing workspace variables:
+    - Use `workspace[key]` to access required keys (raises KeyError if missing)
+    - Use `workspace.get(key, default)` for optional keys (returns default if missing)
+    - Use `key in workspace` to check if a key exists
+    - Use `workspace[key] = value` or `workspace.set(key, value)` to set values
+    """
+
+    def get(self, key: str, default: Any = None) -> Any:
+        """Get a value from workspace, returning default if key doesn't exist.
+
+        Args:
+            key: The workspace key to retrieve
+            default: Value to return if key doesn't exist (default: None)
+
+        Returns:
+            The value associated with key, or default if key doesn't exist
+        """
+        llm = llm_context()
+        return llm.workspace.variables.get(key, default)
+
+    def set(self, key: str, value: Any) -> None:
+        """Set a value in workspace.
+
+        Args:
+            key: The workspace key to set
+            value: The value to store
+        """
+        llm = llm_context()
+        llm.workspace.variables[key] = value
+
+    def __getitem__(self, key: str) -> Any:
+        """Get a value from workspace, raising KeyError if key doesn't exist.
+
+        Args:
+            key: The workspace key to retrieve
+
+        Returns:
+            The value associated with key
+
+        Raises:
+            KeyError: If the key doesn't exist in workspace
+        """
+        llm = llm_context()
+        if key not in llm.workspace.variables:
+            raise KeyError(f"Workspace key '{key}' not found")
+        return llm.workspace.variables[key]
+
+    def __setitem__(self, key: str, value: Any) -> None:
+        """Set a value in workspace.
+
+        Args:
+            key: The workspace key to set
+            value: The value to store
+        """
+        self.set(key, value)
+
+    def __contains__(self, key: str) -> bool:
+        """Check if a key exists in workspace.
+
+        Args:
+            key: The workspace key to check
+
+        Returns:
+            True if key exists, False otherwise
+        """
+        llm = llm_context()
+        return key in llm.workspace.variables
+
+
+workspace = Workspace()
+__all__ = ["workspace"]
yera/events/__init__.py
ADDED
@@ -0,0 +1,57 @@
+"""Chat-related helpers for the Yera library.
+
+The block construction helpers defined here are also re-exported at the top
+level of the `yera` package so that they can be used either as:
+
+    import yera
+
+    yera.events.markdown(...)
+
+or:
+
+    import yera as yr
+
+    yr.markdown(...)
+"""
+
+from __future__ import annotations
+
+from yera.events.blocks import (
+    action,
+    bar_chart,
+    columns,
+    container,
+    exit_event,
+    form,
+    input_echo,
+    line_chart,
+    markdown,
+    quit_event,
+    request_input_buttons,
+    request_input_date_picker,
+    request_input_slider,
+    request_input_text,
+    spinner,
+    system_prompt,
+    table,
+)
+
+__all__ = [
+    "action",
+    "bar_chart",
+    "columns",
+    "container",
+    "exit_event",
+    "form",
+    "input_echo",
+    "line_chart",
+    "markdown",
+    "quit_event",
+    "request_input_buttons",
+    "request_input_date_picker",
+    "request_input_slider",
+    "request_input_text",
+    "spinner",
+    "system_prompt",
+    "table",
+]