yera-0.1.1-py3-none-any.whl → yera-0.2.1-py3-none-any.whl

This diff shows the content of publicly released versions of the package as they appear in their respective public registries; it is provided for informational purposes only.
Files changed (192)
  1. infra_mvp/base_client.py +29 -0
  2. infra_mvp/base_server.py +68 -0
  3. infra_mvp/monitoring/__init__.py +15 -0
  4. infra_mvp/monitoring/metrics.py +185 -0
  5. infra_mvp/stream/README.md +56 -0
  6. infra_mvp/stream/__init__.py +14 -0
  7. infra_mvp/stream/__main__.py +101 -0
  8. infra_mvp/stream/agents/demos/financial/chart_additions_plan.md +170 -0
  9. infra_mvp/stream/agents/demos/financial/portfolio_assistant_stream.json +1571 -0
  10. infra_mvp/stream/agents/reference/blocks/action.json +170 -0
  11. infra_mvp/stream/agents/reference/blocks/button.json +66 -0
  12. infra_mvp/stream/agents/reference/blocks/date.json +65 -0
  13. infra_mvp/stream/agents/reference/blocks/input_prompt.json +94 -0
  14. infra_mvp/stream/agents/reference/blocks/layout.json +288 -0
  15. infra_mvp/stream/agents/reference/blocks/markdown.json +344 -0
  16. infra_mvp/stream/agents/reference/blocks/slider.json +67 -0
  17. infra_mvp/stream/agents/reference/blocks/spinner.json +110 -0
  18. infra_mvp/stream/agents/reference/blocks/table.json +56 -0
  19. infra_mvp/stream/agents/reference/chat_dynamics/branching_test_stream.json +145 -0
  20. infra_mvp/stream/app.py +49 -0
  21. infra_mvp/stream/container.py +112 -0
  22. infra_mvp/stream/schemas/__init__.py +16 -0
  23. infra_mvp/stream/schemas/agent.py +24 -0
  24. infra_mvp/stream/schemas/interaction.py +28 -0
  25. infra_mvp/stream/schemas/session.py +30 -0
  26. infra_mvp/stream/server.py +321 -0
  27. infra_mvp/stream/services/__init__.py +12 -0
  28. infra_mvp/stream/services/agent_service.py +40 -0
  29. infra_mvp/stream/services/event_converter.py +83 -0
  30. infra_mvp/stream/services/session_service.py +247 -0
  31. yera/__init__.py +50 -1
  32. yera/agents/__init__.py +2 -0
  33. yera/agents/context.py +41 -0
  34. yera/agents/dataclasses.py +69 -0
  35. yera/agents/decorator.py +207 -0
  36. yera/agents/discovery.py +124 -0
  37. yera/agents/typing/__init__.py +0 -0
  38. yera/agents/typing/coerce.py +408 -0
  39. yera/agents/typing/utils.py +19 -0
  40. yera/agents/typing/validate.py +206 -0
  41. yera/cli.py +377 -0
  42. yera/config/__init__.py +1 -0
  43. yera/config/config_utils.py +164 -0
  44. yera/config/function_config.py +55 -0
  45. yera/config/logging.py +18 -0
  46. yera/config/tool_config.py +8 -0
  47. yera/config2/__init__.py +8 -0
  48. yera/config2/dataclasses.py +534 -0
  49. yera/config2/keyring.py +270 -0
  50. yera/config2/paths.py +28 -0
  51. yera/config2/read.py +113 -0
  52. yera/config2/setup.py +109 -0
  53. yera/config2/setup_handlers/__init__.py +1 -0
  54. yera/config2/setup_handlers/anthropic.py +126 -0
  55. yera/config2/setup_handlers/azure.py +236 -0
  56. yera/config2/setup_handlers/base.py +125 -0
  57. yera/config2/setup_handlers/llama_cpp.py +205 -0
  58. yera/config2/setup_handlers/ollama.py +157 -0
  59. yera/config2/setup_handlers/openai.py +137 -0
  60. yera/config2/write.py +87 -0
  61. yera/dsl/__init__.py +0 -0
  62. yera/dsl/functions.py +94 -0
  63. yera/dsl/struct.py +20 -0
  64. yera/dsl/workspace.py +79 -0
  65. yera/events/__init__.py +57 -0
  66. yera/events/blocks/__init__.py +68 -0
  67. yera/events/blocks/action.py +57 -0
  68. yera/events/blocks/bar_chart.py +92 -0
  69. yera/events/blocks/base/__init__.py +20 -0
  70. yera/events/blocks/base/base.py +166 -0
  71. yera/events/blocks/base/chart.py +288 -0
  72. yera/events/blocks/base/layout.py +111 -0
  73. yera/events/blocks/buttons.py +37 -0
  74. yera/events/blocks/columns.py +26 -0
  75. yera/events/blocks/container.py +24 -0
  76. yera/events/blocks/date_picker.py +50 -0
  77. yera/events/blocks/exit.py +39 -0
  78. yera/events/blocks/form.py +24 -0
  79. yera/events/blocks/input_echo.py +22 -0
  80. yera/events/blocks/input_request.py +31 -0
  81. yera/events/blocks/line_chart.py +97 -0
  82. yera/events/blocks/markdown.py +67 -0
  83. yera/events/blocks/slider.py +54 -0
  84. yera/events/blocks/spinner.py +55 -0
  85. yera/events/blocks/system_prompt.py +22 -0
  86. yera/events/blocks/table.py +291 -0
  87. yera/events/models/__init__.py +39 -0
  88. yera/events/models/block_data.py +112 -0
  89. yera/events/models/in_event.py +7 -0
  90. yera/events/models/out_event.py +75 -0
  91. yera/events/runtime.py +187 -0
  92. yera/events/stream.py +91 -0
  93. yera/models/__init__.py +0 -0
  94. yera/models/data_classes.py +20 -0
  95. yera/models/llm_atlas_proxy.py +44 -0
  96. yera/models/llm_context.py +99 -0
  97. yera/models/llm_interfaces/__init__.py +0 -0
  98. yera/models/llm_interfaces/anthropic.py +153 -0
  99. yera/models/llm_interfaces/aws_bedrock.py +14 -0
  100. yera/models/llm_interfaces/azure_openai.py +143 -0
  101. yera/models/llm_interfaces/base.py +26 -0
  102. yera/models/llm_interfaces/interface_registry.py +74 -0
  103. yera/models/llm_interfaces/llama_cpp.py +136 -0
  104. yera/models/llm_interfaces/mock.py +29 -0
  105. yera/models/llm_interfaces/ollama_interface.py +118 -0
  106. yera/models/llm_interfaces/open_ai.py +150 -0
  107. yera/models/llm_workspace.py +19 -0
  108. yera/models/model_atlas.py +139 -0
  109. yera/models/model_definition.py +38 -0
  110. yera/models/model_factory.py +33 -0
  111. yera/opaque/__init__.py +9 -0
  112. yera/opaque/base.py +20 -0
  113. yera/opaque/decorator.py +8 -0
  114. yera/opaque/markdown.py +57 -0
  115. yera/opaque/opaque_function.py +25 -0
  116. yera/tools/__init__.py +29 -0
  117. yera/tools/atlas_tool.py +20 -0
  118. yera/tools/base.py +24 -0
  119. yera/tools/decorated_tool.py +18 -0
  120. yera/tools/decorator.py +35 -0
  121. yera/tools/tool_atlas.py +51 -0
  122. yera/tools/tool_utils.py +361 -0
  123. yera/ui/dist/404.html +1 -0
  124. yera/ui/dist/__next.__PAGE__.txt +10 -0
  125. yera/ui/dist/__next._full.txt +23 -0
  126. yera/ui/dist/__next._head.txt +6 -0
  127. yera/ui/dist/__next._index.txt +5 -0
  128. yera/ui/dist/__next._tree.txt +7 -0
  129. yera/ui/dist/_next/static/T8WGYqDMoHDKKoHj0O3HK/_buildManifest.js +11 -0
  130. yera/ui/dist/_next/static/T8WGYqDMoHDKKoHj0O3HK/_clientMiddlewareManifest.json +1 -0
  131. yera/ui/dist/_next/static/T8WGYqDMoHDKKoHj0O3HK/_ssgManifest.js +1 -0
  132. yera/ui/dist/_next/static/chunks/4c4688e1ff21ad98.js +1 -0
  133. yera/ui/dist/_next/static/chunks/652cd53c27924d50.js +4 -0
  134. yera/ui/dist/_next/static/chunks/786d2107b51e8499.css +1 -0
  135. yera/ui/dist/_next/static/chunks/7de9141b1af425c3.js +1 -0
  136. yera/ui/dist/_next/static/chunks/87ef65064d3524c1.js +2 -0
  137. yera/ui/dist/_next/static/chunks/a6dad97d9634a72d.js +1 -0
  138. yera/ui/dist/_next/static/chunks/a6dad97d9634a72d.js.map +1 -0
  139. yera/ui/dist/_next/static/chunks/c4c79d5d0b280aeb.js +1 -0
  140. yera/ui/dist/_next/static/chunks/dc2d2a247505d66f.css +5 -0
  141. yera/ui/dist/_next/static/chunks/f773f714b55ec620.js +37 -0
  142. yera/ui/dist/_next/static/chunks/turbopack-98b3031e1b1dbc33.js +4 -0
  143. yera/ui/dist/_next/static/media/14e23f9b59180572-s.9c448f3c.woff2 +0 -0
  144. yera/ui/dist/_next/static/media/2a65768255d6b625-s.p.d19752fb.woff2 +0 -0
  145. yera/ui/dist/_next/static/media/2b2eb4836d2dad95-s.f36de3af.woff2 +0 -0
  146. yera/ui/dist/_next/static/media/31183d9fd602dc89-s.c4ff9b73.woff2 +0 -0
  147. yera/ui/dist/_next/static/media/3fcb63a1ac6a562e-s.2f77a576.woff2 +0 -0
  148. yera/ui/dist/_next/static/media/45ec8de98929b0f6-s.81056204.woff2 +0 -0
  149. yera/ui/dist/_next/static/media/4fa387ec64143e14-s.c1fdd6c2.woff2 +0 -0
  150. yera/ui/dist/_next/static/media/65c558afe41e89d6-s.e2c8389a.woff2 +0 -0
  151. yera/ui/dist/_next/static/media/67add6cc0f54b8cf-s.8ce53448.woff2 +0 -0
  152. yera/ui/dist/_next/static/media/7178b3e590c64307-s.b97b3418.woff2 +0 -0
  153. yera/ui/dist/_next/static/media/797e433ab948586e-s.p.dbea232f.woff2 +0 -0
  154. yera/ui/dist/_next/static/media/8a480f0b521d4e75-s.8e0177b5.woff2 +0 -0
  155. yera/ui/dist/_next/static/media/a8ff2d5d0ccb0d12-s.fc5b72a7.woff2 +0 -0
  156. yera/ui/dist/_next/static/media/aae5f0be330e13db-s.p.853e26d6.woff2 +0 -0
  157. yera/ui/dist/_next/static/media/b11a6ccf4a3edec7-s.2113d282.woff2 +0 -0
  158. yera/ui/dist/_next/static/media/b49b0d9b851e4899-s.4f3fa681.woff2 +0 -0
  159. yera/ui/dist/_next/static/media/bbc41e54d2fcbd21-s.799d8ef8.woff2 +0 -0
  160. yera/ui/dist/_next/static/media/caa3a2e1cccd8315-s.p.853070df.woff2 +0 -0
  161. yera/ui/dist/_next/static/media/favicon.0b3bf435.ico +0 -0
  162. yera/ui/dist/_not-found/__next._full.txt +14 -0
  163. yera/ui/dist/_not-found/__next._head.txt +6 -0
  164. yera/ui/dist/_not-found/__next._index.txt +5 -0
  165. yera/ui/dist/_not-found/__next._not-found.__PAGE__.txt +5 -0
  166. yera/ui/dist/_not-found/__next._not-found.txt +4 -0
  167. yera/ui/dist/_not-found/__next._tree.txt +2 -0
  168. yera/ui/dist/_not-found.html +1 -0
  169. yera/ui/dist/_not-found.txt +14 -0
  170. yera/ui/dist/agent-icon.svg +3 -0
  171. yera/ui/dist/favicon.ico +0 -0
  172. yera/ui/dist/file.svg +1 -0
  173. yera/ui/dist/globe.svg +1 -0
  174. yera/ui/dist/index.html +1 -0
  175. yera/ui/dist/index.txt +23 -0
  176. yera/ui/dist/logo/full_logo.png +0 -0
  177. yera/ui/dist/logo/rune_logo.png +0 -0
  178. yera/ui/dist/logo/rune_logo_borderless.png +0 -0
  179. yera/ui/dist/logo/text_logo.png +0 -0
  180. yera/ui/dist/next.svg +1 -0
  181. yera/ui/dist/send.png +0 -0
  182. yera/ui/dist/send_single.png +0 -0
  183. yera/ui/dist/vercel.svg +1 -0
  184. yera/ui/dist/window.svg +1 -0
  185. yera/utils/__init__.py +1 -0
  186. yera/utils/path_utils.py +38 -0
  187. yera-0.2.1.dist-info/METADATA +65 -0
  188. yera-0.2.1.dist-info/RECORD +190 -0
  189. {yera-0.1.1.dist-info → yera-0.2.1.dist-info}/WHEEL +1 -1
  190. yera-0.2.1.dist-info/entry_points.txt +2 -0
  191. yera-0.1.1.dist-info/METADATA +0 -11
  192. yera-0.1.1.dist-info/RECORD +0 -4
yera/events/runtime.py ADDED
@@ -0,0 +1,187 @@
+ import multiprocessing as mp
+ import shutil
+ import string
+ from time import sleep
+
+ from tabulate import tabulate
+ from termcolor import colored
+
+ from yera.events.models import TableData
+ from yera.events.stream import EventStream, OutputEvent, push_text_input
+
+
+ def pretty_print_table(data: TableData):
+     print(tabulate(data.rows, headers=data.columns, tablefmt="fancy_grid"))
+
+
+ class PyRuntimeExecutor:
+     """Execute an agent in a separate process using an EventStream."""
+
+     def __init__(self, agent, args):
+         """Initialise the runtime executor.
+
+         Args:
+             agent: The agent to execute.
+             args: Positional arguments to pass to the agent's ``invoke`` method.
+         """
+         self.process: mp.Process | None = None
+         self.agent = agent
+         self.args = args
+         self.stream: EventStream | None = None
+
+     @staticmethod
+     def _agent_process(agent, stream: EventStream, *args):
+         """Target function for the subprocess.
+
+         Sets the current EventStream, marks the agent as top-level, and invokes it.
+         """
+         stream.set_current()
+         agent.top_level = True
+         agent.invoke(*args)
+
+     def start(self) -> None:
+         """Start the agent execution in a new process with an internally created EventStream."""
+         if self.process is not None and self.process.is_alive():
+             # Already running; no-op
+             return
+
+         ctx = mp.get_context("spawn")
+         self.stream = EventStream.build()
+         self.process = ctx.Process(
+             target=self._agent_process,
+             args=(self.agent, self.stream, *self.args),
+         )
+         self.process.start()
+
+     def stop(self) -> None:
+         """Stop the agent process if it is running and clean up resources."""
+         # Capture process locally to avoid race conditions
+         process = self.process
+         if process is None:
+             return
+
+         process.join(timeout=2)
+         if process.is_alive():
+             process.terminate()
+             process.join()
+
+         self.process = None
+         self.stream = None
+
+     def is_running(self) -> bool:
+         """Check if the executor process is running.
+
+         Returns:
+             True if process is alive and stream is available
+         """
+         return (
+             self.process is not None
+             and self.process.is_alive()
+             and self.stream is not None
+         )
+
+     def __enter__(self):
+         """Start execution with a fresh EventStream and return self."""
+         self.start()
+         return self
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         """Ensure the subprocess is terminated when leaving the context."""
+         self.stop()
+         return False
+
+
+ class TokenWrapper:
+     def __init__(self, max_width=100):
+         terminal_width = shutil.get_terminal_size().columns
+         self.width = min(max_width, terminal_width)  # Cap at max_width
+         self.current_line_length = 0
+
+     def print_token(self, token):
+         # Handle newlines in token
+         if "\n" in token:
+             print(token, end="", flush=True)
+             self.current_line_length = len(token.split("\n")[-1])
+             return
+
+         token_length = len(token)
+
+         # Check if token is only punctuation
+         is_punctuation = token.strip() and all(
+             c in string.punctuation for c in token.strip()
+         )
+
+         # Would this token overflow the line?
+         longer = self.current_line_length + token_length > self.width
+         if longer and not is_punctuation and self.current_line_length > 0:
+             # Break to new line
+             print("\n", end="", flush=True)
+             token = token.lstrip(" ")  # Remove leading spaces
+             token_length = len(token)
+             self.current_line_length = 0
+
+         print(token, end="", flush=True)
+         self.current_line_length += token_length
+
+     def reset(self, new_lines: int = 1):
+         """Reset line counter (call when starting new paragraph/section)"""
+         self.current_line_length = 0
+         print("\n" * (new_lines - 1))
+
+
+ def stream_handler(metadata) -> OutputEvent:
+     name = colored(metadata.name, "cyan")
+     print(f"Starting {name}:")
+     print(metadata.pretty_print())
+     stream = EventStream.build()
+
+     exit_event = None
+
+     current_agent_id = None
+     current_block_id = None
+
+     wrapper = TokenWrapper(max_width=100)  # Cap at 100 columns
+
+     while exit_event is None:
+         for event in stream.iter_output():
+             if event.block_type == "exit":
+                 exit_event = event
+                 break
+
+             is_new_agent = event.agent_instance.agent_id != current_agent_id
+             if is_new_agent:
+                 wrapper.reset(new_lines=2)
+                 label = colored(event.agent_instance.agent_id.split(".")[-1], "yellow")
+                 print(f"[{label}]", end="")
+
+             is_new_block = event.block_id != current_block_id
+             if is_new_block:
+                 wrapper.reset()
+                 label = colored(event.block_type.upper(), "green")
+                 print(f"[{label}]")
+
+             # current_block_type = event.block_type
+             current_agent_id = event.agent_instance.agent_id
+             current_block_id = event.block_id
+
+             if event.block_type == "input_request":
+                 print()
+                 res = input(event.data.message)
+                 push_text_input(res)
+
+             elif event.block_type in ["input_prompt", "system_prompt", "markdown"]:
+                 wrapper.print_token(event.data.content)
+             elif event.block_type == "table":
+                 pretty_print_table(event.data)
+             else:
+                 print(event.data.model_dump_json(indent=2))
+
+         sleep(0.1)
+
+     exit_data = exit_event.data
+     colour = "cyan" if exit_data.exit_code == 0 else "red"
+     exit_msg = colored(f"Yera exit code {exit_data.exit_code}".upper(), colour)
+     print(f"\n\n[{exit_msg}]")
+     print(exit_data.model_dump_json(indent=2))
+     list(stream.iter_output())
+     return exit_event
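A minimal sketch of how these pieces appear to fit together, for orientation only: `PyRuntimeExecutor` runs the agent in a child process sharing an `EventStream`, while `stream_handler` drains that stream in the parent and renders blocks to the terminal. The `demo_agent` object and its `metadata` attribute are hypothetical stand-ins; only the imports come from the file above.

```python
# Sketch, not part of the package: `demo_agent` is a hypothetical object that
# exposes the attributes PyRuntimeExecutor and stream_handler expect
# (invoke(), top_level, metadata.name, metadata.pretty_print()).
from yera.events.runtime import PyRuntimeExecutor, stream_handler


def run_in_terminal(demo_agent, *args):
    # The executor builds (or reuses) the current EventStream and spawns the
    # agent process; stream_handler reads that stream until an exit block arrives.
    with PyRuntimeExecutor(demo_agent, args):
        exit_event = stream_handler(demo_agent.metadata)
    return exit_event
```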
yera/events/stream.py ADDED
@@ -0,0 +1,91 @@
+ import multiprocessing as mp
+ from collections.abc import Iterator
+ from contextvars import ContextVar
+ from typing import Optional
+
+ from .models.in_event import InputEvent
+ from .models.out_event import OutputEvent
+
+ _event_stream_context: ContextVar[Optional["EventStream"]] = ContextVar(
+     "event_stream", default=None
+ )
+
+
+ def push_output(event: OutputEvent):
+     stream = EventStream.get_current()
+     stream.push_output(event)
+
+
+ def await_input(timeout=None) -> InputEvent:
+     stream = EventStream.get_current()
+     return stream.pop_input(timeout)
+
+
+ def push_input(in_event: InputEvent):
+     stream = EventStream.get_current()
+     stream.push_input(in_event)
+
+
+ def push_text_input(text: str):
+     in_event = InputEvent(identifier="ABC", type="text", data=text)
+     push_input(in_event)
+
+
+ class EventStream:
+     def __init__(self, out_event_queue, in_event_queue):
+         self.out_event_queue = out_event_queue
+         self.in_event_queue = in_event_queue
+         super().__init__()
+
+     def push_output(self, event: OutputEvent) -> None:
+         if not isinstance(event, OutputEvent):
+             raise TypeError(f"Expected OutputEvent got {type(event).__name__}")
+         self.out_event_queue.put(event)
+
+     def push_input(self, in_event: InputEvent) -> None:
+         if not isinstance(in_event, InputEvent):
+             raise TypeError(f"Expected InputEvent got {type(in_event).__name__}")
+         self.in_event_queue.put(in_event)
+
+     def pop_output(self, timeout: float | None = None) -> OutputEvent:
+         return self.out_event_queue.get(timeout=timeout)
+
+     def pop_input(self, timeout: float | None = None) -> InputEvent:
+         return self.in_event_queue.get(timeout=timeout)
+
+     def has_output(self) -> bool:
+         return not self.out_event_queue.empty()
+
+     def has_input(self) -> bool:
+         return not self.in_event_queue.empty()
+
+     def iter_output(self) -> Iterator[OutputEvent]:
+         while self.has_output():
+             yield self.pop_output()
+
+     def set_current(self):
+         if _event_stream_context.get() is not None:
+             raise RuntimeError("Event stream is already set in context.")
+         _event_stream_context.set(self)
+
+     @classmethod
+     def get_current(cls):
+         reg = _event_stream_context.get()
+         if reg is None:
+             raise RuntimeError(
+                 "No event stream has been set in current context. "
+                 "Call EventStream's set_current method first."
+             )
+         return reg
+
+     @staticmethod
+     def build() -> "EventStream":
+         reg = _event_stream_context.get()
+         if reg is not None:
+             return reg
+
+         ctx = mp.get_context("spawn")
+         manager = ctx.Manager()
+         stream = EventStream(manager.Queue(), manager.Queue())
+         stream.set_current()
+         return stream
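As a quick illustration of the queue contract above: `EventStream.build()` both creates the manager-backed queues and registers the stream in the context variable, after which the module-level helpers operate on it. A sketch, assuming `InputEvent` exposes the `type` and `data` fields used by `push_text_input`:

```python
# Sketch, not part of the package.
from yera.events.stream import EventStream, await_input, push_text_input

stream = EventStream.build()            # creates the Manager queues and sets the context var
push_text_input("hello from the UI")    # wraps the text in an InputEvent and enqueues it

event = await_input(timeout=1)          # pops that same InputEvent off the input queue
print(event.type, event.data)           # assumed fields, per the InputEvent(...) call above
```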
yera/models/__init__.py (file without changes)
yera/models/data_classes.py ADDED
@@ -0,0 +1,20 @@
+ from typing import Literal
+
+ from pydantic import BaseModel
+
+
+ class Message(BaseModel):
+     role: Literal["user", "system", "assistant"]
+     content: dict | str
+
+     @staticmethod
+     def user(content: str) -> "Message":
+         return Message(role="user", content=content)
+
+     @staticmethod
+     def system(content: str) -> "Message":
+         return Message(role="system", content=content)
+
+     @staticmethod
+     def assistant(content: str) -> "Message":
+         return Message(role="assistant", content=content)
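The static constructors are thin conveniences over the pydantic model; a sketch of assembling a conversation list with them:

```python
# Sketch, not part of the package.
from yera.models.data_classes import Message

messages = [
    Message.system("You are a terse assistant."),
    Message.user("Summarise the changes in one line."),
]
print(messages[0].role, messages[1].content)  # "system" "Summarise the changes in one line."
```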
yera/models/llm_atlas_proxy.py ADDED
@@ -0,0 +1,44 @@
+ from yera.config2.paths import has_global_yera_toml, has_local_yera_toml
+ from yera.models.model_atlas import ModelAtlas
+
+
+ class _ModelsProxy:
+     _atlas: ModelAtlas | None = None
+
+     def __init__(self):
+         self._atlas = None
+
+     def _ensure_atlas(self) -> ModelAtlas:
+         from ..config2.read import read_config
+
+         if has_global_yera_toml() or has_local_yera_toml():
+             yera_config = read_config()
+             self._atlas = ModelAtlas.from_yera_config(yera_config)
+         else:
+             self._atlas = ModelAtlas()
+         return self._atlas
+
+     def __getattr__(self, name):
+         return getattr(self._ensure_atlas(), name)
+
+     def __getitem__(self, key):
+         return self._ensure_atlas()[key]
+
+     def __dir__(self):
+         """Forward to atlas for tab completion"""
+         return dir(self._ensure_atlas())
+
+     def _ipython_key_completions_(self):
+         """Forward to atlas for IPython/Jupyter completion"""
+         return self._ensure_atlas()._ipython_key_completions_()
+
+     def list_models(self):
+         """Forward the list_models method"""
+         return self._ensure_atlas().list_models()
+
+     def _repr_html_(self):
+         """Forward HTML representation for Jupyter notebooks"""
+         return self._ensure_atlas()._repr_html_()
+
+
+ llm: ModelAtlas = _ModelsProxy()
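The module-level `llm` object is a lazy proxy: no configuration is read until the first attribute or key access, at which point `_ensure_atlas()` builds a `ModelAtlas` from `yera.toml` if one is found. A usage sketch (the model key is hypothetical):

```python
# Sketch, not part of the package.
from yera.models.llm_atlas_proxy import llm

print(llm.list_models())        # first access triggers _ensure_atlas(), then forwards
# model = llm["my-model-id"]    # hypothetical key; __getitem__ forwards to the atlas
```

Note that the `ModelAtlas` annotation on `llm` describes the forwarded interface rather than the actual type, which is `_ModelsProxy`.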
yera/models/llm_context.py ADDED
@@ -0,0 +1,99 @@
+ from contextvars import ContextVar
+ from typing import Optional
+
+ from yera.events import input_echo, markdown, system_prompt
+ from yera.models.llm_interfaces.base import BaseLLMInterface, TBaseModel
+ from yera.models.llm_workspace import LLMWorkspace
+
+ _current_llm_context: ContextVar[Optional["LLMContext"]] = ContextVar(
+     "llm_context", default=None
+ )
+
+
+ class LLMContext:
+     def __init__(
+         self,
+         *,
+         interface: BaseLLMInterface,
+         workspace: LLMWorkspace,
+     ):
+         self.workspace = workspace
+         self.interface = interface
+         self._token = None
+
+     def _push_event(self, role, content):
+         if role == "user":
+             input_echo(content)
+         elif role == "system":
+             system_prompt(content)
+         elif role == "agent":
+             markdown(content)
+         else:
+             raise ValueError(f"Unknown role: {role}")
+
+     def add_sys_line(self, content):
+         self._push_event("system", content)
+         self.workspace.add_sys_message(content)
+
+     def add_user_line(self, content):
+         self._push_event("user", content)
+         self.workspace.add_user_message(content)
+
+     def add_assistant_line(self, content):
+         self.workspace.add_assistant_message(content)
+
+     def set(self, key, value):
+         self.workspace.variables[key] = value
+
+     def get(self, key, default=None):
+         return self.workspace.variables.get(key, default)
+
+     def __getitem__(self, key):
+         return self.workspace.variables[key]
+
+     def prompt_chat(self, prompt: str, **kwargs) -> str:
+         self.add_user_line(prompt)
+         stream = self.interface.chat(self.workspace.messages, **kwargs)
+
+         tokens = []
+         # Use markdown as context manager to create a single stream
+         # All tokens will go to the same block with sequential chunk_ids
+         with markdown() as md:
+             for token in stream:
+                 md.append(token)
+                 tokens.append(token)
+
+         response = "".join(tokens)
+         self.add_assistant_line(response)
+         return response
+
+     def prompt_struct(self, prompt: str, cls: type[TBaseModel], **kwargs) -> TBaseModel:
+         self.add_user_line(prompt)
+         stream = self.interface.make_struct(self.workspace.messages, cls, **kwargs)
+
+         tokens = []
+         # Use markdown as context manager to create a single stream
+         # All tokens will go to the same block with sequential chunk_ids
+         with markdown() as md:
+             for token in stream:
+                 md.append(token)
+                 tokens.append(token)
+
+         response_json = "".join(tokens)
+         self.add_assistant_line(response_json)
+         return cls.model_validate_json(response_json)
+
+     def __enter__(self):
+         self._token = _current_llm_context.set(self)
+         self.interface.start()
+
+     def __exit__(self, exc_type, exc_val, exc_tb):
+         self.interface.stop()
+         _current_llm_context.reset(self._token)
+
+
+ def llm_context() -> LLMContext:
+     llm = _current_llm_context.get()
+     if llm is None:
+         raise RuntimeError("No current LLM")
+     return llm
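A sketch of how `LLMContext` is meant to be driven (the `interface` and `workspace` arguments are assumed to be constructed elsewhere; the diff does not show that wiring). Because `__enter__` does not return the context, `with LLMContext(...) as ctx:` would bind `ctx` to `None`; the `llm_context()` accessor is how code inside the block reaches it.

```python
# Sketch, not part of the package: `interface` and `workspace` come from elsewhere.
from pydantic import BaseModel

from yera.models.llm_context import LLMContext, llm_context


class ReleaseNote(BaseModel):
    headline: str
    risk: str


def summarise(interface, workspace):
    with LLMContext(interface=interface, workspace=workspace):
        ctx = llm_context()                          # fetch the context set in __enter__
        ctx.add_sys_line("Answer in one sentence.")
        text = ctx.prompt_chat("What changed?")      # streamed into a single markdown block
        note = ctx.prompt_struct("Return a release note.", ReleaseNote)
        return text, note
```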
yera/models/llm_interfaces/__init__.py (file without changes)
yera/models/llm_interfaces/anthropic.py ADDED
@@ -0,0 +1,153 @@
+ import re
+ from collections.abc import Iterator
+
+ from anthropic import Anthropic, transform_schema
+
+ from yera.config2.dataclasses import LLMConfig
+ from yera.config2.keyring import DevKeyring
+ from yera.models.data_classes import Message
+ from yera.models.llm_interfaces.base import BaseLLMInterface, TBaseModel
+
+
+ class AnthropicLLM(BaseLLMInterface):
+     def __init__(self, config: LLMConfig, overrides, creds_map):
+         kws = {**config.inference.model_dump(), **overrides}
+         self.client = None
+         self.model_id = config.model_id
+         self.struct_gen = self._supports_structured_outputs(self.model_id)
+         self.creds_map = creds_map
+         super().__init__(**kws)
+
+     @staticmethod
+     def _supports_structured_outputs(model_id: str) -> bool:
+         pattern = r"claude-(?:sonnet|opus|haiku)-(\d+)-(\d+)"
+         match = re.search(pattern, model_id)
+
+         if not match:
+             return False
+
+         major = int(match.group(1))
+         minor = int(match.group(2))
+
+         # Structured outputs supported on 4.5+
+         return (major, minor) >= (4, 5)
+
+     def start(self):
+         api_key = DevKeyring.get("providers." + self.creds_map["api_key"])
+         self.client = Anthropic(api_key=api_key)
+
+     def stop(self):
+         self.client = None
+
+     def chat(
+         self, messages: list[Message], *, max_tokens: int = 4096, **anthropic_kw
+     ) -> Iterator[str]:
+         # todo: claude does not accept system lines, but one a single system param
+         # this means sys lines in the middle of the conversation have to be converted
+         # into user lines. Content: f"<instruction>\n{msg['content']}\n</instruction>"
+
+         system_lines = [msg.content for msg in messages if msg.role == "system"]
+         messages = [
+             {"role": msg.role, "content": msg.content}
+             for msg in messages
+             if msg.role != "system"
+         ]
+
+         system = "\n\n".join(system_lines) if system_lines else None
+
+         kws = {
+             "model": self.model_id,
+             "max_tokens": max_tokens,
+             "messages": messages,
+             **anthropic_kw,
+             "thinking": {"type": "enabled", "budget_tokens": 1024},
+         }
+         if system:
+             kws["system"] = system
+
+         with self.client.messages.stream(
+             **kws,
+         ) as stream:
+             for event in stream:
+                 if event.type == "content_block_start":
+                     if event.content_block.type == "thinking":
+                         yield "<thinking>"
+                     elif event.content_block.type == "text":
+                         yield "\n<thinking>\n\n"
+
+                 elif event.type == "content_block_delta":
+                     if event.delta.type == "thinking_delta":
+                         yield event.delta.thinking
+                     elif event.delta.type == "text_delta":
+                         yield event.delta.text
+
+     def make_struct(
+         self,
+         messages: list[Message],
+         cls: type[TBaseModel],
+         *,
+         max_tokens: int = 4096,
+         **anthropic_kw,
+     ) -> Iterator[str]:
+         system_lines = [msg.content for msg in messages if msg.role == "system"]
+         conversation_messages = [
+             {"role": msg.role, "content": msg.content}
+             for msg in messages
+             if msg.role != "system"
+         ]
+
+         system = "\n\n".join(system_lines) if system_lines else None
+
+         if self.struct_gen:
+             kws = {
+                 "model": self.model_id,
+                 "max_tokens": max_tokens,
+                 "messages": conversation_messages,
+                 "betas": ["structured-outputs-2025-11-13"],
+                 "output_format": {
+                     "type": "json_schema",
+                     "schema": transform_schema(cls),
+                 },
+                 **anthropic_kw,
+             }
+             if system:
+                 kws["system"] = system
+
+             with self.client.beta.messages.stream(**kws) as stream:
+                 yield from stream.text_stream
+         else:
+             tool_schema = {
+                 "name": "return_structured_output",
+                 "description": f"Returns structured data conforming to the {cls.__name__} schema",
+                 "input_schema": cls.model_json_schema(),
+             }
+
+             kws = {
+                 "model": self.model_id,
+                 "max_tokens": max_tokens,
+                 "messages": conversation_messages,
+                 "tools": [tool_schema],
+                 "tool_choice": {"type": "tool", "name": "return_structured_output"},
+                 **anthropic_kw,
+             }
+             if system:
+                 kws["system"] = system
+
+             tool_use_started = False
+
+             with self.client.messages.stream(**kws) as stream:
+                 for event in stream:
+                     if event.type == "content_block_start":
+                         if (
+                             hasattr(event.content_block, "type")
+                             and event.content_block.type == "tool_use"
+                         ):
+                             tool_use_started = True  # We got the tool call!
+
+                     elif event.type == "content_block_delta":
+                         delta = event.delta
+                         if hasattr(delta, "type") and delta.type == "input_json_delta":
+                             yield delta.partial_json
+
+             if not tool_use_started:
+                 raise ValueError("Model did not call the structured output tool")
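The structured-output path is gated on the model version parsed out of `model_id`; when the gate fails, `make_struct` falls back to a forced tool call and streams the tool's `input_json_delta` chunks. A standalone restatement of that gate, with illustrative model IDs:

```python
# Sketch, not part of the package: mirrors _supports_structured_outputs above.
import re


def supports_structured_outputs(model_id: str) -> bool:
    match = re.search(r"claude-(?:sonnet|opus|haiku)-(\d+)-(\d+)", model_id)
    if not match:
        return False
    return (int(match.group(1)), int(match.group(2))) >= (4, 5)


print(supports_structured_outputs("claude-sonnet-4-5"))  # True: native structured outputs
print(supports_structured_outputs("claude-opus-4-1"))    # False: forced tool-use fallback
print(supports_structured_outputs("claude-3-5-sonnet"))  # False: pattern does not match
```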
yera/models/llm_interfaces/aws_bedrock.py ADDED
@@ -0,0 +1,14 @@
+ from collections.abc import Iterator
+
+ from yera.models.data_classes import Message
+ from yera.models.llm_interfaces.base import BaseLLMInterface, TBaseModel
+
+
+ class AwsBedrockLLM(BaseLLMInterface):
+     def chat(self, messages: list[Message], **kwargs) -> Iterator[str]:
+         pass
+
+     def make_struct(
+         self, prompt: str, cls: type[TBaseModel], **kwargs
+     ) -> Iterator[str]:
+         pass