pylogue 0.3__py3-none-any.whl → 0.3.30__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pylogue/embeds.py ADDED
@@ -0,0 +1,32 @@
+ import secrets
+ import time
+ from typing import Optional
+
+ _HTML_CACHE: dict[str, tuple[float, str]] = {}
+ _TTL_SECONDS = 60 * 10
+
+
+ def _purge_expired(now: float) -> None:
+     expired = [key for key, (ts, _) in _HTML_CACHE.items() if now - ts > _TTL_SECONDS]
+     for key in expired:
+         _HTML_CACHE.pop(key, None)
+
+
+ def store_html(html: str) -> str:
+     """Store HTML and return a short-lived token."""
+     now = time.time()
+     _purge_expired(now)
+     token = secrets.token_urlsafe(16)
+     _HTML_CACHE[token] = (now, html)
+     return token
+
+
+ def take_html(token: str) -> Optional[str]:
+     """Retrieve and remove HTML by token."""
+     if not token:
+         return None
+     entry = _HTML_CACHE.pop(token, None)
+     if not entry:
+         return None
+     _, html = entry
+     return html
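The integration module added later in this diff resolves tool results that carry a `_pylogue_html_id` key via `take_html`. A minimal sketch of that round trip, assuming a hypothetical chart-rendering tool (the function name and markup below are illustrative, not part of the package):

```python
from pylogue.embeds import store_html, take_html

def render_chart_tool() -> dict:
    # Hypothetical tool: produce an HTML fragment, cache it for ~10 minutes,
    # and return only a token so the tool result payload stays small.
    fragment = '<div class="chart">...</div>'
    return {"_pylogue_html_id": store_html(fragment)}

payload = render_chart_tool()
html_out = take_html(payload["_pylogue_html_id"])  # one-shot: a second call returns None
```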
@@ -0,0 +1 @@
+ # Integrations package
@@ -0,0 +1,417 @@
+ # Pydantic AI integration for Pylogue
+ import asyncio
+ import copy
+ import html
+ import json
+ import re
+ from typing import Any, Optional
+
+ _TOOL_HTML_RE = re.compile(r'<div class="tool-html">.*?</div>', re.DOTALL)
+ _TAG_RE = re.compile(r"<[^>]+>")
+
+
+ def _sanitize_history_answer(answer: str) -> str:
+     if not isinstance(answer, str) or not answer:
+         return ""
+     text = _TOOL_HTML_RE.sub("Rendered tool output.", answer)
+     text = _TAG_RE.sub("", text)
+     return html.unescape(text).strip()
+
+
+ def _safe_json(value):
+     if value is None:
+         return "{}"
+     if isinstance(value, str):
+         try:
+             return json.dumps(json.loads(value), indent=2, sort_keys=True, ensure_ascii=True)
+         except json.JSONDecodeError:
+             return value
+     try:
+         return json.dumps(value, indent=2, sort_keys=True, ensure_ascii=True)
+     except TypeError:
+         return json.dumps(str(value), indent=2, sort_keys=True, ensure_ascii=True)
+
+
+ def _truncate(text: str, limit: int = 100) -> str:
+     if not isinstance(text, str):
+         return ""
+     return text if len(text) <= limit else f"{text[:limit//2]} ... (truncated) ... {text[-limit//2:]}"
+
+
+ def _get_tool_call_id(part):
+     return getattr(part, "tool_call_id", None) or getattr(part, "call_id", None)
+
+
+ def _unwrap_tool_return(part_or_result, messages_module):
+     if isinstance(part_or_result, messages_module.BaseToolReturnPart):
+         return (
+             part_or_result.tool_name,
+             part_or_result.content,
+             part_or_result.tool_call_id,
+         )
+     return None
+
+
+ def _extract_tool_result(event, messages_module):
+     part = getattr(event, "part", None)
+     if part is not None:
+         tool_name = getattr(part, "tool_name", None) or getattr(part, "name", None)
+         result = (
+             getattr(part, "content", None)
+             or getattr(part, "result", None)
+             or getattr(part, "return_value", None)
+             or getattr(part, "value", None)
+         )
+         call_id = _get_tool_call_id(part)
+         return tool_name, result, call_id
+     result = getattr(event, "result", None)
+     tool_name = getattr(event, "tool_name", None)
+     call_id = getattr(event, "tool_call_id", None) or getattr(event, "call_id", None)
+     unwrapped = _unwrap_tool_return(result, messages_module)
+     if unwrapped is not None:
+         tool_name = tool_name or unwrapped[0]
+         result = unwrapped[1]
+         call_id = call_id or unwrapped[2]
+     return tool_name, result, call_id
+
+
+ def _format_tool_result_summary(tool_name: str, args, result):
+     tool_label = html.escape(tool_name or "tool")
+     safe_args = html.escape(_safe_json(args))
+     safe_result = html.escape(_truncate(_safe_json(result)))
+     return (
+         "\n\n"
+         f'<details class="tool-call"><summary>Tool: {tool_label}</summary>'
+         f"<div><strong>Args</strong></div>"
+         f"<pre><code>{safe_args}</code></pre>"
+         f"<div><strong>Result</strong></div>"
+         f"<pre><code>{safe_result}</code></pre></details>\n\n"
+     )
+
+
+ def _safe_dom_id(value: str | None) -> str:
+     if not value:
+         return "tool-status"
+     safe = []
+     for ch in str(value):
+         if ch.isalnum() or ch in {"-", "_"}:
+             safe.append(ch)
+     return "".join(safe) or "tool-status"
+
+
+ def _format_tool_status_running(tool_name: str, args, call_id: str | None):
+     purpose = None
+     if isinstance(args, dict):
+         purpose = args.get("purpose")
+     label = purpose or (tool_name.replace("_", " ").title() if tool_name else "Working")
+     status_id = _safe_dom_id(f"tool-status-{call_id or ''}")
+     safe_label = html.escape(str(label))
+     return (
+         f'<div id="{status_id}" class="tool-status tool-status--running">{safe_label}</div><br />\n\n'
+     )
+
+
+ def _format_tool_status_done(args, call_id: str | None):
+     if isinstance(args, dict):
+         purpose = args.get("purpose")
+         if isinstance(purpose, str) and purpose.strip():
+             safe_label = purpose.strip()
+         else:
+             safe_label = "Completed"
+     else:
+         safe_label = "Completed"
+     status_id = _safe_dom_id(f"tool-status-{call_id or ''}")
+     safe_label_escaped = html.escape(safe_label)
+     return (
+         f'<div class="tool-status-update" data-target-id="{status_id}">'
+         f"{safe_label_escaped}</div><br />\n\n"
+     )
+
+
+ def _resolve_tool_html(result):
+     if isinstance(result, dict) and "_pylogue_html_id" in result:
+         token = result.get("_pylogue_html_id")
+         try:
+             from pylogue.embeds import take_html
+         except Exception:
+             return None
+         return take_html(token)
+     return None
+
+
+ def _should_render_tool_result_raw(tool_name: str | None, result) -> bool:
+     if not isinstance(result, str):
+         return False
+     stripped = result.lstrip()
+     if not stripped.startswith("<"):
+         return False
+     # Allow raw HTML for tool results (e.g., chart renderers).
+     return True
+
+
+ def _wrap_tool_html(result: str) -> str:
+     stripped = result.strip()
+     if stripped.startswith("<div") and stripped.endswith("</div>"):
+         return result
+     return f'<div class="tool-html">{result}</div>'
+
+
+ def _merge_user_into_deps(base_deps, context):
+     user = context.get("user") if isinstance(context, dict) else None
+     if not isinstance(user, dict):
+         return base_deps
+
+     # No baseline deps configured: pass a lightweight mapping as deps.
+     if base_deps is None:
+         return {"pylogue_user": user}
+
+     # Common case for dict-based deps.
+     if isinstance(base_deps, dict):
+         merged = dict(base_deps)
+         merged["pylogue_user"] = user
+         return merged
+
+     # Try to preserve existing deps type while attaching user context.
+     try:
+         merged = copy.copy(base_deps)
+     except Exception:
+         merged = base_deps
+     try:
+         setattr(merged, "pylogue_user", user)
+         return merged
+     except Exception:
+         return base_deps
+
+
+ def _extract_user_from_deps(deps):
+     if isinstance(deps, dict):
+         user = deps.get("pylogue_user")
+     else:
+         user = getattr(deps, "pylogue_user", None)
+     return user if isinstance(user, dict) else None
+
+
+ def _extract_user_from_context(context):
+     if not isinstance(context, dict):
+         return None
+     user = context.get("user")
+     return user if isinstance(user, dict) else None
+
+
+ class PydanticAIResponder:
+     """Streaming responder using Pydantic AI's run_stream_events."""
+     pylogue_instructions = (
+         "You are also a helpful AI assistant integrated with the Pylogue environment."
+         "The environment supports auto injection of html, i.e., if you respond with raw HTML it will be rendered as HTML."
+         "The environment also supports markdown rendering, so you can use markdown syntax for formatting."
+         "Finally the environment supports mermaid diagrams, so you can create diagrams using mermaid syntax with ```mermaid ... ``` blocks."
+         "Always generate (block appropriate) css based colorful mermaid diagrams (e.g., classDef evaporation fill:#add8e6,stroke:#333,stroke-width:2px) when appropriate to illustrate concepts."
+         "also ensure in mermaid blocks you wrap the text with double quotes to avoid syntax errors, and <br> for line breaks instead of \\n"
+         "prefer vertical layouts for flowcharts and sequence diagrams. "
+         "Render math using LaTeX syntax within $$ ... $$ blocks or inline with $ ... $."
+         "when embedding HTML do not wrap it inside ```html ... ``` blocks, just output the raw HTML directly. Do not add <html> or <body> tags."
+         "Just because you can respond with HTML or generate mermaid diagrams does not mean you should always do that. Apart from accuracy of response, your next biggest goal is to save as many tokens as possible while ensuring the response is clear and complete.")
+
+     def __init__(
+         self,
+         agent: Any,
+         agent_deps: Optional[Any] = None,
+         show_tool_details: bool = True,
+     ):
+         self.agent = agent
+         # Preserve any existing system prompt from the agent
+         existing_prompt = getattr(agent, 'system_prompt', None) or ""
+         base_prompt = existing_prompt if isinstance(existing_prompt, str) else ""
+         # Share state on the agent to avoid multiple registrations
+         state = getattr(agent, "_pylogue_prompt_state", None)
+         if state is None:
+             state = {
+                 "base_prompt": base_prompt,
+                 "additional": [],
+             }
+             agent._pylogue_prompt_state = state
+         self._prompt_state = state
+         self._base_agent_deps = agent_deps
+         self.agent_deps = agent_deps
+         self.message_history = None
+         self.show_tool_details = show_tool_details
+         self._active_user = None
+
+         # Register dynamic system prompt function once per agent
+         if not getattr(agent, "_pylogue_prompt_registered", False):
+             @self.agent.system_prompt
+             def custom_instructions(ctx) -> str:
+                 user = _extract_user_from_deps(getattr(ctx, "deps", None)) or self._active_user
+                 return self._compose_system_prompt(user)
+
+             agent._pylogue_prompt_registered = True
+
+     def append_instructions(self, additional_instructions: str) -> None:
+         """Append additional instructions to the agent's system prompt."""
+         if additional_instructions:
+             self._prompt_state["additional"].append(additional_instructions)
+
+     def _compose_system_prompt(self, user: Optional[dict] = None) -> str:
+         segments = []
+         if self._prompt_state.get("base_prompt"):
+             segments.append(self._prompt_state["base_prompt"])
+         segments.append(self.pylogue_instructions)
+         if isinstance(user, dict):
+             display_name = user.get("display_name") or user.get("name")
+             email = user.get("email")
+             user_parts = []
+             if display_name:
+                 user_parts.append(f"name={display_name}")
+             if email:
+                 user_parts.append(f"email={email}")
+             if user_parts:
+                 segments.append(
+                     "Authenticated user profile (source of truth): "
+                     + ", ".join(user_parts)
+                     + ". Use this identity when the user asks who they are or asks for personalization."
+                 )
+         if self._prompt_state["additional"]:
+             segments.extend(self._prompt_state["additional"])
+         return "\n\n".join(segments)
+
+     def get_export_state(self) -> dict:
+         """Return exportable system instruction state."""
+         return {
+             "prompt_state": {
+                 "base_prompt": self._prompt_state.get("base_prompt", ""),
+                 "additional": list(self._prompt_state.get("additional", [])),
+             },
+             "system_prompt": self._compose_system_prompt(),
+         }
+
+     def load_state(self, meta: dict) -> None:
+         """Restore system instruction state from exported metadata."""
+         if not isinstance(meta, dict):
+             return
+         prompt_state = meta.get("prompt_state") if isinstance(meta.get("prompt_state"), dict) else {}
+         if "base_prompt" in prompt_state:
+             self._prompt_state["base_prompt"] = prompt_state.get("base_prompt") or ""
+         if "additional" in prompt_state and isinstance(prompt_state.get("additional"), list):
+             self._prompt_state["additional"] = list(prompt_state.get("additional", []))
+         elif isinstance(meta.get("system_prompt"), str):
+             self._prompt_state["additional"] = [meta["system_prompt"]]
+
+     def load_history(self, cards, context=None) -> None:
+         """Load conversation history from Pylogue cards."""
+         try:
+             from pydantic_ai import messages as pai_messages
+         except Exception:
+             return
+         history = []
+         user = _extract_user_from_context(context)
+         system_prompt = self._compose_system_prompt(user=user)
+         if system_prompt:
+             history.append(
+                 pai_messages.ModelRequest(
+                     parts=[pai_messages.SystemPromptPart(content=system_prompt)]
+                 )
+             )
+         for card in cards or []:
+             question = card.get("question")
+             answer = card.get("answer")
+             if question is not None:
+                 history.append(
+                     pai_messages.ModelRequest(
+                         parts=[pai_messages.UserPromptPart(content=str(question))]
+                     )
+                 )
+             answer_text = card.get("answer_text") if isinstance(card, dict) else None
+             if answer_text is None:
+                 answer_text = answer
+             answer_text = _sanitize_history_answer(answer_text)
+             if answer_text:
+                 history.append(
+                     pai_messages.ModelResponse(
+                         parts=[pai_messages.TextPart(content=str(answer_text))]
+                     )
+                 )
+         self.message_history = history
+
+     def set_context(self, context=None) -> None:
+         user = _extract_user_from_context(context)
+         self._active_user = user
+         self.agent_deps = _merge_user_into_deps(self._base_agent_deps, {"user": user} if user else None)
+
+     async def __call__(self, text: str, context=None):
+         from pydantic_ai import messages
+         from pydantic_ai.run import AgentRunResultEvent
+
+         pending_tool_calls = {}
+         tool_call_counter = 0
+
+         # Keep deps up to date for this request context.
+         self.set_context(context)
+
+         async for event in self.agent.run_stream_events(
+             text,
+             message_history=self.message_history,
+             deps=self.agent_deps,
+         ):
+             kind = getattr(event, "event_kind", "")
+
+             if kind == "part_start" and isinstance(event.part, messages.TextPart):
+                 if event.part.content:
+                     yield event.part.content
+                 continue
+
+             if kind == "part_delta" and isinstance(event.delta, messages.TextPartDelta):
+                 if event.delta.content_delta:
+                     yield event.delta.content_delta
+                 continue
+
+             if kind == "function_tool_call":
+                 part = event.part
+                 tool_call_counter += 1
+                 call_id = _get_tool_call_id(part) or f"tool-{tool_call_counter}"
+                 pending_tool_calls[call_id] = (part.tool_name, part.args)
+                 if not self.show_tool_details:
+                     yield _format_tool_status_running(part.tool_name, part.args, call_id)
+                 await asyncio.sleep(0)
+                 continue
+
+             if kind == "builtin_tool_call":
+                 part = event.part
+                 tool_call_counter += 1
+                 call_id = _get_tool_call_id(part) or f"tool-{tool_call_counter}"
+                 pending_tool_calls[call_id] = (part.tool_name, part.args)
+                 if not self.show_tool_details:
+                     yield _format_tool_status_running(part.tool_name, part.args, call_id)
+                 await asyncio.sleep(0)
+                 continue
+
+             if kind in {
+                 "function_tool_result",
+                 "builtin_tool_result",
+                 "tool_result",
+                 "function_tool_return",
+                 "builtin_tool_return",
+                 "tool_return",
+             }:
+                 tool_name, result, call_id = _extract_tool_result(event, messages)
+                 if call_id in pending_tool_calls:
+                     tool_name, args = pending_tool_calls.pop(call_id)
+                 else:
+                     args = None
+                 if tool_name or args or result:
+                     resolved_html = _resolve_tool_html(result)
+                     if not self.show_tool_details:
+                         yield _format_tool_status_done(args, call_id)
+                     if resolved_html:
+                         yield _wrap_tool_html(resolved_html)
+                     elif _should_render_tool_result_raw(tool_name, result):
+                         yield _wrap_tool_html(result)
+                     elif self.show_tool_details:
+                         yield _format_tool_result_summary(tool_name, args, result)
+                 await asyncio.sleep(0)
+                 continue
+
+             if isinstance(event, AgentRunResultEvent):
+                 self.message_history = event.result.all_messages()
+         if pending_tool_calls:
+             for tool_name, args in pending_tool_calls.values():
+                 yield _format_tool_result_summary(tool_name, args, None)
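A wiring sketch for the responder, assuming the file above lands at `pylogue/integrations/pydantic_ai.py` (the import path, model identifier, and prompt below are placeholders, not taken from this diff):

```python
import asyncio

from pydantic_ai import Agent
from pylogue.integrations.pydantic_ai import PydanticAIResponder  # assumed module path

agent = Agent("openai:gpt-4o")  # placeholder model identifier
responder = PydanticAIResponder(agent, show_tool_details=True)
responder.append_instructions("Keep answers to one short paragraph.")

async def main() -> None:
    # The responder is an async generator: it yields text deltas plus small
    # HTML fragments for tool status and summaries as events stream in.
    async for chunk in responder("What changed in this release?", context={"user": {"name": "Ada"}}):
        print(chunk, end="", flush=True)

asyncio.run(main())
```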
@@ -0,0 +1,112 @@
+ # AUTOGENERATED! DO NOT EDIT! File to edit: ../../nbs/0-Card.ipynb.
+
+ # %% auto 0
+ __all__ = ['CHAT_DIV_ID', 'ChatCard', 'render_chat_list', 'mk_inp']
+
+ # %% ../../nbs/0-Card.ipynb 2
+ from fasthtml.common import *
+ from monsterui.all import TextPresets
+
+ # %% ../../nbs/0-Card.ipynb 4
+ class ChatCard:
+     def __init__(
+         self,
+         user_emoji: str = "🗣️",
+         assistant_emoji: str = "🕵️‍♂️",
+         user_align: str = "right",
+         assistant_align: str = "left",
+         user_self_align: str = "flex-end",
+         assistant_self_align: str = "flex-start",
+         content_white_space: str = "normal",
+         spinner_class: str = "spinner",
+     ):
+         self.emojis = {"User": user_emoji, "Assistant": assistant_emoji}
+
+         self.alignments = {
+             "User": {"text": user_align, "self": user_self_align},
+             "Assistant": {"text": assistant_align, "self": assistant_self_align},
+         }
+
+         self.content_white_space = content_white_space
+         self.spinner_class = spinner_class
+
+     def get_mobile_styles(self) -> str:
+         """Return mobile CSS handled in the renderer styles."""
+         return ""
+
+     def render(self, data: dict):
+         """
+         Render a chat card with the given data.
+
+         Args:
+             data: Dictionary containing:
+                 - role: "User" or "Assistant"
+                 - content: Message content (optional)
+                 - pending: Boolean indicating if message is loading (optional)
+
+         Returns:
+             FastHTML Div element representing the chat card
+         """
+         role = data["role"]
+         content = data.get("content", "")
+         pending = data.get("pending", False)
+
+         emoji = self.emojis[role]
+         align = self.alignments[role]
+         row_cls = "chat-row chat-row--user" if role == "User" else "chat-row chat-row--assistant"
+         bubble_cls = "chat-bubble chat-bubble--user" if role == "User" else "chat-bubble chat-bubble--assistant"
+
+         if pending:
+             spinner = Span(cls=self.spinner_class)
+             return Div(
+                 Div(
+                     Span(f"{emoji} {role}", cls=("chat-role", TextPresets.muted_sm)),
+                     Div(spinner, cls="chat-text"),
+                     cls=bubble_cls,
+                     style=f"text-align: {align['text']};",
+                 ),
+                 cls=row_cls,
+                 style=f"justify-content: {align['self']};",
+             )
+
+         # Add 'marked' class to enable markdown rendering
+         return Div(
+             Div(
+                 Span(f"{emoji} {role}", cls=("chat-role", TextPresets.muted_sm)),
+                 Div(
+                     content,
+                     cls="marked chat-text",
+                     style=f"white-space: {self.content_white_space};",
+                 ),
+                 cls=bubble_cls,
+                 style=f"text-align: {align['text']};",
+             ),
+             cls=row_cls,
+             style=f"justify-content: {align['self']};",
+         )
+
+     def __call__(self, data: dict):
+         """Make the class callable like the original function."""
+         return self.render(data)
+
+ # %% ../../nbs/0-Card.ipynb 7
+ CHAT_DIV_ID = "chat-cards"
+
+
+ def render_chat_list(messages: List[Dict[str, str]], chat_card: ChatCard = None):
+     chat_card = chat_card or ChatCard()
+     return Div(
+         *[chat_card(m) for m in messages],
+         id=CHAT_DIV_ID,
+         cls="chat-stream",
+     )
+
+ # %% ../../nbs/0-Card.ipynb 9
+ def mk_inp():
+     return Input(
+         id="msg",
+         name="msg",
+         placeholder="Type a message...",
+         autofocus=True,
+         cls="uk-input chat-input-field",
+     )
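A small usage sketch for the card helpers, assuming they are importable as `pylogue.cards` (consistent with the `from .cards import ...` line in the chat diff below); the messages are placeholders:

```python
from pylogue.cards import ChatCard, render_chat_list, mk_inp  # assumed import path

card = ChatCard(user_emoji="🙋", assistant_emoji="🤖")

# A finished assistant bubble and a pending one that shows the spinner.
done = card({"role": "Assistant", "content": "Here is the **root cause** summary."})
loading = card({"role": "Assistant", "pending": True})

# The full chat column (id="chat-cards") plus the input field used by the websocket form.
chat = render_chat_list(
    [{"role": "User", "content": "Why did shipments slip last week?"}],
    chat_card=card,
)
inp = mk_inp()
```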
@@ -5,7 +5,12 @@ __all__ = ['spinner_style', 'user_messages', 'echo_responder', 'get_initial_mess
 
  # %% ../../nbs/1-Chat.ipynb 1
  from fasthtml.common import *
- from .cards import ChatCard, render_chat_list, mk_inp
+ import asyncio
+ import inspect
+ from monsterui.all import Theme, Container, ContainerT, Card, CardT, TextPresets, Button, ButtonT
+ from .cards import render_chat_list, mk_inp
+ from .design_system import get_color
+ from .renderer import ChatRenderer
 
  # %% ../../nbs/1-Chat.ipynb 2
  async def echo_responder(text: str) -> str:
@@ -15,20 +20,20 @@ async def echo_responder(text: str) -> str:
 
 
  spinner_style = Style(
-     """
-     .spinner {
+     f"""
+     .spinner {{
          display: inline-block;
          width: 20px;
          height: 20px;
-         border: 3px solid rgba(0, 0, 0, 0.1);
-         border-top-color: #333;
+         border: 3px solid {get_color("spinner_light")};
+         border-top-color: {get_color("light_text")};
          border-radius: 50%;
          animation: spin 1s linear infinite;
-     }
-
-     @keyframes spin {
-         to { transform: rotate(360deg); }
-     }
+     }}
+
+     @keyframes spin {{
+         to {{ transform: rotate(360deg); }}
+     }}
      """
  )
 
@@ -57,34 +62,56 @@ def on_disconn(ws):
 
 
  def create_chat_app(rt, responder=None):
-     app = FastHTML(
-         exts="ws",
-         hdrs=(
+     renderer = ChatRenderer()
+     headers = list(Theme.blue.headers())
+     headers.extend(
+         [
+             Link(rel="preconnect", href="https://fonts.googleapis.com"),
+             Link(rel="preconnect", href="https://fonts.gstatic.com", crossorigin="anonymous"),
+             Link(
+                 rel="stylesheet",
+                 href="https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@400;500;600;700&display=swap",
+             ),
+         ]
+     )
+     headers.extend(
+         [
              MarkdownJS(),
              HighlightJS(langs=["python", "javascript", "html", "css"]),
              spinner_style,
-             confirm_script,
-         ),
+             Style(renderer.get_styles()),
+         ]
      )
+     app = FastHTML(exts="ws", hdrs=tuple(headers))
      rt = app.route
      responder = echo_responder if responder is None else responder
-
      @rt("/")
      def home():
          return (
              Title("Supply Chain Analyst Chat"),
-             Div(
-                 H1("Supply Chain RCA", style="text-align: center; padding: 1em;"),
-                 render_chat_list(get_initial_messages()),
-                 Form(
-                     mk_inp(),
-                     id="form",
-                     ws_send=True,
-                     style="display: flex; justify-content: center; margin-top: 20px; padding: 20px;",
+             Meta(name="viewport", content="width=device-width, initial-scale=1.0"),
+             Body(
+                 Container(
+                     Card(
+                         Div(render_chat_list(get_initial_messages()), cls="chat-scroll"),
+                         footer=Form(
+                             mk_inp(),
+                             Button("Send", cls=("chat-send", ButtonT.primary), type="submit"),
+                             id="form",
+                             ws_send=True,
+                             cls="chat-form",
+                         ),
+                         header=Div(
+                             H2("Supply Chain RCA", cls="text-2xl font-semibold"),
+                             P("Streaming analysis with a smarter interface.", cls=TextPresets.muted_sm),
+                             cls="space-y-2",
+                         ),
+                         body_cls="space-y-4",
+                         cls=("chat-shell", CardT.default),
+                     ),
+                     cls=("pylogue-container", "mt-10", ContainerT.xl),
                  ),
-                 hx_ext="ws",
-                 ws_connect="/ws",
-                 style=f"font-family: monospace, sans-serif; margin: 0; padding: 0; background: {bg_color}; min-height: 100vh;",
+                 cls="pylogue-app",
              ),
          )
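Finally, a hedged sketch of how the reworked `create_chat_app` might be driven, assuming it returns the FastHTML app it builds (the custom responder and the `serve()` entry point below are illustrative; only the function signature is taken from this diff):

```python
from fasthtml.common import serve
from pylogue.chat import create_chat_app  # assumed module path

async def my_responder(text: str) -> str:
    # Stand-in for a streaming responder such as PydanticAIResponder.
    return f"Echo: {text}"

app = create_chat_app(None, responder=my_responder)  # `rt` is rebound to app.route internally
serve()
```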