llm-webchat 1.0.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (74) hide show
  1. llm_webchat-1.0.0/.github/workflows/ci.yml +43 -0
  2. llm_webchat-1.0.0/.gitignore +9 -0
  3. llm_webchat-1.0.0/AGENTS.md +33 -0
  4. llm_webchat-1.0.0/LICENSE +7 -0
  5. llm_webchat-1.0.0/PKG-INFO +77 -0
  6. llm_webchat-1.0.0/README.md +56 -0
  7. llm_webchat-1.0.0/docs/architecture.md +88 -0
  8. llm_webchat-1.0.0/extension_examples/conversation_search/README.md +16 -0
  9. llm_webchat-1.0.0/extension_examples/conversation_search/pyproject.toml +19 -0
  10. llm_webchat-1.0.0/extension_examples/conversation_search/src/llm_webchat_search/__init__.py +148 -0
  11. llm_webchat-1.0.0/extension_examples/conversation_search/src/llm_webchat_search/static/search.js +272 -0
  12. llm_webchat-1.0.0/extension_examples/custom_notifications/README.md +14 -0
  13. llm_webchat-1.0.0/extension_examples/custom_notifications/pyproject.toml +19 -0
  14. llm_webchat-1.0.0/extension_examples/custom_notifications/src/llm_webchat_notifications/__init__.py +131 -0
  15. llm_webchat-1.0.0/extension_examples/custom_notifications/src/llm_webchat_notifications/static/notifications.js +66 -0
  16. llm_webchat-1.0.0/extension_examples/cyberpunk_style_override/README.md +17 -0
  17. llm_webchat-1.0.0/extension_examples/cyberpunk_style_override/cyberpunk-loader.js +6 -0
  18. llm_webchat-1.0.0/extension_examples/cyberpunk_style_override/cyberpunk.css +364 -0
  19. llm_webchat-1.0.0/extension_examples/markdown_export_with_react/README.md +22 -0
  20. llm_webchat-1.0.0/extension_examples/markdown_export_with_react/markdown-export.js +264 -0
  21. llm_webchat-1.0.0/frontend/.prettierrc +8 -0
  22. llm_webchat-1.0.0/frontend/eslint.config.js +10 -0
  23. llm_webchat-1.0.0/frontend/index.html +13 -0
  24. llm_webchat-1.0.0/frontend/media/favicon.ico +0 -0
  25. llm_webchat-1.0.0/frontend/media/logo_raw.png +0 -0
  26. llm_webchat-1.0.0/frontend/package-lock.json +3453 -0
  27. llm_webchat-1.0.0/frontend/package.json +35 -0
  28. llm_webchat-1.0.0/frontend/src/hooks.ts +135 -0
  29. llm_webchat-1.0.0/frontend/src/index.ts +30 -0
  30. llm_webchat-1.0.0/frontend/src/lint-and-format.test.ts +26 -0
  31. llm_webchat-1.0.0/frontend/src/llm-api.ts +72 -0
  32. llm_webchat-1.0.0/frontend/src/markdown.ts +12 -0
  33. llm_webchat-1.0.0/frontend/src/models/Conversation.ts +118 -0
  34. llm_webchat-1.0.0/frontend/src/models/Model.ts +50 -0
  35. llm_webchat-1.0.0/frontend/src/models/Response.ts +99 -0
  36. llm_webchat-1.0.0/frontend/src/models/StreamingMessage.ts +179 -0
  37. llm_webchat-1.0.0/frontend/src/navigation.ts +18 -0
  38. llm_webchat-1.0.0/frontend/src/slots.ts +19 -0
  39. llm_webchat-1.0.0/frontend/src/style.css +250 -0
  40. llm_webchat-1.0.0/frontend/src/views/App.ts +40 -0
  41. llm_webchat-1.0.0/frontend/src/views/ConversationSelector.ts +97 -0
  42. llm_webchat-1.0.0/frontend/src/views/EmptySlot.ts +25 -0
  43. llm_webchat-1.0.0/frontend/src/views/MessageInput.ts +135 -0
  44. llm_webchat-1.0.0/frontend/src/views/MessageList.ts +396 -0
  45. llm_webchat-1.0.0/frontend/src/views/ModelSelector.ts +52 -0
  46. llm_webchat-1.0.0/frontend/src/views/NewConversation.ts +197 -0
  47. llm_webchat-1.0.0/frontend/src/views/Sidebar.ts +57 -0
  48. llm_webchat-1.0.0/frontend/src/views/Spinner.ts +15 -0
  49. llm_webchat-1.0.0/frontend/tsconfig.json +16 -0
  50. llm_webchat-1.0.0/frontend/vite.config.ts +18 -0
  51. llm_webchat-1.0.0/pyproject.toml +71 -0
  52. llm_webchat-1.0.0/src/llm_webchat/__init__.py +23 -0
  53. llm_webchat-1.0.0/src/llm_webchat/config.py +69 -0
  54. llm_webchat-1.0.0/src/llm_webchat/database.py +101 -0
  55. llm_webchat-1.0.0/src/llm_webchat/event_queues.py +86 -0
  56. llm_webchat-1.0.0/src/llm_webchat/events.py +17 -0
  57. llm_webchat-1.0.0/src/llm_webchat/hookspecs.py +34 -0
  58. llm_webchat-1.0.0/src/llm_webchat/models.py +65 -0
  59. llm_webchat-1.0.0/src/llm_webchat/plugins.py +13 -0
  60. llm_webchat-1.0.0/src/llm_webchat/server.py +323 -0
  61. llm_webchat-1.0.0/src/llm_webchat/static/assets/index-BngXOCG-.js +60 -0
  62. llm_webchat-1.0.0/src/llm_webchat/static/assets/index-DcMp03fU.css +1 -0
  63. llm_webchat-1.0.0/src/llm_webchat/static/favicon.ico +0 -0
  64. llm_webchat-1.0.0/src/llm_webchat/static/index.html +14 -0
  65. llm_webchat-1.0.0/src/llm_webchat/static/logo_raw.png +0 -0
  66. llm_webchat-1.0.0/tests/__init__.py +0 -0
  67. llm_webchat-1.0.0/tests/conftest.py +35 -0
  68. llm_webchat-1.0.0/tests/helpers.py +38 -0
  69. llm_webchat-1.0.0/tests/test_api.py +566 -0
  70. llm_webchat-1.0.0/tests/test_database.py +250 -0
  71. llm_webchat-1.0.0/tests/test_event_queues.py +291 -0
  72. llm_webchat-1.0.0/tests/test_hookspecs.py +90 -0
  73. llm_webchat-1.0.0/tests/test_lint_and_format.py +25 -0
  74. llm_webchat-1.0.0/uv.lock +903 -0
@@ -0,0 +1,43 @@
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+
6
+ jobs:
7
+ backend:
8
+ name: Backend Tests
9
+ runs-on: ubuntu-latest
10
+ steps:
11
+ - uses: actions/checkout@v4
12
+ - uses: astral-sh/setup-uv@v5
13
+ with:
14
+ version: "latest"
15
+ - uses: actions/setup-python@v5
16
+ with:
17
+ python-version: "3.12"
18
+ - name: Install dependencies
19
+ run: uv sync
20
+ - name: Lint
21
+ run: uv run ruff check .
22
+ - name: Run tests
23
+ run: uv run pytest
24
+
25
+ frontend:
26
+ name: Frontend Tests
27
+ runs-on: ubuntu-latest
28
+ defaults:
29
+ run:
30
+ working-directory: frontend
31
+ steps:
32
+ - uses: actions/checkout@v4
33
+ - uses: actions/setup-node@v4
34
+ with:
35
+ node-version: "22"
36
+ cache: "npm"
37
+ cache-dependency-path: frontend/package-lock.json
38
+ - name: Install dependencies
39
+ run: npm ci
40
+ - name: Lint
41
+ run: npm run lint
42
+ - name: Run tests
43
+ run: npm test
@@ -0,0 +1,9 @@
1
+ __pycache__/
2
+ *.pyc
3
+
4
+ # Frontend build output (regenerated by `npm run build` in frontend/)
5
+ src/llm_webchat/static/
6
+
7
+ # Frontend dependencies
8
+ frontend/node_modules/
9
+ frontend/dist/
@@ -0,0 +1,33 @@
1
+ # llm-webchat
2
+
3
+ See README.md for context and the vision for the project.
4
+
5
+ ## Coding style guide
6
+
7
+ - Prefer functional, stateless logic as much as possible.
8
+ - Use immutable data structures as much as possible.
9
+ - Do not use abbreviations in variable (class, function, ...) names. It's fine for names to be somewhat verbose.
10
+ - Omit docstrings and comments if they don't add any value beyond what can be obviously inferred from the function signature / class name.
11
+ - Do not throw builtin errors; always replace them with dedicated error subclasses.
12
+ - Make sure to use up-to-date versions of all libraries.
13
+
14
+ ### Python
15
+
16
+ - Use modern typed Python code (assume pyre check).
17
+ - Use uv for python project management.
18
+ - Use frozen pydantic models for most classes.
19
+ - Use pytest for testing.
20
+ - Always place imports at the top level.
21
+ - Avoid async code when possible; prefer synchronous implementations.
22
+ - When done, validate your changes by running `uv run pytest`.
23
+
24
+ ### Typescript
25
+
26
+ - Use modern Typescript code.
27
+ - When done, validate your changes by running `npm run lint` and `npm test`.
28
+ - Use async / await instead of raw Promise primitives where possible.
29
+
30
+ ### CSS
31
+
32
+ - Add semantic class names to components to make restyling easy.
33
+ - Make most important values part of `@theme` as variables that can be referenced by extension authors.
@@ -0,0 +1,7 @@
1
+ Copyright 2026 Imbue, Inc.
2
+
3
+ Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4
+
5
+ The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6
+
7
+ THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -0,0 +1,77 @@
1
+ Metadata-Version: 2.4
2
+ Name: llm-webchat
3
+ Version: 1.0.0
4
+ Summary: A web chat interface plugin for llm
5
+ Project-URL: Homepage, https://github.com/imbue-ai/llm-webchat
6
+ Project-URL: Repository, https://github.com/imbue-ai/llm-webchat
7
+ Project-URL: Issues, https://github.com/imbue-ai/llm-webchat/issues
8
+ Project-URL: Changelog, https://github.com/imbue-ai/llm-webchat/releases
9
+ Author-email: Hynek Urban <hynek@imbue.com>
10
+ License: MIT
11
+ License-File: LICENSE
12
+ Requires-Python: >=3.10
13
+ Requires-Dist: fastapi>=0.115
14
+ Requires-Dist: llm>=0.28
15
+ Requires-Dist: pluggy>=1.5
16
+ Requires-Dist: pydantic-settings>=2.13.1
17
+ Requires-Dist: sqlite-utils>=3.39
18
+ Requires-Dist: starlette>=0.45
19
+ Requires-Dist: uvicorn>=0.34
20
+ Description-Content-Type: text/markdown
21
+
22
+ # LLM Webchat
23
+
24
+ A plugin for the [LLM](https://github.com/simonw/llm) tool.
25
+ When installed, running `llm webchat` starts a local webserver.
26
+ Visiting its address in the browser lets you see the
27
+ conversations in your `llm` database and chat with supported
28
+ language models.
29
+
30
+ The appearance and functionality of `llm-webchat` is heavily
31
+ customizable - styles, frontend appearance and behavior and even
32
+ the backend logic.
33
+
34
+ ## Quickstart
35
+
36
+ ```bash
37
+ llm install llm-webchat
38
+ llm webchat
39
+ ```
40
+
41
+ ## Development
42
+
43
+ ### Prerequisites
44
+
45
+ - Python 3.10+ with [uv](https://github.com/astral-sh/uv)
46
+ - Node.js 18+
47
+
48
+ ### Building and running
49
+
50
+ Build the frontend (output goes to `src/llm_webchat/static/`):
51
+
52
+ ```bash
53
+ cd frontend
54
+ npm install
55
+ npm run build
56
+ ```
57
+
58
+ Run the backend (serves the built frontend at `/`):
59
+
60
+ ```bash
61
+ uv run llm-webchat
62
+ ```
63
+
64
+ For frontend development with hot reload (proxies `/api` requests to the backend):
65
+
66
+ ```bash
67
+ cd frontend
68
+ npm run dev
69
+ ```
70
+
71
+ ### Running tests
72
+
73
+ ```bash
74
+ uv run pytest
75
+ cd frontend
76
+ npm test
77
+ ```
@@ -0,0 +1,56 @@
1
+ # LLM Webchat
2
+
3
+ A plugin for the [LLM](https://github.com/simonw/llm) tool.
4
+ When installed, running `llm webchat` starts a local webserver.
5
+ Visiting its address in the browser lets you see the
6
+ conversations in your `llm` database and chat with supported
7
+ language models.
8
+
9
+ The appearance and functionality of `llm-webchat` is heavily
10
+ customizable - styles, frontend appearance and behavior and even
11
+ the backend logic.
12
+
13
+ ## Quickstart
14
+
15
+ ```bash
16
+ llm install llm-webchat
17
+ llm webchat
18
+ ```
19
+
20
+ ## Development
21
+
22
+ ### Prerequisites
23
+
24
+ - Python 3.10+ with [uv](https://github.com/astral-sh/uv)
25
+ - Node.js 18+
26
+
27
+ ### Building and running
28
+
29
+ Build the frontend (output goes to `src/llm_webchat/static/`):
30
+
31
+ ```bash
32
+ cd frontend
33
+ npm install
34
+ npm run build
35
+ ```
36
+
37
+ Run the backend (serves the built frontend at `/`):
38
+
39
+ ```bash
40
+ uv run llm-webchat
41
+ ```
42
+
43
+ For frontend development with hot reload (proxies `/api` requests to the backend):
44
+
45
+ ```bash
46
+ cd frontend
47
+ npm run dev
48
+ ```
49
+
50
+ ### Running tests
51
+
52
+ ```bash
53
+ uv run pytest
54
+ cd frontend
55
+ npm test
56
+ ```
@@ -0,0 +1,88 @@
1
+ # LLM Webchat architecture
2
+
3
+ There is a backend in Python and a frontend written in Typescript. The
4
+ PyPI package contains compiled static frontend assets so users
5
+ don't need to have node or typescript installed at all.
6
+
7
+ ## Backend
8
+
9
+ Backend uses fastapi + uvicorn. It provides the following endpoints:
10
+
11
+ - GET "/api/models" to list all available LLM models
12
+ - GET "/api/conversations" to list the most recent conversations
13
+ - accepts the `?count=` parameter, by default `count=10`.
14
+ - GET "/api/conversations/:id/responses" to get all responses in a given conversation
15
+ - POST "/api/conversations/" to create a conversation (requires `name` and `model` in the request body)
16
+ - POST "/api/conversations/:id/" to send a new user message to an existing conversation (requires `message` and `model` in the request body).
17
+ - GET /api/conversations/:id/stream get a stream of events in
18
+ a given conversation. There are several kinds of events:
19
+ - `user_message`: user message that arrived
20
+ - `message_start`, `message_delta`, `message_end`: to stream the current LLM response
21
+ - `error`: to provide details about potential errors
22
+
23
+ Static files are served from `/static`. The only exception is `index.html` which is served from the root.
24
+
25
+
26
+ The following environment variables are recognized:
27
+
28
+ - `LLM_WEBCHAT_CONVERSATION_IDS`: a comma-separated list. If provided, only these conversations are ever returned.
29
+ - `LLM_WEBCHAT_JAVASCRIPT_PLUGINS`: a comma separated list of .js files containing frontend plugins (see below).
30
+ - `LLM_WEBCHAT_STATIC_PATHS`: a comma separated list of additional paths to resources that should be served from under `/static`. Files and directories are both accepted.
31
+ - `LLM_WEBCHAT_HOST`: the host address the server binds to. Defaults to `127.0.0.1`.
32
+ - `LLM_WEBCHAT_PORT`: the port the server listens on. Defaults to `8000`.
33
+
34
+ The web server can be extended using [pluggy](https://github.com/pytest-dev/pluggy) by providing implementations of the following hookspecs (defined in `llm_webchat.hookspecs`):
35
+
36
+ - `endpoint(app)` — Register additional endpoints (including static file routes) on the FastAPI application.
37
+ - `register_event_broadcaster(broadcaster)` — Receive a reference to the event broadcaster callable. The broadcaster has the signature `(conversation_id: str, event: dict[str, str]) -> None` and can be stored and called at any time to inject custom events into the SSE stream for a given conversation.
38
+
39
+
40
+ ## Frontend
41
+
42
+ Frontend is written in Typescript, using mithril.js and
43
+ tailwind. Its appearance and functionality should be familiar to
44
+ users of other well-known chat-based web AI assistants.
45
+
46
+ Both the appearance and functionality are highly customizable
47
+ using a framework-agnostic plugin system. Plugins can run
48
+ arbitrary javascript making arbitrary changes to the page. To
49
+ make this easier, the default DOM will contain certain stable
50
+ elements / containers with well-known data attributes which plugins
51
+ can use as anchor points:
52
+
53
+ - `<div data-slot="header">` — the top header bar
54
+ - `<div data-slot="header-actions">` — empty container inside the header, right-aligned (for buttons, indicators)
55
+ - `<div data-slot="sidebar">` — the full sidebar
56
+ - `<div data-slot="sidebar-header">` — the sidebar title, collapse button, and "New Conversation" button
57
+ - `<div data-slot="sidebar-before-list">` — empty container between the sidebar header and the conversation list (for search, filters)
58
+ - `<div data-slot="conversation-selector-item">` — an individual conversation entry in the sidebar
59
+ - `<div data-slot="conversation-after-header">` — empty container below the header, above the message list (for banners, breadcrumbs)
60
+ - `<div data-slot="conversation-content">` — the scrollable message list area
61
+ - `<div data-slot="conversation-before-input">` — empty container inside the footer, above the message input (for toolbars, attachments)
62
+ - `<div data-slot="conversation-footer">` — the footer containing the message input
63
+ - `<div data-slot="new-conversation-content">` — the new conversation form area
64
+ - `<div data-slot="new-conversation-footer">` — the footer on the new conversation screen
65
+ - `<div data-slot="message" data-message-id="...">` — an individual assistant message
66
+
67
+ Slots marked "empty" render as empty `<div>` elements by default and exist purely as extension points — plugins can claim them and inject content without replacing any built-in UI.
68
+
69
+ Furthermore, there's a global "$llm" object that can be used to:
70
+
71
+ - Claim ownership of a specific component type:
72
+ - `$llm.claim("header")` (or `"sidebar"`, `"content"`, `"message"`, ...)
73
+ - When claimed, only the component container is rendered by the core loop (contents are expected to be provided by the plugin).
74
+ - Returns true when the claim succeeded, false otherwise (e.g. when already claimed by another plugin).
75
+ - (This is to prevent conflicts between the renders done by the core mithril.js loop and the renders done by the plugin.)
76
+ - Get specific parts of the current page state:
77
+ - `$llm.getMessage(messageId)`
78
+ - `$llm.getConversations()`
79
+ - `$llm.getConversation(conversationId)`
80
+ - `$llm.getModels()`
81
+ - Register for certain events:
82
+ - `$llm.on("ready")` - when the main APP is initialized
83
+ - `$llm.on("get_conversations")` - when a response to `GET /api/conversations` arrives.
84
+ - `$llm.on("get_conversation")`
85
+ - `$llm.on("post_conversation")`
86
+ - `$llm.on("post_conversation_message")`
87
+ - `$llm.on("get_message")`
88
+ - `$llm.on("stream_event")`
@@ -0,0 +1,16 @@
1
+ # Conversation Search
2
+
3
+ Adds full-text search across all conversation messages. Demonstrates:
4
+
5
+ - A custom backend endpoint (`GET /api/search?q=...`).
6
+ - A vanilla JS frontend plugin that adds a search box to the sidebar.
7
+ - Navigating to a specific response within a conversation via anchor links.
8
+
9
+ ## Installation and usage
10
+
11
+ From the repository root:
12
+
13
+ ```bash
14
+ uv pip install -e extension_examples/conversation_search
15
+ llm webchat-with-search
16
+ ```
@@ -0,0 +1,19 @@
1
+ [project]
2
+ name = "llm-webchat-search"
3
+ version = "0.1.0"
4
+ description = "Full-text conversation search for llm-webchat"
5
+ requires-python = ">=3.10"
6
+ license = { text = "MIT" }
7
+ dependencies = [
8
+ "llm-webchat",
9
+ ]
10
+
11
+ [project.entry-points.llm]
12
+ webchat-search = "llm_webchat_search"
13
+
14
+ [build-system]
15
+ requires = ["hatchling"]
16
+ build-backend = "hatchling.build"
17
+
18
+ [tool.hatch.build.targets.wheel]
19
+ packages = ["src/llm_webchat_search"]
@@ -0,0 +1,148 @@
1
from __future__ import annotations

import os
from dataclasses import dataclass
from pathlib import Path

import click
import llm
import uvicorn
from fastapi import Query
from fastapi import Request
from fastapi.responses import JSONResponse

from llm_webchat.hookspecs import hookimpl
14
+
15
# Directory bundled with this package that holds the frontend plugin script
# (search.js) handed to llm-webchat via LLM_WEBCHAT_JAVASCRIPT_PLUGINS.
STATIC_DIRECTORY = Path(__file__).parent / "static"

# Characters of surrounding context kept on each side of a match when
# building search-result snippets.
SNIPPET_CONTEXT_LENGTH = 80
18
+
19
+
20
@dataclass(frozen=True)
class SearchResult:
    """One full-text search hit inside a conversation response.

    Mirrors the dict payload returned by ``GET /api/search``: which
    conversation and response matched, a snippet of text around the match,
    and which field ("prompt" or "response") contained it.

    A frozen dataclass replaces the hand-written ``__init__`` boilerplate,
    matching the project convention of immutable model classes, and gains
    value equality and a useful ``repr`` for free. The constructor accepts
    the same arguments in the same order as before.
    """

    conversation_id: str
    conversation_name: str
    model: str
    response_id: str
    snippet: str
    field: str
36
+
37
+
38
def _extract_snippet(text: str, query: str, context_length: int = SNIPPET_CONTEXT_LENGTH) -> str:
    """Return an excerpt of ``text`` centered on the first case-insensitive
    occurrence of ``query``.

    An ellipsis ("…") marks each side that was truncated. When ``query``
    does not occur in ``text`` at all, the leading ``2 * context_length``
    characters are returned instead (with a trailing ellipsis if the text
    was longer than that).
    """
    match_index = text.lower().find(query.lower())

    if match_index < 0:
        # No match anywhere: fall back to a plain prefix of the text.
        prefix_length = context_length * 2
        trailing = "…" if len(text) > prefix_length else ""
        return text[:prefix_length] + trailing

    window_start = max(0, match_index - context_length)
    window_end = min(len(text), match_index + len(query) + context_length)

    leading = "…" if window_start > 0 else ""
    trailing = "…" if window_end < len(text) else ""
    return leading + text[window_start:window_end] + trailing
55
+
56
+
57
def _search_conversations(query: str, limit: int = 20) -> list[dict[str, str]]:
    """Case-insensitive substring search over all prompts and responses.

    Returns up to ``limit`` result dicts (most recent responses first),
    each carrying the conversation metadata, the matching response id, a
    snippet of text around the match, and which field ("prompt" or
    "response") matched.

    NOTE(review): SQL LIKE wildcards ("%", "_") in ``query`` are not
    escaped, so a query like "100%" matches more broadly than a literal
    substring search would — confirm whether that is intended.
    """
    # Imported at call time rather than module top — presumably to avoid
    # pulling in the webchat database machinery until a search actually runs.
    from llm_webchat.database import open_database

    database = open_database()

    # An llm database that has never recorded a chat may lack these tables;
    # return no results rather than erroring on the SQL below.
    if "responses" not in database.table_names():
        return []
    if "conversations" not in database.table_names():
        return []

    like_pattern = f"%{query}%"

    # NOTE(review): the row["column"] access below assumes open_database()
    # configures a mapping-style row factory on its connection — verify, as
    # a plain sqlite3 cursor would yield tuples here.
    rows = database.execute(
        """
        SELECT
            r.id AS response_id,
            r.conversation_id,
            r.prompt,
            r.response,
            c.name AS conversation_name,
            c.model
        FROM responses r
        JOIN conversations c ON c.id = r.conversation_id
        WHERE r.prompt LIKE ? COLLATE NOCASE
           OR r.response LIKE ? COLLATE NOCASE
        ORDER BY r.datetime_utc DESC
        LIMIT ?
        """,
        [like_pattern, like_pattern, limit],
    ).fetchall()

    results = []
    for row in rows:
        prompt = row["prompt"] or ""
        response = row["response"] or ""

        # Prefer reporting a prompt match; otherwise the row matched (or is
        # assumed to have matched) on the response text.
        if query.lower() in prompt.lower():
            snippet = _extract_snippet(prompt, query)
            field = "prompt"
        else:
            snippet = _extract_snippet(response, query)
            field = "response"

        results.append(
            {
                "conversation_id": row["conversation_id"],
                "conversation_name": row["conversation_name"] or "",
                "model": row["model"] or "",
                "response_id": row["response_id"],
                "snippet": snippet,
                "field": field,
            }
        )

    return results
112
+
113
+
114
class SearchPlugin:
    """llm-webchat pluggy plugin that contributes a full-text search endpoint."""

    @hookimpl
    def endpoint(self, app: object) -> None:
        """Register ``GET /api/search`` on the FastAPI application.

        The route accepts ``q`` (required, non-empty search string) and
        ``limit`` (1–100, default 20) and responds with a JSON body of the
        form ``{"query": ..., "results": [...]}``.
        """

        # Fix: the handler previously declared a `request: Request` parameter
        # that was never used; it has been removed.
        @app.get("/api/search")  # type: ignore[union-attr]
        def search_conversations(
            q: str = Query(..., min_length=1),
            limit: int = Query(default=20, ge=1, le=100),
        ) -> JSONResponse:
            results = _search_conversations(q, limit)
            return JSONResponse(content={"query": q, "results": results})
125
+
126
+
127
@llm.hookimpl
def register_commands(cli: click.Group) -> None:
    """Register the `llm webchat-with-search` command on the llm CLI."""

    @cli.command(name="webchat-with-search")
    def webchat_with_search() -> None:
        """Open a web chat interface with conversation search."""
        # Imported lazily so merely loading this llm plugin does not pull in
        # the whole web server stack.
        from llm_webchat.config import load_config
        from llm_webchat.plugins import get_plugin_manager
        from llm_webchat.server import create_application

        # Register the backend plugin before create_application() runs so the
        # /api/search route is picked up when the app is built.
        search_plugin = SearchPlugin()
        get_plugin_manager().register(search_plugin)

        # Append our frontend script to LLM_WEBCHAT_JAVASCRIPT_PLUGINS,
        # preserving any plugins the user already configured. This must
        # happen before load_config(), which presumably reads configuration
        # from the environment — verify against llm_webchat.config.
        javascript_plugin_path = str(STATIC_DIRECTORY / "search.js")
        existing_plugins = os.environ.get("LLM_WEBCHAT_JAVASCRIPT_PLUGINS", "")
        if existing_plugins:
            os.environ["LLM_WEBCHAT_JAVASCRIPT_PLUGINS"] = f"{existing_plugins},{javascript_plugin_path}"
        else:
            os.environ["LLM_WEBCHAT_JAVASCRIPT_PLUGINS"] = javascript_plugin_path

        config = load_config()
        application = create_application(config)
        uvicorn.run(application, host=config.llm_webchat_host, port=config.llm_webchat_port)