@oh-my-pi/pi-web-ui 1.337.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. package/CHANGELOG.md +96 -0
  2. package/README.md +609 -0
  3. package/example/README.md +61 -0
  4. package/example/index.html +13 -0
  5. package/example/package.json +24 -0
  6. package/example/src/app.css +1 -0
  7. package/example/src/custom-messages.ts +99 -0
  8. package/example/src/main.ts +420 -0
  9. package/example/tsconfig.json +23 -0
  10. package/example/vite.config.ts +6 -0
  11. package/package.json +57 -0
  12. package/scripts/count-prompt-tokens.ts +88 -0
  13. package/src/ChatPanel.ts +218 -0
  14. package/src/app.css +68 -0
  15. package/src/components/AgentInterface.ts +390 -0
  16. package/src/components/AttachmentTile.ts +107 -0
  17. package/src/components/ConsoleBlock.ts +74 -0
  18. package/src/components/CustomProviderCard.ts +96 -0
  19. package/src/components/ExpandableSection.ts +46 -0
  20. package/src/components/Input.ts +113 -0
  21. package/src/components/MessageEditor.ts +404 -0
  22. package/src/components/MessageList.ts +97 -0
  23. package/src/components/Messages.ts +384 -0
  24. package/src/components/ProviderKeyInput.ts +152 -0
  25. package/src/components/SandboxedIframe.ts +626 -0
  26. package/src/components/StreamingMessageContainer.ts +107 -0
  27. package/src/components/ThinkingBlock.ts +45 -0
  28. package/src/components/message-renderer-registry.ts +28 -0
  29. package/src/components/sandbox/ArtifactsRuntimeProvider.ts +219 -0
  30. package/src/components/sandbox/AttachmentsRuntimeProvider.ts +66 -0
  31. package/src/components/sandbox/ConsoleRuntimeProvider.ts +186 -0
  32. package/src/components/sandbox/FileDownloadRuntimeProvider.ts +110 -0
  33. package/src/components/sandbox/RuntimeMessageBridge.ts +82 -0
  34. package/src/components/sandbox/RuntimeMessageRouter.ts +216 -0
  35. package/src/components/sandbox/SandboxRuntimeProvider.ts +52 -0
  36. package/src/dialogs/ApiKeyPromptDialog.ts +75 -0
  37. package/src/dialogs/AttachmentOverlay.ts +640 -0
  38. package/src/dialogs/CustomProviderDialog.ts +274 -0
  39. package/src/dialogs/ModelSelector.ts +314 -0
  40. package/src/dialogs/PersistentStorageDialog.ts +146 -0
  41. package/src/dialogs/ProvidersModelsTab.ts +212 -0
  42. package/src/dialogs/SessionListDialog.ts +157 -0
  43. package/src/dialogs/SettingsDialog.ts +216 -0
  44. package/src/index.ts +115 -0
  45. package/src/prompts/prompts.ts +282 -0
  46. package/src/storage/app-storage.ts +60 -0
  47. package/src/storage/backends/indexeddb-storage-backend.ts +193 -0
  48. package/src/storage/store.ts +33 -0
  49. package/src/storage/stores/custom-providers-store.ts +62 -0
  50. package/src/storage/stores/provider-keys-store.ts +33 -0
  51. package/src/storage/stores/sessions-store.ts +136 -0
  52. package/src/storage/stores/settings-store.ts +34 -0
  53. package/src/storage/types.ts +206 -0
  54. package/src/tools/artifacts/ArtifactElement.ts +14 -0
  55. package/src/tools/artifacts/ArtifactPill.ts +26 -0
  56. package/src/tools/artifacts/Console.ts +102 -0
  57. package/src/tools/artifacts/DocxArtifact.ts +213 -0
  58. package/src/tools/artifacts/ExcelArtifact.ts +231 -0
  59. package/src/tools/artifacts/GenericArtifact.ts +118 -0
  60. package/src/tools/artifacts/HtmlArtifact.ts +203 -0
  61. package/src/tools/artifacts/ImageArtifact.ts +116 -0
  62. package/src/tools/artifacts/MarkdownArtifact.ts +83 -0
  63. package/src/tools/artifacts/PdfArtifact.ts +201 -0
  64. package/src/tools/artifacts/SvgArtifact.ts +82 -0
  65. package/src/tools/artifacts/TextArtifact.ts +148 -0
  66. package/src/tools/artifacts/artifacts-tool-renderer.ts +371 -0
  67. package/src/tools/artifacts/artifacts.ts +713 -0
  68. package/src/tools/artifacts/index.ts +7 -0
  69. package/src/tools/extract-document.ts +271 -0
  70. package/src/tools/index.ts +46 -0
  71. package/src/tools/javascript-repl.ts +316 -0
  72. package/src/tools/renderer-registry.ts +127 -0
  73. package/src/tools/renderers/BashRenderer.ts +52 -0
  74. package/src/tools/renderers/CalculateRenderer.ts +58 -0
  75. package/src/tools/renderers/DefaultRenderer.ts +95 -0
  76. package/src/tools/renderers/GetCurrentTimeRenderer.ts +92 -0
  77. package/src/tools/types.ts +15 -0
  78. package/src/utils/attachment-utils.ts +472 -0
  79. package/src/utils/auth-token.ts +22 -0
  80. package/src/utils/format.ts +42 -0
  81. package/src/utils/i18n.ts +653 -0
  82. package/src/utils/model-discovery.ts +277 -0
  83. package/src/utils/proxy-utils.ts +134 -0
  84. package/src/utils/test-sessions.ts +2357 -0
  85. package/tsconfig.build.json +20 -0
  86. package/tsconfig.json +7 -0
package/src/utils/model-discovery.ts
@@ -0,0 +1,277 @@
+import { LMStudioClient } from "@lmstudio/sdk";
+import type { Model } from "@oh-my-pi/pi-ai";
+import { Ollama } from "ollama/browser";
+
+/**
+ * Discover models from an Ollama server.
+ * @param baseUrl - Base URL of the Ollama server (e.g., "http://localhost:11434")
+ * @param apiKey - Optional API key (currently unused by Ollama)
+ * @returns Array of discovered models
+ */
+export async function discoverOllamaModels(baseUrl: string, _apiKey?: string): Promise<Model<any>[]> {
+  try {
+    // Create Ollama client
+    const ollama = new Ollama({ host: baseUrl });
+
+    // Get list of available models
+    const { models } = await ollama.list();
+
+    // Fetch details for each model and convert to Model format
+    const ollamaModelPromises: Promise<Model<any> | null>[] = models.map(async (model: any) => {
+      try {
+        // Get model details
+        const details = await ollama.show({
+          model: model.name,
+        });
+
+        // Check capabilities - filter out models that don't support tools
+        const capabilities: string[] = (details as any).capabilities || [];
+        if (!capabilities.includes("tools")) {
+          console.debug(`Skipping model ${model.name}: does not support tools`);
+          return null;
+        }
+
+        // Extract model info
+        const modelInfo: any = details.model_info || {};
+
+        // Get context window size - look for architecture-specific keys
+        const architecture = modelInfo["general.architecture"] || "";
+        const contextKey = `${architecture}.context_length`;
+        const contextWindow = parseInt(modelInfo[contextKey] || "8192", 10);
+
+        // Ollama caps max tokens at 10x context length
+        const maxTokens = contextWindow * 10;
+
+        // Ollama only supports the completions API
+        const ollamaModel: Model<any> = {
+          id: model.name,
+          name: model.name,
+          api: "openai-completions" as any,
+          provider: "", // Will be set by caller
+          baseUrl: `${baseUrl}/v1`,
+          reasoning: capabilities.includes("thinking"),
+          input: ["text"],
+          cost: {
+            input: 0,
+            output: 0,
+            cacheRead: 0,
+            cacheWrite: 0,
+          },
+          contextWindow: contextWindow,
+          maxTokens: maxTokens,
+        };
+
+        return ollamaModel;
+      } catch (err) {
+        console.error(`Failed to fetch details for model ${model.name}:`, err);
+        return null;
+      }
+    });
+
+    const results = await Promise.all(ollamaModelPromises);
+    return results.filter((m): m is Model<any> => m !== null);
+  } catch (err) {
+    console.error("Failed to discover Ollama models:", err);
+    throw new Error(`Ollama discovery failed: ${err instanceof Error ? err.message : String(err)}`);
+  }
+}
+
+/**
+ * Discover models from a llama.cpp server via the OpenAI-compatible /v1/models endpoint.
+ * @param baseUrl - Base URL of the llama.cpp server (e.g., "http://localhost:8080")
+ * @param apiKey - Optional API key
+ * @returns Array of discovered models
+ */
+export async function discoverLlamaCppModels(baseUrl: string, apiKey?: string): Promise<Model<any>[]> {
+  try {
+    const headers: Record<string, string> = {
+      "Content-Type": "application/json",
+    };
+
+    if (apiKey) {
+      headers.Authorization = `Bearer ${apiKey}`;
+    }
+
+    const response = await fetch(`${baseUrl}/v1/models`, {
+      method: "GET",
+      headers,
+    });
+
+    if (!response.ok) {
+      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+
+    if (!data.data || !Array.isArray(data.data)) {
+      throw new Error("Invalid response format from llama.cpp server");
+    }
+
+    return data.data.map((model: any) => {
+      // llama.cpp doesn't always provide context window info
+      const contextWindow = model.context_length || 8192;
+      const maxTokens = model.max_tokens || 4096;
+
+      const llamaModel: Model<any> = {
+        id: model.id,
+        name: model.id,
+        api: "openai-completions" as any,
+        provider: "", // Will be set by caller
+        baseUrl: `${baseUrl}/v1`,
+        reasoning: false,
+        input: ["text"],
+        cost: {
+          input: 0,
+          output: 0,
+          cacheRead: 0,
+          cacheWrite: 0,
+        },
+        contextWindow: contextWindow,
+        maxTokens: maxTokens,
+      };
+
+      return llamaModel;
+    });
+  } catch (err) {
+    console.error("Failed to discover llama.cpp models:", err);
+    throw new Error(`llama.cpp discovery failed: ${err instanceof Error ? err.message : String(err)}`);
+  }
+}
+
+/**
+ * Discover models from a vLLM server via the OpenAI-compatible /v1/models endpoint.
+ * @param baseUrl - Base URL of the vLLM server (e.g., "http://localhost:8000")
+ * @param apiKey - Optional API key
+ * @returns Array of discovered models
+ */
+export async function discoverVLLMModels(baseUrl: string, apiKey?: string): Promise<Model<any>[]> {
+  try {
+    const headers: Record<string, string> = {
+      "Content-Type": "application/json",
+    };
+
+    if (apiKey) {
+      headers.Authorization = `Bearer ${apiKey}`;
+    }
+
+    const response = await fetch(`${baseUrl}/v1/models`, {
+      method: "GET",
+      headers,
+    });
+
+    if (!response.ok) {
+      throw new Error(`HTTP ${response.status}: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+
+    if (!data.data || !Array.isArray(data.data)) {
+      throw new Error("Invalid response format from vLLM server");
+    }
+
+    return data.data.map((model: any) => {
+      // vLLM provides max_model_len, which is the context window
+      const contextWindow = model.max_model_len || 8192;
+      const maxTokens = Math.min(contextWindow, 4096); // Cap max tokens
+
+      const vllmModel: Model<any> = {
+        id: model.id,
+        name: model.id,
+        api: "openai-completions" as any,
+        provider: "", // Will be set by caller
+        baseUrl: `${baseUrl}/v1`,
+        reasoning: false,
+        input: ["text"],
+        cost: {
+          input: 0,
+          output: 0,
+          cacheRead: 0,
+          cacheWrite: 0,
+        },
+        contextWindow: contextWindow,
+        maxTokens: maxTokens,
+      };
+
+      return vllmModel;
+    });
+  } catch (err) {
+    console.error("Failed to discover vLLM models:", err);
+    throw new Error(`vLLM discovery failed: ${err instanceof Error ? err.message : String(err)}`);
+  }
+}
+
+/**
+ * Discover models from an LM Studio server using the LM Studio SDK.
+ * @param baseUrl - Base URL of the LM Studio server (e.g., "http://localhost:1234")
+ * @param apiKey - Optional API key (unused for LM Studio SDK)
+ * @returns Array of discovered models
+ */
+export async function discoverLMStudioModels(baseUrl: string, _apiKey?: string): Promise<Model<any>[]> {
+  try {
+    // Extract host and port from baseUrl
+    const url = new URL(baseUrl);
+    const port = url.port ? parseInt(url.port, 10) : 1234;
+
+    // Create LM Studio client
+    const client = new LMStudioClient({ baseUrl: `ws://${url.hostname}:${port}` });
+
+    // List all downloaded models
+    const models = await client.system.listDownloadedModels();
+
+    // Filter to only LLM models and map to our Model format
+    return models
+      .filter((model) => model.type === "llm")
+      .map((model) => {
+        const contextWindow = model.maxContextLength;
+        // Use the full context length as the max token budget
+        const maxTokens = contextWindow;
+
+        const lmStudioModel: Model<any> = {
+          id: model.path,
+          name: model.displayName || model.path,
+          api: "openai-completions" as any,
+          provider: "", // Will be set by caller
+          baseUrl: `${baseUrl}/v1`,
+          reasoning: model.trainedForToolUse || false,
+          input: model.vision ? ["text", "image"] : ["text"],
+          cost: {
+            input: 0,
+            output: 0,
+            cacheRead: 0,
+            cacheWrite: 0,
+          },
+          contextWindow: contextWindow,
+          maxTokens: maxTokens,
+        };
+
+        return lmStudioModel;
+      });
+  } catch (err) {
+    console.error("Failed to discover LM Studio models:", err);
+    throw new Error(`LM Studio discovery failed: ${err instanceof Error ? err.message : String(err)}`);
+  }
+}
+
+/**
+ * Convenience function to discover models based on provider type.
+ * @param type - Provider type
+ * @param baseUrl - Base URL of the server
+ * @param apiKey - Optional API key
+ * @returns Array of discovered models
+ */
+export async function discoverModels(
+  type: "ollama" | "llama.cpp" | "vllm" | "lmstudio",
+  baseUrl: string,
+  apiKey?: string,
+): Promise<Model<any>[]> {
+  switch (type) {
+    case "ollama":
+      return discoverOllamaModels(baseUrl, apiKey);
+    case "llama.cpp":
+      return discoverLlamaCppModels(baseUrl, apiKey);
+    case "vllm":
+      return discoverVLLMModels(baseUrl, apiKey);
+    case "lmstudio":
+      return discoverLMStudioModels(baseUrl, apiKey);
+  }
+}
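
The discovery helpers above normalize four local-inference backends (Ollama, llama.cpp, vLLM, LM Studio) into the same Model shape. The snippet below is a minimal sketch only; the import path, base URL, and provider id are assumptions for illustration, not part of the published package surface. Calling code might use the discoverModels convenience function like this:

// Illustrative usage of the helpers above; nothing here is taken from the diff itself.
import { discoverModels } from "./model-discovery";

async function loadLocalModels() {
  // Assumes an Ollama server on its default local port.
  const discovered = await discoverModels("ollama", "http://localhost:11434");

  // The discovery functions leave `provider` empty ("Will be set by caller"),
  // so fill it in before handing the models to the rest of the UI.
  return discovered.map((model) => ({ ...model, provider: "ollama" }));
}
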
package/src/utils/proxy-utils.ts
@@ -0,0 +1,134 @@
+import type { Api, Context, Model, SimpleStreamOptions } from "@oh-my-pi/pi-ai";
+import { streamSimple } from "@oh-my-pi/pi-ai";
+
+/**
+ * Centralized proxy decision logic.
+ *
+ * Determines whether to use a CORS proxy for LLM API requests based on:
+ * - Provider name
+ * - API key pattern (for providers where it matters)
+ */
+
+/**
+ * Check if a provider/API key combination requires a CORS proxy.
+ *
+ * @param provider - Provider name (e.g., "anthropic", "openai", "zai")
+ * @param apiKey - API key for the provider
+ * @returns true if proxy is required, false otherwise
+ */
+export function shouldUseProxyForProvider(provider: string, apiKey: string): boolean {
+  switch (provider.toLowerCase()) {
+    case "zai":
+      // Z-AI always requires proxy
+      return true;
+
+    case "anthropic":
+      // Anthropic OAuth tokens (sk-ant-oat-*) require proxy
+      // Regular API keys (sk-ant-api-*) do NOT require proxy
+      return apiKey.startsWith("sk-ant-oat");
+
+    // These providers work without proxy
+    case "openai":
+    case "google":
+    case "groq":
+    case "openrouter":
+    case "cerebras":
+    case "xai":
+    case "ollama":
+    case "lmstudio":
+      return false;
+
+    // Unknown providers - assume no proxy needed
+    // This allows new providers to work by default
+    default:
+      return false;
+  }
+}
+
+/**
+ * Apply CORS proxy to a model's baseUrl if needed.
+ *
+ * @param model - The model to potentially proxy
+ * @param apiKey - API key for the provider
+ * @param proxyUrl - CORS proxy URL (e.g., "https://proxy.mariozechner.at/proxy")
+ * @returns Model with modified baseUrl if proxy is needed, otherwise original model
+ */
+export function applyProxyIfNeeded<T extends Api>(model: Model<T>, apiKey: string, proxyUrl?: string): Model<T> {
+  // If no proxy URL configured, return original model
+  if (!proxyUrl) {
+    return model;
+  }
+
+  // If model has no baseUrl, can't proxy it
+  if (!model.baseUrl) {
+    return model;
+  }
+
+  // Check if this provider/key needs proxy
+  if (!shouldUseProxyForProvider(model.provider, apiKey)) {
+    return model;
+  }
+
+  // Apply proxy to baseUrl
+  return {
+    ...model,
+    baseUrl: `${proxyUrl}/?url=${encodeURIComponent(model.baseUrl)}`,
+  };
+}
+
+/**
+ * Check if an error is likely a CORS error.
+ *
+ * CORS errors in browsers typically manifest as:
+ * - TypeError with message "Failed to fetch"
+ * - NetworkError
+ *
+ * @param error - The error to check
+ * @returns true if error is likely a CORS error
+ */
+export function isCorsError(error: unknown): boolean {
+  if (!(error instanceof Error)) {
+    return false;
+  }
+
+  // Check for common CORS error patterns
+  const message = error.message.toLowerCase();
+
+  // "Failed to fetch" is the standard CORS error in most browsers
+  if (error.name === "TypeError" && message.includes("failed to fetch")) {
+    return true;
+  }
+
+  // Some browsers report "NetworkError"
+  if (error.name === "NetworkError") {
+    return true;
+  }
+
+  // CORS-specific messages
+  if (message.includes("cors") || message.includes("cross-origin")) {
+    return true;
+  }
+
+  return false;
+}
+
+/**
+ * Create a streamFn that applies CORS proxy when needed.
+ * Reads proxy settings from storage on each call.
+ *
+ * @param getProxyUrl - Async function to get current proxy URL (or undefined if disabled)
+ * @returns A streamFn compatible with Agent's streamFn option
+ */
+export function createStreamFn(getProxyUrl: () => Promise<string | undefined>) {
+  return async (model: Model<any>, context: Context, options?: SimpleStreamOptions) => {
+    const apiKey = options?.apiKey;
+    const proxyUrl = await getProxyUrl();
+
+    if (!apiKey || !proxyUrl) {
+      return streamSimple(model, context, options);
+    }
+
+    const proxiedModel = applyProxyIfNeeded(model, apiKey, proxyUrl);
+    return streamSimple(proxiedModel, context, options);
+  };
+}
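
To show how these pieces fit together, here is a hedged sketch of wiring createStreamFn to a proxy setting. The getProxyUrl accessor below is a hypothetical stand-in for the package's settings store, and the agent wiring mentioned in the comments is only what the JSDoc above suggests.

// Illustrative only; the settings accessor and agent wiring are assumptions.
import { createStreamFn, isCorsError } from "./proxy-utils";

// Hypothetical accessor - the real app would read this from its settings store.
async function getProxyUrl(): Promise<string | undefined> {
  // Return undefined to disable proxying entirely.
  return "https://proxy.mariozechner.at/proxy";
}

const streamFn = createStreamFn(getProxyUrl);

// streamFn now has the (model, context, options) shape described above and can be
// passed wherever a streamFn is expected. When a request fails, isCorsError(err)
// can help decide whether to suggest enabling the proxy.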