llms-py 3.0.0b6__py3-none-any.whl → 3.0.0b7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. llms/__pycache__/main.cpython-314.pyc +0 -0
  2. llms/{ui/modules/analytics.mjs → extensions/analytics/ui/index.mjs} +4 -2
  3. llms/extensions/core_tools/__init__.py +358 -0
  4. llms/extensions/core_tools/__pycache__/__init__.cpython-314.pyc +0 -0
  5. llms/extensions/gallery/__init__.py +61 -0
  6. llms/extensions/gallery/__pycache__/__init__.cpython-314.pyc +0 -0
  7. llms/extensions/gallery/__pycache__/db.cpython-314.pyc +0 -0
  8. llms/extensions/gallery/db.py +298 -0
  9. llms/extensions/gallery/ui/index.mjs +480 -0
  10. llms/extensions/providers/__init__.py +18 -0
  11. llms/extensions/providers/__pycache__/__init__.cpython-314.pyc +0 -0
  12. llms/{providers → extensions/providers}/__pycache__/anthropic.cpython-314.pyc +0 -0
  13. llms/extensions/providers/__pycache__/chutes.cpython-314.pyc +0 -0
  14. llms/extensions/providers/__pycache__/google.cpython-314.pyc +0 -0
  15. llms/{providers → extensions/providers}/__pycache__/nvidia.cpython-314.pyc +0 -0
  16. llms/{providers → extensions/providers}/__pycache__/openai.cpython-314.pyc +0 -0
  17. llms/extensions/providers/__pycache__/openrouter.cpython-314.pyc +0 -0
  18. llms/{providers → extensions/providers}/anthropic.py +1 -4
  19. llms/{providers → extensions/providers}/chutes.py +21 -18
  20. llms/{providers → extensions/providers}/google.py +99 -27
  21. llms/{providers → extensions/providers}/nvidia.py +6 -8
  22. llms/{providers → extensions/providers}/openai.py +3 -6
  23. llms/{providers → extensions/providers}/openrouter.py +12 -10
  24. llms/extensions/system_prompts/__init__.py +45 -0
  25. llms/extensions/system_prompts/__pycache__/__init__.cpython-314.pyc +0 -0
  26. llms/extensions/system_prompts/ui/index.mjs +284 -0
  27. llms/extensions/system_prompts/ui/prompts.json +1067 -0
  28. llms/{ui/modules/tools.mjs → extensions/tools/ui/index.mjs} +4 -2
  29. llms/llms.json +17 -1
  30. llms/main.py +381 -170
  31. llms/providers-extra.json +0 -32
  32. llms/ui/App.mjs +17 -18
  33. llms/ui/ai.mjs +10 -3
  34. llms/ui/app.css +1553 -24
  35. llms/ui/ctx.mjs +70 -12
  36. llms/ui/index.mjs +13 -8
  37. llms/ui/modules/chat/ChatBody.mjs +11 -248
  38. llms/ui/modules/chat/HomeTools.mjs +254 -0
  39. llms/ui/modules/chat/SettingsDialog.mjs +1 -1
  40. llms/ui/modules/chat/index.mjs +278 -174
  41. llms/ui/modules/layout.mjs +2 -26
  42. llms/ui/modules/model-selector.mjs +1 -1
  43. llms/ui/modules/threads/index.mjs +5 -11
  44. llms/ui/modules/threads/threadStore.mjs +56 -2
  45. llms/ui/utils.mjs +21 -3
  46. {llms_py-3.0.0b6.dist-info → llms_py-3.0.0b7.dist-info}/METADATA +1 -1
  47. llms_py-3.0.0b7.dist-info/RECORD +80 -0
  48. llms/providers/__pycache__/chutes.cpython-314.pyc +0 -0
  49. llms/providers/__pycache__/google.cpython-314.pyc +0 -0
  50. llms/providers/__pycache__/openrouter.cpython-314.pyc +0 -0
  51. llms_py-3.0.0b6.dist-info/RECORD +0 -66
  52. {llms_py-3.0.0b6.dist-info → llms_py-3.0.0b7.dist-info}/WHEEL +0 -0
  53. {llms_py-3.0.0b6.dist-info → llms_py-3.0.0b7.dist-info}/entry_points.txt +0 -0
  54. {llms_py-3.0.0b6.dist-info → llms_py-3.0.0b7.dist-info}/licenses/LICENSE +0 -0
  55. {llms_py-3.0.0b6.dist-info → llms_py-3.0.0b7.dist-info}/top_level.txt +0 -0

llms/{providers → extensions/providers}/google.py
@@ -1,5 +1,8 @@
+import base64
+import io
 import json
 import time
+import wave
 
 import aiohttp
 
@@ -11,7 +14,7 @@ import aiohttp
 # self.chat_url = "https://generativelanguage.googleapis.com/v1beta/chat/completions"
 
 
-def install(ctx):
+def install_google(ctx):
    from llms.main import OpenAiCompatible
 
    def gemini_chat_summary(gemini_chat):
@@ -68,6 +71,7 @@ def install(ctx):
            super().__init__(**new_kwargs)
            self.safety_settings = kwargs.get("safety_settings")
            self.thinking_config = kwargs.get("thinking_config")
+            self.speech_config = kwargs.get("speech_config")
            self.tools = kwargs.get("tools")
            self.curl = kwargs.get("curl")
            self.headers = kwargs.get("headers", {"Content-Type": "application/json"})
@@ -189,13 +193,15 @@ def install(ctx):
            gemini_chat["generationConfig"] = generation_config
 
            if "tools" in chat:
-                gemini_chat["tools"] = chat["tools"]
+                # gemini_chat["tools"] = chat["tools"]
+                ctx.log("Error: tools not supported in Gemini")
            elif self.tools:
-                gemini_chat["tools"] = self.tools.copy()
+                # gemini_chat["tools"] = self.tools.copy()
+                ctx.log("Error: tools not supported in Gemini")
 
            if "modalities" in chat:
                generation_config["responseModalities"] = [modality.upper() for modality in chat["modalities"]]
-                if "image_config" in chat:
+                if "image" in chat["modalities"] and "image_config" in chat:
                    # delete thinkingConfig
                    del generation_config["thinkingConfig"]
                    config_map = {
@@ -205,6 +211,11 @@ def install(ctx):
                    generation_config["imageConfig"] = {
                        config_map[k]: v for k, v in chat["image_config"].items() if k in config_map
                    }
+                if "audio" in chat["modalities"] and self.speech_config:
+                    del generation_config["thinkingConfig"]
+                    generation_config["speechConfig"] = self.speech_config.copy()
+                    # Currently Google Audio Models only accept AUDIO
+                    generation_config["responseModalities"] = ["AUDIO"]
 
            started_at = int(time.time() * 1000)
            gemini_chat_url = f"https://generativelanguage.googleapis.com/v1beta/models/{chat['model']}:generateContent?key={self.api_key}"
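
The new `speech_config` is copied verbatim into Gemini's `generationConfig.speechConfig`. No example value appears in this diff, but going by the documented shape of Gemini's TTS `speechConfig`, a provider would plausibly be configured along these lines (the voice name is illustrative):

    # hypothetical provider kwargs; "Kore" is one of Gemini's prebuilt voice names
    speech_config = {
        "voiceConfig": {
            "prebuiltVoiceConfig": {"voiceName": "Kore"}
        }
    }
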
@@ -218,13 +229,22 @@ def install(ctx):
                with open(f"{ctx.MOCK_DIR}/gemini-image.json") as f:
                    obj = json.load(f)
            else:
-                async with session.post(
-                    gemini_chat_url,
-                    headers=self.headers,
-                    data=json.dumps(gemini_chat),
-                    timeout=aiohttp.ClientTimeout(total=120),
-                ) as res:
-                    obj = await self.response_json(res)
+                try:
+                    async with session.post(
+                        gemini_chat_url,
+                        headers=self.headers,
+                        data=json.dumps(gemini_chat),
+                        timeout=aiohttp.ClientTimeout(total=120),
+                    ) as res:
+                        obj = await self.response_json(res)
+                except Exception as e:
+                    ctx.log(f"Error: {res.status} {res.reason}: {e}")
+                    text = await res.text()
+                    try:
+                        obj = json.loads(text)
+                    except:
+                        ctx.log(text)
+                        raise e
 
            if "error" in obj:
                ctx.log(f"Error: {obj['error']}")
@@ -233,6 +253,18 @@ def install(ctx):
            if ctx.debug:
                ctx.dbg(json.dumps(gemini_response_summary(obj), indent=2))
 
+            # calculate cost per generation
+            cost = None
+            token_costs = obj.get("metadata", {}).get("pricing", "")
+            if token_costs:
+                input_price, output_price = token_costs.split("/")
+                input_per_token = float(input_price) / 1000000
+                output_per_token = float(output_price) / 1000000
+                if "usageMetadata" in obj:
+                    input_tokens = obj["usageMetadata"].get("promptTokenCount", 0)
+                    output_tokens = obj["usageMetadata"].get("candidatesTokenCount", 0)
+                    cost = (input_per_token * input_tokens) + (output_per_token * output_tokens)
+
            response = {
                "id": f"chatcmpl-{started_at}",
                "created": started_at,
@@ -248,6 +280,7 @@ def install(ctx):
                content = ""
                reasoning = ""
                images = []
+                audios = []
                if "content" in candidate and "parts" in candidate["content"]:
                    text_parts = []
                    reasoning_parts = []
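
The cost block above parses the pricing metadata as an "input/output" string of USD per million tokens and multiplies by the reported usage. A quick worked example of the same arithmetic (the rates are illustrative, not actual Gemini pricing):

    pricing = "0.30/2.50"  # $/1M input tokens / $/1M output tokens (illustrative)
    input_price, output_price = pricing.split("/")
    input_tokens, output_tokens = 12_000, 800
    cost = (float(input_price) / 1_000_000) * input_tokens \
         + (float(output_price) / 1_000_000) * output_tokens
    print(f"${cost:.4f}")  # -> $0.0056
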
@@ -260,19 +293,59 @@ def install(ctx):
                        if "inlineData" in part:
                            inline_data = part["inlineData"]
                            mime_type = inline_data.get("mimeType", "image/png")
-                            ext = mime_type.split("/")[1]
-                            base64_data = inline_data["data"]
-                            filename = f"{chat['model'].split('/')[-1]}-{len(images)}.{ext}"
-                            relative_url, info = ctx.save_image_to_cache(base64_data, filename, {})
-                            images.append(
-                                {
-                                    "type": "image_url",
-                                    "index": len(images),
-                                    "image_url": {
-                                        "url": relative_url,
-                                    },
-                                }
-                            )
+                            if mime_type.startswith("image"):
+                                ext = mime_type.split("/")[1]
+                                base64_data = inline_data["data"]
+                                filename = f"{chat['model'].split('/')[-1]}-{len(images)}.{ext}"
+                                ctx.log(f"inlineData {len(base64_data)} {mime_type} {filename}")
+                                relative_url, info = ctx.save_image_to_cache(
+                                    base64_data,
+                                    filename,
+                                    ctx.to_file_info(chat, {"cost": cost}),
+                                )
+                                images.append(
+                                    {
+                                        "type": "image_url",
+                                        "index": len(images),
+                                        "image_url": {
+                                            "url": relative_url,
+                                        },
+                                    }
+                                )
+                            elif mime_type.startswith("audio"):
+                                # mime_type audio/L16;codec=pcm;rate=24000
+                                base64_data = inline_data["data"]
+
+                                pcm = base64.b64decode(base64_data)
+                                # Convert PCM to WAV
+                                wav_io = io.BytesIO()
+                                with wave.open(wav_io, "wb") as wf:
+                                    wf.setnchannels(1)
+                                    wf.setsampwidth(2)
+                                    wf.setframerate(24000)
+                                    wf.writeframes(pcm)
+                                wav_data = wav_io.getvalue()
+
+                                ext = mime_type.split("/")[1].split(";")[0]
+                                pcm_filename = f"{chat['model'].split('/')[-1]}-{len(audios)}.{ext}"
+                                filename = pcm_filename.replace(f".{ext}", ".wav")
+                                ctx.log(f"inlineData {len(base64_data)} {mime_type} {filename}")
+
+                                relative_url, info = ctx.save_bytes_to_cache(
+                                    wav_data,
+                                    filename,
+                                    ctx.to_file_info(chat, {"cost": cost}),
+                                )
+
+                                audios.append(
+                                    {
+                                        "type": "audio_url",
+                                        "index": len(audios),
+                                        "audio_url": {
+                                            "url": relative_url,
+                                        },
+                                    }
+                                )
                    content = " ".join(text_parts)
                    reasoning = " ".join(reasoning_parts)
 
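
Gemini's speech output arrives as raw 16-bit mono PCM at 24 kHz (`audio/L16;codec=pcm;rate=24000`), which browsers won't play as-is, hence the WAV re-wrap above. The same conversion as a standalone, runnable sketch using only the stdlib:

    import io
    import wave

    def pcm16_to_wav(pcm: bytes, rate: int = 24000) -> bytes:
        """Wrap raw 16-bit mono PCM frames in a WAV container."""
        wav_io = io.BytesIO()
        with wave.open(wav_io, "wb") as wf:
            wf.setnchannels(1)  # mono
            wf.setsampwidth(2)  # 16-bit samples
            wf.setframerate(rate)
            wf.writeframes(pcm)
        return wav_io.getvalue()

    # half a second of silence: 24000 frames/s * 0.5 s * 2 bytes/frame
    wav_data = pcm16_to_wav(b"\x00" * 24000)
    assert wav_data[:4] == b"RIFF" and wav_data[8:12] == b"WAVE"
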
@@ -288,6 +361,8 @@ def install(ctx):
                    choice["message"]["reasoning"] = reasoning
                if len(images) > 0:
                    choice["message"]["images"] = images
+                if len(audios) > 0:
+                    choice["message"]["audios"] = audios
                choices.append(choice)
            response["choices"] = choices
            if "usageMetadata" in obj:
@@ -301,6 +376,3 @@ def install(ctx):
            return ctx.log_json(self.to_response(response, chat, started_at))
 
    ctx.add_provider(GoogleProvider)
-
-
-__install__ = install
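
With `__install__ = install` removed here (and from nvidia.py and openrouter.py below), registration presumably moves to the new `llms/extensions/providers/__init__.py` (+18 lines, not included in this diff). A hypothetical sketch of that wiring, using only the renamed entry points visible in this diff:

    # hypothetical reconstruction of llms/extensions/providers/__init__.py
    from .google import install_google
    from .nvidia import install_nvidia
    from .openrouter import install_openrouter

    def install(ctx):
        for installer in (install_google, install_nvidia, install_openrouter):
            installer(ctx)

    __install__ = install
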

llms/{providers → extensions/providers}/nvidia.py
@@ -4,7 +4,7 @@ import time
 import aiohttp
 
 
-def install(ctx):
+def install_nvidia(ctx):
    from llms.main import GeneratorBase
 
    class NvidiaGenAi(GeneratorBase):
@@ -29,10 +29,11 @@ def install(ctx):
            last_model = "/" in chat["model"] and chat["model"].split("/")[-1] or chat["model"]
            filename = f"{last_model}_{seed}.png"
 
-            image_info = {
-                "seed": seed,
-            }
-            relative_url, info = ctx.save_image_to_cache(base64, filename, image_info)
+            relative_url, info = ctx.save_image_to_cache(
+                base64,
+                filename,
+                ctx.to_file_info(chat, {"seed": seed}),
+            )
            return {
                "choices": [
                    {
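
This release repeatedly replaces hand-built metadata dicts with `ctx.to_file_info(chat, extra)`. Its implementation isn't shown in this diff, but judging by the dicts it replaces (model and last user prompt, merged with per-call extras such as `seed` or `cost`), a plausible sketch:

    # hypothetical reconstruction of ctx.to_file_info (not shown in this diff)
    def to_file_info(chat, extra=None):
        info = {
            "model": chat["model"],
            "prompt": ctx.last_user_prompt(chat),
        }
        if extra:
            info.update({k: v for k, v in extra.items() if v is not None})
        return info
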
@@ -102,6 +103,3 @@ def install(ctx):
            return self.to_response(await self.response_json(response), chat, started_at)
 
    ctx.add_provider(NvidiaGenAi)
-
-
-__install__ = install

llms/{providers → extensions/providers}/openai.py
@@ -6,7 +6,7 @@ import time
 import aiohttp
 
 
-def install(ctx):
+def install_openai(ctx):
    from llms.main import GeneratorBase, OpenAiCompatible
 
    class OpenAiProvider(OpenAiCompatible):
@@ -83,10 +83,7 @@ def install(ctx):
                relative_url, info = ctx.save_image_to_cache(
                    image_data,
                    f"{chat['model']}-{i}.{ext}",
-                    {
-                        "model": chat["model"],
-                        "prompt": ctx.last_user_prompt(chat),
-                    },
+                    ctx.to_file_info(chat),
                )
                images.append(
                    {
@@ -156,4 +153,4 @@ def install(ctx):
    ctx.add_provider(OpenAiGenerator)
 
 
-__install__ = install
+__install__ = install_openai

llms/{providers → extensions/providers}/openrouter.py
@@ -4,7 +4,7 @@ import time
 import aiohttp
 
 
-def install(ctx):
+def install_openrouter(ctx):
    from llms.main import GeneratorBase
 
    # https://openrouter.ai/docs/guides/overview/multimodal/image-generation
@@ -16,6 +16,9 @@ def install(ctx):
 
        def to_response(self, response, chat, started_at):
            # go through all image responses and save them to cache
+            cost = None
+            if "usage" in response and "cost" in response["usage"]:
+                cost = response["usage"]["cost"]
            for choice in response["choices"]:
                if "message" in choice and "images" in choice["message"]:
                    for image in choice["message"]["images"]:
@@ -29,11 +32,9 @@ def install(ctx):
                            base64_data = parts[1]
                            model = chat["model"].split("/")[-1]
                            filename = f"{model}-{choice['index']}.{ext}"
-                            info = {
-                                "model": model,
-                                "prompt": ctx.last_user_prompt(chat),
-                            }
-                            relative_url, info = ctx.save_image_to_cache(base64_data, filename, info)
+                            relative_url, info = ctx.save_image_to_cache(
+                                base64_data, filename, ctx.to_file_info(chat, {"cost": cost})
+                            )
                            image["image_url"]["url"] = relative_url
 
            return response
@@ -50,11 +51,13 @@ def install(ctx):
                return ctx.log_json(self.to_response(json.loads(text), chat, started_at))
            else:
                chat_url = provider.chat_url
+                # remove tools
+                chat.pop("tools", None)
            chat = await self.process_chat(chat, provider_id=self.id)
            ctx.log(f"POST {chat_url}")
            ctx.log(provider.chat_summary(chat))
            # remove metadata if any (conflicts with some providers, e.g. Z.ai)
-            chat.pop("metadata", None)
+            metadata = chat.pop("metadata", None)
 
            async with aiohttp.ClientSession() as session, session.post(
                chat_url,
@@ -62,9 +65,8 @@ def install(ctx):
                data=json.dumps(chat),
                timeout=aiohttp.ClientTimeout(total=300),
            ) as response:
+                if metadata:
+                    chat["metadata"] = metadata
                return ctx.log_json(self.to_response(await self.response_json(response), chat, started_at))
 
    ctx.add_provider(OpenRouterGenerator)
-
-
-__install__ = install
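
Note the metadata handling changed from drop to pop-and-restore: `metadata` is stripped before the payload is serialized (some upstream providers, e.g. Z.ai, reject it) and reattached after the POST so downstream consumers of `chat` still see it. A minimal sketch of the pattern, assuming a plain dict payload:

    import json

    def serialize_without(chat: dict, field: str) -> str:
        saved = chat.pop(field, None)  # strip before serializing
        body = json.dumps(chat)        # what actually goes over the wire
        if saved is not None:
            chat[field] = saved        # restore for downstream consumers
        return body

    chat = {"model": "m", "metadata": {"trace": 1}}
    body = serialize_without(chat, "metadata")
    assert "metadata" not in body and "metadata" in chat
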

llms/extensions/system_prompts/__init__.py
@@ -0,0 +1,45 @@
+import json
+import os
+
+from aiohttp import web
+
+default_prompts = [
+    {"name": "Helpful Assistant", "prompt": "You are a helpful assistant."},
+]
+
+
+# runs after providers are configured but before the server is run
+def install(ctx):
+    # helper to get the user's or default prompts
+    def get_user_prompts(request):
+        candidate_paths = []
+        # check if user is signed in
+        username = ctx.get_username(request)
+        if username:
+            # if signed in (GitHub OAuth), prefer this user's prompts if they exist
+            candidate_paths.append(os.path.join(ctx.get_user_path(username), "system_prompts", "prompts.json"))
+        # then the shared default prompts for all users, if they exist
+        candidate_paths.append(os.path.join(ctx.get_user_path(), "system_prompts", "prompts.json"))
+        # otherwise fall back to the default prompts bundled with this repo
+        candidate_paths.append(os.path.join(ctx.path, "ui", "prompts.json"))
+
+        # iterate the candidate paths and return the JSON of the first that exists
+        for path in candidate_paths:
+            if os.path.exists(path):
+                with open(path, encoding="utf-8") as f:
+                    txt = f.read()
+                return json.loads(txt)
+        return default_prompts
+
+    # API handler to get prompts
+    async def get_prompts(request):
+        prompts_json = get_user_prompts(request)
+        return web.json_response(prompts_json)
+
+    ctx.add_get("prompts.json", get_prompts)
+
+
+# register the extension's install handler
+__install__ = install
+
+__order__ = -10
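
The handler resolves prompts in order: the signed-in user's `system_prompts/prompts.json`, then the shared file under the default user path, then the `ui/prompts.json` bundled with the extension, and finally the hard-coded `default_prompts`. Fetching it is a plain GET; a quick sketch assuming a locally running server (host and port are assumptions, adjust to your setup):

    import json
    import urllib.request

    # point this at your running llms server
    with urllib.request.urlopen("http://localhost:8000/prompts.json") as res:
        prompts = json.load(res)
    print(prompts[0]["name"])
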

llms/extensions/system_prompts/ui/index.mjs
@@ -0,0 +1,284 @@
+import { ref, computed, inject, watch, onMounted, onUnmounted, nextTick } from "vue"
+import { AppContext } from "ctx.mjs"
+
+let ext
+
+const PromptFinder = {
+    template: `
+    <div v-if="modelValue" class="absolute right-0 top-full z-10 mt-1 origin-top-right rounded-md bg-white dark:bg-gray-900 shadow-lg border border-gray-300 dark:border-gray-600 focus:outline-none"
+        style="width:400px"
+        role="menu" aria-orientation="vertical" aria-labelledby="menu-button" tabindex="-1">
+        <div class="p-2" role="none">
+            <div class="relative mb-2">
+                <div class="pointer-events-none absolute inset-y-0 left-0 flex items-center pl-3">
+                    <svg class="h-4 w-4 text-gray-400" viewBox="0 0 20 20" fill="currentColor" aria-hidden="true">
+                        <path fill-rule="evenodd" d="M9 3.5a5.5 5.5 0 100 11 5.5 5.5 0 000-11zM2 9a7 7 0 1112.452 4.391l3.328 3.329a.75.75 0 11-1.06 1.06l-3.329-3.328A7 7 0 012 9z" clip-rule="evenodd" />
+                    </svg>
+                </div>
+                <input type="text"
+                    ref="searchInput"
+                    v-model="searchQuery"
+                    @keydown="onKeydown"
+                    class="block w-full rounded-md border-0 py-1.5 pl-10 text-gray-900 dark:text-gray-100 shadow-sm ring-1 ring-inset ring-gray-300 dark:ring-gray-600 placeholder:text-gray-400 focus:ring-2 focus:ring-inset focus:ring-blue-600 sm:text-xs sm:leading-6 bg-transparent"
+                    placeholder="Search prompts...">
+            </div>
+
+            <div class="max-h-80 overflow-y-auto" ref="resultsList">
+                <div v-if="filteredPrompts.length === 0" class="p-4 text-center text-xs text-gray-500">
+                    No prompts found
+                </div>
+                <div v-for="(prompt, index) in filteredPrompts" :key="prompt.id"
+                    @click="selectPrompt(prompt)"
+                    :class="['group relative flex gap-x-2 rounded-md p-2 cursor-pointer border-b border-gray-100 dark:border-gray-800 last:border-0',
+                        selectedIndex === index ? 'bg-blue-50 dark:bg-blue-900/20' : 'hover:bg-gray-50 dark:hover:bg-gray-800']"
+                    :data-index="index">
+                    <div class="flex-auto">
+                        <div class="flex items-center justify-between">
+                            <h4 :class="['font-semibold text-sm', selectedIndex === index ? 'text-blue-700 dark:text-blue-300' : 'text-gray-900 dark:text-gray-100']">
+                                {{ prompt.name }}
+                            </h4>
+                        </div>
+                        <p class="text-xs leading-4 text-gray-500 dark:text-gray-400 line-clamp-2 mt-0.5">{{ prompt.value }}</p>
+                    </div>
+                </div>
+            </div>
+        </div>
+    </div>
+    `,
+    props: {
+        modelValue: Boolean, // controls visibility
+        prompts: {
+            type: Array,
+            default: () => []
+        }
+    },
+    emits: ['update:modelValue', 'select'],
+    setup(props, { emit }) {
+        const searchQuery = ref('')
+        const searchInput = ref(null)
+        const resultsList = ref(null)
+        const selectedIndex = ref(-1)
+
+        const filteredPrompts = computed(() => {
+            if (!searchQuery.value) return props.prompts
+            const q = searchQuery.value.toLowerCase()
+            return props.prompts.filter(p =>
+                p.name.toLowerCase().includes(q) ||
+                p.value.toLowerCase().includes(q) ||
+                p.id.toLowerCase().includes(q)
+            )
+        })
+
+        function selectPrompt(prompt) {
+            emit('select', prompt)
+            emit('update:modelValue', false)
+        }
+
+        function scrollToSelected() {
+            nextTick(() => {
+                if (!resultsList.value) return
+                const el = resultsList.value.querySelector(`[data-index="${selectedIndex.value}"]`)
+                if (el) {
+                    el.scrollIntoView({ block: 'nearest' })
+                }
+            })
+        }
+
+        function onKeydown(e) {
+            if (filteredPrompts.value.length === 0) return
+
+            if (e.key === 'ArrowDown') {
+                e.preventDefault()
+                selectedIndex.value = (selectedIndex.value + 1) % filteredPrompts.value.length
+                scrollToSelected()
+            } else if (e.key === 'ArrowUp') {
+                e.preventDefault()
+                selectedIndex.value = (selectedIndex.value - 1 + filteredPrompts.value.length) % filteredPrompts.value.length
+                scrollToSelected()
+            } else if (e.key === 'Enter') {
+                e.preventDefault()
+                if (selectedIndex.value >= 0 && selectedIndex.value < filteredPrompts.value.length) {
+                    selectPrompt(filteredPrompts.value[selectedIndex.value])
+                }
+            }
+        }
+
+        watch(() => props.modelValue, (isOpen) => {
+            if (isOpen) {
+                // Focus search input when modal opens
+                nextTick(() => {
+                    if (searchInput.value) {
+                        searchInput.value.focus()
+                    }
+                })
+                selectedIndex.value = -1
+            } else {
+                searchQuery.value = ''
+            }
+        })
+
+        watch(searchQuery, () => {
+            selectedIndex.value = 0 // Select first result on search
+        })
+
+        return {
+            searchQuery,
+            searchInput,
+            resultsList,
+            filteredPrompts,
+            selectedIndex,
+            selectPrompt,
+            onKeydown
+        }
+    }
+}
+
+const SystemPromptEditor = {
+    template: `
+    <div class="border-b border-gray-200 dark:border-gray-700 px-6 pb-4">
+        <div class="max-w-6xl mx-auto">
+            <div class="mt-2 h-10 flex justify-between items-center">
+                <label class="select-none block text-sm font-medium text-gray-700 dark:text-gray-300 mb-2">
+                    System Prompt
+                </label>
+                <div v-if="hasMessages" class="text-sm text-gray-500 dark:text-gray-400">
+                    {{ !threadSystemPrompt ? '' : prompts.find(x => x.value === threadSystemPrompt)?.name || 'Custom' }}
+                </div>
+                <div v-else class="mb-2 relative" ref="containerRef">
+                    <div class="flex items-center gap-2">
+                        <span v-if="selected" class="text-sm text-gray-500 dark:text-gray-400">
+                            {{ selected.name }}
+                        </span>
+                        <button v-if="modelValue" type="button" title="Clear System Prompt" @click="$emit('update:modelValue', null)"
+                            class="rounded-full p-1 hover:bg-gray-200 dark:hover:bg-gray-700 transition-colors">
+                            <svg class="size-4 text-gray-500 dark:text-gray-400" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path fill="currentColor" d="M19 6.41L17.59 5L12 10.59L6.41 5L5 6.41L10.59 12L5 17.59L6.41 19L12 13.41L17.59 19L19 17.59L13.41 12z"/></svg>
+                        </button>
+                        <button type="button"
+                            @click="showFinder = !showFinder"
+                            class="inline-flex items-center gap-x-1.5 rounded-md bg-white dark:bg-gray-900 px-2.5 py-1.5 text-sm font-medium text-gray-700 dark:text-gray-300 shadow-sm border border-gray-300 dark:border-gray-600 hover:bg-gray-50 dark:hover:bg-gray-800">
+                            Explore Prompts
+                        </button>
+                    </div>
+                    <PromptFinder v-model="showFinder" :prompts="prompts" @select="onSelect" />
+                </div>
+            </div>
+            <div v-if="hasMessages" class="w-full rounded-md border border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-900 text-gray-900 dark:text-gray-100 px-3 py-2 text-sm">
+                {{threadSystemPrompt || 'No System Prompt was used' }}
+            </div>
+            <div v-else>
+                <textarea
+                    :value="modelValue" @input="$emit('update:modelValue', $event.target.value)"
+                    placeholder="Enter a system prompt to guide AI's behavior..."
+                    rows="6"
+                    class="block w-full resize-vertical rounded-md border border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-900 text-gray-900 dark:text-gray-100 px-3 py-2 text-sm placeholder-gray-500 dark:placeholder-gray-400 focus:border-blue-500 focus:outline-none focus:ring-1 focus:ring-blue-500"
+                ></textarea>
+            </div>
+        </div>
+    </div>
+    `,
+    emits: ['update:modelValue'],
+    props: {
+        prompts: Array,
+        selected: Object,
+        modelValue: String,
+    },
+    setup(props, { emit }) {
+        /**@type {AppContext} */
+        const ctx = inject('ctx')
+        const containerRef = ref()
+        const showFinder = ref(false)
+        const prefs = ext.getPrefs()
+        const hasMessages = computed(() => ctx.threads.currentThread.value?.messages?.length > 0)
+        const threadSystemPrompt = computed(() => ctx.threads.currentThread.value?.systemPrompt || '')
+        const selected = computed(() =>
+            props.prompts.find(x => x.value === props.modelValue) ?? { name: "Custom", value: props.modelValue })
+
+        function onSelect(prompt) {
+            emit('update:modelValue', prompt.value)
+        }
+
+        function closeFinder(e) {
+            if (showFinder.value && containerRef.value && !containerRef.value.contains(e.target)) {
+                showFinder.value = false
+            }
+        }
+
+        watch(() => props.modelValue, promptValue => {
+            prefs.prompt = selected.value
+            ext.setPrefs(prefs)
+        })
+
+        onMounted(() => {
+            document.addEventListener('click', closeFinder)
+            if (prefs.prompt) {
+                emit('update:modelValue', prefs.prompt.value)
+            }
+        })
+        onUnmounted(() => {
+            document.removeEventListener('click', closeFinder)
+        })
+
+        return {
+            threadSystemPrompt,
+            hasMessages,
+            selected,
+            containerRef,
+            showFinder,
+            onSelect,
+        }
+    }
+}
+
+export default {
+    order: 30 - 100,
+
+    install(ctx) {
+        ext = ctx.scope('system_prompts')
+        ctx.components({
+            PromptFinder,
+            SystemPromptEditor,
+            SystemPromptsPanel: {
+                template: `<SystemPromptEditor :prompts="$state.prompts" v-model="$state.selectedPrompt" />`,
+            }
+        })
+
+        ctx.setTopIcons({
+            system_prompts: {
+                component: {
+                    template: `<svg @click="$ctx.toggleTop('SystemPromptsPanel')" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path fill="none" stroke="currentColor" stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="m5 7l5 5l-5 5m8 0h6"/></svg>`,
+                },
+                isActive({ top }) { return top === 'SystemPromptsPanel' }
+            }
+        })
+
+        ctx.createThreadFilters.push(thread => {
+            const prefs = ext.getPrefs()
+            thread.systemPrompt = prefs?.prompt?.value || ""
+            console.log('createThreadFilters', prefs, thread)
+        })
+
+        ctx.chatRequestFilters.push(({ request, thread }) => {
+
+            const hasSystemPrompt = request.messages.find(x => x.role === 'system')
+            if (hasSystemPrompt) {
+                console.log('Already has system prompt', hasSystemPrompt.content)
+                return
+            }
+
+            if (thread.systemPrompt) {
+                // add message to start
+                request.messages.unshift({
+                    role: 'system',
+                    content: thread.systemPrompt
+                })
+            }
+        })
+
+        ctx.setState({ prompts: [] })
+    },
+
+    async load(ctx) {
+        const prompts = await ext.getJson(`/prompts.json`)
+        ctx.setState({ prompts })
+    }
+}
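
`PromptFinder` filters on `id`, `name`, and `value`, so entries in the served `prompts.json` presumably follow this shape (values illustrative; note the server-side `default_prompts` fallback uses a `prompt` key instead of `value`, so it would not match these filters as-is):

    [
        {
            "id": "it-expert",
            "name": "IT Expert",
            "value": "You are an experienced IT expert who diagnoses technical problems step by step."
        }
    ]
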