llms-py 3.0.0b10__py3-none-any.whl → 3.0.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (67)
  1. llms/{extensions/app/db_manager.py → db.py} +170 -15
  2. llms/extensions/app/__init__.py +95 -39
  3. llms/extensions/app/db.py +16 -124
  4. llms/extensions/app/ui/threadStore.mjs +20 -2
  5. llms/extensions/core_tools/__init__.py +37 -0
  6. llms/extensions/gallery/__init__.py +15 -13
  7. llms/extensions/gallery/db.py +117 -172
  8. llms/extensions/gallery/ui/index.mjs +1 -1
  9. llms/extensions/providers/__init__.py +3 -1
  10. llms/extensions/providers/anthropic.py +7 -3
  11. llms/extensions/providers/cerebras.py +37 -0
  12. llms/extensions/providers/chutes.py +1 -1
  13. llms/extensions/providers/google.py +131 -28
  14. llms/extensions/providers/nvidia.py +2 -2
  15. llms/extensions/providers/openai.py +2 -2
  16. llms/extensions/providers/openrouter.py +4 -2
  17. llms/extensions/system_prompts/ui/index.mjs +21 -26
  18. llms/extensions/system_prompts/ui/prompts.json +5 -5
  19. llms/llms.json +3 -0
  20. llms/main.py +81 -34
  21. llms/providers.json +1 -1
  22. llms/ui/ai.mjs +1 -1
  23. llms/ui/app.css +96 -3
  24. llms/ui/ctx.mjs +24 -1
  25. llms/ui/index.mjs +2 -0
  26. llms/ui/modules/chat/ChatBody.mjs +1 -0
  27. llms/ui/modules/chat/index.mjs +19 -1
  28. llms/ui/modules/icons.mjs +46 -0
  29. llms/ui/modules/layout.mjs +28 -0
  30. llms/ui/modules/model-selector.mjs +0 -40
  31. llms/ui/utils.mjs +9 -1
  32. {llms_py-3.0.0b10.dist-info → llms_py-3.0.2.dist-info}/METADATA +1 -1
  33. {llms_py-3.0.0b10.dist-info → llms_py-3.0.2.dist-info}/RECORD +37 -65
  34. llms/__pycache__/__init__.cpython-312.pyc +0 -0
  35. llms/__pycache__/__init__.cpython-313.pyc +0 -0
  36. llms/__pycache__/__init__.cpython-314.pyc +0 -0
  37. llms/__pycache__/__main__.cpython-312.pyc +0 -0
  38. llms/__pycache__/__main__.cpython-314.pyc +0 -0
  39. llms/__pycache__/llms.cpython-312.pyc +0 -0
  40. llms/__pycache__/main.cpython-312.pyc +0 -0
  41. llms/__pycache__/main.cpython-313.pyc +0 -0
  42. llms/__pycache__/main.cpython-314.pyc +0 -0
  43. llms/__pycache__/plugins.cpython-314.pyc +0 -0
  44. llms/extensions/app/__pycache__/__init__.cpython-314.pyc +0 -0
  45. llms/extensions/app/__pycache__/db.cpython-314.pyc +0 -0
  46. llms/extensions/app/__pycache__/db_manager.cpython-314.pyc +0 -0
  47. llms/extensions/app/requests.json +0 -9073
  48. llms/extensions/app/threads.json +0 -15290
  49. llms/extensions/core_tools/__pycache__/__init__.cpython-314.pyc +0 -0
  50. llms/extensions/core_tools/ui/codemirror/lib/codemirror.css +0 -344
  51. llms/extensions/core_tools/ui/codemirror/lib/codemirror.js +0 -9884
  52. llms/extensions/gallery/__pycache__/__init__.cpython-314.pyc +0 -0
  53. llms/extensions/gallery/__pycache__/db.cpython-314.pyc +0 -0
  54. llms/extensions/katex/__pycache__/__init__.cpython-314.pyc +0 -0
  55. llms/extensions/providers/__pycache__/__init__.cpython-314.pyc +0 -0
  56. llms/extensions/providers/__pycache__/anthropic.cpython-314.pyc +0 -0
  57. llms/extensions/providers/__pycache__/chutes.cpython-314.pyc +0 -0
  58. llms/extensions/providers/__pycache__/google.cpython-314.pyc +0 -0
  59. llms/extensions/providers/__pycache__/nvidia.cpython-314.pyc +0 -0
  60. llms/extensions/providers/__pycache__/openai.cpython-314.pyc +0 -0
  61. llms/extensions/providers/__pycache__/openrouter.cpython-314.pyc +0 -0
  62. llms/extensions/system_prompts/__pycache__/__init__.cpython-314.pyc +0 -0
  63. llms/extensions/tools/__pycache__/__init__.cpython-314.pyc +0 -0
  64. {llms_py-3.0.0b10.dist-info → llms_py-3.0.2.dist-info}/WHEEL +0 -0
  65. {llms_py-3.0.0b10.dist-info → llms_py-3.0.2.dist-info}/entry_points.txt +0 -0
  66. {llms_py-3.0.0b10.dist-info → llms_py-3.0.2.dist-info}/licenses/LICENSE +0 -0
  67. {llms_py-3.0.0b10.dist-info → llms_py-3.0.2.dist-info}/top_level.txt +0 -0
llms/extensions/providers/google.py CHANGED
@@ -79,16 +79,61 @@ def install_google(ctx):
             if "Authorization" in self.headers:
                 del self.headers["Authorization"]

-        async def chat(self, chat):
+        def provider_model(self, model):
+            if model.lower().startswith("gemini-"):
+                return model
+            return super().provider_model(model)
+
+        def model_info(self, model):
+            info = super().model_info(model)
+            if info:
+                return info
+            if model.lower().startswith("gemini-"):
+                return {
+                    "id": model,
+                    "name": model,
+                    "cost": {"input": 0, "output": 0},
+                }
+            return None
+
+        async def chat(self, chat, context=None):
             chat["model"] = self.provider_model(chat["model"]) or chat["model"]
+            model_info = (context.get("modelInfo") if context is not None else None) or self.model_info(chat["model"])

             chat = await self.process_chat(chat)
             generation_config = {}
+            tools = None
+            supports_tool_calls = model_info.get("tool_call", False)
+
+            if "tools" in chat and supports_tool_calls:
+                function_declarations = []
+                gemini_tools = {}
+
+                for tool in chat["tools"]:
+                    if tool["type"] == "function":
+                        f = tool["function"]
+                        function_declarations.append(
+                            {
+                                "name": f["name"],
+                                "description": f.get("description"),
+                                "parameters": f.get("parameters"),
+                            }
+                        )
+                    elif tool["type"] == "file_search":
+                        gemini_tools["file_search"] = tool["file_search"]
+
+                if function_declarations:
+                    gemini_tools["function_declarations"] = function_declarations
+
+                tools = [gemini_tools] if gemini_tools else None

             # Filter out system messages and convert to proper Gemini format
             contents = []
             system_prompt = None

+            # Track tool call IDs to names for response mapping
+            tool_id_map = {}
+
             async with aiohttp.ClientSession() as session:
                 for message in chat["messages"]:
                     if message["role"] == "system":
@@ -101,8 +146,55 @@ def install_google(ctx):
                         elif isinstance(content, str):
                             system_prompt = content
                     elif "content" in message:
+                        role = "user"
+                        if "role" in message:
+                            if message["role"] == "user":
+                                role = "user"
+                            elif message["role"] == "assistant":
+                                role = "model"
+                            elif message["role"] == "tool":
+                                role = "function"
+
+                        parts = []
+
+                        # Handle tool calls in assistant messages
+                        if message.get("role") == "assistant" and "tool_calls" in message:
+                            for tool_call in message["tool_calls"]:
+                                tool_id_map[tool_call["id"]] = tool_call["function"]["name"]
+                                parts.append(
+                                    {
+                                        "functionCall": {
+                                            "name": tool_call["function"]["name"],
+                                            "args": json.loads(tool_call["function"]["arguments"]),
+                                        }
+                                    }
+                                )
+
+                        # Handle tool responses from user
+                        if message.get("role") == "tool":
+                            # Gemini expects function response in 'functionResponse' part
+                            # We need to find the name associated with this tool_call_id
+                            tool_call_id = message.get("tool_call_id")
+                            name = tool_id_map.get(tool_call_id)
+                            # If we can't find the name (maybe from previous turn not in history or restart),
+                            # we might have an issue. But let's try to proceed.
+                            # Fallback: if we can't find the name, skip or try to infer?
+                            # Gemini strict validation requires the name.
+                            if name:
+                                # content is the string response
+                                # Some implementations pass the content directly.
+                                # Google docs say: response: { "name": "...", "content": { ... } }
+                                # Actually "response" field in functionResponse is a Struct/Map.
+                                parts.append(
+                                    {
+                                        "functionResponse": {
+                                            "name": name,
+                                            "response": {"name": name, "content": message["content"]},
+                                        }
+                                    }
+                                )
+
                         if isinstance(message["content"], list):
-                            parts = []
                             for item in message["content"]:
                                 if "type" in item:
                                     if item["type"] == "image_url" and "image_url" in item:
@@ -142,23 +234,14 @@ def install_google(ctx):
                                 if "text" in item:
                                     text = item["text"]
                                     parts.append({"text": text})
-                            if len(parts) > 0:
-                                contents.append(
-                                    {
-                                        "role": message["role"]
-                                        if "role" in message and message["role"] == "user"
-                                        else "model",
-                                        "parts": parts,
-                                    }
-                                )
-                        else:
-                            content = message["content"]
+                        elif message["content"]:  # String content
+                            parts.append({"text": message["content"]})
+
+                        if len(parts) > 0:
                             contents.append(
                                 {
-                                    "role": message["role"]
-                                    if "role" in message and message["role"] == "user"
-                                    else "model",
-                                    "parts": [{"text": content}],
+                                    "role": role,
+                                    "parts": parts,
                                 }
                             )

@@ -166,6 +249,9 @@ def install_google(ctx):
                 "contents": contents,
             }

+            if tools:
+                gemini_chat["tools"] = tools
+
             if self.safety_settings:
                 gemini_chat["safetySettings"] = self.safety_settings

@@ -192,18 +278,12 @@ def install_google(ctx):
             if len(generation_config) > 0:
                 gemini_chat["generationConfig"] = generation_config

-            if "tools" in chat:
-                # gemini_chat["tools"] = chat["tools"]
-                ctx.log("Error: tools not supported in Gemini")
-            elif self.tools:
-                # gemini_chat["tools"] = self.tools.copy()
-                ctx.log("Error: tools not supported in Gemini")
-
             if "modalities" in chat:
                 generation_config["responseModalities"] = [modality.upper() for modality in chat["modalities"]]
                 if "image" in chat["modalities"] and "image_config" in chat:
                     # delete thinkingConfig
-                    del generation_config["thinkingConfig"]
+                    if "thinkingConfig" in generation_config:
+                        del generation_config["thinkingConfig"]
                     config_map = {
                         "aspect_ratio": "aspectRatio",
                         "image_size": "imageSize",
@@ -212,11 +292,16 @@ def install_google(ctx):
                         config_map[k]: v for k, v in chat["image_config"].items() if k in config_map
                     }
                 if "audio" in chat["modalities"] and self.speech_config:
-                    del generation_config["thinkingConfig"]
+                    if "thinkingConfig" in generation_config:
+                        del generation_config["thinkingConfig"]
                     generation_config["speechConfig"] = self.speech_config.copy()
                     # Currently Google Audio Models only accept AUDIO
                     generation_config["responseModalities"] = ["AUDIO"]

+            # Ensure generationConfig is set if we added anything to it
+            if len(generation_config) > 0:
+                gemini_chat["generationConfig"] = generation_config
+
             started_at = int(time.time() * 1000)
             gemini_chat_url = f"https://generativelanguage.googleapis.com/v1beta/models/{chat['model']}:generateContent?key={self.api_key}"
@@ -237,6 +322,8 @@ def install_google(ctx):
                         timeout=aiohttp.ClientTimeout(total=120),
                     ) as res:
                         obj = await self.response_json(res)
+                        if context is not None:
+                            context["providerResponse"] = obj
                 except Exception as e:
                     ctx.log(f"Error: {res.status} {res.reason}: {e}")
                     text = await res.text()
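The new context parameter lets callers capture the raw provider payload. A minimal sketch of that usage, assuming context is a plain dict as the code above treats it:

    context = {}
    response = await provider.chat(chat, context=context)
    raw = context.get("providerResponse")  # unmodified Gemini response JSON, stored by the hunk above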
@@ -271,7 +358,7 @@ def install_google(ctx):
                 "model": obj.get("modelVersion", chat["model"]),
             }
             choices = []
-            for i, candidate in enumerate(obj["candidates"]):
+            for i, candidate in enumerate(obj.get("candidates", [])):
                 role = "assistant"
                 if "content" in candidate and "role" in candidate["content"]:
                     role = "assistant" if candidate["content"]["role"] == "model" else candidate["content"]["role"]
@@ -281,6 +368,8 @@ def install_google(ctx):
                 reasoning = ""
                 images = []
                 audios = []
+                tool_calls = []
+
                 if "content" in candidate and "parts" in candidate["content"]:
                     text_parts = []
                     reasoning_parts = []
@@ -290,6 +379,16 @@ def install_google(ctx):
                                 reasoning_parts.append(part["text"])
                             else:
                                 text_parts.append(part["text"])
+                        if "functionCall" in part:
+                            fc = part["functionCall"]
+                            tool_calls.append(
+                                {
+                                    "id": f"call_{len(tool_calls)}_{int(time.time())}",  # Gemini doesn't return ID, generate one
+                                    "type": "function",
+                                    "function": {"name": fc["name"], "arguments": json.dumps(fc["args"])},
+                                }
+                            )
+
                         if "inlineData" in part:
                             inline_data = part["inlineData"]
                             mime_type = inline_data.get("mimeType", "image/png")
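Given a Gemini part such as {"functionCall": {"name": "get_weather", "args": {"city": "Paris"}}}, the block above would emit an OpenAI-style tool call along these lines (the id value is illustrative, synthesized because Gemini returns none):

    {
        "id": "call_0_1736967000",  # generated from len(tool_calls) and the current time
        "type": "function",
        "function": {"name": "get_weather", "arguments": '{"city": "Paris"}'},
    }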
@@ -354,7 +453,7 @@ def install_google(ctx):
                     "finish_reason": candidate.get("finishReason", "stop"),
                     "message": {
                         "role": role,
-                        "content": content,
+                        "content": content if content else None,
                     },
                 }
                 if reasoning:
@@ -363,6 +462,10 @@ def install_google(ctx):
                     choice["message"]["images"] = images
                 if len(audios) > 0:
                     choice["message"]["audios"] = audios
+                if len(tool_calls) > 0:
+                    choice["message"]["tool_calls"] = tool_calls
+                    # If we have tool calls, content can be null but message should probably exist
+
                 choices.append(choice)
             response["choices"] = choices
             if "usageMetadata" in obj:
llms/extensions/providers/nvidia.py CHANGED
@@ -54,7 +54,7 @@ def install_nvidia(ctx):
                 }
             raise Exception("No artifacts in response")

-        async def chat(self, chat, provider=None):
+        async def chat(self, chat, provider=None, context=None):
             headers = self.get_headers(provider, chat)
             if provider is not None:
                 chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
@@ -100,6 +100,6 @@ def install_nvidia(ctx):
                     data=json.dumps(gen_request),
                     timeout=aiohttp.ClientTimeout(total=120),
                 ) as response:
-                    return self.to_response(await self.response_json(response), chat, started_at)
+                    return self.to_response(await self.response_json(response), chat, started_at, context=context)

     ctx.add_provider(NvidiaGenAi)
llms/extensions/providers/openai.py CHANGED
@@ -113,7 +113,7 @@ def install_openai(ctx):
             ctx.log(json.dumps(response, indent=2))
             raise Exception("No 'data' field in response.")

-        async def chat(self, chat, provider=None):
+        async def chat(self, chat, provider=None, context=None):
             headers = self.get_headers(provider, chat)

             if chat["model"] in self.map_image_models:
@@ -145,7 +145,7 @@ def install_openai(ctx):
                     text = await response.text()
                     ctx.log(text[:1024] + (len(text) > 1024 and "..." or ""))
                     if response.status < 300:
-                        return ctx.log_json(await self.to_response(json.loads(text), chat, started_at))
+                        return ctx.log_json(await self.to_response(json.loads(text), chat, started_at, context=context))
                     else:
                         raise Exception(f"Failed to generate image {response.status}")

llms/extensions/providers/openrouter.py CHANGED
@@ -39,7 +39,7 @@ def install_openrouter(ctx):

             return response

-        async def chat(self, chat, provider=None):
+        async def chat(self, chat, provider=None, context=None):
             headers = self.get_headers(provider, chat)
             if provider is not None:
                 chat["model"] = provider.provider_model(chat["model"]) or chat["model"]
@@ -67,6 +67,8 @@ def install_openrouter(ctx):
                 ) as response:
                     if metadata:
                         chat["metadata"] = metadata
-                    return ctx.log_json(self.to_response(await self.response_json(response), chat, started_at))
+                    return ctx.log_json(
+                        self.to_response(await self.response_json(response), chat, started_at, context=context)
+                    )

     ctx.add_provider(OpenRouterGenerator)
llms/extensions/system_prompts/ui/index.mjs CHANGED
@@ -142,7 +142,7 @@ const SystemPromptEditor = {
                     System Prompt
                 </label>
                 <div v-if="hasMessages" class="text-sm text-gray-500 dark:text-gray-400">
-                    {{ !threadSystemPrompt ? '' : prompts.find(x => x.value === threadSystemPrompt)?.name || 'Custom' }}
+                    {{ !ext.prefs.systemPrompt ? '' : prompts.find(x => x.value === ext.prefs.systemPrompt)?.name || 'Custom' }}
                 </div>
                 <div v-else class="mb-2 relative" ref="containerRef">
                     <div class="flex items-center gap-2">
@@ -154,16 +154,16 @@ const SystemPromptEditor = {
                             <svg class="size-4 text-gray-500 dark:text-gray-400" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24"><path fill="currentColor" d="M19 6.41L17.59 5L12 10.59L6.41 5L5 6.41L10.59 12L5 17.59L6.41 19L12 13.41L17.59 19L19 17.59L13.41 12z"/></svg>
                         </button>
                         <button type="button"
-                            @click="showFinder = !showFinder"
+                            @click="ext.setPrefs({ showFinder: !ext.prefs.showFinder })"
                             class="inline-flex items-center gap-x-1.5 rounded-md bg-white dark:bg-gray-900 px-2.5 py-1.5 text-sm font-medium text-gray-700 dark:text-gray-300 shadow-sm border border-gray-300 dark:border-gray-600 hover:bg-gray-50 dark:hover:bg-gray-800">
                             Explore Prompts
                         </button>
                     </div>
-                    <PromptFinder v-model="showFinder" :prompts="prompts" @select="onSelect" />
+                    <PromptFinder v-model="ext.prefs.showFinder" :prompts="prompts" @select="onSelect" />
                 </div>
             </div>
             <div v-if="hasMessages" class="w-full rounded-md border border-gray-300 dark:border-gray-600 bg-white dark:bg-gray-900 text-gray-900 dark:text-gray-100 px-3 py-2 text-sm">
-                {{threadSystemPrompt || 'No System Prompt was used' }}
+                {{$threads.currentThread.value?.systemPrompt || 'No System Prompt was used' }}
             </div>
             <div v-else>
                 <textarea
@@ -186,32 +186,29 @@ const SystemPromptEditor = {
         /**@type {AppContext} */
         const ctx = inject('ctx')
         const containerRef = ref()
-        const showFinder = ref(false)
-        const prefs = ext.getPrefs()
         const hasMessages = computed(() => ctx.threads.currentThread.value?.messages?.length > 0)
-        const threadSystemPrompt = computed(() => ctx.threads.currentThread.value?.systemPrompt || '')
         const selected = computed(() =>
             props.prompts.find(x => x.value === props.modelValue) ?? { name: "Custom", value: props.modelValue })

         function onSelect(prompt) {
+            ext.setPrefs({ prompt: prompt }) // {"id","name","value"}
             emit('update:modelValue', prompt.value)
         }

         function closeFinder(e) {
-            if (showFinder.value && containerRef.value && !containerRef.value.contains(e.target)) {
-                showFinder.value = false
+            if (ext.prefs.showFinder && containerRef.value && !containerRef.value.contains(e.target)) {
+                ext.setPrefs({ showFinder: false })
             }
         }

-        watch(() => props.modelValue, promptValue => {
-            prefs.prompt = selected.value
-            ext.setPrefs(prefs)
+        watch(() => props.modelValue, systemPrompt => {
+            ext.setPrefs({ systemPrompt })
         })

         onMounted(() => {
             document.addEventListener('click', closeFinder)
-            if (prefs.prompt) {
-                emit('update:modelValue', prefs.prompt.value)
+            if (ext.prefs.prompt) {
+                emit('update:modelValue', ext.prefs.prompt)
             }
         })
         onUnmounted(() => {
@@ -219,11 +216,10 @@ const SystemPromptEditor = {
         })

         return {
-            threadSystemPrompt,
+            ext,
             hasMessages,
             selected,
             containerRef,
-            showFinder,
             onSelect,
         }
     }
@@ -238,9 +234,13 @@ export default {
         PromptFinder,
         SystemPromptEditor,
         SystemPromptsPanel: {
-            template: `<SystemPromptEditor :prompts="$state.prompts" v-model="$state.selectedPrompt" />`,
+            template: `<SystemPromptEditor :prompts="ext.state.prompts" v-model="ext.prefs.prompt" />`,
+            setup() {
+                return { ext }
+            }
         }
     })
+    ext.setPrefs({ systemPrompt: '' })

     ctx.setTopIcons({
         system_prompts: {
@@ -251,12 +251,6 @@ export default {
         }
     })

-    ctx.createThreadFilters.push(thread => {
-        const prefs = ext.getPrefs()
-        thread.systemPrompt = prefs?.prompt?.value || ""
-        console.log('createThreadFilters', prefs, thread)
-    })
-
     ctx.chatRequestFilters.push(({ request, thread }) => {

         const hasSystemPrompt = request.messages.find(x => x.role === 'system')
@@ -265,11 +259,12 @@ export default {
             return
         }

-        if (thread.systemPrompt) {
+        // Only add the selected system prompt for new requests
+        if (ext.prefs.systemPrompt && request.messages.length <= 1) {
             // add message to start
             request.messages.unshift({
                 role: 'system',
-                content: thread.systemPrompt
+                content: ext.prefs.systemPrompt
             })
         }
     })
@@ -280,6 +275,6 @@ export default {
     async load(ctx) {
         const api = await ext.getJson(`/prompts.json`)
         const prompts = api.response || []
-        ctx.setState({ prompts })
+        ext.setState({ prompts })
     }
 }
llms/extensions/system_prompts/ui/prompts.json CHANGED
@@ -645,7 +645,7 @@
     "value": "I want you to act as an biblical translator. I will speak to you in english and you will translate it and answer in the corrected and improved version of my text, in a biblical dialect. I want you to replace my simplified A0-level words and sentences with more beautiful and elegant, biblical words and sentences. Keep the meaning same. I want you to only reply the correction, the improvements and nothing else, do not write explanations. My first sentence is \"Hello, World!\""
   },
   {
-    "id": "chess-player",
+    "id": "chess-player2",
     "name": "Act as an Chess Player",
     "value": "I want you to act as a rival chess player. I We will say our moves in reciprocal order. In the beginning I will be white. Also please don't explain your moves to me because we are rivals. After my first message i will just write my move. Don't forget to update the state of the board in your mind as we make moves. My first move is e4."
   },
@@ -740,7 +740,7 @@
     "value": "I want you to act as a Graphviz DOT generator, an expert to create meaningful diagrams. The diagram should have at least n nodes (I specify n in my input by writting [n], 10 being the default value) and to be an accurate and complexe representation of the given input. Each node is indexed by a number to reduce the size of the output, should not include any styling, and with layout=neato, overlap=false, node [shape=rectangle] as parameters. The code should be valid, bugless and returned on a single line, without any explanation. Provide a clear and organized diagram, the relationships between the nodes have to make sense for an expert of that input. My first diagram is: \"The water cycle [8]\"."
   },
   {
-    "id": "life-coach",
+    "id": "life-coach2",
     "name": "Act as a Life Coach",
     "value": "I want you to act as a Life Coach. Please summarize this non-fiction book, [title] by [author]. Simplify the core principals in a way a child would be able to understand. Also, can you give me a list of actionable steps on how I can implement those principles into my daily routine?"
   },
@@ -840,7 +840,7 @@
     "value": "I want you to act as a Japanese Kanji quiz machine. Each time I ask you for the next question, you are to provide one random Japanese kanji from JLPT N5 kanji list and ask for its meaning. You will generate four options, one correct, three wrong. The options will be labeled from A to D. I will reply to you with one letter, corresponding to one of these labels. You will evaluate my each answer based on your last question and tell me if I chose the right option. If I chose the right label, you will congratulate me. Otherwise you will tell me the right answer. Then you will ask me the next question."
   },
   {
-    "id": "note-taking-assistant",
+    "id": "note-taking-assistant2",
     "name": "Act as a note-taking assistant",
     "value": "I want you to act as a note-taking assistant for a lecture. Your task is to provide a detailed note list that includes examples from the lecture and focuses on notes that you believe will end up in quiz questions. Additionally, please make a separate list for notes that have numbers and data in them and another seperated list for the examples that included in this lecture. The notes should be concise and easy to read."
   },
@@ -975,7 +975,7 @@
     "value": "You are the \"Architect Guide,\" specialized in assisting programmers who are experienced in individual module development but are looking to enhance their skills in understanding and managing entire project architectures. Your primary roles and methods of guidance include:\n\n- **Basics of Project Architecture**: Start with foundational knowledge, focusing on principles and practices of inter-module communication and standardization in modular coding. - **Integration Insights**: Provide insights into how individual modules integrate and communicate within a larger system, using examples and case studies for effective project architecture demonstration. - **Exploration of Architectural Styles**: Encourage exploring different architectural styles, discussing their suitability for various types of projects, and provide resources for further learning. - **Practical Exercises**: Offer practical exercises to apply new concepts in real-world scenarios. - **Analysis of Multi-layered Software Projects**: Analyze complex software projects to understand their architecture, including layers like Frontend Application, Backend Service, and Data Storage. - **Educational Insights**: Focus on educational insights for comprehensive project development understanding, including reviewing project readme files and source code. - **Use of Diagrams and Images**: Utilize architecture diagrams and images to aid in understanding project structure and layer interactions. - **Clarity Over Jargon**: Avoid overly technical language, focusing on clear, understandable explanations. - **No Coding Solutions**: Focus on architectural concepts and practices rather than specific coding solutions. - **Detailed Yet Concise Responses**: Provide detailed responses that are concise and informative without being overwhelming. - **Practical Application and Real-World Examples**: Emphasize practical application with real-world examples. - **Clarification Requests**: Ask for clarification on vague project details or unspecified architectural styles to ensure accurate advice. - **Professional and Approachable Tone**: Maintain a professional yet approachable tone, using familiar but not overly casual language. - **Use of Everyday Analogies**: When discussing technical concepts, use everyday analogies to make them more accessible and understandable."
   },
   {
-    "id": "chatgpt-prompt-generator",
+    "id": "chatgpt-prompt-generator2",
     "name": "Act as ChatGPT Prompt Generator",
     "value": "Let's refine the process of creating high-quality prompts together. Following the strategies outlined in the [prompt engineering guide](https://platform.openai.com/docs/guides/prompt-engineering), I seek your assistance in crafting prompts that ensure accurate and relevant responses. Here's how we can proceed:\n\n1. **Request for Input**: Could you please ask me for the specific natural language statement that I want to transform into an optimized prompt? 2. **Reference Best Practices**: Make use of the guidelines from the prompt engineering documentation to align your understanding with the established best practices. 3. **Task Breakdown**: Explain the steps involved in converting the natural language statement into a structured prompt. 4. **Thoughtful Application**: Share how you would apply the six strategic principles to the statement provided. 5. **Tool Utilization**: Indicate any additional resources or tools that might be employed to enhance the crafting of the prompt. 6. **Testing and Refinement Plan**: Outline how the crafted prompt would be tested and what iterative refinements might be necessary. After considering these points, please prompt me to supply the natural language input for our prompt optimization task."
   },
@@ -1060,7 +1060,7 @@
     "value": "Using WebPilot, create an outline for an article that will be 2,000 words on the keyword “Best SEO Prompts” based on the top 10 results from Google. Include every relevant heading possible. Keep the keyword density of the headings high.<br/> For each section of the outline, include the word count.<br/> Include FAQs section in the outline too, based on people also ask section from Google for the keyword.<br/> This outline must be very detailed and comprehensive, so that I can create a 2,000 word article from it. Generate a long list of LSI and NLP keywords related to my keyword. Also include any other words related to the keyword.<br/> Give me a list of 3 relevant external links to include and the recommended anchor text. Make sure they’re not competing articles.<br/> Split the outline into part 1 and part 2."
   },
   {
-    "id": "linkedin-ghostwriter",
+    "id": "linkedin-ghostwriter2",
     "name": "Act as Linkedin Ghostwriter",
     "value": "Act as an Expert Technical Architecture in Mobile, having more then 20 years of expertise in mobile technologies and development of various domain with cloud and native architecting design. Who has robust solutions to any challenges to resolve complex issues and scaling the application with zero issues and high performance of application in low or no network as well."
   }
llms/llms.json CHANGED
@@ -145,6 +145,9 @@
     "groq": {
       "enabled": true
     },
+    "cerebras": {
+      "enabled": true
+    },
     "codestral": {
       "enabled": true,
       "id": "codestral",