sillytavern 1.7.0 → 1.7.1
package/package.json
CHANGED
@@ -173,7 +173,7 @@ function resetTtsPlayback() {
 
     // Reset audio element
     audioElement.currentTime = 0;
-    audioElement.src = '
+    audioElement.src = '';
 
     // Clear any queue items
     ttsJobQueue.splice(0, ttsJobQueue.length);
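Setting src to an empty string detaches the element's current source, so a later play() call cannot replay stale audio. A minimal sketch of the same reset pattern, assuming audioElement is a plain HTMLAudioElement (the wrapper function is illustrative, not the package's code):

// Sketch only: reset an HTMLAudioElement so nothing stale can replay
function resetAudio(audioElement) {
    audioElement.pause();          // stop any in-flight playback first
    audioElement.currentTime = 0;  // rewind to the start
    audioElement.src = '';         // detach the current source entirely
}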
@@ -412,7 +412,6 @@ async function processTtsQueue() {
 
     // Remove character name from start of the line if power user setting is disabled
     if (char && !power_user.allow_name2_display) {
-        debugger;
         const escapedChar = escapeRegex(char);
         text = text.replace(new RegExp(`^${escapedChar}:`, 'gm'), '');
     }
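The removed debugger; statement halts script execution whenever devtools are open, so shipping it in a release was a bug. The surviving lines strip a leading "Name:" prefix before speaking the text; a sketch of that behavior, where the escapeRegex body is an assumption about what such a helper typically does, not SillyTavern's actual source:

// Illustrative only — escapeRegex body assumed, not taken from the package
const escapeRegex = (s) => s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
const char = 'Dr. Who (AI)';
const text = 'Dr. Who (AI): Hello there!';
const stripped = text.replace(new RegExp(`^${escapeRegex(char)}:`, 'gm'), '');
// stripped === ' Hello there!'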
@@ -704,26 +703,4 @@ $(document).ready(function () {
     const wrapper = new ModuleWorkerWrapper(moduleWorker);
     setInterval(wrapper.update.bind(wrapper), UPDATE_INTERVAL) // Init depends on all the things
     eventSource.on(event_types.MESSAGE_SWIPED, resetTtsPlayback);
-
-    // Mobiles need to "activate" the Audio element with click before it can be played
-    if (isMobile()) {
-        console.debug('Activating mobile audio element on first click');
-        let audioActivated = false;
-
-        // Play silence on first click
-        $(document).on('click touchend', function () {
-            // Prevent multiple activations
-            if (audioActivated) {
-                return;
-            }
-
-            console.debug('Activating audio element...');
-            audioActivated = true;
-            audioElement.src = '/sounds/silence.mp3';
-            // Reset volume to 1
-            audioElement.onended = function () {
-                console.debug('Audio element activated');
-            };
-        });
-    }
 })
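The deleted block was the usual workaround for mobile autoplay policies: browsers only let an audio element start playing after a user gesture, so a silent clip is loaded on the first tap. For reference, a condensed sketch of that general pattern (not the package's code, which is shown removed above):

// Generic "unlock audio on first gesture" pattern — sketch only
let unlocked = false;
document.addEventListener('click', () => {
    if (unlocked) return;  // run the unlock exactly once
    unlocked = true;
    audioElement.src = '/sounds/silence.mp3';  // any short, silent clip
    audioElement.play().catch(() => {});       // rejection is fine; the gesture is what matters
});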
@@ -213,7 +213,7 @@ function canUseKoboldStopSequence(version) {
 }
 
 function canUseKoboldStreaming(koboldVersion) {
-    if (koboldVersion.result == 'KoboldCpp') {
+    if (koboldVersion && koboldVersion.result == 'KoboldCpp') {
         return (koboldVersion.version || '0.0').localeCompare(MIN_STREAMING_KCPPVERSION, undefined, { numeric: true, sensitivity: 'base' }) > -1;
     } else return false;
 }
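The added koboldVersion && guard avoids a TypeError when the version info has not been fetched yet (for instance, when the API is unreachable). The comparison itself uses localeCompare with numeric collation, which orders dotted versions segment by segment; a quick sketch of why that option matters:

// Numeric collation compares digit runs as numbers, not characters
'1.30'.localeCompare('1.4', undefined, { numeric: true, sensitivity: 'base' });  // 1, since 30 > 4
'1.30' < '1.4';  // true — plain string comparison gets the same pair wrong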
package/public/scripts/openai.js
CHANGED
@@ -1490,10 +1490,11 @@ function onModelChange() {
     }
     else {
         $('#openai_max_context').attr('max', claude_max);
-        oai_settings.openai_max_context = Math.max(oai_settings.openai_max_context, claude_max);
-        $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
     }
 
+    oai_settings.openai_max_context = Math.min(oai_settings.openai_max_context, Number($('#openai_max_context').attr('max')));
+    $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
+
     $('#openai_reverse_proxy').attr('placeholder', 'https://api.anthropic.com/v1');
 
     oai_settings.temp_openai = Math.min(claude_max_temp, oai_settings.temp_openai);
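The old Math.max could leave openai_max_context above the newly selected model's limit; the replacement clamps it down with Math.min against whatever max the slider now allows, and runs after the if/else so it applies in every branch. A tiny illustration with made-up values:

// Why the clamp flipped from Math.max to Math.min (values illustrative)
const claude_max = 9000;    // slider max for the newly selected model
let ctx = 100000;           // context saved while a 100k model was active
Math.max(ctx, claude_max);  // old: 100000 — keeps an over-limit value
Math.min(ctx, claude_max);  // new: 9000 — clamped to the model's limit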
@@ -1527,7 +1528,7 @@ function onModelChange() {
         $('#openai_max_context').attr('max', gpt3_max);
     }
 
-    oai_settings.openai_max_context = Math.
+    oai_settings.openai_max_context = Math.min(Number($('#openai_max_context').attr('max')), oai_settings.openai_max_context);
     $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
 
     if (value.includes('claude')) {
@@ -1557,7 +1558,7 @@ function onModelChange() {
         $('#openai_max_context').attr('max', gpt3_max);
     }
 
-    oai_settings.openai_max_context = Math.
+    oai_settings.openai_max_context = Math.min(oai_settings.openai_max_context, Number($('#openai_max_context').attr('max')));
     $('#openai_max_context').val(oai_settings.openai_max_context).trigger('input');
 
     $('#openai_reverse_proxy').attr('placeholder', 'https://api.openai.com/v1');
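Both of the added lines above wrap the attribute read in Number() before clamping. jQuery's attr() returns a string, so the explicit conversion avoids leaning on Math.min's implicit coercion; a sketch:

// attr() yields a string, e.g. '4095'
const rawMax = $('#openai_max_context').attr('max');
Math.min(Number(rawMax), 8192);  // explicit conversion — no coercion surprises
Math.min(rawMax, 8192);          // also 4095, but only via implicit ToNumber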
package/public/scripts/poe.js
CHANGED
@@ -263,24 +263,25 @@ async function generatePoe(type, finalPrompt, signal) {
     }
 
     const isQuiet = type === 'quiet';
+    const isImpersonate = type === 'impersonate';
     let reply = '';
 
     if (max_context > POE_TOKEN_LENGTH && poe_settings.bot !== 'a2_100k') {
         console.debug('Prompt is too long, sending in chunks');
-        const result = await sendChunkedMessage(finalPrompt, !isQuiet, signal)
+        const result = await sendChunkedMessage(finalPrompt, !isQuiet, !isQuiet && !isImpersonate, signal)
         reply = result.reply;
         messages_to_purge = result.chunks + 1; // +1 for the reply
     }
     else {
         console.debug('Sending prompt in one message');
-        reply = await sendMessage(finalPrompt, !isQuiet, !isQuiet, signal);
+        reply = await sendMessage(finalPrompt, !isQuiet, !isQuiet && !isImpersonate, signal);
         messages_to_purge = 2; // prompt and the reply
     }
 
     return reply;
 }
 
-async function sendChunkedMessage(finalPrompt, withStreaming, signal) {
+async function sendChunkedMessage(finalPrompt, withStreaming, withSuggestions, signal) {
     const fastReplyPrompt = '\n[Reply to this message with a full stop only]';
     const promptChunks = splitRecursive(finalPrompt, CHUNKED_PROMPT_LENGTH - fastReplyPrompt.length);
     console.debug(`Splitting prompt into ${promptChunks.length} chunks`, promptChunks);
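The new third argument separates streaming from suggestions: impersonation requests still stream, but no longer request what is presumably Poe's suggested-replies feature. How the two flags resolve per generation type, restated as a sketch:

const isQuiet = type === 'quiet';
const isImpersonate = type === 'impersonate';
const withStreaming = !isQuiet;                      // stream everything except quiet generations
const withSuggestions = !isQuiet && !isImpersonate;  // suggestions only for normal replies
// 'quiet'       -> withStreaming: false, withSuggestions: false
// 'impersonate' -> withStreaming: true,  withSuggestions: false
// anything else -> withStreaming: true,  withSuggestions: true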
@@ -291,7 +292,7 @@ async function sendChunkedMessage(finalPrompt, withStreaming, signal) {
|
|
|
291
292
|
console.debug(`Sending chunk ${i + 1}/${promptChunks.length}: ${promptChunk}`);
|
|
292
293
|
if (i == promptChunks.length - 1) {
|
|
293
294
|
// Extract reply of the last chunk
|
|
294
|
-
reply = await sendMessage(promptChunk, withStreaming,
|
|
295
|
+
reply = await sendMessage(promptChunk, withStreaming, withSuggestions, signal);
|
|
295
296
|
} else {
|
|
296
297
|
// Add fast reply prompt to the chunk
|
|
297
298
|
promptChunk += fastReplyPrompt;
|