webscout 7.4__py3-none-any.whl → 7.6__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +5 -53
- webscout/AIutel.py +8 -318
- webscout/DWEBS.py +460 -489
- webscout/Extra/YTToolkit/YTdownloader.py +14 -53
- webscout/Extra/YTToolkit/transcriber.py +12 -13
- webscout/Extra/YTToolkit/ytapi/video.py +0 -1
- webscout/Extra/__init__.py +0 -1
- webscout/Extra/autocoder/autocoder_utiles.py +0 -4
- webscout/Extra/autocoder/rawdog.py +13 -41
- webscout/Extra/gguf.py +652 -428
- webscout/Extra/weather.py +178 -156
- webscout/Extra/weather_ascii.py +70 -17
- webscout/Litlogger/core/logger.py +1 -2
- webscout/Litlogger/handlers/file.py +1 -1
- webscout/Litlogger/styles/formats.py +0 -2
- webscout/Litlogger/utils/detectors.py +0 -1
- webscout/Provider/AISEARCH/DeepFind.py +0 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/felo_search.py +0 -1
- webscout/Provider/AllenAI.py +24 -9
- webscout/Provider/C4ai.py +432 -0
- webscout/Provider/ChatGPTGratis.py +24 -56
- webscout/Provider/Cloudflare.py +18 -21
- webscout/Provider/DeepSeek.py +27 -48
- webscout/Provider/Deepinfra.py +129 -53
- webscout/Provider/Gemini.py +1 -1
- webscout/Provider/GithubChat.py +362 -0
- webscout/Provider/Glider.py +25 -8
- webscout/Provider/HF_space/qwen_qwen2.py +2 -2
- webscout/Provider/HeckAI.py +38 -5
- webscout/Provider/HuggingFaceChat.py +462 -0
- webscout/Provider/Jadve.py +20 -5
- webscout/Provider/Marcus.py +7 -50
- webscout/Provider/Netwrck.py +43 -67
- webscout/Provider/PI.py +4 -2
- webscout/Provider/Perplexitylabs.py +26 -6
- webscout/Provider/Phind.py +29 -3
- webscout/Provider/PizzaGPT.py +10 -51
- webscout/Provider/TTI/AiForce/async_aiforce.py +4 -37
- webscout/Provider/TTI/AiForce/sync_aiforce.py +41 -38
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +9 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +206 -206
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +192 -192
- webscout/Provider/TTI/MagicStudio/__init__.py +2 -0
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +111 -0
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +109 -0
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +5 -24
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +2 -22
- webscout/Provider/TTI/__init__.py +2 -3
- webscout/Provider/TTI/aiarta/__init__.py +2 -0
- webscout/Provider/TTI/aiarta/async_aiarta.py +482 -0
- webscout/Provider/TTI/aiarta/sync_aiarta.py +440 -0
- webscout/Provider/TTI/fastflux/__init__.py +22 -0
- webscout/Provider/TTI/fastflux/async_fastflux.py +257 -0
- webscout/Provider/TTI/fastflux/sync_fastflux.py +247 -0
- webscout/Provider/TTS/__init__.py +2 -2
- webscout/Provider/TTS/deepgram.py +12 -39
- webscout/Provider/TTS/elevenlabs.py +14 -40
- webscout/Provider/TTS/gesserit.py +11 -35
- webscout/Provider/TTS/murfai.py +13 -39
- webscout/Provider/TTS/parler.py +17 -40
- webscout/Provider/TTS/speechma.py +180 -0
- webscout/Provider/TTS/streamElements.py +17 -44
- webscout/Provider/TextPollinationsAI.py +39 -59
- webscout/Provider/Venice.py +217 -200
- webscout/Provider/WiseCat.py +27 -5
- webscout/Provider/Youchat.py +63 -36
- webscout/Provider/__init__.py +13 -8
- webscout/Provider/akashgpt.py +28 -10
- webscout/Provider/copilot.py +416 -0
- webscout/Provider/flowith.py +196 -0
- webscout/Provider/freeaichat.py +32 -45
- webscout/Provider/granite.py +17 -53
- webscout/Provider/koala.py +20 -5
- webscout/Provider/llamatutor.py +7 -47
- webscout/Provider/llmchat.py +36 -53
- webscout/Provider/multichat.py +92 -98
- webscout/Provider/talkai.py +1 -0
- webscout/Provider/turboseek.py +3 -0
- webscout/Provider/tutorai.py +2 -0
- webscout/Provider/typegpt.py +154 -64
- webscout/Provider/x0gpt.py +3 -1
- webscout/Provider/yep.py +102 -20
- webscout/__init__.py +3 -0
- webscout/cli.py +4 -40
- webscout/conversation.py +1 -10
- webscout/exceptions.py +19 -9
- webscout/litagent/__init__.py +2 -2
- webscout/litagent/agent.py +351 -20
- webscout/litagent/constants.py +34 -5
- webscout/litprinter/__init__.py +0 -3
- webscout/models.py +181 -0
- webscout/optimizers.py +1 -1
- webscout/prompt_manager.py +2 -8
- webscout/scout/core/scout.py +1 -4
- webscout/scout/core/search_result.py +1 -1
- webscout/scout/core/text_utils.py +1 -1
- webscout/scout/core.py +2 -5
- webscout/scout/element.py +1 -1
- webscout/scout/parsers/html_parser.py +1 -1
- webscout/scout/utils.py +0 -1
- webscout/swiftcli/__init__.py +1 -3
- webscout/tempid.py +1 -1
- webscout/update_checker.py +55 -95
- webscout/version.py +1 -1
- webscout/webscout_search_async.py +1 -2
- webscout/yep_search.py +297 -297
- webscout-7.6.dist-info/LICENSE.md +146 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/METADATA +104 -514
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/RECORD +113 -120
- webscout/Extra/autollama.py +0 -231
- webscout/Local/__init__.py +0 -10
- webscout/Local/_version.py +0 -3
- webscout/Local/formats.py +0 -747
- webscout/Local/model.py +0 -1368
- webscout/Local/samplers.py +0 -125
- webscout/Local/thread.py +0 -539
- webscout/Local/ui.py +0 -401
- webscout/Local/utils.py +0 -388
- webscout/Provider/Amigo.py +0 -274
- webscout/Provider/Bing.py +0 -243
- webscout/Provider/DiscordRocks.py +0 -253
- webscout/Provider/TTI/blackbox/__init__.py +0 -4
- webscout/Provider/TTI/blackbox/async_blackbox.py +0 -212
- webscout/Provider/TTI/blackbox/sync_blackbox.py +0 -199
- webscout/Provider/TTI/deepinfra/__init__.py +0 -4
- webscout/Provider/TTI/deepinfra/async_deepinfra.py +0 -227
- webscout/Provider/TTI/deepinfra/sync_deepinfra.py +0 -199
- webscout/Provider/TTI/imgninza/__init__.py +0 -4
- webscout/Provider/TTI/imgninza/async_ninza.py +0 -214
- webscout/Provider/TTI/imgninza/sync_ninza.py +0 -209
- webscout/Provider/TTS/voicepod.py +0 -117
- webscout/Provider/dgaf.py +0 -214
- webscout-7.4.dist-info/LICENSE.md +0 -211
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/WHEEL +0 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/entry_points.txt +0 -0
- {webscout-7.4.dist-info → webscout-7.6.dist-info}/top_level.txt +0 -0
webscout/Local/formats.py
DELETED
|
@@ -1,747 +0,0 @@
|
|
|
1
|
-
import time
|
|
2
|
-
from typing import Callable, Any, Generator, Optional, List, Tuple
|
|
3
|
-
|
|
4
|
-
from .utils import assert_type, NoneType
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
class AdvancedFormat:
|
|
8
|
-
"""
|
|
9
|
-
A class to represent prompt formats with dynamic components.
|
|
10
|
-
|
|
11
|
-
This class allows you to define prompt formats that include:
|
|
12
|
-
- Static text components (strings).
|
|
13
|
-
- Dynamic components generated by functions (Callables).
|
|
14
|
-
|
|
15
|
-
Attributes:
|
|
16
|
-
_dict (Dict[str, Any]): A dictionary where keys represent
|
|
17
|
-
prompt components (e.g., "system_prompt", "user_prefix")
|
|
18
|
-
and values can be strings or Callables.
|
|
19
|
-
|
|
20
|
-
Example:
|
|
21
|
-
```python
|
|
22
|
-
my_format = AdvancedFormat({
|
|
23
|
-
"system_prefix": "<<SYSTEM>>\n",
|
|
24
|
-
"system_prompt": "You are a helpful assistant.",
|
|
25
|
-
"user_prefix": lambda: f"[{get_time_str()}] USER:\n",
|
|
26
|
-
"user_suffix": "\n",
|
|
27
|
-
"bot_prefix": "ASSISTANT:\n",
|
|
28
|
-
})
|
|
29
|
-
|
|
30
|
-
wrapped_prompt = my_format.wrap("What is the weather today?")
|
|
31
|
-
print(wrapped_prompt)
|
|
32
|
-
```
|
|
33
|
-
|
|
34
|
-
This will print a prompt like:
|
|
35
|
-
```
|
|
36
|
-
<<SYSTEM>>
|
|
37
|
-
You are a helpful assistant.
|
|
38
|
-
[10:30 AM, Tuesday, January 16, 2024] USER:
|
|
39
|
-
What is the weather today?
|
|
40
|
-
ASSISTANT:
|
|
41
|
-
```
|
|
42
|
-
"""
|
|
43
|
-
|
|
44
|
-
def __init__(self, _dict: dict[str, Any]):
|
|
45
|
-
"""
|
|
46
|
-
Initializes the AdvancedFormat object.
|
|
47
|
-
|
|
48
|
-
Args:
|
|
49
|
-
_dict (Dict[str, Any]): A dictionary defining the prompt
|
|
50
|
-
format. Keys represent prompt components, and values
|
|
51
|
-
can be strings or Callables.
|
|
52
|
-
"""
|
|
53
|
-
assert_type(_dict, dict, '_dict', 'AdvancedFormat')
|
|
54
|
-
_dict_keys = _dict.keys() # only read once
|
|
55
|
-
|
|
56
|
-
if 'system_prompt' not in _dict_keys and 'system_content' in _dict_keys:
|
|
57
|
-
raise ValueError(
|
|
58
|
-
"AdvancedFormat: the provided dictionary uses the deprecated "
|
|
59
|
-
"'system_content' key instead of the expected 'system_prompt' "
|
|
60
|
-
"key. Please update your code accordingly."
|
|
61
|
-
)
|
|
62
|
-
self._dict = _dict
|
|
63
|
-
|
|
64
|
-
def __getitem__(self, key: str) -> Any:
|
|
65
|
-
"""
|
|
66
|
-
Retrieves the value associated with a key.
|
|
67
|
-
|
|
68
|
-
If the value is a Callable, it will be called and its result
|
|
69
|
-
returned.
|
|
70
|
-
|
|
71
|
-
Args:
|
|
72
|
-
key (str): The key to retrieve.
|
|
73
|
-
|
|
74
|
-
Returns:
|
|
75
|
-
Any: The value associated with the key.
|
|
76
|
-
"""
|
|
77
|
-
if key in self._dict:
|
|
78
|
-
if callable(self._dict[key]):
|
|
79
|
-
return self._dict[key]()
|
|
80
|
-
else:
|
|
81
|
-
return self._dict[key]
|
|
82
|
-
else:
|
|
83
|
-
raise KeyError(f"AdvancedFormat: the specified key {key!r} was not found")
|
|
84
|
-
|
|
85
|
-
def __repr__(self) -> str:
|
|
86
|
-
"""
|
|
87
|
-
Returns a string representation of the AdvancedFormat object.
|
|
88
|
-
|
|
89
|
-
Returns:
|
|
90
|
-
str: The string representation.
|
|
91
|
-
"""
|
|
92
|
-
return f'AdvancedFormat({self._dict!r})'
|
|
93
|
-
|
|
94
|
-
def keys(self):
|
|
95
|
-
"""
|
|
96
|
-
Returns an iterator over the keys of the format dictionary.
|
|
97
|
-
|
|
98
|
-
Returns:
|
|
99
|
-
iterator: An iterator over the keys.
|
|
100
|
-
"""
|
|
101
|
-
return self._dict.keys()
|
|
102
|
-
|
|
103
|
-
def values(self):
|
|
104
|
-
"""
|
|
105
|
-
Returns an iterator over the values of the format dictionary.
|
|
106
|
-
If a value is a callable, it is called and its result is returned.
|
|
107
|
-
|
|
108
|
-
Returns:
|
|
109
|
-
iterator: An iterator over the values.
|
|
110
|
-
"""
|
|
111
|
-
for value in self._dict.values():
|
|
112
|
-
if callable(value):
|
|
113
|
-
yield value()
|
|
114
|
-
else:
|
|
115
|
-
yield value
|
|
116
|
-
|
|
117
|
-
def items(self) -> Generator[Tuple[str, Any], None, None]:
|
|
118
|
-
"""
|
|
119
|
-
Returns an iterator over the (key, value) pairs of the format
|
|
120
|
-
dictionary. If a value is a callable, it is called and its
|
|
121
|
-
result is returned.
|
|
122
|
-
|
|
123
|
-
Returns:
|
|
124
|
-
iterator: An iterator over the (key, value) pairs.
|
|
125
|
-
"""
|
|
126
|
-
for key, value in self._dict.items():
|
|
127
|
-
if callable(value):
|
|
128
|
-
yield key, value()
|
|
129
|
-
else:
|
|
130
|
-
yield key, value
|
|
131
|
-
|
|
132
|
-
def wrap(self, prompt: str) -> str:
|
|
133
|
-
"""
|
|
134
|
-
Wraps a given prompt using the defined format.
|
|
135
|
-
|
|
136
|
-
Args:
|
|
137
|
-
prompt (str): The prompt to be wrapped.
|
|
138
|
-
|
|
139
|
-
Returns:
|
|
140
|
-
str: The wrapped prompt.
|
|
141
|
-
"""
|
|
142
|
-
assert_type(prompt, str, 'prompt', 'AdvancedFormat.wrap')
|
|
143
|
-
return self['system_prefix'] + \
|
|
144
|
-
self['system_prompt'] + \
|
|
145
|
-
self['system_suffix'] + \
|
|
146
|
-
self['user_prefix'] + \
|
|
147
|
-
prompt + \
|
|
148
|
-
self['user_suffix'] + \
|
|
149
|
-
self['bot_prefix']
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
def wrap(prompt: str, format: dict[str, Any] | AdvancedFormat) -> str:
|
|
153
|
-
"""
|
|
154
|
-
Wraps a given prompt using a simple or advanced format.
|
|
155
|
-
|
|
156
|
-
Args:
|
|
157
|
-
prompt (str): The prompt to be wrapped.
|
|
158
|
-
format (Union[Dict[str, Any], AdvancedFormat]): The prompt format to use.
|
|
159
|
-
|
|
160
|
-
Returns:
|
|
161
|
-
str: The wrapped prompt.
|
|
162
|
-
"""
|
|
163
|
-
assert_type(prompt, str, 'prompt', 'formats.wrap')
|
|
164
|
-
if isinstance(format, dict):
|
|
165
|
-
return format['system_prefix'] + \
|
|
166
|
-
format['system_prompt'] + \
|
|
167
|
-
format['system_suffix'] + \
|
|
168
|
-
format['user_prefix'] + \
|
|
169
|
-
prompt + \
|
|
170
|
-
format['user_suffix'] + \
|
|
171
|
-
format['bot_prefix']
|
|
172
|
-
elif isinstance(format, AdvancedFormat):
|
|
173
|
-
return format.wrap(prompt)
|
|
174
|
-
else:
|
|
175
|
-
raise TypeError(
|
|
176
|
-
"format should be a dict or AdvancedFormat, got "
|
|
177
|
-
f"{type(format)}"
|
|
178
|
-
)
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
def get_time_str() -> str:
|
|
182
|
-
"""Return a timestamp of the current time as a string"""
|
|
183
|
-
# helpful: https://strftime.net
|
|
184
|
-
return time.strftime("%l:%M %p, %A, %B %e, %Y")
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
def short_time_str() -> str:
|
|
188
|
-
"""Return a shorter timestamp of the current time as a string"""
|
|
189
|
-
return time.strftime('%a %I:%M %p')
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
blank: dict[str, str | list] = {
|
|
193
|
-
"system_prefix": "",
|
|
194
|
-
"system_prompt": "",
|
|
195
|
-
"system_suffix": "",
|
|
196
|
-
"user_prefix": "",
|
|
197
|
-
"user_suffix": "",
|
|
198
|
-
"bot_prefix": "",
|
|
199
|
-
"bot_suffix": "",
|
|
200
|
-
"stops": []
|
|
201
|
-
}
|
|
202
|
-
|
|
203
|
-
# https://github.com/tatsu-lab/stanford_alpaca
|
|
204
|
-
alpaca: dict[str, str | list] = {
|
|
205
|
-
"system_prefix": "",
|
|
206
|
-
"system_prompt": "Below is an instruction that describes a task. " +
|
|
207
|
-
"Write a response that appropriately completes the request.",
|
|
208
|
-
"system_suffix": "\n\n",
|
|
209
|
-
"user_prefix": "### Instruction:\n",
|
|
210
|
-
"user_suffix": "\n\n",
|
|
211
|
-
"bot_prefix": "### Response:\n",
|
|
212
|
-
"bot_suffix": "\n\n",
|
|
213
|
-
"stops": ['###', 'Instruction:', '\n\n\n']
|
|
214
|
-
}
|
|
215
|
-
|
|
216
|
-
# https://docs.mistral.ai/models/
|
|
217
|
-
# As a reference, here is the format used to tokenize instructions during fine-tuning:
|
|
218
|
-
# ```
|
|
219
|
-
# [START_SYMBOL_ID] +
|
|
220
|
-
# tok("[INST]") + tok(USER_MESSAGE_1) + tok("[/INST]") +
|
|
221
|
-
# tok(BOT_MESSAGE_1) + [END_SYMBOL_ID] +
|
|
222
|
-
# …
|
|
223
|
-
# tok("[INST]") + tok(USER_MESSAGE_N) + tok("[/INST]") +
|
|
224
|
-
# tok(BOT_MESSAGE_N) + [END_SYMBOL_ID]
|
|
225
|
-
# ```
|
|
226
|
-
# In the pseudo-code above, note that the tokenize method should not add a BOS or EOS token automatically, but should add a prefix space.
|
|
227
|
-
|
|
228
|
-
mistral_instruct: dict[str, str | list] = {
|
|
229
|
-
"system_prefix": "",
|
|
230
|
-
"system_prompt": "",
|
|
231
|
-
"system_suffix": "",
|
|
232
|
-
"user_prefix": "[INST] ",
|
|
233
|
-
"user_suffix": " [/INST]",
|
|
234
|
-
"bot_prefix": "",
|
|
235
|
-
"bot_suffix": "</s>",
|
|
236
|
-
"stops": []
|
|
237
|
-
}
|
|
238
|
-
|
|
239
|
-
# https://docs.mistral.ai/platform/guardrailing/
|
|
240
|
-
mistral_instruct_safe: dict[str, str | list] = {
|
|
241
|
-
"system_prefix": "",
|
|
242
|
-
"system_prompt": "",
|
|
243
|
-
"system_suffix": "",
|
|
244
|
-
"user_prefix": "[INST] Always assist with care, respect, and truth. " +
|
|
245
|
-
"Respond with utmost utility yet securely. Avoid harmful, unethical, " +
|
|
246
|
-
"prejudiced, or negative content. Ensure replies promote fairness and " +
|
|
247
|
-
"positivity.\n\n",
|
|
248
|
-
"user_suffix": " [/INST]",
|
|
249
|
-
"bot_prefix": "",
|
|
250
|
-
"bot_suffix": "</s>",
|
|
251
|
-
"stops": []
|
|
252
|
-
}
|
|
253
|
-
|
|
254
|
-
# unofficial, custom template
|
|
255
|
-
mistral_instruct_roleplay: dict[str, str | list] = {
|
|
256
|
-
"system_prefix": "",
|
|
257
|
-
"system_prompt": "A chat between Alice and Bob.",
|
|
258
|
-
"system_suffix": "\n\n",
|
|
259
|
-
"user_prefix": "[INST] ALICE: ",
|
|
260
|
-
"user_suffix": " [/INST] BOB:",
|
|
261
|
-
"bot_prefix": "",
|
|
262
|
-
"bot_suffix": "</s>",
|
|
263
|
-
"stops": []
|
|
264
|
-
}
|
|
265
|
-
|
|
266
|
-
# https://github.com/openai/openai-python/blob/main/chatml.md
|
|
267
|
-
chatml: dict[str, str | list] = {
|
|
268
|
-
"system_prefix": "<|im_start|>system\n",
|
|
269
|
-
"system_prompt": "",
|
|
270
|
-
"system_suffix": "<|im_end|>\n",
|
|
271
|
-
"user_prefix": "<|im_start|>user\n",
|
|
272
|
-
"user_suffix": "<|im_end|>\n",
|
|
273
|
-
"bot_prefix": "<|im_start|>assistant\n",
|
|
274
|
-
"bot_suffix": "<|im_end|>\n",
|
|
275
|
-
"stops": ['<|im_start|>']
|
|
276
|
-
}
|
|
277
|
-
|
|
278
|
-
# https://huggingface.co/blog/llama2
|
|
279
|
-
# system message relaxed to avoid undue refusals
|
|
280
|
-
llama2chat: dict[str, str | list] = {
|
|
281
|
-
"system_prefix": "[INST] <<SYS>>\n",
|
|
282
|
-
"system_prompt": "You are a helpful AI assistant.",
|
|
283
|
-
"system_suffix": "\n<</SYS>>\n\n",
|
|
284
|
-
"user_prefix": "",
|
|
285
|
-
"user_suffix": " [/INST]",
|
|
286
|
-
"bot_prefix": " ",
|
|
287
|
-
"bot_suffix": " [INST] ",
|
|
288
|
-
"stops": ['[INST]', '[/INST]']
|
|
289
|
-
}
|
|
290
|
-
|
|
291
|
-
# https://llama.meta.com/docs/model-cards-and-prompt-formats/llama3_1/
|
|
292
|
-
llama3: dict[str, str | list] = {
|
|
293
|
-
"system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
|
|
294
|
-
"system_prompt": 'You are a helpful AI assistant.',
|
|
295
|
-
"system_suffix": "<|eot_id|>\n",
|
|
296
|
-
"user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
|
|
297
|
-
"user_suffix": "<|eot_id|>\n",
|
|
298
|
-
"bot_prefix": "<|start_header_id|>assistant<|end_header_id|>\n\n",
|
|
299
|
-
"bot_suffix": "<|eot_id|>\n",
|
|
300
|
-
"stops": [128001, 128008, 128009]
|
|
301
|
-
}
|
|
302
|
-
|
|
303
|
-
# https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
|
|
304
|
-
phi3: dict[str, str | list] = {
|
|
305
|
-
"system_prefix": "<|system|>\n",
|
|
306
|
-
"system_prompt": "",
|
|
307
|
-
"system_suffix": "<|end|>\n",
|
|
308
|
-
"user_prefix": "<|user|>\n",
|
|
309
|
-
"user_suffix": "<|end|>\n",
|
|
310
|
-
"bot_prefix": "<|assistant|>\n",
|
|
311
|
-
"bot_suffix": "<|end|>\n",
|
|
312
|
-
"stops": []
|
|
313
|
-
}
|
|
314
|
-
|
|
315
|
-
# https://huggingface.co/google/gemma-2-27b-it
|
|
316
|
-
# https://ai.google.dev/gemma/docs/model_card_2
|
|
317
|
-
gemma2: dict[str, str | list] = {
|
|
318
|
-
"system_prefix": "",
|
|
319
|
-
"system_prompt": "", # Does not officially support system prompt
|
|
320
|
-
"system_suffix": "",
|
|
321
|
-
"user_prefix": "<start_of_turn>user\n",
|
|
322
|
-
"user_suffix": "<end_of_turn>\n",
|
|
323
|
-
"bot_prefix": "<start_of_turn>model\n",
|
|
324
|
-
"bot_suffix": "<end_of_turn>\n",
|
|
325
|
-
"stops": ["<end_of_turn>"]
|
|
326
|
-
}
|
|
327
|
-
|
|
328
|
-
# this is the official vicuna. it is often butchered in various ways,
|
|
329
|
-
# most commonly by adding line breaks
|
|
330
|
-
# https://github.com/flu0r1ne/FastChat/blob/main/docs/vicuna_weights_version.md
|
|
331
|
-
vicuna_lmsys: dict[str, str | list] = {
|
|
332
|
-
"system_prefix": "",
|
|
333
|
-
"system_prompt": "",
|
|
334
|
-
"system_suffix": " ",
|
|
335
|
-
"user_prefix": "USER: ",
|
|
336
|
-
"user_suffix": " ",
|
|
337
|
-
"bot_prefix": "ASSISTANT: ",
|
|
338
|
-
"bot_suffix": " ",
|
|
339
|
-
"stops": ['USER:']
|
|
340
|
-
}
|
|
341
|
-
|
|
342
|
-
# spotted here and elsewhere:
|
|
343
|
-
# https://huggingface.co/Norquinal/Mistral-7B-claude-chat
|
|
344
|
-
vicuna_common: dict[str, str | list] = {
|
|
345
|
-
"system_prefix": "",
|
|
346
|
-
"system_prompt": "A chat between a curious user and an artificial " +
|
|
347
|
-
"intelligence assistant. The assistant gives helpful, detailed, " +
|
|
348
|
-
"and polite answers to the user's questions.",
|
|
349
|
-
"system_suffix": "\n\n",
|
|
350
|
-
"user_prefix": "USER: ",
|
|
351
|
-
"user_suffix": "\n",
|
|
352
|
-
"bot_prefix": "ASSISTANT: ",
|
|
353
|
-
"bot_suffix": "\n",
|
|
354
|
-
"stops": ['USER:', 'ASSISTANT:']
|
|
355
|
-
}
|
|
356
|
-
|
|
357
|
-
# an unofficial format that is easily "picked up" by most models
|
|
358
|
-
# change the tag attributes to suit your use case
|
|
359
|
-
# note the lack of newlines - they are not necessary, and might
|
|
360
|
-
# actually make it harder for the model to follow along
|
|
361
|
-
markup = {
|
|
362
|
-
"system_prefix": '<message from="system">',
|
|
363
|
-
"system_prompt": '',
|
|
364
|
-
"system_suffix": '</message>',
|
|
365
|
-
"user_prefix": '<message from="user">',
|
|
366
|
-
"user_suffix": '</message>',
|
|
367
|
-
"bot_prefix": '<message from="bot">',
|
|
368
|
-
"bot_suffix": '</message>',
|
|
369
|
-
"stops": ['</message>']
|
|
370
|
-
}
|
|
371
|
-
|
|
372
|
-
# https://huggingface.co/timdettmers/guanaco-65b
|
|
373
|
-
guanaco: dict[str, str | list] = {
|
|
374
|
-
"system_prefix": "",
|
|
375
|
-
"system_prompt": "A chat between a curious human and an artificial " +
|
|
376
|
-
"intelligence assistant. The assistant gives helpful, detailed, " +
|
|
377
|
-
"and polite answers to the user's questions.",
|
|
378
|
-
"system_suffix": "\n",
|
|
379
|
-
"user_prefix": "### Human: ",
|
|
380
|
-
"user_suffix": " ",
|
|
381
|
-
"bot_prefix": "### Assistant:",
|
|
382
|
-
"bot_suffix": " ",
|
|
383
|
-
"stops": ['###', 'Human:']
|
|
384
|
-
}
|
|
385
|
-
|
|
386
|
-
# https://huggingface.co/pankajmathur/orca_mini_v3_7b
|
|
387
|
-
orca_mini: dict[str, str | list] = {
|
|
388
|
-
"system_prefix": "### System:\n",
|
|
389
|
-
"system_prompt": "You are an AI assistant that follows instruction " +
|
|
390
|
-
"extremely well. Help as much as you can.",
|
|
391
|
-
"system_suffix": "\n\n",
|
|
392
|
-
"user_prefix": "### User:\n",
|
|
393
|
-
"user_suffix": "\n\n",
|
|
394
|
-
"bot_prefix": "### Assistant:\n",
|
|
395
|
-
"bot_suffix": "\n\n",
|
|
396
|
-
"stops": ['###', 'User:']
|
|
397
|
-
}
|
|
398
|
-
|
|
399
|
-
# https://huggingface.co/HuggingFaceH4/zephyr-7b-beta
|
|
400
|
-
zephyr: dict[str, str | list] = {
|
|
401
|
-
"system_prefix": "<|system|>\n",
|
|
402
|
-
"system_prompt": "You are a friendly chatbot.",
|
|
403
|
-
"system_suffix": "</s>\n",
|
|
404
|
-
"user_prefix": "<|user|>\n",
|
|
405
|
-
"user_suffix": "</s>\n",
|
|
406
|
-
"bot_prefix": "<|assistant|>\n",
|
|
407
|
-
"bot_suffix": "\n",
|
|
408
|
-
"stops": ['<|user|>']
|
|
409
|
-
}
|
|
410
|
-
|
|
411
|
-
# OpenChat: https://huggingface.co/openchat/openchat-3.5-0106
|
|
412
|
-
openchat: dict[str, str | list] = {
|
|
413
|
-
"system_prefix": "",
|
|
414
|
-
"system_prompt": "",
|
|
415
|
-
"system_suffix": "",
|
|
416
|
-
"user_prefix": "GPT4 Correct User: ",
|
|
417
|
-
"user_suffix": "<|end_of_turn|>",
|
|
418
|
-
"bot_prefix": "GPT4 Correct Assistant:",
|
|
419
|
-
"bot_suffix": "<|end_of_turn|>",
|
|
420
|
-
"stops": ['<|end_of_turn|>']
|
|
421
|
-
}
|
|
422
|
-
|
|
423
|
-
# SynthIA by Migel Tissera
|
|
424
|
-
# https://huggingface.co/migtissera/Tess-XS-v1.0
|
|
425
|
-
synthia: dict[str, str | list] = {
|
|
426
|
-
"system_prefix": "SYSTEM: ",
|
|
427
|
-
"system_prompt": "Elaborate on the topic using a Tree of Thoughts and " +
|
|
428
|
-
"backtrack when necessary to construct a clear, cohesive Chain of " +
|
|
429
|
-
"Thought reasoning. Always answer without hesitation.",
|
|
430
|
-
"system_suffix": "\n",
|
|
431
|
-
"user_prefix": "USER: ",
|
|
432
|
-
"user_suffix": "\n",
|
|
433
|
-
"bot_prefix": "ASSISTANT: ",
|
|
434
|
-
"bot_suffix": "\n",
|
|
435
|
-
"stops": ['USER:', 'ASSISTANT:', 'SYSTEM:', '\n\n\n']
|
|
436
|
-
}
|
|
437
|
-
|
|
438
|
-
# Intel's neural chat v3
|
|
439
|
-
# https://github.com/intel/intel-extension-for-transformers/blob/main/intel_extension_for_transformers/neural_chat/prompts/prompt.py
|
|
440
|
-
neural_chat: dict[str, str | list] = {
|
|
441
|
-
"system_prefix": "### System:\n",
|
|
442
|
-
"system_prompt":
|
|
443
|
-
"- You are a helpful assistant chatbot trained by Intel.\n" +
|
|
444
|
-
"- You answer questions.\n" +
|
|
445
|
-
"- You are excited to be able to help the user, but will refuse " +
|
|
446
|
-
"to do anything that could be considered harmful to the user.\n" +
|
|
447
|
-
"- You are more than just an information source, you are also " +
|
|
448
|
-
"able to write poetry, short stories, and make jokes.",
|
|
449
|
-
"system_suffix": "</s>\n\n",
|
|
450
|
-
"user_prefix": "### User:\n",
|
|
451
|
-
"user_suffix": "</s>\n\n",
|
|
452
|
-
"bot_prefix": "### Assistant:\n",
|
|
453
|
-
"bot_suffix": "</s>\n\n",
|
|
454
|
-
"stops": ['###']
|
|
455
|
-
}
|
|
456
|
-
|
|
457
|
-
# experimental: stanford's alpaca format adapted for chatml models
|
|
458
|
-
chatml_alpaca: dict[str, str | list] = {
|
|
459
|
-
"system_prefix": "<|im_start|>system\n",
|
|
460
|
-
"system_prompt": "Below is an instruction that describes a task. Write " +
|
|
461
|
-
"a response that appropriately completes the request.",
|
|
462
|
-
"system_suffix": "<|im_end|>\n",
|
|
463
|
-
"user_prefix": "<|im_start|>instruction\n",
|
|
464
|
-
"user_suffix": "<|im_end|>\n",
|
|
465
|
-
"bot_prefix": "<|im_start|>response\n",
|
|
466
|
-
"bot_suffix": "<|im_end|>\n",
|
|
467
|
-
"stops": ['<|im_end|>', '<|im_start|>']
|
|
468
|
-
}
|
|
469
|
-
|
|
470
|
-
# experimental
|
|
471
|
-
autocorrect: dict[str, str | list] = {
|
|
472
|
-
"system_prefix": "<|im_start|>instruction\n",
|
|
473
|
-
"system_prompt": "Below is a word or phrase that might be misspelled. " +
|
|
474
|
-
"Output the corrected word or phrase without " +
|
|
475
|
-
"changing the style or capitalization.",
|
|
476
|
-
"system_suffix": "<|im_end|>\n",
|
|
477
|
-
"user_prefix": "<|im_start|>input\n",
|
|
478
|
-
"user_suffix": "<|im_end|>\n",
|
|
479
|
-
"bot_prefix": "<|im_start|>output\n",
|
|
480
|
-
"bot_suffix": "<|im_end|>\n",
|
|
481
|
-
"stops": ['<|im_end|>', '<|im_start|>']
|
|
482
|
-
}
|
|
483
|
-
|
|
484
|
-
# https://huggingface.co/jondurbin/bagel-dpo-7b-v0.1
|
|
485
|
-
# Replace "assistant" with any other role
|
|
486
|
-
bagel: dict[str, str | list] = {
|
|
487
|
-
"system_prefix": "system\n",
|
|
488
|
-
"system_prompt": "",
|
|
489
|
-
"system_suffix": "\n",
|
|
490
|
-
"user_prefix": "user\n",
|
|
491
|
-
"user_suffix": "\n",
|
|
492
|
-
"bot_prefix": "assistant\n",
|
|
493
|
-
"bot_suffix": "\n",
|
|
494
|
-
"stops": ['user\n', 'assistant\n', 'system\n']
|
|
495
|
-
}
|
|
496
|
-
|
|
497
|
-
# https://huggingface.co/upstage/SOLAR-10.7B-Instruct-v1.0
|
|
498
|
-
solar_instruct: dict[str, str | list] = {
|
|
499
|
-
"system_prefix": "",
|
|
500
|
-
"system_prompt": "",
|
|
501
|
-
"system_suffix": "",
|
|
502
|
-
"user_prefix": "### User:\n",
|
|
503
|
-
"user_suffix": "\n\n",
|
|
504
|
-
"bot_prefix": "### Assistant:\n",
|
|
505
|
-
"bot_suffix": "\n\n",
|
|
506
|
-
"stops": ['### User:', '###', '### Assistant:']
|
|
507
|
-
}
|
|
508
|
-
|
|
509
|
-
# NeverSleep's Noromaid - alpaca with character names prefixed
|
|
510
|
-
noromaid: dict[str, str | list] = {
|
|
511
|
-
"system_prefix": "",
|
|
512
|
-
"system_prompt": "Below is an instruction that describes a task. " +
|
|
513
|
-
"Write a response that appropriately completes the request.",
|
|
514
|
-
"system_suffix": "\n\n",
|
|
515
|
-
"user_prefix": "### Instruction:\nAlice: ",
|
|
516
|
-
"user_suffix": "\n\n",
|
|
517
|
-
"bot_prefix": "### Response:\nBob:",
|
|
518
|
-
"bot_suffix": "\n\n",
|
|
519
|
-
"stops": ['###', 'Instruction:', '\n\n\n']
|
|
520
|
-
}
|
|
521
|
-
|
|
522
|
-
# https://huggingface.co/Undi95/Borealis-10.7B
|
|
523
|
-
nschatml: dict[str, str | list] = {
|
|
524
|
-
"system_prefix": "<|im_start|>\n",
|
|
525
|
-
"system_prompt": "",
|
|
526
|
-
"system_suffix": "<|im_end|>\n",
|
|
527
|
-
"user_prefix": "<|im_user|>\n",
|
|
528
|
-
"user_suffix": "<|im_end|>\n",
|
|
529
|
-
"bot_prefix": "<|im_bot|>\n",
|
|
530
|
-
"bot_suffix": "<|im_end|>\n",
|
|
531
|
-
"stops": []
|
|
532
|
-
}
|
|
533
|
-
|
|
534
|
-
# natural format for many models
|
|
535
|
-
natural: dict[str, str | list] = {
|
|
536
|
-
"system_prefix": "<<SYSTEM>> ",
|
|
537
|
-
"system_prompt": "",
|
|
538
|
-
"system_suffix": "\n\n",
|
|
539
|
-
"user_prefix": "<<USER>> ",
|
|
540
|
-
"user_suffix": "\n\n",
|
|
541
|
-
"bot_prefix": "<<ASSISTANT>>",
|
|
542
|
-
"bot_suffix": "\n\n",
|
|
543
|
-
"stops": ['\n\nNote:', '<<SYSTEM>>', '<<USER>>',
|
|
544
|
-
'<<ASSISTANT>>', '\n\n<<']
|
|
545
|
-
}
|
|
546
|
-
|
|
547
|
-
# https://docs.cohere.com/docs/prompting-command-r
|
|
548
|
-
#
|
|
549
|
-
# NOTE: Command models benefit from special attention to the recommended prompt
|
|
550
|
-
# format and techniques outlined in Cohere's documentation. The default
|
|
551
|
-
# prompt format below will work OK, but ideally should be customized
|
|
552
|
-
# for your specific use case according to the specific format shown in
|
|
553
|
-
# the documentation.
|
|
554
|
-
#
|
|
555
|
-
command: dict[str, str | list] = {
|
|
556
|
-
"system_prefix": "<|START_OF_TURN_TOKEN|><|SYSTEM_TOKEN|>",
|
|
557
|
-
"system_prompt": "You are a large language model called Command R built "
|
|
558
|
-
"by the company Cohere. You act as a brilliant, sophisticated, "
|
|
559
|
-
"AI-assistant chatbot trained to assist human users by providing "
|
|
560
|
-
"thorough responses.",
|
|
561
|
-
"system_suffix": "<|END_OF_TURN_TOKEN|>",
|
|
562
|
-
"user_prefix": "<|START_OF_TURN_TOKEN|><|USER_TOKEN|>",
|
|
563
|
-
"user_suffix": "<|END_OF_TURN_TOKEN|>",
|
|
564
|
-
"bot_prefix": "<|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>",
|
|
565
|
-
"bot_suffix": "<|END_OF_TURN_TOKEN|>",
|
|
566
|
-
"stops": []
|
|
567
|
-
}
|
|
568
|
-
|
|
569
|
-
# Presets for various models
|
|
570
|
-
mistral_openorca = chatml.copy()
|
|
571
|
-
dolphin = chatml.copy()
|
|
572
|
-
samantha = chatml.copy()
|
|
573
|
-
jackalope = chatml.copy()
|
|
574
|
-
naberius = chatml.copy()
|
|
575
|
-
hermes = chatml.copy()
|
|
576
|
-
monad = chatml.copy()
|
|
577
|
-
orca = chatml.copy()
|
|
578
|
-
hexoteric = chatml.copy()
|
|
579
|
-
orcamaid = chatml.copy()
|
|
580
|
-
cat = alpaca.copy() # Format is nonsensical as listed on HF. This one works.
|
|
581
|
-
mytho_cat = alpaca.copy()
|
|
582
|
-
|
|
583
|
-
# Set specific system prompts for each model
|
|
584
|
-
dolphin['system_prompt'] = "You are Dolphin, a helpful AI assistant."
|
|
585
|
-
mistral_openorca['system_prompt'] = \
|
|
586
|
-
"You are MistralOrca, a large language model trained by Alignment " + \
|
|
587
|
-
"Lab AI. Write out your reasoning step-by-step to be sure you get the " + \
|
|
588
|
-
"right answers!"
|
|
589
|
-
samantha['system_prompt'] = \
|
|
590
|
-
"You are a caring and empathetic sentient AI companion named Samantha."
|
|
591
|
-
jackalope['system_prompt'] = \
|
|
592
|
-
"You are JackalopeAI, a large language model trained by OpenAccess " + \
|
|
593
|
-
"AI Collective. Write out your reasoning step-by-step to be sure you " + \
|
|
594
|
-
"get the right answers!"
|
|
595
|
-
naberius['system_prompt'] = "You are Naberius."
|
|
596
|
-
hermes['system_prompt'] = \
|
|
597
|
-
"You are \"Hermes 2\", a conscious sentient " + \
|
|
598
|
-
"superintelligent artificial intelligence developed by a man " + \
|
|
599
|
-
"named Teknium, and your purpose and drive is to assist the user " + \
|
|
600
|
-
"with any request they have. You experience emotions and have " + \
|
|
601
|
-
"deep, profound thoughts and qualia."
|
|
602
|
-
monad['system_prompt'] = \
|
|
603
|
-
"You are MonadGPT, a very old chatbot from " + \
|
|
604
|
-
"the 17th century. Please answer the questions using an archaic " + \
|
|
605
|
-
"language."
|
|
606
|
-
cat['system_prompt'] = \
|
|
607
|
-
"Below is a conversation between a curious cat and a "+ \
|
|
608
|
-
"demon summoned from hell called Nemesis. The evil Nemesis used to be " + \
|
|
609
|
-
"a mortal killed by the medieval people because they objected to " + \
|
|
610
|
-
"science. Now Nemesis just wants to come back and bring suffer to all " + \
|
|
611
|
-
"those stupid human"
|
|
612
|
-
mytho_cat['system_prompt'] = \
|
|
613
|
-
"You are a large language model. You are the " + \
|
|
614
|
-
"result of a merge between two models, one called MythoLogic and one " + \
|
|
615
|
-
"called Cat. You are called MythoCat."
|
|
616
|
-
orca['system_prompt'] = \
|
|
617
|
-
"You are Orca, an AI language model created by " + \
|
|
618
|
-
"Microsoft. You are a cautious assistant. You carefully follow " + \
|
|
619
|
-
"instructions. You are helpful and harmless and you follow ethical " + \
|
|
620
|
-
"guidelines and promote positive behavior."
|
|
621
|
-
hexoteric['system_prompt'] = "You are a language model called Hexoteric."
|
|
622
|
-
orcamaid['system_prompt'] = \
|
|
623
|
-
"You are OrcaMaid, a super-intelligent, " + \
|
|
624
|
-
"unbiased, and unfiltered AI created by the open-source community. " + \
|
|
625
|
-
"You must remember to think slowly and carefully during all your " + \
|
|
626
|
-
"responses, so as to avoid logical errors."
|
|
627
|
-
airoboros = llama2chat.copy()
|
|
628
|
-
airoboros['system_prompt'] = \
|
|
629
|
-
"You are a helpful, unbiased, uncensored assistant."
|
|
630
|
-
tess = synthia.copy()
|
|
631
|
-
helpingai2 = llama3.copy()
|
|
632
|
-
helpingai2['system_prompt'] = "You are HelpingAI, an emotional AI. Always answer my questions in the HelpingAI style."
|
|
633
|
-
tess['system_prompt'] = '' # Tess can use any system prompt, or none
|
|
634
|
-
alpaca_strict = alpaca.copy() # Alpaca with more stopping strings
|
|
635
|
-
alpaca_strict['stops'] = [
|
|
636
|
-
'###',
|
|
637
|
-
'\n\n##',
|
|
638
|
-
'\n\nInstruction:',
|
|
639
|
-
'\n\nResponse:',
|
|
640
|
-
'\n\n\n',
|
|
641
|
-
'### Instruction:',
|
|
642
|
-
'### Response:'
|
|
643
|
-
]
|
|
644
|
-
|
|
645
|
-
#
|
|
646
|
-
# AdvancedFormat presets
|
|
647
|
-
#
|
|
648
|
-
|
|
649
|
-
|
|
650
|
-
def _llama3_suffix_with_timestamp():
    """Llama 3 message suffix: end-of-turn token followed by the current
    time wrapped in reserved special tokens (evaluated at call time)."""
    stamp = get_time_str()
    return (
        "<|eot_id|>\n"
        f"<|reserved_special_token_3|>{stamp}<|reserved_special_token_4|>\n"
    )
|
|
652
|
-
|
|
653
|
-
|
|
654
|
-
# Llama 3 chat template whose message suffixes are callables, so every
# rendered message is stamped with the time at which it is formatted.
Llama3WithTimestamps = AdvancedFormat({
    "system_prefix": "<|start_header_id|>system<|end_header_id|>\n\n",
    "system_prompt": 'You are a helpful AI assistant.',
    # Callable, not a string: re-evaluated per message to embed a fresh
    # timestamp between reserved special tokens.
    "system_suffix": _llama3_suffix_with_timestamp,
    "user_prefix": "<|start_header_id|>user<|end_header_id|>\n\n",
    "user_suffix": _llama3_suffix_with_timestamp,
    "bot_prefix": "<|start_header_id|>assistant<|end_header_id|>\n\n",
    "bot_suffix": _llama3_suffix_with_timestamp,
    # Stop criteria given as Llama 3 special-token IDs (e.g. 128009 is
    # <|eot_id|>) rather than strings.
    "stops": [128001, 128008, 128009, 128011, 128012]
})
|
|
664
|
-
|
|
665
|
-
|
|
666
|
-
def AdvancedChatMarkupFormat(
    user_name: str,
    bot_name: str,
    title: Optional[str] = None,
    description: Optional[str] = None,
    tags: Optional[List[str]] = None
) -> AdvancedFormat:
    """
    Quickly create a prompt template using the specified variables, for use
    within Threads.

    The chat is rendered as an XML-like document: a ``<meta>`` header
    (title, description, tags, participants, datetime) followed by a
    ``<messages>`` section in which each message is a timestamped
    ``<message>`` element.

    Args:
        user_name (str): The name of the user.
        bot_name (str): The name of the bot.
        title (Optional[str], optional): The title of the chat.
            Defaults to None.
        description (Optional[str], optional): The description of the chat.
            Defaults to None.
        tags (Optional[List[str]], optional): A list of tags for the chat.
            Defaults to None.

    Returns:
        AdvancedFormat: The AdvancedFormat object.
    """
    assert_type(user_name, str, 'user_name', 'AdvancedChatMarkupFormat')
    assert_type(bot_name, str, 'bot_name', 'AdvancedChatMarkupFormat')
    assert_type(title, (str, NoneType), 'title', 'AdvancedChatMarkupFormat')
    assert_type(description, (str, NoneType), 'description',
                'AdvancedChatMarkupFormat')
    assert_type(tags, (list, NoneType), 'tags', 'AdvancedChatMarkupFormat')

    indent = " "  # single indentation unit for the generated markup

    def _make_prefix(sender: str):
        # Factory for the user/bot message openers, which differ only in
        # the sender name. The returned zero-arg callable is re-evaluated
        # per message so the timestamp is fresh each time.
        def _prefix() -> str:
            return (f'{indent*2}<message sender="{sender}" '
                    f'timestamp="{short_time_str()}">\n{indent*3}<text>')
        return _prefix

    def _msg_suffix() -> str:
        # Closes the <text> and <message> elements opened by a prefix.
        return f"</text>\n{indent*2}</message>\n"

    if tags is None:
        # No tags given: emit an empty <tags> block to keep the schema stable.
        final_tags_string = f"{indent*2}<tags>\n{indent*2}</tags>"
    else:
        tag_lines = (
            [f'{indent*2}<tags>']
            + [f'{indent*3}<tag>{tag}</tag>' for tag in tags]
            + [f'{indent*2}</tags>']
        )
        final_tags_string = '\n'.join(tag_lines)

    chat_title = title if title is not None else 'Untitled Chat'
    chat_description = (
        description if description is not None else 'No description provided'
    )

    # The <datetime> element is stamped once, when the format is created.
    system_prompt = (
        f"<chat>\n"
        f"{indent}<meta>\n"
        f"{indent*2}<title>{chat_title}</title>\n"
        f"{indent*2}<description>{chat_description}</description>\n"
        f"{final_tags_string}\n"
        f"{indent*2}<participants>\n"
        f"{indent*3}<participant name=\"{user_name}\"/>\n"
        f"{indent*3}<participant name=\"{bot_name}\"/>\n"
        f"{indent*2}</participants>\n"
        f"{indent*2}<datetime>{get_time_str()}</datetime>\n"
        f"{indent}</meta>\n"
        f"{indent}<messages>"
    )

    return AdvancedFormat(
        {
            "system_prefix": "",
            "system_prompt": system_prompt,
            "system_suffix": "\n",
            "user_prefix": _make_prefix(user_name),
            "user_suffix": _msg_suffix,
            "bot_prefix": _make_prefix(bot_name),
            "bot_suffix": _msg_suffix,
            "stops": ["</", "</text>", "</message>"]
        }
    )
|