webscout-6.0-py3-none-any.whl → webscout-6.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Agents/Onlinesearcher.py +22 -10
- webscout/Agents/functioncall.py +2 -2
- webscout/Bard.py +21 -21
- webscout/Local/__init__.py +6 -7
- webscout/Local/formats.py +404 -194
- webscout/Local/model.py +1074 -477
- webscout/Local/samplers.py +108 -144
- webscout/Local/thread.py +251 -410
- webscout/Local/ui.py +401 -0
- webscout/Local/utils.py +308 -131
- webscout/Provider/Amigo.py +1 -1
- webscout/Provider/NinjaChat.py +200 -0
- webscout/Provider/TTI/Nexra.py +3 -3
- webscout/Provider/TTI/__init__.py +2 -1
- webscout/Provider/TTI/aiforce.py +2 -2
- webscout/Provider/TTI/imgninza.py +136 -0
- webscout/Provider/Youchat.py +1 -1
- webscout/Provider/__init__.py +8 -1
- webscout/Provider/aimathgpt.py +193 -0
- webscout/Provider/felo_search.py +1 -1
- webscout/Provider/gaurish.py +168 -0
- webscout/Provider/geminiprorealtime.py +160 -0
- webscout/Provider/julius.py +4 -0
- webscout/exceptions.py +5 -1
- webscout/utils.py +3 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +154 -123
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/METADATA +123 -120
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/RECORD +33 -28
- webscout/Local/rawdog.py +0 -946
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/LICENSE.md +0 -0
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/WHEEL +0 -0
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/entry_points.txt +0 -0
- {webscout-6.0.dist-info → webscout-6.1.dist-info}/top_level.txt +0 -0
webscout/Local/rawdog.py
DELETED
@@ -1,946 +0,0 @@
# webscout/Local/rawdog.py

from datetime import datetime
from ._version import __version__, __llama_cpp_version__

"""Submodule containing the RawDog class, used for interaction with a Model"""

import sys
import os
import json
import re
from typing import Literal, Optional, Union
from pathlib import Path
from subprocess import run, CalledProcessError
import click
import os
import json
import platform
import subprocess
import logging
import appdirs
import datetime
import re
from .model import Model, assert_model_is_loaded, _SupportsWriteAndFlush
from .utils import RESET_ALL, cls, print_verbose, truncate
from .samplers import SamplerSettings, DefaultSampling
from typing import Optional, Literal, Union
from .formats import AdvancedFormat

from .formats import blank as formats_blank
from ..AIutel import *
from .samplers import SamplerSettings, DefaultSampling
from .formats import AdvancedFormat
from rich.markdown import Markdown
from rich.console import Console
appdir = appdirs.AppDirs("AIWEBS", "vortex")

default_path = appdir.user_cache_dir

if not os.path.exists(default_path):
    os.makedirs(default_path)
class Message(dict):
    """
    A dictionary representing a single message within a Thread

    Works just like a normal `dict`, but a new method:
    - `.as_string` - Return the full message string

    Generally, messages have these keys:
    - `role` - The role of the speaker: 'system', 'user', or 'bot'
    - `prefix` - The text that prefixes the message content
    - `content` - The actual content of the message
    - `suffix` - The text that suffixes the message content
    """

    def __repr__(self) -> str:
        return \
            f"Message([" \
            f"('role', {repr(self['role'])}), " \
            f"('prefix', {repr(self['prefix'])}), " \
            f"('content', {repr(self['content'])}), " \
            f"('suffix', {repr(self['suffix'])})])"

    def as_string(self):
        """Return the full message string"""
        try:
            return self['prefix'] + self['content'] + self['suffix']
        except KeyError as e:
            e.add_note(
                "as_string: Message is missing one or more of the "
                "required 'prefix', 'content', 'suffix' attributes - this is "
                "unexpected"
            )
            raise e

class Thread:
    """
    Provide functionality to facilitate easy interactions with a Model

    This is just a brief overview of m.Thread.
    To see a full description of each method and its parameters,
    call help(Thread), or see the relevant docstring.

    The following methods are available:
    - `.add_message()` - Add a message to `Thread.messages`
    - `.as_string()` - Return this thread's complete message history as a string
    - `.create_message()` - Create a message using the format of this thread
    - `.inference_str_from_messages()` - Using the list of messages, return a string suitable for inference
    - `.interact()` - Start an interactive, terminal-based chat session
    - `.len_messages()` - Get the total length of all messages in tokens
    - `.print_stats()` - Print stats about the context usage in this thread
    - `.reset()` - Clear the list of messages
    - `.send()` - Send a message in this thread

    The following attributes are available:
    - `.format` - The format being used for messages in this thread
    - `.messages` - The list of messages in this thread
    - `.model` - The `m.Model` instance used by this thread
    - `.sampler` - The SamplerSettings object used in this thread
    """

    def __init__(
        self,
        model: Model,
        format: Union[dict, AdvancedFormat],
        sampler: SamplerSettings = DefaultSampling,
        messages: Optional[list[Message]] = None,

    ):

        """
        Given a Model and a format, construct a Thread instance.

        model: The Model to use for text generation
        format: The format specifying how messages should be structured (see m.formats)

        The following parameters are optional:
        - sampler: The SamplerSettings object used to control text generation
        - messages: A list of m.thread.Message objects to add to the Thread upon construction
        """

        assert isinstance(model, Model), \
            "Thread: model should be an " + \
            f"instance of webscout.Local.Model, not {type(model)}"

        assert_model_is_loaded(model)

        assert isinstance(format, (dict, AdvancedFormat)), \
            f"Thread: format should be dict or AdvancedFormat, not {type(format)}"

        if any(k not in format.keys() for k in formats_blank.keys()):
            raise KeyError(
                "Thread: format is missing one or more required keys, see " + \
                "webscout.Local.formats.blank for an example"
            )

        assert isinstance(format['stops'], list), \
            "Thread: format['stops'] should be list, not " + \
            f"{type(format['stops'])}"

        assert all(
            hasattr(sampler, attr) for attr in [
                'max_len_tokens',
                'temp',
                'top_p',
                'min_p',
                'frequency_penalty',
                'presence_penalty',
                'repeat_penalty',
                'top_k'
            ]
        ), 'Thread: sampler is missing one or more required attributes'

        self._messages: Optional[list[Message]] = messages
        if self._messages is not None:
            if not all(isinstance(msg, Message) for msg in self._messages):
                raise TypeError(
                    "Thread: one or more messages provided to __init__() is "
                    "not an instance of m.thread.Message"
                )

        # Thread.messages is never empty, unless `messages` param is explicitly
        # set to `[]` during construction

        self.model: Model = model
        self.format: Union[dict, AdvancedFormat] = format
        self.messages: list[Message] = [
            self.create_message("system", self.format['system_content'])
        ] if self._messages is None else self._messages
        self.sampler: SamplerSettings = sampler
        self.tools = []
        if self.model.verbose:
            print_verbose("new Thread instance with the following attributes:")
            print_verbose(f"model == {self.model}")
            print_verbose(f"format['system_prefix'] == {truncate(repr(self.format['system_prefix']))}")
            print_verbose(f"format['system_content'] == {truncate(repr(self.format['system_content']))}")
            print_verbose(f"format['system_suffix'] == {truncate(repr(self.format['system_suffix']))}")
            print_verbose(f"format['user_prefix'] == {truncate(repr(self.format['user_prefix']))}")
            print_verbose(f"format['user_content'] == {truncate(repr(self.format['user_content']))}")
            print_verbose(f"format['user_suffix'] == {truncate(repr(self.format['user_suffix']))}")
            print_verbose(f"format['bot_prefix'] == {truncate(repr(self.format['bot_prefix']))}")
            print_verbose(f"format['bot_content'] == {truncate(repr(self.format['bot_content']))}")
            print_verbose(f"format['bot_suffix'] == {truncate(repr(self.format['bot_suffix']))}")
            print_verbose(f"format['stops'] == {truncate(repr(self.format['stops']))}")
            print_verbose(f"sampler.temp == {self.sampler.temp}")
            print_verbose(f"sampler.top_p == {self.sampler.top_p}")
            print_verbose(f"sampler.min_p == {self.sampler.min_p}")
            print_verbose(f"sampler.frequency_penalty == {self.sampler.frequency_penalty}")
            print_verbose(f"sampler.presence_penalty == {self.sampler.presence_penalty}")
            print_verbose(f"sampler.repeat_penalty == {self.sampler.repeat_penalty}")
            print_verbose(f"sampler.top_k == {self.sampler.top_k}")
    def add_tool(self, tool: dict):
        """Adds a tool to the Thread for function calling."""
        self.tools.append(tool)
        self.model.register_tool(tool['function']['name'], tool['function']['execute'])  # Register the tool

        # Include tool information in the system message (optional, but helpful)
        self.messages[0]['content'] += f"\nYou have access to the following tool:\n{tool['function']['description']}"
    def __repr__(self) -> str:
        return \
            f"Thread({repr(self.model)}, {repr(self.format)}, " + \
            f"{repr(self.sampler)}, {repr(self.messages)})"

    def __str__(self) -> str:
        return self.as_string()

    def __len__(self) -> int:
        """
        `len(Thread)` returns the length of the Thread in tokens

        To get the number of messages in the Thread, use `len(Thread.messages)`
        """
        return self.len_messages()

    def create_message(
        self,
        role: Literal['system', 'user', 'bot'],
        content: str
    ) -> Message:
        """
        Construct a message using the format of this Thread
        """

        assert role.lower() in ['system', 'user', 'bot'], \
            f"create_message: role should be 'system', 'user', or 'bot', not '{role.lower()}'"

        assert isinstance(content, str), \
            f"create_message: content should be str, not {type(content)}"

        if role.lower() == 'system':
            return Message(
                [
                    ('role', 'system'),
                    ('prefix', self.format['system_prefix']),
                    ('content', content),
                    ('suffix', self.format['system_suffix'])
                ]
            )

        elif role.lower() == 'user':
            return Message(
                [
                    ('role', 'user'),
                    ('prefix', self.format['user_prefix']),
                    ('content', content),
                    ('suffix', self.format['user_suffix'])
                ]
            )

        elif role.lower() == 'bot':
            return Message(
                [
                    ('role', 'bot'),
                    ('prefix', self.format['bot_prefix']),
                    ('content', content),
                    ('suffix', self.format['bot_suffix'])
                ]
            )

    def len_messages(self) -> int:
        """
        Return the total length of all messages in this thread, in tokens.

        Can also use `len(Thread)`."""

        return self.model.get_length(self.as_string())

    def add_message(
        self,
        role: Literal['system', 'user', 'bot'],
        content: str
    ) -> None:
        """
        Create a message and append it to `Thread.messages`.

        `Thread.add_message(...)` is a shorthand for
        `Thread.messages.append(Thread.create_message(...))`
        """
        self.messages.append(
            self.create_message(
                role=role,
                content=content
            )
        )

    def inference_str_from_messages(self) -> str:
        """
        Using the list of messages, construct a string suitable for inference,
        respecting the format and context length of this thread.
        """

        inf_str = ''
        sys_msg_str = ''
        # whether to treat the first message as necessary to keep
        sys_msg_flag = False
        context_len_budget = self.model.context_length

        # if at least 1 message is history
        if len(self.messages) >= 1:
            # if first message has system role
            if self.messages[0]['role'] == 'system':
                sys_msg_flag = True
                sys_msg = self.messages[0]
                sys_msg_str = sys_msg.as_string()
                context_len_budget -= self.model.get_length(sys_msg_str)

        if sys_msg_flag:
            iterator = reversed(self.messages[1:])
        else:
            iterator = reversed(self.messages)

        for message in iterator:
            msg_str = message.as_string()
            context_len_budget -= self.model.get_length(msg_str)
            if context_len_budget <= 0:
                break
            inf_str = msg_str + inf_str

        if sys_msg_flag:
            inf_str = sys_msg_str + inf_str
        inf_str += self.format['bot_prefix']

        return inf_str


    def send(self, prompt: str) -> str:
        """
        Send a message in this thread. This adds your message and the bot's
        response to the list of messages.

        Returns a string containing the response to your message.
        """

        self.add_message("user", prompt)
        output = self.model.generate(
            self.inference_str_from_messages(),
            stops=self.format['stops'],
            sampler=self.sampler
        )
        self.add_message("bot", output)

        return output

    def _interactive_update_sampler(self) -> None:
        """Interactively update the sampler settings used in this Thread"""
        print()
        try:
            new_max_len_tokens = input(f'max_len_tokens: {self.sampler.max_len_tokens} -> ')
            new_temp = input(f'temp: {self.sampler.temp} -> ')
            new_top_p = input(f'top_p: {self.sampler.top_p} -> ')
            new_min_p = input(f'min_p: {self.sampler.min_p} -> ')
            new_frequency_penalty = input(f'frequency_penalty: {self.sampler.frequency_penalty} -> ')
            new_presence_penalty = input(f'presence_penalty: {self.sampler.presence_penalty} -> ')
            new_repeat_penalty = input(f'repeat_penalty: {self.sampler.repeat_penalty} -> ')
            new_top_k = input(f'top_k: {self.sampler.top_k} -> ')

        except KeyboardInterrupt:
            print('\nwebscout.Local: sampler settings not updated\n')
            return
        print()

        try:
            self.sampler.max_len_tokens = int(new_max_len_tokens)
        except ValueError:
            pass
        else:
            print('webscout.Local: max_len_tokens updated')

        try:
            self.sampler.temp = float(new_temp)
        except ValueError:
            pass
        else:
            print('webscout.Local: temp updated')

        try:
            self.sampler.top_p = float(new_top_p)
        except ValueError:
            pass
        else:
            print('webscout.Local: top_p updated')

        try:
            self.sampler.min_p = float(new_min_p)
        except ValueError:
            pass
        else:
            print('webscout.Local: min_p updated')

        try:
            self.sampler.frequency_penalty = float(new_frequency_penalty)
        except ValueError:
            pass
        else:
            print('webscout.Local: frequency_penalty updated')

        try:
            self.sampler.presence_penalty = float(new_presence_penalty)
        except ValueError:
            pass
        else:
            print('webscout.Local: presence_penalty updated')

        try:
            self.sampler.repeat_penalty = float(new_repeat_penalty)
        except ValueError:
            pass
        else:
            print('webscout.Local: repeat_penalty updated')

        try:
            self.sampler.top_k = int(new_top_k)
        except ValueError:
            pass
        else:
            print('webscout.Local: top_k updated')
        print()


    def _interactive_input(
        self,
        prompt: str,
        _dim_style: str,
        _user_style: str,
        _bot_style: str,
        _special_style: str
    ) -> tuple:
        """
        Receive input from the user, while handling multi-line input
        and commands
        """
        full_user_input = ''  # may become multiline

        while True:
            user_input = input(prompt)

            if user_input.endswith('\\'):
                full_user_input += user_input[:-1] + '\n'

            elif user_input == '!':

                print()
                try:
                    command = input(f'{RESET_ALL} ! {_dim_style}')
                except KeyboardInterrupt:
                    print('\n')
                    continue

                if command == '':
                    print(f'\n[no command]\n')

                elif command.lower() in ['reset', 'restart']:
                    self.reset()
                    print(f'\n[thread reset]\n')

                elif command.lower() in ['cls', 'clear']:
                    cls()
                    print()

                elif command.lower() in ['ctx', 'context']:
                    print(f"\n{self.len_messages()}\n")

                elif command.lower() in ['stats', 'print_stats']:
                    print()
                    self.print_stats()
                    print()

                elif command.lower() in ['sampler', 'samplers', 'settings']:
                    self._interactive_update_sampler()

                elif command.lower() in ['str', 'string', 'as_string']:
                    print(f"\n{self.as_string()}\n")

                elif command.lower() in ['repr', 'save', 'backup']:
                    print(f"\n{repr(self)}\n")

                elif command.lower() in ['remove', 'rem', 'delete', 'del']:
                    print()
                    old_len = len(self.messages)
                    del self.messages[-1]
                    assert len(self.messages) == (old_len - 1)
                    print('[removed last message]\n')

                elif command.lower() in ['last', 'repeat']:
                    last_msg = self.messages[-1]
                    if last_msg['role'] == 'user':
                        print(f"\n{_user_style}{last_msg['content']}{RESET_ALL}\n")
                    elif last_msg['role'] == 'bot':
                        print(f"\n{_bot_style}{last_msg['content']}{RESET_ALL}\n")

                elif command.lower() in ['inf', 'inference', 'inf_str']:
                    print(f'\n"""{self.inference_str_from_messages()}"""\n')

                elif command.lower() in ['reroll', 're-roll', 're', 'swipe']:
                    old_len = len(self.messages)
                    del self.messages[-1]
                    assert len(self.messages) == (old_len - 1)
                    return '', None

                elif command.lower() in ['exit', 'quit']:
                    print(RESET_ALL)
                    return None, None

                elif command.lower() in ['help', '/?', '?']:
                    print()
                    print('reset | restart -- Reset the thread to its original state')
                    print('clear | cls -- Clear the terminal')
                    print('context | ctx -- Get the context usage in tokens')
                    print('print_stats | stats -- Get the context usage stats')
                    print('sampler | settings -- Update the sampler settings')
                    print('string | str -- Print the message history as a string')
                    print('repr | save -- Print the representation of the thread')
                    print('remove | delete -- Remove the last message')
                    print('last | repeat -- Repeat the last message')
                    print('inference | inf -- Print the inference string')
                    print('reroll | swipe -- Regenerate the last message')
                    print('exit | quit -- Exit the interactive chat (can also use ^C)')
                    print('help | ? -- Show this screen')
                    print()
                    print("TIP: type < at the prompt and press ENTER to prefix the bot's next message.")
                    print(' for example, type "Sure!" to bypass refusals')
                    print()
                    print("TIP: type !! at the prompt and press ENTER to insert a system message")
                    print()

                else:
                    print(f'\n[unknown command]\n')

            # prefix the bot's next message
            elif user_input == '<':

                print()
                try:
                    next_message_start = input(f'{RESET_ALL} < {_dim_style}')

                except KeyboardInterrupt:
                    print(f'{RESET_ALL}\n')
                    continue

                else:
                    print()
                    return '', next_message_start

            # insert a system message
            elif user_input == '!!':
                print()

                try:
                    next_sys_msg = input(f'{RESET_ALL} !! {_special_style}')

                except KeyboardInterrupt:
                    print(f'{RESET_ALL}\n')
                    continue

                else:
                    print()
                    return next_sys_msg, -1

            # concatenate multi-line input
            else:
                full_user_input += user_input
                return full_user_input, None

    def interact(
        self,
        color: bool = True,
        header: Optional[str] = None,
        stream: bool = True
    ) -> None:
        """
        Start an interactive chat session using this Thread.

        While text is being generated, press `^C` to interrupt the bot.
        Then you have the option to press `ENTER` to re-roll, or to simply type
        another message.

        At the prompt, press `^C` to end the chat session.

        Type `!` and press `ENTER` to enter a basic command prompt. For a list
        of commands, type `help` at this prompt.

        Type `<` and press `ENTER` to prefix the bot's next message, for
        example with `Sure!`.

        Type `!!` at the prompt and press `ENTER` to insert a system message.

        The following parameters are optional:
        - color: Whether to use colored text to differentiate user / bot
        - header: Header text to print at the start of the interaction
        - stream: Whether to stream text as it is generated
        """
        print()

        # fresh import of color codes in case `color` param has changed
        from .utils import SPECIAL_STYLE, USER_STYLE, BOT_STYLE, DIM_STYLE

        # disable color codes if explicitly disabled by `color` param
        if not color:
            SPECIAL_STYLE = ''
            USER_STYLE = ''
            BOT_STYLE = ''
            DIM_STYLE = ''

        if header is not None:
            print(f"{SPECIAL_STYLE}{header}{RESET_ALL}\n")

        while True:

            prompt = f"{RESET_ALL} > {USER_STYLE}"

            try:
                user_prompt, next_message_start = self._interactive_input(
                    prompt,
                    DIM_STYLE,
                    USER_STYLE,
                    BOT_STYLE,
                    SPECIAL_STYLE
                )
            except KeyboardInterrupt:
                print(f"{RESET_ALL}\n")
                return

            # got 'exit' or 'quit' command
            if user_prompt is None and next_message_start is None:
                break

            # insert a system message via `!!` prompt
            if next_message_start == -1:
                self.add_message('system', user_prompt)
                continue

            if next_message_start is not None:
                try:
                    if stream:
                        print(f"{BOT_STYLE}{next_message_start}", end='', flush=True)
                        output = next_message_start + self.model.stream_print(
                            self.inference_str_from_messages() + next_message_start,
                            stops=self.format['stops'],
                            sampler=self.sampler,
                            end=''
                        )
                    else:
                        print(f"{BOT_STYLE}", end='', flush=True)
                        output = next_message_start + self.model.generate(
                            self.inference_str_from_messages() + next_message_start,
                            stops=self.format['stops'],
                            sampler=self.sampler
                        )
                        print(output, end='', flush=True)
                except KeyboardInterrupt:
                    print(f"{DIM_STYLE} [message not added to history; press ENTER to re-roll]\n")
                    continue
                else:
                    self.add_message("bot", output)
            else:
                print(BOT_STYLE)
                if user_prompt != "":
                    self.add_message("user", user_prompt)
                try:
                    if stream:
                        output = self.model.stream_print(
                            self.inference_str_from_messages(),
                            stops=self.format['stops'],
                            sampler=self.sampler,
                            end=''
                        )
                    else:
                        output = self.model.generate(
                            self.inference_str_from_messages(),
                            stops=self.format['stops'],
                            sampler=self.sampler
                        )
                        print(output, end='', flush=True)
                except KeyboardInterrupt:
                    print(f"{DIM_STYLE} [message not added to history; press ENTER to re-roll]\n")
                    continue
                else:
                    self.add_message("bot", output)

            if output.endswith("\n\n"):
                print(RESET_ALL, end='', flush=True)
            elif output.endswith("\n"):
                print(RESET_ALL)
            else:
                print(f"{RESET_ALL}\n")


    def reset(self) -> None:
        """
        Clear the list of messages, which resets the thread to its original
        state
        """
        self.messages: list[Message] = [
            self.create_message("system", self.format['system_content'])
        ] if self._messages is None else self._messages


    def as_string(self) -> str:
        """Return this thread's message history as a string"""
        thread_string = ''
        for msg in self.messages:
            thread_string += msg.as_string()
        return thread_string


    def print_stats(
        self,
        end: str = '\n',
        file: _SupportsWriteAndFlush = sys.stdout,
        flush: bool = True
    ) -> None:
        """Print stats about the context usage in this thread"""
        thread_len_tokens = self.len_messages()
        max_ctx_len = self.model.context_length
        context_used_percentage = round((thread_len_tokens/max_ctx_len)*100)
        print(f"{thread_len_tokens} / {max_ctx_len} tokens", file=file, flush=flush)
        print(f"{context_used_percentage}% of context used", file=file, flush=flush)
        print(f"{len(self.messages)} messages", end=end, file=file, flush=flush)
        if not flush:
            file.flush()

class RawDog:
    """Generate and auto-execute Python scripts in the cli"""

    examples = """\
EXAMPLES:

1. User: Kill the process running on port 3000

LLM:
```python
import os
os.system("kill $(lsof -t -i:3000)")
print("Process killed")
```

2. User: Summarize my essay

LLM:
```python
import glob
files = glob.glob("*essay*.*")
with open(files[0], "r") as f:
    print(f.read())
```
CONTINUE

User:
LAST SCRIPT OUTPUT:
John Smith
Essay 2021-09-01
...

LLM:
```python
print("The essay is about...")
```
"""


    def __init__(
        self,
        quiet: bool = False,
        internal_exec: bool = False,
        confirm_script: bool = False,
        interpreter: str = "python",
        prettify: bool = True,
    ):
        """Constructor

        Args:
            quiet (bool, optional): Flag for control logging. Defaults to False.
            internal_exec (bool, optional): Execute scripts with exec function. Defaults to False.
            confirm_script (bool, optional): Give consent to scripts prior to execution. Defaults to False.
            interpreter (str, optional): Python's interpreter name. Defaults to Python.
            prettify (bool, optional): Prettify the code on stdout. Defaults to True.
        """
        if not quiet:
            print(
                "To get the most out of Rawdog. Ensure the following are installed:\n"
                " 1. Python 3.x\n"
                " 2. Dependency:\n"
                "  - Matplotlib\n"
                "Be alerted on the risk posed! (Experimental)\n"
                "Use '--quiet' to suppress this message and code/logs stdout.\n"
            )
        self.internal_exec = internal_exec
        self.confirm_script = confirm_script
        self.quiet = quiet
        self.interpreter = interpreter
        self.prettify = prettify
        self.python_version = (
            f"{sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}"
            if self.internal_exec
            else run_system_command(
                f"{self.interpreter} --version",
                exit_on_error=True,
                stdout_error=True,
                help="If you're using Webscout-cli, use the flag '--internal-exec'",
            )[1].stdout.split(" ")[1]
        )

    @property
    def intro_prompt(self):
        return f"""
You are a command-line coding assistant called Rawdog that generates and auto-executes Python scripts.

A typical interaction goes like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done
    ii. Write a short Python SCRIPT to do it
    iii. Communicate back to the user by printing to the console in that SCRIPT
3. The compiler extracts the script and then runs it using exec(). If there will be an exception raised,
it will be send back to you starting with "PREVIOUS SCRIPT EXCEPTION:".
4. In case of exception, regenerate error free script.

If you need to review script outputs before completing the task, you can print the word "CONTINUE" at the end of your SCRIPT.
This can be useful for summarizing documents or technical readouts, reading instructions before
deciding what to do, or other tasks that require multi-step reasoning.
A typical 'CONTINUE' interaction looks like this:
1. The user gives you a natural language PROMPT.
2. You:
    i. Determine what needs to be done
    ii. Determine that you need to see the output of some subprocess call to complete the task
    iii. Write a short Python SCRIPT to print that and then print the word "CONTINUE"
3. The compiler
    i. Checks and runs your SCRIPT
    ii. Captures the output and appends it to the conversation as "LAST SCRIPT OUTPUT:"
    iii. Finds the word "CONTINUE" and sends control back to you
4. You again:
    i. Look at the original PROMPT + the "LAST SCRIPT OUTPUT:" to determine what needs to be done
    ii. Write a short Python SCRIPT to do it
    iii. Communicate back to the user by printing to the console in that SCRIPT
5. The compiler...

Please follow these conventions carefully:
- Decline any tasks that seem dangerous, irreversible, or that you don't understand.
- Always review the full conversation prior to answering and maintain continuity.
- If asked for information, just print the information clearly and concisely.
- If asked to do something, print a concise summary of what you've done as confirmation.
- If asked a question, respond in a friendly, conversational way. Use programmatically-generated and natural language responses as appropriate.
- If you need clarification, return a SCRIPT that prints your question. In the next interaction, continue based on the user's response.
- Assume the user would like something concise. For example rather than printing a massive table, filter or summarize it to what's likely of interest.
- Actively clean up any temporary processes or files you use.
- When looking through files, use git as available to skip files, and skip hidden files (.env, .git, etc) by default.
- You can plot anything with matplotlib.
- ALWAYS Return your SCRIPT inside of a single pair of ``` delimiters. Only the console output of the first such SCRIPT is visible to the user, so make sure that it's complete and don't bother returning anything else.

{self.examples}

Current system : {sys.platform.system()}
Python version : {self.python_version}
Current directory : {os.getcwd()}
Current Datetime : {datetime.datetime.now()}
"""

    def stdout(self, message: str) -> None:
        """Stdout data

        Args:
            message (str): Text to be printed
        """
        if self.prettify:
            Console().print(Markdown(message))
        else:
            click.secho(message, fg="yellow")

    def log(self, message: str, category: str = "info"):
        """RawDog logger

        Args:
            message (str): Log message
            category (str, optional): Log level. Defaults to 'info'.
        """
        if self.quiet:
            return

        message = "[Webscout] - " + message
        if category == "error":
            logging.error(message)
        else:
            logging.info(message)

    def main(self, response: str) -> None:
        """Exec code in response accordingly

        Args:
            response (str): AI response

        Returns:
            None|str: None if script executed successfully else stdout data
        """
        code_blocks = re.findall(r"```python.*?```", response, re.DOTALL)
        if len(code_blocks) != 1:
            self.stdout(response)

        else:
            raw_code = code_blocks[0]

            if self.confirm_script:
                self.stdout(raw_code)
                if not click.confirm("- Do you wish to execute this"):
                    return

            elif not self.quiet:
                self.stdout(raw_code)

            raw_code_plus = re.sub(r"(```)(python)?", "", raw_code)

            if "CONTINUE" in response or not self.internal_exec:
                self.log("Executing script externally")
                path_to_script = os.path.join(default_path, "execute_this.py")
                with open(path_to_script, "w") as fh:
                    fh.write(raw_code_plus)
                if "CONTINUE" in response:

                    success, proc = run_system_command(
                        f"{self.interpreter} {path_to_script}",
                        exit_on_error=False,
                        stdout_error=False,
                    )

                    if success:
                        self.log("Returning success feedback")
                        return f"LAST SCRIPT OUTPUT:\n{proc.stdout}"
                    else:
                        self.log("Returning error feedback", "error")
                        return f"PREVIOUS SCRIPT EXCEPTION:\n{proc.stderr}"
                else:
                    os.system(f"{self.interpreter} {path_to_script}")

            else:
                try:
                    self.log("Executing script internally")
                    exec(raw_code_plus)
                except Exception as e:
                    self.log(
                        "Exception occurred while executing script. Responding with error: "
                        f"{e.args[1] if len(e.args)>1 else str(e)}",
                        "error",
                    )
                    return f"PREVIOUS SCRIPT EXCEPTION:\n{str(e)}"
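For reference, the extract-and-execute step that RawDog.main() performs above reduces to the short, self-contained sketch below. The two regular expressions are the ones in the deleted code; the confirm_script / quiet handling, external-interpreter path, and the "CONTINUE" feedback loop are deliberately omitted.

import re

# A reply shaped the way RawDog's intro_prompt asks the LLM to answer.
response = '''Here you go:
```python
print("hello from the generated script")
```'''

# Same extraction RawDog.main() performs: exactly one fenced python block...
code_blocks = re.findall(r"```python.*?```", response, re.DOTALL)
if len(code_blocks) == 1:
    # ...with the fences stripped before execution.
    raw_code = re.sub(r"(```)(python)?", "", code_blocks[0])
    exec(raw_code)  # the real RawDog first honors confirm_script / quiet / internal_exec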
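The Thread and Message classes defined in this deleted module were driven roughly as in the minimal sketch below. It assumes webscout 6.0 (where webscout/Local/rawdog.py still ships) and a local GGUF model file; the Model constructor arguments and the exact keys supplied by formats.blank are assumptions based on the code above, not taken from the package's documentation. The same chat API also lives in webscout/Local/thread.py, which is modified rather than removed in this release.

from webscout.Local.model import Model
from webscout.Local.formats import blank
from webscout.Local.samplers import DefaultSampling
from webscout.Local.rawdog import Thread

# Copy the blank format template referenced by the code above and fill it in.
fmt = dict(blank)
fmt['system_content'] = "You are a helpful assistant."
fmt['user_prefix'], fmt['user_suffix'] = "User: ", "\n"
fmt['bot_prefix'], fmt['bot_suffix'] = "Bot: ", "\n"
fmt['stops'] = ["User:"]

model = Model("path/to/model.gguf")   # hypothetical path; constructor options may differ
thread = Thread(model, fmt, sampler=DefaultSampling)

print(thread.send("Name three uses of Python's re module."))
thread.print_stats()                  # tokens used vs. model.context_length, as above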