webscout 8.0-py3-none-any.whl → 8.2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of webscout might be problematic.
- inferno/__init__.py +6 -0
- inferno/__main__.py +9 -0
- inferno/cli.py +6 -0
- webscout/Local/__init__.py +6 -0
- webscout/Local/__main__.py +9 -0
- webscout/Local/api.py +576 -0
- webscout/Local/cli.py +338 -0
- webscout/Local/config.py +75 -0
- webscout/Local/llm.py +188 -0
- webscout/Local/model_manager.py +205 -0
- webscout/Local/server.py +187 -0
- webscout/Local/utils.py +93 -0
- webscout/Provider/AISEARCH/DeepFind.py +1 -1
- webscout/Provider/AISEARCH/ISou.py +1 -1
- webscout/Provider/AISEARCH/Perplexity.py +359 -0
- webscout/Provider/AISEARCH/__init__.py +3 -1
- webscout/Provider/AISEARCH/felo_search.py +1 -1
- webscout/Provider/AISEARCH/genspark_search.py +1 -1
- webscout/Provider/AISEARCH/hika_search.py +1 -1
- webscout/Provider/AISEARCH/iask_search.py +436 -0
- webscout/Provider/AISEARCH/scira_search.py +9 -5
- webscout/Provider/AISEARCH/webpilotai_search.py +1 -1
- webscout/Provider/ExaAI.py +1 -1
- webscout/Provider/ExaChat.py +18 -8
- webscout/Provider/GithubChat.py +5 -1
- webscout/Provider/Glider.py +4 -2
- webscout/Provider/Jadve.py +2 -2
- webscout/Provider/OPENAI/__init__.py +24 -0
- webscout/Provider/OPENAI/base.py +46 -0
- webscout/Provider/OPENAI/c4ai.py +347 -0
- webscout/Provider/OPENAI/chatgpt.py +549 -0
- webscout/Provider/OPENAI/chatgptclone.py +460 -0
- webscout/Provider/OPENAI/deepinfra.py +284 -0
- webscout/Provider/OPENAI/exaai.py +419 -0
- webscout/Provider/OPENAI/exachat.py +433 -0
- webscout/Provider/OPENAI/freeaichat.py +355 -0
- webscout/Provider/OPENAI/glider.py +316 -0
- webscout/Provider/OPENAI/heckai.py +337 -0
- webscout/Provider/OPENAI/llmchatco.py +327 -0
- webscout/Provider/OPENAI/netwrck.py +348 -0
- webscout/Provider/OPENAI/opkfc.py +488 -0
- webscout/Provider/OPENAI/scirachat.py +463 -0
- webscout/Provider/OPENAI/sonus.py +294 -0
- webscout/Provider/OPENAI/standardinput.py +425 -0
- webscout/Provider/OPENAI/textpollinations.py +285 -0
- webscout/Provider/OPENAI/toolbaz.py +405 -0
- webscout/Provider/OPENAI/typegpt.py +361 -0
- webscout/Provider/OPENAI/uncovrAI.py +455 -0
- webscout/Provider/OPENAI/utils.py +211 -0
- webscout/Provider/OPENAI/venice.py +428 -0
- webscout/Provider/OPENAI/wisecat.py +381 -0
- webscout/Provider/OPENAI/writecream.py +158 -0
- webscout/Provider/OPENAI/x0gpt.py +389 -0
- webscout/Provider/OPENAI/yep.py +329 -0
- webscout/Provider/StandardInput.py +278 -0
- webscout/Provider/TextPollinationsAI.py +27 -28
- webscout/Provider/Venice.py +1 -1
- webscout/Provider/Writecream.py +211 -0
- webscout/Provider/WritingMate.py +197 -0
- webscout/Provider/Youchat.py +30 -26
- webscout/Provider/__init__.py +14 -6
- webscout/Provider/koala.py +2 -2
- webscout/Provider/llmchatco.py +5 -0
- webscout/Provider/scira_chat.py +18 -12
- webscout/Provider/scnet.py +187 -0
- webscout/Provider/toolbaz.py +320 -0
- webscout/Provider/typegpt.py +3 -184
- webscout/Provider/uncovr.py +3 -3
- webscout/conversation.py +32 -32
- webscout/prompt_manager.py +2 -1
- webscout/version.py +1 -1
- webscout-8.2.dist-info/METADATA +734 -0
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/RECORD +77 -32
- webscout-8.2.dist-info/entry_points.txt +5 -0
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/top_level.txt +1 -0
- webscout/Provider/flowith.py +0 -207
- webscout-8.0.dist-info/METADATA +0 -995
- webscout-8.0.dist-info/entry_points.txt +0 -3
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/LICENSE.md +0 -0
- {webscout-8.0.dist-info → webscout-8.2.dist-info}/WHEEL +0 -0
webscout/Provider/typegpt.py
CHANGED
@@ -14,195 +14,14 @@ class TypeGPT(Provider):
     """
     A class to interact with the TypeGPT.net API. Improved to match webscout standards.
     """
-    url = "https://chat.typegpt.net"
-
     AVAILABLE_MODELS = [
-        #
-        "gpt-3.5-turbo",
-        "gpt-3.5-turbo-202201",
-        "gpt-4o",
-        "gpt-4o-2024-05-13",
-        "gpt-4o-2024-11-20",
-        "gpt-4o-mini",
+        # Working Models (based on testing)
         "gpt-4o-mini-2024-07-18",
-        # "gpt-4o-mini-ddg", >>>> NOT WORKING
-        "o1",
-        # "o1-mini-2024-09-12", >>>> NOT WORKING
-        "o1-preview",
-        "o3-mini",
         "chatgpt-4o-latest",
-
-        # Claude Models
-        # "claude", >>>> NOT WORKING
-        "claude-3-5-sonnet",
-        "claude-3-5-sonnet-20240620",
-        "claude-3-5-sonnet-x",
-        # "claude-3-haiku-ddg", >>>> NOT WORKING
-        "claude-hybridspace",
-        "claude-sonnet-3.5",
-        "Claude-sonnet-3.7",
-        "anthropic/claude-3.5-sonnet",
-        "anthropic/claude-3.7-sonnet",
-
-        # Meta/LLaMA Models
-        "@cf/meta/llama-2-7b-chat-fp16",
-        "@cf/meta/llama-2-7b-chat-int8",
-        "@cf/meta/llama-3-8b-instruct",
-        "@cf/meta/llama-3.1-8b-instruct",
-        "@cf/meta/llama-3.3-70b-instruct-fp8-fast",
-        # "@cf/meta-llama/llama-2-7b-chat-hf-lora", >>>> NOT WORKING
-        "llama-3.1-405b",
-        "llama-3.1-70b",
-        # "llama-3.1-70b-ddg", >>>> NOT WORKING
-        "llama-3.1-8b",
-        # "llama-scaleway", >>>> NOT WORKING
-        "llama3.1-8b", # >>>> NOT WORKING
-        "llama3.3-70b",
-        # "llamalight", >>>> NOT WORKING
-        "Meta-Llama-3.1-405B-Instruct-Turbo",
-        "Meta-Llama-3.3-70B-Instruct-Turbo",
-        # "meta-llama/Llama-2-7b-chat-hf", >>>> NOT WORKING
-        # "meta-llama/Llama-3.1-70B-Instruct", >>>> NOT WORKING
-        # "meta-llama/Llama-3.1-8B-Instruct", >>>> NOT WORKING
-        "meta-llama/Llama-3.2-11B-Vision-Instruct",
-        # "meta-llama/Llama-3.2-1B-Instruct", >>>> NOT WORKING
-        # "meta-llama/Llama-3.2-3B-Instruct", >>>> NOT WORKING
-        "meta-llama/Llama-3.2-90B-Vision-Instruct",
-        "meta-llama/Llama-3.3-70B-Instruct",
-        "meta-llama/Llama-3.3-70B-Instruct-Turbo",
-        # "meta-llama/Llama-Guard-3-8B", >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-70B-Instruct", >>>> NOT WORKING
-        # "meta-llama/Meta-Llama-3-8B-Instruct", >>>> NOT WORKING
-        "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
-        "meta-llama/Meta-Llama-3.1-8B-Instruct",
-        "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
-
-        # Mistral Models
-        "mistral",
-        "mistral-large",
-        "@cf/mistral/mistral-7b-instruct-v0.1",
-        # "@cf/mistral/mistral-7b-instruct-v0.2-lora", >>>> NOT WORKING
-        "@hf/mistralai/mistral-7b-instruct-v0.2",
-        "mistralai/Mistral-7B-Instruct-v0.2",
-        "mistralai/Mistral-7B-Instruct-v0.3",
-        "mistralai/Mixtral-8x22B-Instruct-v0.1",
-        "mistralai/Mixtral-8x7B-Instruct-v0.1",
-        # "mixtral-8x7b-ddg", >>>> NOT WORKING
-        "Mistral-7B-Instruct-v0.2",
-
-        # Qwen Models
-        "@cf/qwen/qwen1.5-0.5b-chat",
-        "@cf/qwen/qwen1.5-1.8b-chat",
-        "@cf/qwen/qwen1.5-14b-chat-awq",
-        "@cf/qwen/qwen1.5-7b-chat-awq",
-        "Qwen/Qwen2.5-3B-Instruct",
-        "Qwen/Qwen2.5-72B-Instruct",
-        "Qwen/Qwen2.5-Coder-32B-Instruct",
-        "Qwen/Qwen2-72B-Instruct",
-        "Qwen/QwQ-32B",
-        "Qwen/QwQ-32B-Preview",
-        "Qwen2.5-72B-Instruct",
-        "qwen",
-        "qwen-coder",
-        # "Qwen-QwQ-32B-Preview", >>>> NOT WORKING
-
-        # Google/Gemini Models
-        # "@cf/google/gemma-2b-it-lora", >>>> NOT WORKING
-        # "@cf/google/gemma-7b-it-lora", >>>> NOT WORKING
-        "@hf/google/gemma-7b-it",
-        "google/gemma-1.1-2b-it",
-        "google/gemma-1.1-7b-it",
-        "gemini-pro",
-        "gemini-1.5-pro",
-        "gemini-1.5-pro-latest",
-        "gemini-1.5-flash",
-        "gemini-flash-2.0",
-        "gemini-thinking",
-
-        # Microsoft Models
-        "@cf/microsoft/phi-2",
-        "microsoft/DialoGPT-medium",
-        "microsoft/Phi-3-medium-4k-instruct",
-        "microsoft/Phi-3-mini-4k-instruct",
-        "microsoft/Phi-3.5-mini-instruct",
-        "microsoft/phi-4",
-        "microsoft/WizardLM-2-8x22B",
-
-        # Yi Models
-        "01-ai/Yi-1.5-34B-Chat",
-        # "01-ai/Yi-34B-Chat", >>>> NOT WORKING
-
-        # DeepSeek Models
-        "@cf/deepseek-ai/deepseek-math-7b-base",
-        "@cf/deepseek-ai/deepseek-math-7b-instruct",
-        "@cf/deepseek-ai/deepseek-r1-distill-qwen-32b",
-        "deepseek",
-        "deepseek-ai/DeepSeek-R1",
-        "deepseek-ai/DeepSeek-R1-Distill-Llama-70B",
-        # "deepseek-ai/DeepSeek-R1-Distill-Qwen-32B", >>>> NOT WORKING
-        # "deepseek-ai/DeepSeek-V2.5", >>>> NOT WORKING
-        "deepseek-llm-67b-chat",
         "deepseek-r1",
-        "deepseek-r1-distill-llama-70b",
-        # "deepseek-reasoner", >>>> NOT WORKING
         "deepseek-v3",
-        "uncensored-r1",
-
-        # Specialized Models and Tools
-        "@cf/defog/sqlcoder-7b-2",
-        "@cf/thebloke/discolm-german-7b-v1-awq",
-        "@cf/tiiuae/falcon-7b-instruct",
-        # "@cf/tinyllama/tinyllama-1.1b-chat-v1.0", >>>> NOT WORKING
-        # "@hf/nexusflow/starling-lm-7b-beta", >>>> NOT WORKING
-        # "@hf/nousresearch/hermes-2-pro-mistral-7b", >>>> NOT WORKING
-        # "@hf/thebloke/deepseek-coder-6.7b-base-awq", >>>> NOT WORKING
-        # "@hf/thebloke/deepseek-coder-6.7b-instruct-awq", >>>> NOT WORKING
-        # "@hf/thebloke/llama-2-13b-chat-awq", >>>> NOT WORKING
-        # "@hf/thebloke/llamaguard-7b-awq", >>>> NOT WORKING
-        # "@hf/thebloke/mistral-7b-instruct-v0.1-awq", >>>> NOT WORKING
-        # "@hf/thebloke/neural-chat-7b-v3-1-awq", >>>> NOT WORKING
-        # "@hf/thebloke/openhermes-2.5-mistral-7b-awq", >>>> NOT WORKING
-        # "@hf/thebloke/zephyr-7b-beta-awq", >>>> NOT WORKING
+        "uncensored-r1",
         "Image-Generator",
-        # "flux-1-schnell", >>>> NOT WORKING
-        # "HelpingAI-15B", >>>> NOT WORKING
-        # "HelpingAI2-3b", >>>> NOT WORKING
-        # "HelpingAI2-6B", >>>> NOT WORKING
-        # "HelpingAI2-9B", >>>> NOT WORKING
-        # "HelpingAI2.5-10B", >>>> NOT WORKING
-        # "Helpingai2.5-10b-1m", >>>> NOT WORKING
-        # "HelpingAI2.5-2B", >>>> NOT WORKING
-        # "HELVETE", >>>> NOT WORKING
-        # "HELVETE-X", >>>> NOT WORKING
-        # "evil", >>>> NOT WORKING
-        # "Image-Generator", >>>> NOT WORKING
-        # "Image-Generator-NSFW", >>>> NOT WORKING
-        # "midijourney", >>>> NOT WORKING
-        # "Niansuh", >>>> NOT WORKING
-        # "niansuh-t1", >>>> NOT WORKING
-        # "Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
-        # "NousResearch/Hermes-3-Llama-3.1-8B", >>>> NOT WORKING
-        # "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO", >>>> NOT WORKING
-        # "nvidia/Llama-3.1-Nemotron-70B-Instruct", >>>> NOT WORKING
-        # "openai", >>>> NOT WORKING
-        # "openai-audio", >>>> NOT WORKING
-        # "openai-large", >>>> NOT WORKING
-        # "openai-reasoning", >>>> NOT WORKING
-        # "openai/whisper-large-v3", >>>> NOT WORKING
-        # "openai/whisper-large-v3-turbo", >>>> NOT WORKING
-        # "openbmb/MiniCPM-Llama3-V-2_5", >>>> NOT WORKING
-        # "openchat/openchat-3.6-8b", >>>> NOT WORKING
-        # "p1", >>>> NOT WORKING
-        # "phi", >>>> NOT WORKING
-        # "Phi-4-multilmodal-instruct", >>>> NOT WORKING
-        # "Priya-3B", >>>> NOT WORKING
-        # "rtist", >>>> NOT WORKING
-        # "searchgpt", >>>> NOT WORKING
-        # "sur", >>>> NOT WORKING
-        # "sur-mistral", >>>> NOT WORKING
-        # "tiiuae/falcon-7b-instruct", >>>> NOT WORKING
-        # "TirexAi", >>>> NOT WORKING
-        # "unity", >>>> NOT WORKING
     ]

     def __init__(
@@ -230,7 +49,7 @@ class TypeGPT(Provider):
         self.session = requests.Session()
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chat.typegpt.net/api/openai/
+        self.api_endpoint = "https://chat.typegpt.net/api/openai/v1/chat/completions"
         self.timeout = timeout
         self.last_response = {}
         self.model = model
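Net effect of the typegpt.py change: AVAILABLE_MODELS drops from roughly 190 entries (most of them commented out as NOT WORKING) to the handful that passed testing, and api_endpoint now carries the complete /v1/chat/completions path instead of a truncated prefix. Below is a minimal sketch of a call against that endpoint; the OpenAI-style payload and response shapes are assumptions based on the endpoint's naming, not something this diff shows.

import requests

API_ENDPOINT = "https://chat.typegpt.net/api/openai/v1/chat/completions"  # from the + line above

# Assumed OpenAI chat-completions request schema; only the URL and the
# model names are taken from the diff.
payload = {
    "model": "chatgpt-4o-latest",  # one of the retained AVAILABLE_MODELS
    "messages": [{"role": "user", "content": "Hello"}],
}
resp = requests.post(API_ENDPOINT, json=payload, timeout=30)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])  # assumed response shape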
webscout/Provider/uncovr.py
CHANGED
@@ -20,14 +20,14 @@ class UncovrAI(Provider):
         "gpt-4o-mini",
         "gemini-2-flash",
         "gemini-2-flash-lite",
-        "groq-llama-3-1-8b"
+        "groq-llama-3-1-8b",
+        "o3-mini",
+        "deepseek-r1-distill-qwen-32b",
         # The following models are not available in the free plan:
-        # "o3-mini",
         # "claude-3-7-sonnet",
         # "gpt-4o",
         # "claude-3-5-sonnet-v2",
         # "deepseek-r1-distill-llama-70b",
-        # "deepseek-r1-distill-qwen-32b",
         # "gemini-2-flash-lite-preview",
         # "qwen-qwq-32b"
     ]
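This hunk promotes "o3-mini" and "deepseek-r1-distill-qwen-32b" from the paid-plan comment block into the live list, adding the previously missing comma after "groq-llama-3-1-8b". A sketch of the guard such a list usually feeds in webscout providers (the error message is hypothetical; the class and module names come from this diff):

from webscout.Provider.uncovr import UncovrAI

model = "o3-mini"  # usable on the free plan as of 8.2 per this hunk
if model not in UncovrAI.AVAILABLE_MODELS:
    # hypothetical wording; the real check lives outside this hunk
    raise ValueError(f"Invalid model: {model}. Choose from: {UncovrAI.AVAILABLE_MODELS}")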
webscout/conversation.py
CHANGED
@@ -281,11 +281,11 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         if not os.path.exists(self.file):
             with open(self.file, "w", encoding="utf-8") as fh:
                 fh.write(self.intro + "\n")
-
+
         # Append new history
         with open(self.file, "a", encoding="utf-8") as fh:
             fh.write(new_history)
-
+
         self.chat_history += new_history
         # logger.info(f"Chat history updated with prompt: {prompt}")
 
@@ -317,21 +317,21 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
                 "tool": tool_name,
                 "result": tool_result
             }
-
+
             if self.file and self.update_file:
                 # Create file if it doesn't exist
                 if not os.path.exists(self.file):
                     with open(self.file, "w", encoding="utf-8") as fh:
                         fh.write(self.intro + "\n")
-
+
                 # Append new history
                 with open(self.file, "a", encoding="utf-8") as fh:
                     fh.write(new_history)
-
+
             self.chat_history += new_history
 
     def add_message(self, role: str, content: str) -> None:
-        """Add a new message to the chat - simple and clean!
+        """Add a new message to the chat - simple and clean!
 
         This method:
         - Validates the message role
@@ -379,10 +379,10 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
 
     def _parse_function_call(self, response: str) -> FunctionCallData:
         """Parse a function call from the LLM's response.
-
+
         Args:
             response (str): The LLM's response containing a function call
-
+
         Returns:
             FunctionCallData: Parsed function call data or error
         """
@@ -399,13 +399,13 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
         end_tag = "</tool_call>"
         start_idx = response.find(start_tag)
         end_idx = response.rfind(end_tag)
-
+
         if start_idx == -1 or end_idx == -1 or end_idx <= start_idx:
             raise ValueError("No valid <tool_call> JSON structure found in the response.")
-
+
         # Extract JSON content - for the format without brackets
         json_str: str = response[start_idx + len(start_tag):end_idx].strip()
-
+
         # Try to parse the JSON directly
         try:
             parsed_response: Any = json.loads(json_str)
@@ -425,7 +425,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
             # Extract JSON content - for the format with brackets
             json_str: str = response[start_idx + len(start_tag):end_idx].strip()
             parsed_response: Any = json.loads(json_str)
-
+
             if isinstance(parsed_response, list):
                 return {"tool_calls": parsed_response}
             elif isinstance(parsed_response, dict):
@@ -439,10 +439,10 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
 
     def execute_function(self, function_call_data: FunctionCallData) -> str:
         """Execute a function call and return the result.
-
+
         Args:
             function_call_data (FunctionCallData): The function call data
-
+
         Returns:
             str: Result of the function execution
         """
@@ -450,7 +450,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
 
         if not tool_calls or not isinstance(tool_calls, list):
             return "Invalid tool_calls format."
-
+
         results: List[str] = []
         for tool_call in tool_calls:
             function_name: str = tool_call.get("name")
@@ -465,19 +465,19 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
                 results.append(f"Executed {function_name} with arguments {arguments}")
 
         return "; ".join(results)
-
+
     def _convert_fns_to_tools(self, fns: Optional[List[Fn]]) -> List[ToolDefinition]:
         """Convert functions to tool definitions for the LLM.
-
+
         Args:
             fns (Optional[List[Fn]]): List of function definitions
-
+
         Returns:
             List[ToolDefinition]: List of tool definitions
         """
         if not fns:
             return []
-
+
         tools: List[ToolDefinition] = []
         for fn in fns:
             tool: ToolDefinition = {
@@ -499,55 +499,55 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
             }
             tools.append(tool)
         return tools
-
+
     def get_tools_description(self) -> str:
         """Get a formatted string of available tools for the intro prompt.
-
+
         Returns:
             str: Formatted tools description
         """
         if not self.tools:
             return ""
-
+
         tools_desc = []
         for fn in self.tools:
             params_desc = ", ".join([f"{name}: {typ}" for name, typ in fn.parameters.items()])
             tools_desc.append(f"- {fn.name}: {fn.description} (Parameters: {params_desc})")
-
+
         return "\n".join(tools_desc)
 
     def handle_tool_response(self, response: str) -> Dict[str, Any]:
         """Process a response that might contain a tool call.
-
+
         This method:
         - Checks if the response contains a tool call
         - Parses and executes the tool call if present
         - Returns the appropriate result
-
+
         Args:
             response (str): The LLM's response
-
+
         Returns:
             Dict[str, Any]: Result containing 'is_tool_call', 'result', and 'original_response'
         """
         # Check if response contains a tool call
         if "<tool_call>" in response:
            function_call_data = self._parse_function_call(response)
-
+
             if "error" in function_call_data:
                 return {
-                    "is_tool_call": True,
+                    "is_tool_call": True,
                     "success": False,
                     "result": function_call_data["error"],
                     "original_response": response
                 }
-
+
             # Execute the function call
             result = self.execute_function(function_call_data)
-
+
             # Add the result to chat history as a tool message
             self.add_message("tool", result)
-
+
             return {
                 "is_tool_call": True,
                 "success": True,
@@ -555,7 +555,7 @@ Your goal is to assist the user effectively. Analyze each query and choose one o
                 "tool_calls": function_call_data.get("tool_calls", []),
                 "original_response": response
             }
-
+
         return {
             "is_tool_call": False,
             "result": response,
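Aside from the reflowed docstring lines, every hunk in conversation.py strips trailing whitespace from otherwise blank lines; no behavior changes. The context lines do document the tool-call wire format, though: JSON wrapped in <tool_call>...</tool_call>, located with find/rfind, parsed with json.loads, and accepted as either a single object or a list (which gets wrapped as {"tool_calls": [...]}). A sketch of an input _parse_function_call would accept; the "arguments" key is inferred from execute_function's "Executed ... with arguments ..." message and should be treated as an assumption:

import json

response = (
    "Let me check that.\n"
    "<tool_call>\n"
    '{"name": "web_search", "arguments": {"query": "webscout 8.2"}}\n'
    "</tool_call>"
)

start_tag, end_tag = "<tool_call>", "</tool_call>"
start_idx = response.find(start_tag)
end_idx = response.rfind(end_tag)
json_str = response[start_idx + len(start_tag):end_idx].strip()
parsed = json.loads(json_str)  # a JSON list here would become {"tool_calls": [...]}
print(parsed["name"], parsed["arguments"])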
webscout/prompt_manager.py
CHANGED
@@ -110,7 +110,8 @@ class AwesomePrompts:
         self,
         key: Union[str, int],
         default: Optional[str] = None,
-        case_insensitive: bool = True
+        case_insensitive: bool = True,
+        raise_not_found: bool = False  # currently unused
     ) -> Optional[str]:
         """Get that perfect prompt! 🎯
 
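The getter gains a raise_not_found flag that is accepted but, per its own inline comment, not yet wired to anything. A usage sketch of the new signature; the method name get_act is an assumption from webscout's public AwesomePrompts API, since this hunk shows only the parameter list:

from webscout.prompt_manager import AwesomePrompts

prompt = AwesomePrompts().get_act(  # method name assumed; only the params appear in the hunk
    "linux terminal",               # key: Union[str, int]
    default=None,                   # fallback when the key is missing
    case_insensitive=True,
    raise_not_found=False,          # new in 8.2; currently unused
)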
webscout/version.py
CHANGED
@@ -1,2 +1,2 @@
-__version__ = "8.0"
+__version__ = "8.2"
 __prog__ = "webscout"