webscout 6.5-py3-none-any.whl → 6.6-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/Extra/autocoder/autocoder_utiles.py +119 -101
- webscout/Provider/AISEARCH/__init__.py +2 -0
- webscout/Provider/AISEARCH/ooai.py +155 -0
- webscout/Provider/Amigo.py +70 -85
- webscout/Provider/{prefind.py → Jadve.py} +72 -70
- webscout/Provider/Netwrck.py +235 -0
- webscout/Provider/Openai.py +4 -3
- webscout/Provider/PI.py +2 -2
- webscout/Provider/PizzaGPT.py +3 -3
- webscout/Provider/TeachAnything.py +15 -2
- webscout/Provider/Youchat.py +42 -8
- webscout/Provider/__init__.py +134 -147
- webscout/Provider/multichat.py +230 -0
- webscout/Provider/promptrefine.py +2 -2
- webscout/Provider/talkai.py +10 -13
- webscout/Provider/turboseek.py +5 -4
- webscout/Provider/tutorai.py +8 -112
- webscout/Provider/typegpt.py +4 -5
- webscout/Provider/x0gpt.py +81 -9
- webscout/Provider/yep.py +123 -361
- webscout/__init__.py +10 -1
- webscout/conversation.py +24 -9
- webscout/exceptions.py +188 -20
- webscout/litprinter/__init__.py +4 -117
- webscout/litprinter/colors.py +54 -0
- webscout/optimizers.py +335 -185
- webscout/scout/__init__.py +2 -5
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +140 -0
- webscout/scout/core/scout.py +571 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +6 -5
- webscout/update_checker.py +117 -58
- webscout/version.py +1 -1
- webscout/zeroart/base.py +15 -16
- webscout/zeroart/effects.py +1 -1
- webscout/zeroart/fonts.py +1 -1
- {webscout-6.5.dist-info → webscout-6.6.dist-info}/METADATA +8 -165
- {webscout-6.5.dist-info → webscout-6.6.dist-info}/RECORD +59 -41
- webscout-6.6.dist-info/top_level.txt +2 -0
- webstoken/__init__.py +30 -0
- webstoken/classifier.py +189 -0
- webstoken/keywords.py +216 -0
- webstoken/language.py +128 -0
- webstoken/ner.py +164 -0
- webstoken/normalizer.py +35 -0
- webstoken/processor.py +77 -0
- webstoken/sentiment.py +206 -0
- webstoken/stemmer.py +73 -0
- webstoken/t.py +75 -0
- webstoken/tagger.py +60 -0
- webstoken/tokenizer.py +158 -0
- webscout/Provider/Perplexity.py +0 -591
- webscout/Provider/RoboCoders.py +0 -206
- webscout/Provider/genspark.py +0 -225
- webscout/Provider/perplexitylabs.py +0 -265
- webscout/Provider/twitterclone.py +0 -251
- webscout/Provider/upstage.py +0 -230
- webscout-6.5.dist-info/top_level.txt +0 -1
- /webscout/Provider/{felo_search.py → AISEARCH/felo_search.py} +0 -0
- {webscout-6.5.dist-info → webscout-6.6.dist-info}/LICENSE.md +0 -0
- {webscout-6.5.dist-info → webscout-6.6.dist-info}/WHEEL +0 -0
- {webscout-6.5.dist-info → webscout-6.6.dist-info}/entry_points.txt +0 -0
webscout/Provider/Youchat.py
CHANGED
@@ -1,4 +1,3 @@
-
 from uuid import uuid4
 from re import findall
 import json
@@ -12,12 +11,41 @@ from typing import Any, AsyncGenerator, Dict

 import cloudscraper

-
 class YouChat(Provider):
     """
     This class provides methods for interacting with the You.com chat API in a consistent provider structure.
     """

+    AVAILABLE_MODELS = [
+        "openai_o1",
+        "openai_o1_mini",
+        "gpt_4o_mini",
+        "gpt_4o",
+        "gpt_4_turbo",
+        "gpt_4",
+        "claude_3_5_sonnet",
+        "claude_3_opus",
+        "claude_3_sonnet",
+        "claude_3_5_haiku",
+        "claude_3_haiku",
+        "llama3_3_70b",
+        "llama3_2_90b",
+        "llama3_2_11b",
+        "llama3_1_405b",
+        "llama3_1_70b",
+        "llama3",
+        "mistral_large_2",
+        "gemini_1_5_flash",
+        "gemini_1_5_pro",
+        "databricks_dbrx_instruct",
+        "qwen2p5_72b",
+        "qwen2p5_coder_32b",
+        "command_r",
+        "command_r_plus",
+        "solar_1_mini",
+        "dolphin_2_5"
+    ]
+
     def __init__(
         self,
         is_conversation: bool = True,
@@ -29,6 +57,7 @@ class YouChat(Provider):
         proxies: dict = {},
         history_offset: int = 10250,
         act: str = None,
+        model: str = "claude_3_5_haiku",  # Default model set to claude_3_5_haiku
     ):
         """Instantiates YouChat

@@ -42,7 +71,11 @@ class YouChat(Provider):
             proxies (dict, optional): Http request proxies. Defaults to {}.
             history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
             act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
+            model (str, optional): Model to use. Defaults to "claude_3_5_haiku".
         """
+        if model not in self.AVAILABLE_MODELS:
+            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
+
         self.session = cloudscraper.create_scraper()  # Create a Cloudscraper session
         self.is_conversation = is_conversation
         self.max_tokens_to_sample = max_tokens
@@ -50,6 +83,7 @@ class YouChat(Provider):
         self.stream_chunk_size = 64
         self.timeout = timeout
         self.last_response = {}
+        self.model = model
         self.headers = {
             "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
             "Accept": "text/event-stream",
@@ -123,18 +157,18 @@

         payload = {
             "q": conversation_prompt,
-            "page":
-            "count":
+            "page": 2,
+            "count": 20,
             "safeSearch": "Moderate",
             "mkt": "en-IN",
             "domain": "youchat",
-            "use_personalization_extraction": "
+            "use_personalization_extraction": "false",
             "queryTraceId": str(uuid4()),
             "chatId": str(uuid4()),
             "conversationTurnId": str(uuid4()),
             "pastChatLength": 0,
             "isSmallMediumDevice": "true",
-            "selectedChatMode":
+            "selectedChatMode": self.model,  # Use the selected model
             "traceId": str(uuid4()),
             "chat": "[]"
         }
@@ -224,6 +258,6 @@
 if __name__ == '__main__':
     from rich import print
     ai = YouChat(timeout=5000)
-    response = ai.chat("
+    response = ai.chat("hi", stream=True)
     for chunk in response:
-        print(chunk, end="", flush=True)
+        print(chunk, end="", flush=True)
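The upshot of this change is that callers can now select the backend model per instance, with validation at construction time. A minimal usage sketch, assuming YouChat resolves through webscout.Provider as the rewritten __init__.py below suggests:

```python
from webscout.Provider import YouChat

# Any name outside AVAILABLE_MODELS raises ValueError in __init__.
ai = YouChat(model="gpt_4o_mini", timeout=30)

# Streaming mirrors the module's own __main__ demo: chunks arrive as strings.
for chunk in ai.chat("hi", stream=True):
    print(chunk, end="", flush=True)
```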
webscout/Provider/__init__.py
CHANGED
@@ -1,147 +1,134 @@
-# webscout/providers/__init__.py
-from .PI import *
-from .Llama import LLAMA
-from .Cohere import Cohere
-from .Reka import REKA
-from .Groq import GROQ
-from .Groq import AsyncGROQ
-from .Openai import OPENAI
-from .Openai import AsyncOPENAI
-from .Koboldai import KOBOLDAI
-from .Koboldai import AsyncKOBOLDAI
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-from .
-
-
-
-
-
-
-
-
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-'
-
-'
-'
-'
-'
-'
-'
-'
-
-    # 'ChatHub',
-    'AIMathGPT',
-    'GaurishCerebras',
-    'GeminiPro',
-    'NinjaChat',
-    'LLMChat',
-    'Talkai',
-    'Llama3Mitril',
-    'Marcus',
-    'RoboCoders',
-    'TypeGPT',
-    'Mhystical',
-]
+# webscout/providers/__init__.py
+from .PI import *
+from .Llama import LLAMA
+from .Cohere import Cohere
+from .Reka import REKA
+from .Groq import GROQ
+from .Groq import AsyncGROQ
+from .Openai import OPENAI
+from .Openai import AsyncOPENAI
+from .Koboldai import KOBOLDAI
+from .Koboldai import AsyncKOBOLDAI
+from .Blackboxai import BLACKBOXAI
+from .Phind import PhindSearch
+from .Phind import Phindv2
+from .ai4chat import *
+from .Gemini import GEMINI
+from .Deepseek import DeepSeek
+from .Deepinfra import DeepInfra
+from .Farfalle import *
+from .cleeai import *
+from .OLLAMA import OLLAMA
+from .Andi import AndiSearch
+from .PizzaGPT import *
+from .Llama3 import *
+from .DARKAI import *
+from .koala import *
+from .RUBIKSAI import *
+from .meta import *
+from .DiscordRocks import *
+from .julius import *
+from .Youchat import *
+from .yep import *
+from .Cloudflare import *
+from .turboseek import *
+from .Free2GPT import *
+from .EDITEE import *
+from .TeachAnything import *
+from .AI21 import *
+from .Chatify import *
+from .x0gpt import *
+from .cerebras import *
+from .lepton import *
+from .geminiapi import *
+from .elmo import *
+from .Bing import *
+from .GPTWeb import *
+from .Netwrck import Netwrck
+from .llamatutor import *
+from .promptrefine import *
+from .tutorai import *
+from .ChatGPTES import *
+from .Amigo import *
+from .bagoodex import *
+from .aimathgpt import *
+from .gaurish import *
+from .geminiprorealtime import *
+from .NinjaChat import *
+from .llmchat import *
+from .talkai import *
+from .askmyai import *
+from .llama3mitril import *
+from .Marcus import *
+from .typegpt import *
+from .mhystical import *
+from .multichat import *
+from .Jadve import *
+__all__ = [
+    'Farfalle',
+    'LLAMA',
+    'Cohere',
+    'REKA',
+    'GROQ',
+    'AsyncGROQ',
+    'OPENAI',
+    'AsyncOPENAI',
+    'KOBOLDAI',
+    'AsyncKOBOLDAI',
+    'BLACKBOXAI',
+    'PhindSearch',
+    'GEMINI',
+    'DeepSeek',
+    'DeepInfra',
+    'AI4Chat',
+    'Phindv2',
+    'OLLAMA',
+    'AndiSearch',
+    'PIZZAGPT',
+    'LLAMA3',
+    'DARKAI',
+    'KOALA',
+    'RUBIKSAI',
+    'Meta',
+    'AskMyAI',
+    'DiscordRocks',
+    'PiAI',
+    'Julius',
+    'YouChat',
+    'YEPCHAT',
+    'Cloudflare',
+    'TurboSeek',
+    'Editee',
+    'TeachAnything',
+    'AI21',
+    'Chatify',
+    'X0GPT',
+    'Cerebras',
+    'Lepton',
+    'GEMINIAPI',
+    'Cleeai',
+    'Elmo',
+    'Free2GPT',
+    'Bing',
+    'GPTWeb',
+    'Netwrck',
+    'LlamaTutor',
+    'PromptRefine',
+    'TutorAI',
+    'ChatGPTES',
+    'AmigoChat',
+    'Bagoodex',
+    'AIMathGPT',
+    'GaurishCerebras',
+    'GeminiPro',
+    'NinjaChat',
+    'LLMChat',
+    'Talkai',
+    'Llama3Mitril',
+    'Marcus',
+    'TypeGPT',
+    'Mhystical',
+    'Netwrck',
+    'MultiChatAI',
+    'JadveOpenAI',
+]
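For consumers of the package, the rewrite above means the re-exports for the providers deleted in this release (Perplexity, RoboCoders, genspark, perplexitylabs, twitterclone, upstage) are gone, while the new providers are wired in. A short sketch of what should and should not import in 6.6, assuming JadveOpenAI is the class exported by the renamed Jadve.py (it appears in __all__ above):

```python
# New in 6.6: resolved through webscout.Provider's star imports and __all__.
from webscout.Provider import Netwrck, MultiChatAI, JadveOpenAI

# Removed in 6.6: these now fail, since their modules were deleted.
# from webscout.Provider import RoboCoders  # ImportError
```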
webscout/Provider/multichat.py
ADDED
@@ -0,0 +1,230 @@
+import requests
+import json
+from typing import Any, Dict, Optional, Generator
+
+from webscout.AIutel import Optimizers
+from webscout.AIutel import Conversation
+from webscout.AIutel import AwesomePrompts
+from webscout.AIbase import Provider
+from webscout import exceptions
+
+# Model configurations
+MODEL_CONFIGS = {
+    "llama": {
+        "endpoint": "https://www.multichatai.com/api/chat/meta",
+        "models": {
+            "llama-3.1-70b-versatile": {"contextLength": 8192},
+            "llama-3.2-90b-vision-preview": {"contextLength": 32768},
+            "llama-3.2-11b-vision-preview": {"contextLength": 32768},
+        },
+    },
+    "alibaba": {
+        "endpoint": "https://www.multichatai.com/api/chat/alibaba",
+        "models": {
+            "Qwen/Qwen2.5-72B-Instruct": {"contextLength": 32768},
+            "Qwen/Qwen2.5-Coder-32B-Instruct": {"contextLength": 32768},
+        },
+    },
+    "cohere": {
+        "endpoint": "https://www.multichatai.com/api/chat/cohere",
+        "models": {"command-r": {"contextLength": 128000}},
+    },
+}
+
+class MultiChatAI(Provider):
+    """
+    A class to interact with the MultiChatAI API.
+    """
+
+    def __init__(
+        self,
+        is_conversation: bool = True,
+        max_tokens: int = 4000,
+        timeout: int = 30,
+        intro: str = None,
+        filepath: str = None,
+        update_file: bool = True,
+        proxies: dict = {},
+        history_offset: int = 10250,
+        act: str = None,
+        model: str = "llama-3.1-70b-versatile",  # Default model
+        system_prompt: str = "You are a helpful assistant.",
+        temperature: float = 0.5,
+        presence_penalty: int = 0,
+        frequency_penalty: int = 0,
+        top_p: float = 1,
+    ):
+        """Initializes the MultiChatAI API client."""
+        self.session = requests.Session()
+        self.is_conversation = is_conversation
+        self.max_tokens_to_sample = max_tokens
+        self.timeout = timeout
+        self.last_response = {}
+        self.model = model
+        self.system_prompt = system_prompt
+        self.temperature = temperature
+        self.presence_penalty = presence_penalty
+        self.frequency_penalty = frequency_penalty
+        self.top_p = top_p
+        self.headers = {
+            "accept": "*/*",
+            "accept-language": "en-US,en;q=0.9",
+            "content-type": "text/plain;charset=UTF-8",
+            "origin": "https://www.multichatai.com",
+            "referer": "https://www.multichatai.com/",
+            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36",
+        }
+        self.session.headers.update(self.headers)
+        self.session.proxies = proxies
+
+        self.__available_optimizers = (
+            method
+            for method in dir(Optimizers)
+            if callable(getattr(Optimizers, method)) and not method.startswith("__")
+        )
+        Conversation.intro = (
+            AwesomePrompts().get_act(
+                act, raise_not_found=True, default=None, case_insensitive=True
+            )
+            if act
+            else intro or Conversation.intro
+        )
+        self.conversation = Conversation(
+            is_conversation, self.max_tokens_to_sample, filepath, update_file
+        )
+        self.conversation.history_offset = history_offset
+
+        # Parse provider and model name
+        self.provider = "llama"  # Default provider
+        self.model_name = self.model
+
+        # Check if model exists in any provider
+        model_found = False
+        for provider, config in MODEL_CONFIGS.items():
+            if self.model in config["models"]:
+                self.provider = provider
+                self.model_name = self.model
+                model_found = True
+                break
+
+        if not model_found:
+            available_models = []
+            for provider, config in MODEL_CONFIGS.items():
+                for model in config["models"].keys():
+                    available_models.append(f"{provider}/{model}")
+            raise ValueError(
+                f"Invalid model: {self.model}\nAvailable models: {', '.join(available_models)}"
+            )
+
+    def _get_endpoint(self) -> str:
+        """Get the API endpoint for the current provider."""
+        return MODEL_CONFIGS[self.provider]["endpoint"]
+
+    def _get_chat_settings(self) -> Dict[str, Any]:
+        """Get chat settings for the current model."""
+        base_settings = MODEL_CONFIGS[self.provider]["models"][self.model_name]
+        return {
+            "model": self.model,
+            "prompt": self.system_prompt,
+            "temperature": self.temperature,
+            "contextLength": base_settings["contextLength"],
+            "includeProfileContext": True,
+            "includeWorkspaceInstructions": True,
+            "embeddingsProvider": "openai"
+        }
+
+    def ask(
+        self,
+        prompt: str,
+        stream: bool = False,
+        raw: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> Dict[str, Any] | Generator:
+        """Sends a prompt to the MultiChatAI API and returns the response."""
+        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
+        if optimizer:
+            if optimizer in self.__available_optimizers:
+                conversation_prompt = getattr(Optimizers, optimizer)(
+                    conversation_prompt if conversationally else prompt
+                )
+            else:
+                raise exceptions.FailedToGenerateResponseError(
+                    f"Optimizer is not one of {self.__available_optimizers}"
+                )
+
+        payload = {
+            "chatSettings": self._get_chat_settings(),
+            "messages": [
+                {"role": "system", "content": self.system_prompt},
+                {"role": "user", "content": conversation_prompt},
+            ],
+            "customModelId": "",
+        }
+
+        try:
+            response = self.session.post(
+                self._get_endpoint(),
+                headers=self.headers,
+                json=payload,
+                stream=True,
+                timeout=self.timeout,
+            )
+            response.raise_for_status()
+
+            full_response = ""
+            for line in response.iter_lines():
+                if line:
+                    decoded_line = line.decode("utf-8")
+                    if stream:
+                        yield {"text": decoded_line}
+                    full_response += decoded_line
+
+            self.last_response = {"text": full_response.strip()}
+            self.conversation.update_chat_history(prompt, full_response.strip())
+
+            if not stream:
+                return self.last_response
+
+        except requests.exceptions.RequestException as e:
+            raise exceptions.ProviderConnectionError(f"API request failed: {e}") from e
+        except json.JSONDecodeError as e:
+            raise exceptions.InvalidResponseError(f"Invalid JSON response: {e}") from e
+        except Exception as e:
+            raise exceptions.FailedToGenerateResponseError(f"Unexpected error: {e}") from e
+
+    def chat(
+        self,
+        prompt: str,
+        stream: bool = False,
+        optimizer: str = None,
+        conversationally: bool = False,
+    ) -> str | Generator[str, None, None]:
+        """Generate response."""
+        if stream:
+            for chunk in self.ask(
+                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
+            ):
+                if isinstance(chunk, dict):
+                    yield chunk.get("text", "")
+                else:
+                    yield str(chunk)
+        else:
+            response = self.ask(
+                prompt, stream=False, optimizer=optimizer, conversationally=conversationally
+            )
+            return response.get("text", "") if isinstance(response, dict) else str(response)
+
+    def get_message(self, response: Dict[str, Any] | str) -> str:
+        """Retrieves message from response."""
+        if isinstance(response, dict):
+            return response.get("text", "")
+        return str(response)
+
+if __name__ == "__main__":
+    from rich import print
+
+    ai = MultiChatAI(model="llama-3.1-70b-versatile")
+    response = ai.chat("What is the meaning of life?", stream=True)
+    for chunk in response:
+        print(chunk, end="", flush=True)
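Note how the constructor above routes requests: the model string alone selects a provider block in MODEL_CONFIGS, and with it the endpoint, so there is no separate provider argument. A cross-provider usage sketch, assuming the star import into webscout.Provider shown in the __init__.py diff above:

```python
from webscout.Provider import MultiChatAI

# "Qwen/..." matches the "alibaba" block in MODEL_CONFIGS, so requests go to
# https://www.multichatai.com/api/chat/alibaba without any provider argument.
ai = MultiChatAI(model="Qwen/Qwen2.5-Coder-32B-Instruct")
for chunk in ai.chat("Write a haiku about Python.", stream=True):
    print(chunk, end="", flush=True)
```

One caveat in the code as shipped: both ask() and chat() contain yield statements, so Python treats them as generator functions even when stream=False, and the return statements in their non-streaming paths end the generator rather than handing back a dict or str. Consuming the stream, as above, is the reliable pattern.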
webscout/Provider/promptrefine.py
CHANGED
@@ -6,7 +6,7 @@ from webscout.AIutel import Optimizers
 from webscout.AIutel import Conversation
 from webscout.AIutel import AwesomePrompts
 from webscout.AIbase import Provider
-from
+from webscout import LitAgent as UserAgent

 class PromptRefine(Provider):
     """
@@ -55,7 +55,7 @@ class PromptRefine(Provider):
         self.headers = {
             'origin': 'https://www.promptrefine.com',
             'referer': 'https://www.promptrefine.com/prompt/new',
-            'user-agent': UserAgent().random
+            'user-agent': UserAgent().random()
         }

         self.__available_optimizers = (
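The one-character fix above matters because, without the call parentheses, the headers dict stored the bound method object rather than a user-agent string. A hypothetical minimal reproduction (LitAgent's real implementation may differ; all that is assumed is that random() returns a string, which the fix implies):

```python
class LitAgent:
    """Stand-in for webscout's LitAgent, reduced to the behavior the fix touches."""
    def random(self) -> str:
        return "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"

agent = LitAgent()
broken = {'user-agent': agent.random}    # a bound method object, not a string
fixed = {'user-agent': agent.random()}   # an actual user-agent string

print(type(broken['user-agent']).__name__)  # method
print(type(fixed['user-agent']).__name__)   # str
```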
|