webscout-8.2.2-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/LambdaChat.py
DELETED
@@ -1,392 +0,0 @@
-import requests
-import json
-import time
-import random
-import re
-import uuid
-from typing import Any, Dict, List, Optional, Union, Generator
-
-from webscout.AIutel import Conversation
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent
-
-class LambdaChat(Provider):
-    """
-    A class to interact with the Lambda Chat API.
-    Supports streaming responses.
-    """
-    url = "https://lambda.chat"
-
-    AVAILABLE_MODELS = [
-        "deepseek-llama3.3-70b",
-        "deepseek-r1",
-        "hermes-3-llama-3.1-405b-fp8",
-        "llama3.1-nemotron-70b-instruct",
-        "lfm-40b",
-        "llama3.3-70b-instruct-fp8",
-        "qwen25-coder-32b-instruct"
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 2000,
-        timeout: int = 60,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        model: str = "deepseek-llama3.3-70b",
-        assistantId: str = None,
-        system_prompt: str = "You are a helpful assistant. Please answer the following question.",
-    ):
-        """Initialize the LambdaChat client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.model = model
-        self.session = requests.Session()
-        self.session.proxies.update(proxies)
-        self.assistantId = assistantId
-        self.system_prompt = system_prompt
-
-        # Set up headers for all requests
-        self.headers = {
-            "Content-Type": "application/json",
-            "User-Agent": LitAgent().random(),
-            "Accept": "*/*",
-            "Accept-Encoding": "gzip, deflate, br, zstd",
-            "Accept-Language": "en-US,en;q=0.9",
-            "Origin": self.url,
-            "Referer": f"{self.url}/",
-            "Sec-Ch-Ua": "\"Chromium\";v=\"120\"",
-            "Sec-Ch-Ua-Mobile": "?0",
-            "Sec-Ch-Ua-Platform": "\"Windows\"",
-            "Sec-Fetch-Dest": "empty",
-            "Sec-Fetch-Mode": "cors",
-            "Sec-Fetch-Site": "same-origin",
-            "DNT": "1",
-            "Priority": "u=1, i"
-        }
-
-        # Provider settings
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-
-        # Initialize conversation history
-        self.conversation = Conversation(is_conversation, max_tokens, filepath, update_file)
-
-        # Store conversation data for different models
-        self._conversation_data = {}
-
-    def create_conversation(self, model: str):
-        """Create a new conversation with the specified model."""
-        url = f"{self.url}/conversation"
-        payload = {
-            "model": model
-        }
-
-        # Update referer for this specific request
-        headers = self.headers.copy()
-        headers["Referer"] = f"{self.url}/models/{model}"
-
-        try:
-            response = self.session.post(url, json=payload, headers=headers)
-
-            if response.status_code == 401:
-                raise exceptions.AuthenticationError("Authentication failed.")
-
-            # Handle other error codes
-            if response.status_code != 200:
-                return None
-
-            data = response.json()
-            conversation_id = data.get("conversationId")
-
-            # Store conversation data
-            if model not in self._conversation_data:
-                self._conversation_data[model] = {
-                    "conversationId": conversation_id,
-                    "messageId": str(uuid.uuid4())  # Initial message ID
-                }
-
-            return conversation_id
-        except requests.exceptions.RequestException:
-            return None
-
-    def fetch_message_id(self, conversation_id: str) -> str:
-        """Fetch the latest message ID for a conversation."""
-        try:
-            url = f"{self.url}/conversation/{conversation_id}/__data.json?x-sveltekit-invalidated=11"
-            response = self.session.get(url, headers=self.headers)
-            response.raise_for_status()
-
-            # Parse the JSON data from the response
-            json_data = None
-            for line in response.text.split('\n'):
-                if line.strip():
-                    try:
-                        parsed = json.loads(line)
-                        if isinstance(parsed, dict) and "nodes" in parsed:
-                            json_data = parsed
-                            break
-                    except json.JSONDecodeError:
-                        continue
-
-            if not json_data:
-                # Fall back to a UUID if we can't parse the response
-                return str(uuid.uuid4())
-
-            # Extract message ID using the same pattern as in the example
-            if json_data.get("nodes", []) and json_data["nodes"][-1].get("type") == "error":
-                return str(uuid.uuid4())
-
-            data = json_data["nodes"][1]["data"]
-            keys = data[data[0]["messages"]]
-            message_keys = data[keys[-1]]
-            message_id = data[message_keys["id"]]
-
-            return message_id
-
-        except Exception:
-            # Fall back to a UUID if there's an error
-            return str(uuid.uuid4())
-
-    def generate_boundary(self):
-        """Generate a random boundary for multipart/form-data requests"""
-        boundary_chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
-        boundary = "----WebKitFormBoundary"
-        boundary += "".join(random.choice(boundary_chars) for _ in range(16))
-        return boundary
-
-    def process_response(self, response, prompt: str):
-        """Process streaming response and extract content."""
-        full_text = ""
-        sources = None
-        reasoning_text = ""
-        has_reasoning = False
-
-        for line in response.iter_lines(decode_unicode=True):
-            if not line:
-                continue
-
-            try:
-                # Parse each line as JSON
-                data = json.loads(line)
-
-                # Handle different response types
-                if "type" not in data:
-                    continue
-
-                if data["type"] == "stream" and "token" in data:
-                    token = data["token"].replace("\u0000", "")
-                    full_text += token
-                    resp = {"text": token}
-                    yield resp
-                elif data["type"] == "finalAnswer":
-                    final_text = data.get("text", "")
-                    if final_text and not full_text:
-                        full_text = final_text
-                        resp = {"text": final_text}
-                        yield resp
-                elif data["type"] == "webSearch" and "sources" in data:
-                    sources = data["sources"]
-                elif data["type"] == "reasoning":
-                    has_reasoning = True
-                    if data.get("subtype") == "stream" and "token" in data:
-                        reasoning_text += data["token"]
-
-                        # If we have reasoning, prepend it to the next text output
-                        if reasoning_text and not full_text:
-                            resp = {"text": f"<think>\n{reasoning_text}\n</think>\n", "is_reasoning": True}
-                            yield resp
-
-            except json.JSONDecodeError:
-                continue
-
-        # Update conversation history only for saving to file if needed
-        if full_text and self.conversation.file:
-            if has_reasoning:
-                full_text_with_reasoning = f"<think>\n{reasoning_text}\n</think>\n{full_text}"
-                self.last_response = {"text": full_text_with_reasoning}
-                self.conversation.update_chat_history(prompt, full_text_with_reasoning)
-            else:
-                self.last_response = {"text": full_text}
-                self.conversation.update_chat_history(prompt, full_text)
-
-        return full_text
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        web_search: bool = False,
-    ) -> Union[Dict[str, Any], Generator]:
-        """Send a message to the Lambda Chat API"""
-        model = self.model
-
-        # Check if we have a conversation for this model
-        if model not in self._conversation_data:
-            conversation_id = self.create_conversation(model)
-            if not conversation_id:
-                raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation with model {model}")
-        else:
-            conversation_id = self._conversation_data[model]["conversationId"]
-            # Refresh message ID
-            self._conversation_data[model]["messageId"] = self.fetch_message_id(conversation_id)
-
-        url = f"{self.url}/conversation/{conversation_id}"
-        message_id = self._conversation_data[model]["messageId"]
-
-        # Data to send
-        request_data = {
-            "inputs": prompt,
-            "id": message_id,
-            "is_retry": False,
-            "is_continue": False,
-            "web_search": web_search,
-            "tools": ["66e85bb396d054c5771bc6cb", "00000000000000000000000a"]
-        }
-
-        # Update headers for this specific request
-        headers = self.headers.copy()
-        headers["Referer"] = f"{self.url}/conversation/{conversation_id}"
-
-        # Create multipart form data
-        boundary = self.generate_boundary()
-        multipart_headers = headers.copy()
-        multipart_headers["Content-Type"] = f"multipart/form-data; boundary={boundary}"
-
-        # Serialize the data to JSON
-        data_json = json.dumps(request_data, separators=(',', ':'))
-
-        # Create the multipart form data body
-        body = f"--{boundary}\r\n"
-        body += f'Content-Disposition: form-data; name="data"\r\n'
-        body += f"Content-Type: application/json\r\n\r\n"
-        body += f"{data_json}\r\n"
-        body += f"--{boundary}--\r\n"
-
-        multipart_headers["Content-Length"] = str(len(body))
-
-        def for_stream():
-            try:
-                # Try with multipart/form-data first
-                response = None
-                try:
-                    response = self.session.post(
-                        url,
-                        data=body,
-                        headers=multipart_headers,
-                        stream=True,
-                        timeout=self.timeout
-                    )
-                except requests.exceptions.RequestException:
-                    pass
-
-                # If multipart fails or returns error, try with regular JSON
-                if not response or response.status_code != 200:
-                    response = self.session.post(
-                        url,
-                        json=request_data,
-                        headers=headers,
-                        stream=True,
-                        timeout=self.timeout
-                    )
-
-                # If both methods fail, raise exception
-                if response.status_code != 200:
-                    raise exceptions.FailedToGenerateResponseError(f"Request failed with status code {response.status_code}")
-
-                # Process the streaming response
-                yield from self.process_response(response, prompt)
-
-            except Exception as e:
-                if isinstance(e, requests.exceptions.RequestException):
-                    if hasattr(e, 'response') and e.response is not None:
-                        status_code = e.response.status_code
-                        if status_code == 401:
-                            raise exceptions.AuthenticationError("Authentication failed.")
-
-                # Try another model if current one fails
-                if len(self.AVAILABLE_MODELS) > 1:
-                    current_model_index = self.AVAILABLE_MODELS.index(self.model) if self.model in self.AVAILABLE_MODELS else 0
-                    next_model_index = (current_model_index + 1) % len(self.AVAILABLE_MODELS)
-                    self.model = self.AVAILABLE_MODELS[next_model_index]
-
-                    # Create new conversation with the alternate model
-                    conversation_id = self.create_conversation(self.model)
-                    if conversation_id:
-                        # Try again with the new model
-                        yield from self.ask(prompt, stream=True, raw=raw, optimizer=optimizer,
-                                            conversationally=conversationally, web_search=web_search)
-                        return
-
-                # If we get here, all models failed
-                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-        def for_non_stream():
-            response_text = ""
-            for response in for_stream():
-                if "text" in response:
-                    response_text += response["text"]
-            self.last_response = {"text": response_text}
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        web_search: bool = False
-    ) -> Union[str, Generator]:
-        """Generate a response to a prompt"""
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt, False, optimizer=optimizer, conversationally=conversationally, web_search=web_search
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Extract message text from response"""
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response.get("text", "")
-
-if __name__ == "__main__":
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in LambdaChat.AVAILABLE_MODELS:
-        try:
-            test_ai = LambdaChat(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
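
For reference, a minimal usage sketch for the removed provider follows. It is only valid against webscout 8.2.2 or earlier, since 8.2.7 ships without this module. The import path is inferred from the file path above, the model name and the streamed chunk shape come from the deleted code, and the prompt strings are illustrative only.

from webscout.Provider.LambdaChat import LambdaChat

# Pick any entry from AVAILABLE_MODELS shown above.
bot = LambdaChat(model="deepseek-r1", timeout=60)

# Non-streaming: chat() assembles and returns the full response string.
print(bot.chat("Say 'Hello' in one word"))

# Streaming: chat(stream=True) yields text fragments as they arrive;
# reasoning models emit a "<think>...</think>" block before the answer.
for fragment in bot.chat("Briefly explain multipart/form-data.", stream=True):
    print(fragment, end="", flush=True)

Note that on any request failure the deleted for_stream() rotates self.model to the next entry in AVAILABLE_MODELS and retries, so the model that finally answers may differ from the one requested.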
webscout/Provider/Llama.py
DELETED
@@ -1,200 +0,0 @@
-import requests
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-from typing import Any, AsyncGenerator, Dict, Union
-
-
-class LLAMA(Provider):
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-    ):
-        """Instantiates LLAMA
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): LLM model name. Defaults to "llama3-70b-8192".
-        """
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = "llama3-70b-8192",
-        self.api_endpoint = "https://api.safone.dev/llama"
-        self.headers = {
-            "accept": "application/json",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session = requests.Session()
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[dict, AsyncGenerator]:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict|AsyncGenerator : ai content
-        ```json
-        {
-            "text" : "print('How may I help you today?')"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        self.session.headers.update(self.headers)
-        payload = {
-            "message": conversation_prompt
-        }
-
-        def for_stream():
-            response = self.session.get(
-                self.api_endpoint, params=payload, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            message_load = ""
-            for chunk in response.iter_lines():
-                try:
-                    resp = json.loads(chunk)
-                    message_load += resp['message']
-                    yield chunk if raw else dict(text=message_load)
-                    self.last_response.update(resp)
-                except:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            response = self.session.get(
-                self.api_endpoint, params=payload, stream=False, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-            resp = response.json()
-            self.last_response.update(resp)
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-            return resp
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Union[str, AsyncGenerator]:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["message"]
-if __name__ == "__main__":
-    from rich import print
-
-    ai = LLAMA()
-    # Stream the response
-    response = ai.chat(input(">>> "))
-    for chunk in response:
-        print(chunk, end="", flush=True)
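
Again for reference, a minimal usage sketch for the removed LLAMA provider (webscout 8.2.2 or earlier; the module is gone in 8.2.7). The import path is inferred from the file path above and the prompts are illustrative. Two quirks are visible in the deleted code: the trailing comma in __init__ makes self.model a one-element tuple, and chat(stream=True) feeds {"text": ...} dicts into get_message(), which reads the "message" key and would raise KeyError; the sketch therefore sticks to ask() and non-streaming chat().

from webscout.Provider.Llama import LLAMA

ai = LLAMA(timeout=30)

# Non-streaming ask() returns the endpoint's JSON dict, e.g. {"message": "..."}.
resp = ai.ask("What is the capital of France?")
print(ai.get_message(resp))  # reads resp["message"]

# Streaming ask() yields dicts whose "text" key holds the text accumulated so far,
# so only the last chunk needs to be kept.
last = {}
for chunk in ai.ask("Count from 1 to 5.", stream=True):
    last = chunk
print(last.get("text", ""))

# Non-streaming chat() returns the reply as a plain string.
print(ai.chat("Name one LLaMA variant."))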