webscout 8.2.2__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/learnfastai.py
DELETED
@@ -1,266 +0,0 @@
import os
import json
from typing import Optional, Union, Generator
import uuid
import requests
import cloudscraper

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions


class LearnFast(Provider):
    """
    A class to interact with the LearnFast.ai API.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful AI assistant.",
    ):
        """
        Initializes the LearnFast.ai API with given parameters.
        """
        self.session = cloudscraper.create_scraper()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = 'https://autosite.erweima.ai/api/v1/chat'
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.headers = {
            "authority": "autosite.erweima.ai",
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "authorization": "",  # Always empty
            "content-type": "application/json",
            "dnt": "1",
            "origin": "https://learnfast.ai",
            "priority": "u=1, i",
            "referer": "https://learnfast.ai/",
            "sec-ch-ua": '"Microsoft Edge";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "cross-site",
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0",
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def generate_unique_id(self) -> str:
        """Generate a 32-character hexadecimal unique ID."""
        return uuid.uuid4().hex

    def generate_session_id(self) -> str:
        """Generate a 32-character hexadecimal session ID."""
        return uuid.uuid4().hex

    def upload_image_to_0x0(self, image_path: str) -> str:
        """
        Uploads an image to 0x0.st and returns the public URL.
        """
        if not os.path.isfile(image_path):
            raise FileNotFoundError(f"The file '{image_path}' does not exist.")

        with open(image_path, "rb") as img_file:
            files = {"file": img_file}
            try:
                response = requests.post("https://0x0.st", files=files)
                response.raise_for_status()
                image_url = response.text.strip()
                if not image_url.startswith("http"):
                    raise ValueError("Received an invalid URL from 0x0.st.")
                return image_url
            except requests.exceptions.RequestException as e:
                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e

    def create_payload(
        self,
        session_id: str,
        conversation_prompt: str,
        image_url: Optional[str] = None
    ) -> dict:
        """
        Creates the JSON payload for the request.
        """
        payload = {
            "prompt": conversation_prompt,
            "firstQuestionFlag": True,
            "sessionId": session_id,
            "attachments": []
        }
        if image_url:
            payload["attachments"] = [
                {
                    "fileType": "image/jpeg",
                    "file": {},
                    "fileContent": image_url
                }
            ]
        return payload

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        image_path: Optional[str] = None,
    ) -> Union[dict, Generator[dict, None, None]]:
        """Chat with LearnFast

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
            image_path (Optional[str], optional): Path to the image to be uploaded.
                Defaults to None.

        Returns:
            Union[dict, Generator[dict, None, None]]: Response generated
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        # Generate unique ID and session ID
        unique_id = self.generate_unique_id()
        session_id = self.generate_session_id()

        # Update headers with the unique ID
        self.headers["uniqueid"] = unique_id

        # Upload image and get URL if image_path is provided
        image_url = None
        if image_path:
            try:
                image_url = self.upload_image_to_0x0(image_path)
            except Exception as e:
                raise exceptions.FailedToGenerateResponseError(f"Error uploading image: {e}") from e

        # Create the payload
        payload = self.create_payload(session_id, conversation_prompt, image_url)

        # Convert the payload to a JSON string
        data = json.dumps(payload)

        try:
            # Send the POST request with streaming enabled
            response = self.session.post(self.api_endpoint, headers=self.headers, data=data, stream=True, timeout=self.timeout)
            response.raise_for_status()  # Check for HTTP errors

            # Process the streamed response
            full_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    line = line.strip()
                    if line == "[DONE]":
                        break
                    try:
                        json_response = json.loads(line)
                        if json_response.get('code') == 200 and json_response.get('data'):
                            message = json_response['data'].get('message', '')
                            if message:
                                full_response += message
                                if stream:
                                    yield {"text": message}
                    except json.JSONDecodeError:
                        pass
            self.last_response.update({"text": full_response})
            self.conversation.update_chat_history(prompt, full_response)

            if not stream:
                return self.last_response
        except requests.exceptions.RequestException as e:
            raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
        image_path: Optional[str] = None,
    ) -> Union[str, Generator[str, None, None]]:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
            image_path (Optional[str], optional): Path to the image to be uploaded.
                Defaults to None.
        Returns:
            Union[str, Generator[str, None, None]]: Response generated
        """
        try:
            response = self.ask(prompt, stream, optimizer=optimizer, conversationally=conversationally, image_path=image_path)
            if stream:
                for chunk in response:
                    yield chunk["text"]
            else:
                return str(response)
        except Exception as e:
            return f"Error: {str(e)}"

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == "__main__":
    from rich import print
    ai = LearnFast()
    response = ai.chat(input(">>> "), stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
webscout/Provider/lepton.py
DELETED
@@ -1,194 +0,0 @@
import requests
import re
import json

from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout.litagent import LitAgent as Lit
class Lepton(Provider):
    """
    A class to interact with the Lepton.run API.
    """
    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 600,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
    ) -> None:
        """Instantiates Lepton

        Args:
            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
            timeout (int, optional): Http request timeout. Defaults to 30.
            intro (str, optional): Conversation introductory prompt. Defaults to None.
            filepath (str, optional): Path to file containing conversation history. Defaults to None.
            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
            proxies (dict, optional): Http request proxies. Defaults to {}.
            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
        """
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.api_endpoint = "https://search.lepton.run/api/query"
        self.stream_chunk_size = 64
        self.timeout = timeout
        self.last_response = {}
        self.headers = {
            "accept": "*/*",
            "accept-encoding": "gzip, deflate, br, zstd",
            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
            "content-type": "text/plain;charset=UTF-8",
            "dnt": "1",
            "origin": "https://search.lepton.run",
            "priority": "u=1, i",
            "referer": "https://search.lepton.run/search?q=BYSyA&rid=aqZSHQomzwBBF3fyHnrND",
            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
            "sec-fetch-dest": "empty",
            "sec-fetch-mode": "cors",
            "sec-fetch-site": "same-origin",
            "user-agent": Lit().random(),
        }

        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        self.session.headers.update(self.headers)
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> dict:
        """Chat with AI

        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            raw (bool, optional): Stream back raw response as received. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            dict : {}
        ```json
        {
            "text" : "How may I assist you today?"
        }
        ```
        """
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        self.session.headers.update(self.headers)
        payload = json.dumps({"query": conversation_prompt})

        def for_non_stream():
            response = self.session.post(
                self.api_endpoint, data=payload, headers=self.headers, timeout=self.timeout
            )
            if not response.ok:
                raise Exception(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            response_text = response.text
            start_marker = "__LLM_RESPONSE__"
            end_marker = "__RELATED_QUESTIONS__"

            start_index = response_text.find(start_marker) + len(start_marker)
            end_index = response_text.find(end_marker)

            if start_index != -1 and end_index != -1:
                extracted_text = response_text[start_index:end_index].strip()

                # Remove citations using regular expression
                cleaned_text = re.sub(r'\[citation:\d+\]', '', extracted_text)

                self.last_response.update(dict(text=cleaned_text))

                self.conversation.update_chat_history(
                    prompt, self.get_message(self.last_response)
                )
                return self.last_response

        return for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> str:
        """Generate response `str`
        Args:
            prompt (str): Prompt to be send.
            stream (bool, optional): Flag for streaming response. Defaults to False.
            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
        Returns:
            str: Response generated
        """

        return self.get_message(
            self.ask(
                prompt,
                optimizer=optimizer,
                conversationally=conversationally,
            )
        )

    def get_message(self, response: dict) -> str:
        """Retrieves message only from response

        Args:
            response (dict): Response generated by `self.ask`

        Returns:
            str: Message extracted
        """
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]

if __name__ == '__main__':
    from rich import print
    ai = Lepton()
    response = ai.chat("hi")
    for chunk in response:
        print(chunk, end="", flush=True)
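The core of the deleted Lepton provider is the marker-slicing step in for_non_stream(). A minimal standalone sketch of that extraction, assuming the same response body layout (answer text between __LLM_RESPONSE__ and __RELATED_QUESTIONS__, with inline [citation:N] tags); the function name and the sample body are hypothetical:

import re

def extract_lepton_answer(response_text: str) -> str:
    """Slice the answer out of a Lepton search response body and strip
    [citation:N] tags, mirroring the marker logic in the deleted code."""
    start_marker = "__LLM_RESPONSE__"
    end_marker = "__RELATED_QUESTIONS__"
    start = response_text.find(start_marker)
    end = response_text.find(end_marker)
    if start == -1 or end == -1:
        return ""  # markers absent
    answer = response_text[start + len(start_marker):end].strip()
    return re.sub(r"\[citation:\d+\]", "", answer)

# Hypothetical body illustrating the format:
body = "...__LLM_RESPONSE__Paris is the capital.[citation:1]__RELATED_QUESTIONS__..."
print(extract_lepton_answer(body))  # -> "Paris is the capital."

One detail worth noting: the deleted code added len(start_marker) to the find() result before comparing against -1, so its missing-marker branch could never trigger; the sketch tests for -1 before applying the offset.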
webscout/Provider/llama3mitril.py
DELETED
@@ -1,180 +0,0 @@
import requests
import json
import re
from typing import Union, Any, Dict, Optional, Generator
from webscout.AIutel import Optimizers
from webscout.AIutel import Conversation
from webscout.AIutel import AwesomePrompts
from webscout.AIbase import Provider
from webscout import exceptions
from webscout.litagent import LitAgent as Lit

class Llama3Mitril(Provider):
    """
    A class to interact with the Llama3 Mitril API. Implements the WebScout provider interface.
    """

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2048,
        timeout: int = 30,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = {},
        history_offset: int = 10250,
        act: str = None,
        system_prompt: str = "You are a helpful, respectful and honest assistant.",
        temperature: float = 0.8,
    ):
        """Initializes the Llama3Mitril API."""
        self.session = requests.Session()
        self.is_conversation = is_conversation
        self.max_tokens = max_tokens
        self.temperature = temperature
        self.api_endpoint = "https://llama3.mithrilsecurity.io/generate_stream"
        self.timeout = timeout
        self.last_response = {}
        self.system_prompt = system_prompt
        self.headers = {
            "Content-Type": "application/json",
            "DNT": "1",
            "User-Agent": Lit().random(),
        }
        self.__available_optimizers = (
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )
        self.conversation = Conversation(
            is_conversation, self.max_tokens, filepath, update_file
        )
        self.conversation.history_offset = history_offset
        self.session.proxies = proxies

    def _format_prompt(self, prompt: str) -> str:
        """Format the prompt for the Llama3 model"""
        return (
            f"<|begin_of_text|>"
            f"<|start_header_id|>system<|end_header_id|>{self.system_prompt}<|eot_id|>"
            f"<|start_header_id|>user<|end_header_id|>{prompt}<|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|><|eot_id|>"
            f"<|start_header_id|>assistant<|end_header_id|>"
        )

    def ask(
        self,
        prompt: str,
        stream: bool = True,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
        """Sends a prompt to the Llama3 Mitril API and returns the response."""
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise exceptions.FailedToGenerateResponseError(
                    f"Optimizer is not one of {self.__available_optimizers}"
                )

        data = {
            "inputs": self._format_prompt(conversation_prompt),
            "parameters": {
                "max_new_tokens": self.max_tokens,
                "temperature": self.temperature,
                "return_full_text": False
            }
        }

        def for_stream():
            response = self.session.post(
                self.api_endpoint,
                headers=self.headers,
                json=data,
                stream=True,
                timeout=self.timeout
            )
            if not response.ok:
                raise exceptions.FailedToGenerateResponseError(
                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
                )

            streaming_response = ""
            for line in response.iter_lines(decode_unicode=True):
                if line:
                    try:
                        chunk = json.loads(line.split('data: ')[1])
                        if token_text := chunk.get('token', {}).get('text'):
                            if '<|eot_id|>' not in token_text:
                                streaming_response += token_text
                                yield token_text if raw else {"text": token_text}
                    except (json.JSONDecodeError, IndexError) as e:
                        continue

            self.last_response.update({"text": streaming_response})
            self.conversation.update_chat_history(
                prompt, self.get_message(self.last_response)
            )

        def for_non_stream():
            full_response = ""
            for chunk in for_stream():
                full_response += chunk if raw else chunk['text']
            return {"text": full_response}

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = True,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator[str, None, None]]:
        """Generates a response from the Llama3 Mitril API."""

        def for_stream():
            for response in self.ask(
                prompt, stream=True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, stream=False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: Dict[str, Any]) -> str:
        """Extracts the message from the API response."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response["text"]


if __name__ == "__main__":
    from rich import print

    ai = Llama3Mitril(
        max_tokens=2048,
        temperature=0.8,
        timeout=30
    )

    for response in ai.chat("Hello", stream=True):
        print(response, end="", flush=True)
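The Llama3Mitril provider read a server-sent-event style stream: each line of the form "data: {...}" carries a JSON event whose token.text field holds the next fragment, and fragments containing <|eot_id|> are suppressed. A minimal sketch of that decode step, assuming the same event shape; the function name and sample lines are hypothetical:

import json

def parse_token_stream(lines):
    """Pull token.text out of "data: {...}" event lines the way the deleted
    Llama3Mitril.ask() did, skipping end-of-turn markers and malformed lines."""
    for line in lines:
        if not line:
            continue
        try:
            event = json.loads(line.split("data: ")[1])
        except (IndexError, json.JSONDecodeError):
            continue  # no "data: " prefix or invalid JSON: skip, as the original did
        token_text = event.get("token", {}).get("text")
        if token_text and "<|eot_id|>" not in token_text:
            yield token_text

# Hypothetical event lines illustrating the format:
fake = ['data: {"token": {"text": "Hi"}}', 'data: {"token": {"text": "<|eot_id|>"}}']
print("".join(parse_token_stream(fake)))  # -> "Hi"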