webscout 8.2.6__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIutel.py +97 -87
- webscout/version.py +1 -1
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/METADATA +2 -15
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -910
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -684
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -198
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -322
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -316
- webscout/Provider/AllenAI.py +0 -447
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/ChatGPTClone.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/ChatSandbox.py +0 -342
- webscout/Provider/Cloudflare.py +0 -325
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/Deepinfra.py +0 -338
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -358
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -370
- webscout/Provider/GizAI.py +0 -285
- webscout/Provider/Glider.py +0 -222
- webscout/Provider/Groq.py +0 -801
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -257
- webscout/Provider/HuggingFaceChat.py +0 -469
- webscout/Provider/Hunyuan.py +0 -283
- webscout/Provider/Jadve.py +0 -291
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -411
- webscout/Provider/Llama3.py +0 -259
- webscout/Provider/MCPCore.py +0 -315
- webscout/Provider/Marcus.py +0 -206
- webscout/Provider/Nemotron.py +0 -218
- webscout/Provider/Netwrck.py +0 -270
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -28
- webscout/Provider/OPENAI/ai4chat.py +0 -286
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -481
- webscout/Provider/OPENAI/deepinfra.py +0 -309
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -437
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/groq.py +0 -354
- webscout/Provider/OPENAI/heckai.py +0 -341
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/mcpcore.py +0 -376
- webscout/Provider/OPENAI/multichat.py +0 -368
- webscout/Provider/OPENAI/netwrck.py +0 -350
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -462
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -329
- webscout/Provider/OPENAI/toolbaz.py +0 -406
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -209
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -429
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -290
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -8
- webscout/Provider/TTS/base.py +0 -159
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -128
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -233
- webscout/Provider/TextPollinationsAI.py +0 -306
- webscout/Provider/TwoAI.py +0 -280
- webscout/Provider/TypliAI.py +0 -305
- webscout/Provider/Venice.py +0 -258
- webscout/Provider/VercelAI.py +0 -253
- webscout/Provider/WiseCat.py +0 -233
- webscout/Provider/WrDoChat.py +0 -370
- webscout/Provider/Writecream.py +0 -237
- webscout/Provider/WritingMate.py +0 -269
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -178
- webscout/Provider/ai4chat.py +0 -203
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -335
- webscout/Provider/asksteve.py +0 -212
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -288
- webscout/Provider/chatglm.py +0 -215
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -425
- webscout/Provider/elmo.py +0 -283
- webscout/Provider/freeaichat.py +0 -285
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -235
- webscout/Provider/hermes.py +0 -266
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/learnfastai.py +0 -325
- webscout/Provider/llama3mitril.py +0 -215
- webscout/Provider/llmchat.py +0 -255
- webscout/Provider/llmchatco.py +0 -306
- webscout/Provider/meta.py +0 -798
- webscout/Provider/multichat.py +0 -364
- webscout/Provider/scira_chat.py +0 -297
- webscout/Provider/scnet.py +0 -243
- webscout/Provider/searchchat.py +0 -292
- webscout/Provider/sonus.py +0 -258
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -353
- webscout/Provider/turboseek.py +0 -266
- webscout/Provider/typefully.py +0 -330
- webscout/Provider/typegpt.py +0 -289
- webscout/Provider/uncovr.py +0 -368
- webscout/Provider/x0gpt.py +0 -299
- webscout/Provider/yep.py +0 -389
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -95
- webscout/swiftcli/core/__init__.py +0 -7
- webscout/swiftcli/core/cli.py +0 -297
- webscout/swiftcli/core/context.py +0 -104
- webscout/swiftcli/core/group.py +0 -241
- webscout/swiftcli/decorators/__init__.py +0 -28
- webscout/swiftcli/decorators/command.py +0 -221
- webscout/swiftcli/decorators/options.py +0 -220
- webscout/swiftcli/decorators/output.py +0 -252
- webscout/swiftcli/exceptions.py +0 -21
- webscout/swiftcli/plugins/__init__.py +0 -9
- webscout/swiftcli/plugins/base.py +0 -135
- webscout/swiftcli/plugins/manager.py +0 -262
- webscout/swiftcli/utils/__init__.py +0 -59
- webscout/swiftcli/utils/formatting.py +0 -252
- webscout/swiftcli/utils/parsing.py +0 -267
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.6.dist-info/RECORD +0 -307
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.6.dist-info → webscout-8.2.7.dist-info}/licenses/LICENSE.md +0 -0
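
Note for downstream users: imports that resolved under webscout 8.2.6 (for example `webscout.Provider.koala` or `webscout.Extra.tempmail`) will raise `ImportError` once 8.2.7 is installed, since the modules listed above were removed outright. A minimal defensive sketch, assuming only the 8.2.6 module paths shown in this list; the `KOALA_AVAILABLE` flag is an illustrative name, not part of webscout:

```python
# Hypothetical guard for code that still imports a module removed in webscout 8.2.7.
# The import path comes from the 8.2.6 file list above; KOALA_AVAILABLE is illustrative.
try:
    from webscout.Provider.koala import KOALA  # present in 8.2.6, deleted in 8.2.7
    KOALA_AVAILABLE = True
except ImportError:
    KOALA = None
    KOALA_AVAILABLE = False

if not KOALA_AVAILABLE:
    print("This webscout release no longer ships the KOALA provider; pin webscout==8.2.6 if it is required.")
```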
webscout/Provider/koala.py
DELETED
@@ -1,268 +0,0 @@
-import requests
-import json
-from typing import Union, Any, Dict, Optional
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider
-from webscout import exceptions
-
-class KOALA(Provider):
-    """
-    A class to interact with the Koala.sh API.
-    """
-
-    AVAILABLE_MODELS = [
-        "gpt-4.1-mini",
-        "gpt-4.1",
-    ]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "gpt-4.1",
-        web_search: bool = True,
-
-    ) -> None:
-        """
-        Initializes the KOALASH API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion.
-                Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts.
-                Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): AI model to use. Defaults to "gpt-4o-mini".
-        """
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://koala.sh/api/gpt/"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.headers = {
-            "accept": "text/event-stream",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-length": "73",
-            "content-type": "application/json",
-            "dnt": "1",
-            "flag-real-time-data": "true" if web_search else "false",
-            "origin": "https://koala.sh",
-            "priority": "u=1, i",
-            "referer": "https://koala.sh/chat",
-            "sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> Dict[str, Any]:
-        """
-        Sends a prompt to the Koala.sh API and returns the response.
-
-        Args:
-            prompt: The text prompt to generate text from.
-            stream (bool, optional): Whether to stream the response. Defaults to False.
-            raw (bool, optional): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): The name of the optimizer to use. Defaults to None.
-            conversationally (bool, optional): Whether to chat conversationally. Defaults to False.
-
-        Returns:
-            The response from the API.
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        payload = {
-            "input": conversation_prompt,
-            "model": self.model
-        }
-
-        def for_stream():
-            response = self.session.post(
-                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
-            )
-
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
-                )
-
-            streaming_response = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    if line.startswith("data:"):
-                        data = line[len("data:"):].strip()
-                        if data:
-                            try:
-                                event = json.loads(data)
-                                streaming_response += event.get("choices", [{}])[0].get("delta", {}).get("content", "")
-                                yield event if raw else dict(text=streaming_response)
-                            except json.decoder.JSONDecodeError:
-                                continue
-            self.last_response.update(dict(text=streaming_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-        def for_non_stream():
-            response = self.session.post(
-                self.api_endpoint, json=payload, headers=self.headers, timeout=self.timeout
-            )
-
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
-                )
-
-            response_content = response.content.decode('utf-8')
-            data_parts = response_content.strip().split('\n\n')
-            formatted_response = ''.join([part.replace('data: ', '') for part in data_parts if part.startswith('data: ')])
-
-            # Remove extra quotes from the formatted response
-            formatted_response = formatted_response.replace('""', '')
-
-            # Split the response into lines and format with new lines before headers
-            lines = formatted_response.split('\n')
-            formatted_lines = []
-            for line in lines:
-                if line.startswith('###'):
-                    formatted_lines.append('\n' + line)
-                else:
-                    formatted_lines.append(line)
-
-            # Join the formatted lines back into a single string
-            final_response = '\n'.join(formatted_lines)
-
-            # self.last_response.update(dict(text=streaming_response))
-            self.conversation.update_chat_history(
-                prompt, final_response
-            )
-            return dict(text=final_response)
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
-if __name__ == '__main__':
-    print("-" * 80)
-    print(f"{'Model':<50} {'Status':<10} {'Response'}")
-    print("-" * 80)
-
-    for model in KOALA.AVAILABLE_MODELS:
-        try:
-            test_ai = KOALA(model=model, timeout=60)
-            response = test_ai.chat("Say 'Hello' in one word")
-            response_text = response
-
-            if response_text and len(response_text.strip()) > 0:
-                status = "✓"
-                # Truncate response if too long
-                display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
-            else:
-                status = "✗"
-                display_text = "Empty or invalid response"
-            print(f"{model:<50} {status:<10} {display_text}")
-        except Exception as e:
-            print(f"{model:<50} {'✗':<10} {str(e)}")
webscout/Provider/learnfastai.py
DELETED
@@ -1,325 +0,0 @@
-import os
-import json
-from typing import Any, Dict, Optional, Union, Generator
-import uuid
-from curl_cffi.requests import Session
-from curl_cffi import CurlError
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation, sanitize_stream # Import sanitize_stream
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-
-
-class LearnFast(Provider):
-    """
-    A class to interact with the LearnFast.ai API.
-    """
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600, # Note: max_tokens is not used by this API
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        system_prompt: str = "You are a helpful AI assistant.", # Note: system_prompt is not used by this API
-    ):
-        """
-        Initializes the LearnFast.ai API with given parameters.
-        """
-        # Initialize curl_cffi Session
-        self.session = Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = 'https://autosite.erweima.ai/api/v1/chat'
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.headers = {
-            "authority": "autosite.erweima.ai",
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "authorization": "", # Always empty
-            "content-type": "application/json",
-            "dnt": "1",
-            "origin": "https://learnfast.ai",
-            "priority": "u=1, i", # Keep priority header if needed
-            "referer": "https://learnfast.ai/",
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "cross-site",
-            # uniqueid will be added dynamically in ask()
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        # Update curl_cffi session headers and proxies
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies # Assign proxies directly
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    @staticmethod
-    def _learnfast_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
-        """Extracts message content from LearnFast stream JSON objects."""
-        if isinstance(chunk, dict) and chunk.get('code') == 200 and chunk.get('data'):
-            return chunk['data'].get('message')
-        return None
-
-    def generate_unique_id(self) -> str:
-        """Generate a 32-character hexadecimal unique ID."""
-        return uuid.uuid4().hex
-
-    def generate_session_id(self) -> str:
-        """Generate a 32-character hexadecimal session ID."""
-        return uuid.uuid4().hex
-
-    def upload_image_to_0x0(self, image_path: str) -> str:
-        """
-        Uploads an image to 0x0.st and returns the public URL.
-        """
-        if not os.path.isfile(image_path):
-            raise FileNotFoundError(f"The file '{image_path}' does not exist.")
-
-        with open(image_path, "rb") as img_file:
-            files = {"file": img_file}
-            try:
-                response = self.session.post(
-                    "https://0x0.st",
-                    files=files,
-                    # Add impersonate if using the main session
-                    impersonate="chrome110"
-                )
-                response.raise_for_status()
-                image_url = response.text.strip()
-                if not image_url.startswith("http"):
-                    raise ValueError("Received an invalid URL from 0x0.st.")
-                return image_url
-            except CurlError as e: # Catch CurlError
-                raise Exception(f"Failed to upload image to 0x0.st (CurlError): {e}") from e
-            except Exception as e: # Catch other potential errors
-                raise Exception(f"Failed to upload image to 0x0.st: {e}") from e
-
-    def create_payload(
-        self,
-        session_id: str,
-        conversation_prompt: str,
-        image_url: Optional[str] = None
-    ) -> dict:
-        """
-        Creates the JSON payload for the request.
-        """
-        payload = {
-            "prompt": conversation_prompt,
-            "firstQuestionFlag": True,
-            "sessionId": session_id,
-            "attachments": []
-        }
-        if image_url:
-            payload["attachments"] = [
-                {
-                    "fileType": "image/jpeg",
-                    "file": {},
-                    "fileContent": image_url
-                }
-            ]
-        return payload
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False, # API supports streaming
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        image_path: Optional[str] = None,
-    ) -> Union[dict, Generator[dict, None, None]]:
-        """Chat with LearnFast
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            image_path (Optional[str], optional): Path to the image to be uploaded.
-                Defaults to None.
-
-        Returns:
-            Union[dict, Generator[dict, None, None]]: Response generated
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        # Generate unique ID and session ID
-        unique_id = self.generate_unique_id()
-        session_id = self.generate_session_id()
-
-        # Update headers with the unique ID for this request
-        current_headers = self.headers.copy()
-        current_headers["uniqueid"] = unique_id
-
-        # Upload image and get URL if image_path is provided
-        image_url = None
-        if image_path:
-            try:
-                image_url = self.upload_image_to_0x0(image_path)
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Error uploading image: {e}") from e
-
-        # Create the payload
-        payload = self.create_payload(session_id, conversation_prompt, image_url)
-
-        # Convert the payload to a JSON string
-        data = json.dumps(payload)
-
-        def for_stream():
-            full_response = "" # Initialize outside try block
-            try:
-                # Use curl_cffi session post with impersonate
-                response = self.session.post(
-                    self.api_endpoint,
-                    headers=current_headers, # Use headers with uniqueid
-                    data=data,
-                    stream=True,
-                    timeout=self.timeout,
-                    # proxies are set on the session
-                    impersonate="chrome110" # Use a common impersonation profile
-                )
-                response.raise_for_status() # Check for HTTP errors
-
-                # Use sanitize_stream
-                processed_stream = sanitize_stream(
-                    data=response.iter_content(chunk_size=None), # Pass byte iterator
-                    intro_value=None, # No prefix
-                    to_json=True, # Stream sends JSON lines
-                    skip_markers=["[DONE]"],
-                    content_extractor=self._learnfast_extractor, # Use the specific extractor
-                    yield_raw_on_error=False # Skip non-JSON lines or lines where extractor fails
-                )
-
-                for content_chunk in processed_stream:
-                    # content_chunk is the string extracted by _learnfast_extractor
-                    if content_chunk and isinstance(content_chunk, str):
-                        full_response += content_chunk
-                        resp = {"text": content_chunk}
-                        yield resp if not raw else content_chunk
-
-                # Update history after stream finishes
-                self.last_response = {"text": full_response}
-                self.conversation.update_chat_history(prompt, full_response)
-
-            except CurlError as e: # Catch CurlError
-                raise exceptions.FailedToGenerateResponseError(f"An error occurred (CurlError): {e}") from e
-            except Exception as e: # Catch other potential exceptions (like HTTPError)
-                err_text = getattr(e, 'response', None) and getattr(e.response, 'text', '')
-                raise exceptions.FailedToGenerateResponseError(f"An error occurred ({type(e).__name__}): {e} - {err_text}") from e
-
-        def for_non_stream():
-            # Aggregate the stream using the updated for_stream logic
-            full_response_text = ""
-            try:
-                # Ensure raw=False so for_stream yields dicts
-                for chunk_data in for_stream():
-                    if isinstance(chunk_data, dict) and "text" in chunk_data:
-                        full_response_text += chunk_data["text"]
-                    # Handle raw string case if raw=True was passed
-                    elif raw and isinstance(chunk_data, str):
-                        full_response_text += chunk_data
-            except Exception as e:
-                # If aggregation fails but some text was received, use it. Otherwise, re-raise.
-                if not full_response_text:
-                    raise exceptions.FailedToGenerateResponseError(f"Failed to get non-stream response: {str(e)}") from e
-
-            # last_response and history are updated within for_stream
-            # Return the final aggregated response dict or raw string
-            return full_response_text if raw else self.last_response
-
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        image_path: Optional[str] = None,
-    ) -> Union[str, Generator[str, None, None]]:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-            image_path (Optional[str], optional): Path to the image to be uploaded.
-                Defaults to None.
-        Returns:
-            Union[str, Generator[str, None, None]]: Response generated
-        """
-        try:
-            # ask() yields dicts or strings when streaming
-            response_gen = self.ask(
-                prompt, stream=stream, raw=False, # Ensure ask yields dicts/dict
-                optimizer=optimizer, conversationally=conversationally,
-                image_path=image_path
-            )
-            if stream:
-                def stream_wrapper():
-                    for chunk_dict in response_gen:
-                        yield self.get_message(chunk_dict) # get_message expects dict
-                return stream_wrapper()
-            else:
-                # response_gen is the final dict in non-stream mode
-                return self.get_message(response_gen) # get_message expects dict
-        except Exception as e:
-            # Return error message directly, consider raising instead for better error handling upstream
-            return f"Error: {str(e)}"
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == "__main__":
-    # Ensure curl_cffi is installed
-    from rich import print
-    ai = LearnFast()
-    response = ai.chat(input(">>> "), stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)