webscout-8.2.2-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release: this version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
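Taken together, the listing removes nearly the whole 8.2.2 module tree: every Provider, the Extra toolkits, Litlogger, Local, scout, swiftcli, zeroart, and the separate inferno and webstoken top-level packages (top_level.txt drops from 3 entries to 1). A minimal sketch of how downstream code might check for this breakage before upgrading; the module paths are copied from the deletion list above, while the guard itself is illustrative:

```python
import importlib.util

# Module paths copied from the deletion list above; the guard is illustrative.
REMOVED_IN_8_2_7 = [
    "webscout.Provider.PizzaGPT",
    "webscout.Provider.QwenLM",
    "webscout.Provider.Reka",
    "webscout.Litlogger",
    "webscout.Extra.tempmail",
    "webstoken",
]

def is_importable(name: str) -> bool:
    try:
        # find_spec raises ModuleNotFoundError when a parent package is gone.
        return importlib.util.find_spec(name) is not None
    except ModuleNotFoundError:
        return False

missing = [m for m in REMOVED_IN_8_2_7 if not is_importable(m)]
if missing:
    print("Removed since 8.2.2:", ", ".join(missing))
```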
webscout/Provider/PizzaGPT.py
DELETED
```diff
@@ -1,198 +0,0 @@
-import requests
-import json
-import re
-from typing import Any, Dict, Optional, Union, Generator
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.litagent import LitAgent as Lit
-
-class PIZZAGPT(Provider):
-    """
-    PIZZAGPT is a provider class for interacting with the PizzaGPT API.
-    Supports web search integration and handles responses using regex.
-    """
-    AVAILABLE_MODELS = ["gpt-4o-mini"]
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "gpt-4o-mini"
-    ) -> None:
-        """Initialize PizzaGPT with enhanced configuration options."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
-
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://www.pizzagpt.it/api/chatx-completion"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-
-        self.headers = {
-            "accept": "application/json",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://www.pizzagpt.it",
-            "referer": "https://www.pizzagpt.it/en",
-            "user-agent": Lit().random(),
-            "x-secret": "Marinara",
-            "sec-ch-ua": '"Chromium";v="134", "Not:A-Brand";v="24"',
-            "sec-ch-ua-platform": '"Windows"'
-        }
-
-        self.__available_optimizers = (
-            method for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def _extract_content(self, text: str) -> Dict[str, Any]:
-        """
-        Extract content from response text using regex.
-        """
-        try:
-            # Look for content pattern
-            content_match = re.search(r'"content"\s*:\s*"(.*?)"(?=\s*[,}])', text, re.DOTALL)
-            if not content_match:
-                raise exceptions.FailedToGenerateResponseError("Content not found in response")
-
-            content = content_match.group(1)
-            # Unescape special characters
-            content = content.encode().decode('unicode_escape')
-
-            # Look for citations if present
-            citations = []
-            citations_match = re.search(r'"citations"\s*:\s*\[(.*?)\]', text, re.DOTALL)
-            if citations_match:
-                citations_text = citations_match.group(1)
-                citations = re.findall(r'"(.*?)"', citations_text)
-
-            return {
-                "content": content,
-                "citations": citations
-            }
-
-        except Exception as e:
-            raise exceptions.FailedToGenerateResponseError(f"Failed to extract content: {str(e)}")
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        web_search: bool = False,
-    ) -> Dict[str, Any]:
-        """
-        Send a prompt to PizzaGPT API with optional web search capability.
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
-
-        payload = {
-            "question": conversation_prompt,
-            "model": self.model,
-            "searchEnabled": web_search
-        }
-
-        try:
-            response = self.session.post(
-                self.api_endpoint,
-                json=payload,
-                timeout=self.timeout
-            )
-
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason})"
-                )
-
-            response_text = response.text
-            if not response_text:
-                raise exceptions.FailedToGenerateResponseError("Empty response received from API")
-
-            try:
-                resp = self._extract_content(response_text)
-
-                self.last_response.update(dict(text=resp['content']))
-                self.conversation.update_chat_history(
-                    prompt, self.get_message(self.last_response)
-                )
-                return self.last_response
-
-            except Exception as e:
-                raise exceptions.FailedToGenerateResponseError(f"Failed to parse response: {str(e)}")
-
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-        web_search: bool = False,
-    ) -> str:
-        """
-        Chat with PizzaGPT with optional web search capability.
-        """
-        try:
-            response = self.ask(
-                prompt,
-                optimizer=optimizer,
-                conversationally=conversationally,
-                web_search=web_search
-            )
-            return self.get_message(response)
-        except Exception as e:
-            raise
-
-    def get_message(self, response: dict) -> str:
-        """Extract message from response dictionary."""
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response.get("text", "")
-
-if __name__ == "__main__":
-    from rich import print
-
-    # Example usage with web search enabled
-    ai = PIZZAGPT()
-    try:
-        response = ai.chat("hi")
-        print(response)
-    except Exception as e:
-        print(f"Error: {str(e)}")
```
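The deleted `_extract_content` pulls `content` and `citations` out of the response body with regex rather than a JSON parser. For reference, a standalone sketch of the same extraction done with `json.loads`, assuming the endpoint returns well-formed JSON (the regex fallback suggests it did not always):

```python
import json
from typing import Any, Dict

def extract_content(text: str) -> Dict[str, Any]:
    """Parse a PizzaGPT-style body of the shape {"content": ..., "citations": [...]}."""
    data = json.loads(text)  # raises json.JSONDecodeError on malformed bodies
    return {
        "content": data.get("content", ""),
        "citations": data.get("citations", []),
    }

# A body shaped like the fields the regex above targets:
sample = '{"content": "Hello!", "citations": ["https://example.com"]}'
print(extract_content(sample))
# {'content': 'Hello!', 'citations': ['https://example.com']}
```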
webscout/Provider/QwenLM.py
DELETED
```diff
@@ -1,254 +0,0 @@
-import json
-from typing import Union, Any, Dict, Generator, Optional
-
-import cloudscraper
-
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-
-class QwenLM(Provider):
-    """
-    A class to interact with the QwenLM API
-    """
-
-    AVAILABLE_MODELS = [
-        "qwen-max-latest",
-        "qwen-plus-latest",
-        "qwen2.5-14b-instruct-1m",
-        "qwq-32b",
-        "qwen2.5-coder-32b-instruct",
-        "qwen-turbo-latest",
-        "qwen2.5-72b-instruct",
-        "qwen2.5-vl-72b-instruct",
-        "qvq-72b-preview"
-    ]
-
-    def __init__(
-        self,
-        cookies_path: str,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: Optional[str] = None,
-        filepath: Optional[str] = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: Optional[str] = None,
-        model: str = "qwen-plus-latest",
-        system_prompt: str = "You are a helpful AI assistant."
-    ):
-        """Initializes the QwenLM API client."""
-        if model not in self.AVAILABLE_MODELS:
-            raise ValueError(
-                f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}"
-            )
-
-        self.session = cloudscraper.create_scraper()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chat.qwenlm.ai/api/chat/completions"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.cookies_path = cookies_path
-        self.cookie_string, self.token = self._load_cookies()
-
-        self.headers = {
-            "accept": "*/*",
-            "accept-language": "en-US,en;q=0.9",
-            "content-type": "application/json",
-            "origin": "https://chat.qwenlm.ai",
-            "referer": "https://chat.qwenlm.ai/",
-            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
-            "authorization": f"Bearer {self.token}" if self.token else '',
-        }
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-        self.chat_type = "t2t"  # search - used WEB, t2t - chatbot, t2i - image_gen
-        if self.chat_type != "t2t":
-            AVAILABLE_MODELS = [
-                'qwen-plus-latest', 'qvq-72b-preview',
-                'qvq-32b', 'qwen-turbo-latest',
-                'qwen-max-latest'
-            ]
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method))
-            and not method.startswith("__")
-        )
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-
-    def _load_cookies(self) -> tuple[str, str]:
-        """Load cookies from a JSON file and build a cookie header string."""
-        try:
-            with open(self.cookies_path, "r") as f:
-                cookies = json.load(f)
-            cookie_string = "; ".join(
-                f"{cookie['name']}={cookie['value']}" for cookie in cookies
-            )
-            token = next(
-                (cookie.get("value") for cookie in cookies if cookie.get("name") == "token"),
-                "",
-            )
-            return cookie_string, token
-        except FileNotFoundError:
-            raise exceptions.InvalidAuthenticationError(
-                "Error: cookies.json file not found!"
-            )
-        except json.JSONDecodeError:
-            raise exceptions.InvalidAuthenticationError(
-                "Error: Invalid JSON format in cookies.json!"
-            )
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: Optional[str] = None,
-        conversationally: bool = False,
-    ) -> Union[Dict[str, Any], Generator[Any, None, None]]:
-        """Chat with AI."""
-
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {list(self.__available_optimizers)}"
-                )
-
-        payload = {
-            'chat_type': self.chat_type,
-            "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt}
-            ],
-            "model": self.model,
-            "stream": stream,
-            "max_tokens": self.max_tokens_to_sample
-        }
-
-        def for_stream() -> Generator[Dict[str, Any], None, None]:
-            response = self.session.post(
-                self.api_endpoint, json=payload, headers=self.headers, stream=True, timeout=self.timeout
-            )
-            if not response.ok:
-                raise exceptions.FailedToGenerateResponseError(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            cumulative_text = ""
-            for line in response.iter_lines(decode_unicode=True):
-                if line and line.startswith("data: "):
-                    data = line[6:]
-                    if data == "[DONE]":
-                        break
-                    try:
-                        json_data = json.loads(data)
-                        # Handle multiple response formats
-                        if "choices" in json_data:
-                            new_content = json_data.get("choices")[0].get("delta", {}).get("content", "")
-                        elif "messages" in json_data:
-                            assistant_msg = next(
-                                (msg for msg in reversed(json_data["messages"]) if msg.get("role") == "assistant"),
-                                {}
-                            )
-                            content_field = assistant_msg.get("content", "")
-                            if isinstance(content_field, list):
-                                new_content = "".join(item.get("text", "") for item in content_field)
-                            else:
-                                new_content = content_field
-                        else:
-                            new_content = ""
-                        delta = new_content[len(cumulative_text):]
-                        cumulative_text = new_content
-                        if delta:
-                            yield delta if raw else {"text": delta}
-                    except json.JSONDecodeError:
-                        continue
-            self.last_response.update(dict(text=cumulative_text))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream() -> Dict[str, Any]:
-            """
-            Handles non-streaming responses by aggregating all streamed chunks into a single string.
-            """
-
-            # Initialize an empty string to accumulate the full response
-            full_response = ""
-
-            # Iterate through the stream generator and accumulate the text
-            try:
-                for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                    if isinstance(response, dict):  # Check if the response is a dictionary
-                        full_response += response.get("text", "")  # Extract and append the "text" field
-                    elif isinstance(response, str):  # If the response is a string, directly append it
-                        full_response += response
-            except Exception as e:
-                raise
-
-            # Ensure last_response is updated with the aggregated text
-            self.last_response.update({"text": full_response})
-
-            # Update conversation history with the final response
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-
-            return {"text": full_response}  # Return the dictionary containing the full response
-
-        return for_stream() if stream else for_non_stream()
-
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: Optional[str] = None,
-        conversationally: bool = False,
-    ) -> Union[str, Generator[str, None, None]]:
-        """Generate response string from chat."""
-
-        def for_stream() -> Generator[str, None, None]:
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield response if isinstance(response, str) else response["text"]
-
-        def for_non_stream() -> str:
-            result = self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally)
-            return self.get_message(result)
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Extracts the message content from a response dict."""
-        assert isinstance(response, dict), "Response should be a dict"
-        return response.get("text", "")
-
-if __name__ == "__main__":
-    from rich import print
-    ai = QwenLM(cookies_path="cookies.json")
-    response = ai.chat(input(">>> "), stream=False)
-    ai.chat_type = "search"  # search - used WEB, t2t - chatbot, t2i - image_gen
-    print(response)
-    # for chunk in response:
-    #     print(chunk, end="", flush=True)
```
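The notable detail in the deleted `for_stream` is the delta computation: the stream can resend the full cumulative text, so each yield slices off the prefix already seen. A self-contained sketch of that logic over canned `data:` lines (the payloads are illustrative, shaped like the `choices`/`delta` branch above):

```python
import json

# Simulated SSE lines; real ones come from response.iter_lines(decode_unicode=True).
lines = [
    'data: {"choices": [{"delta": {"content": "Hel"}}]}',
    'data: {"choices": [{"delta": {"content": "Hello, wor"}}]}',
    'data: {"choices": [{"delta": {"content": "Hello, world!"}}]}',
    "data: [DONE]",
]

cumulative = ""
for line in lines:
    data = line[len("data: "):]
    if data == "[DONE]":
        break
    content = json.loads(data)["choices"][0]["delta"].get("content", "")
    delta = content[len(cumulative):]  # only the suffix not yet emitted
    cumulative = content
    if delta:
        print(delta, end="")
print()  # prints: Hello, world!
```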
webscout/Provider/Reka.py
DELETED
````diff
@@ -1,214 +0,0 @@
-import requests
-
-
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts, sanitize_stream
-from webscout.AIbase import Provider, AsyncProvider
-from webscout import exceptions
-
-#-----------------------------------------------REKA-----------------------------------------------
-class REKA(Provider):
-    def __init__(
-        self,
-        api_key: str,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-        model: str = "reka-core",
-        system_prompt: str = "Be Helpful and Friendly. Keep your response straightforward, short and concise",
-        use_search_engine: bool = False,
-        use_code_interpreter: bool = False,
-    ):
-        """Instantiates REKA
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            model (str, optional): REKA model name. Defaults to "reka-core".
-            system_prompt (str, optional): System prompt for REKA. Defaults to "Be Helpful and Friendly. Keep your response straightforward, short and concise".
-            use_search_engine (bool, optional): Whether to use the search engine. Defaults to False.
-            use_code_interpreter (bool, optional): Whether to use the code interpreter. Defaults to False.
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = "https://chat.reka.ai/api/chat"
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.model = model
-        self.system_prompt = system_prompt
-        self.use_search_engine = use_search_engine
-        self.use_code_interpreter = use_code_interpreter
-        self.access_token = api_key
-        self.headers = {
-            "Authorization": f"Bearer {self.access_token}",
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        self.session.headers.update(self.headers)
-        payload = {
-
-            "conversation_history": [
-                {"type": "human", "text": f"## SYSTEM PROMPT: {self.system_prompt}\n\n## QUERY: {conversation_prompt}"},
-            ],
-
-            "stream": stream,
-            "use_search_engine": self.use_search_engine,
-            "use_code_interpreter": self.use_code_interpreter,
-            "model_name": self.model,
-            # "model_name": "reka-flash",
-            # "model_name": "reka-edge",
-        }
-
-        def for_stream():
-            response = self.session.post(self.api_endpoint, json=payload, stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            for value in response.iter_lines(
-                decode_unicode=True,
-                chunk_size=self.stream_chunk_size,
-            ):
-                try:
-                    resp = json.loads(value)
-                    self.last_response.update(resp)
-                    yield value if raw else resp
-                except json.decoder.JSONDecodeError:
-                    pass
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-
-        def for_non_stream():
-            # let's make use of stream
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response.get("text")
-if __name__ == "__main__":
-
-    from rich import print
-    ai = REKA(api_key="YOUR_API_KEY", timeout=5000)
-    response = ai.chat("write a poem about AI", stream=True)
-    for chunk in response:
-        print(chunk, end="", flush=True)
````
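REKA's `for_non_stream` simply drains the streaming generator for its side effects and returns whatever `last_response` holds afterwards. A minimal sketch of that drain-and-collect pattern in isolation, with illustrative names standing in for the network stream:

```python
from typing import Dict, Iterator

last_response: Dict[str, str] = {}

def stream_chunks() -> Iterator[Dict[str, str]]:
    # Stand-in for the HTTP stream; each chunk overwrites last_response,
    # mirroring last_response.update(resp) in the deleted for_stream().
    for text in ("How ", "How may I ", "How may I assist you today?"):
        last_response.update({"text": text})
        yield {"text": text}

def non_stream() -> Dict[str, str]:
    for _ in stream_chunks():  # drain the generator purely for side effects
        pass
    return last_response       # the last chunk left the complete text here

print(non_stream()["text"])  # How may I assist you today?
```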