webscout-8.2.2-py3-none-any.whl → webscout-8.2.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/Free2GPT.py
DELETED
@@ -1,241 +0,0 @@
-
-#!/usr/bin/env python3
-"""
-A merged API client for Free2GPT that supports both GPT and Claude variants
-in a non-streaming manner. The client sends requests to the appropriate endpoint
-based on the chosen variant and returns the complete response as text.
-
-Usage:
-    python Free2GPT.py
-
-Select the variant by passing the 'variant' parameter in the constructor:
-    variant="claude" --> Uses https://claude3.free2gpt.xyz/api/generate
-    variant="gpt"    --> Uses https://chat1.free2gpt.com/api/generate
-"""
-
-from typing import Union, Optional, Dict
-import time
-import json
-import requests
-from hashlib import sha256
-
-from webscout.AIutel import Optimizers, Conversation, AwesomePrompts
-from webscout.AIbase import Provider
-from webscout import exceptions
-from webscout.Litlogger import Logger, LogFormat
-from webscout.litagent import LitAgent
-
-
-class Free2GPT(Provider):
-    """
-    A class to interact with the Free2GPT API in a non-streaming way.
-    Supports both GPT and Claude variants via the 'variant' parameter.
-    """
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: Optional[str] = None,
-        filepath: Optional[str] = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: Optional[str] = None,
-        system_prompt: str = "You are a helpful AI assistant.",
-        variant: str = "claude"  # "claude" or "gpt"
-    ):
-        """
-        Initializes the Free2GPT API client.
-
-        Args:
-            is_conversation (bool): Enable conversational mode. Defaults to True.
-            max_tokens (int): Maximum tokens to generate. Defaults to 600.
-            timeout (int): HTTP request timeout. Defaults to 30.
-            intro (str, optional): Introductory prompt for the conversation. Defaults to None.
-            filepath (str, optional): Path to conversation history file. Defaults to None.
-            update_file (bool): Whether to update the conversation file. Defaults to True.
-            proxies (dict): HTTP proxy settings. Defaults to empty dict.
-            history_offset (int): Limit for conversation history. Defaults to 10250.
-            act (str, optional): Awesome prompt key/index. Defaults to None.
-            system_prompt (str): System prompt. Defaults to "You are a helpful AI assistant.".
-            variant (str): Select API variant: "claude" or "gpt". Defaults to "claude".
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-
-        # Select API endpoint and header origins based on variant.
-        if variant.lower() == "gpt":
-            self.api_endpoint = "https://chat1.free2gpt.com/api/generate"
-            origin = "https://chat1.free2gpt.co"
-        else:
-            self.api_endpoint = "https://claude3.free2gpt.xyz/api/generate"
-            origin = "https://claude3.free2gpt.xyz"
-
-        self.timeout = timeout
-        self.last_response = {}
-        self.system_prompt = system_prompt
-        self.headers = {
-            "accept": "*/*",
-            "accept-encoding": "gzip, deflate, br, zstd",
-            "accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
-            "content-type": "text/plain;charset=UTF-8",
-            "dnt": "1",
-            "origin": origin,
-            "referer": origin,
-            "sec-ch-ua": '"Chromium";v="128", "Not;A=Brand";v="24", "Microsoft Edge";v="128"',
-            "sec-ch-ua-mobile": "?0",
-            "sec-ch-ua-platform": '"Windows"',
-            "sec-fetch-dest": "empty",
-            "sec-fetch-mode": "cors",
-            "sec-fetch-site": "same-origin",
-            "user-agent": LitAgent().random(),
-        }
-        self.session.headers.update(self.headers)
-        self.session.proxies = proxies
-
-        # Prepare available optimizers from Optimizers module.
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(is_conversation, self.max_tokens_to_sample, filepath, update_file)
-        self.conversation.history_offset = history_offset
-
-    def generate_signature(self, time_val: int, text: str, secret: str = "") -> str:
-        """
-        Generates a signature for the request.
-
-        Args:
-            time_val (int): Timestamp value.
-            text (str): Text to sign.
-            secret (str, optional): Optional secret. Defaults to "".
-
-        Returns:
-            str: Hexadecimal signature.
-        """
-        message = f"{time_val}:{text}:{secret}"
-        return sha256(message.encode()).hexdigest()
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,  # Ignored; always non-streaming.
-        raw: bool = False,
-        optimizer: Optional[str] = None,
-        conversationally: bool = False,
-    ) -> Dict[str, any]:
-        """
-        Sends a prompt to the API in a non-streaming manner.
-
-        Args:
-            prompt (str): The prompt text.
-            stream (bool): Ignored; response is always non-streamed.
-            raw (bool): Whether to return the raw response. Defaults to False.
-            optimizer (str, optional): Optimizer name. Defaults to None.
-            conversationally (bool): Whether to use conversational optimization. Defaults to False.
-
-        Returns:
-            dict: A dictionary containing the generated text.
-        Example:
-            {
-                "text": "How may I assist you today?"
-            }
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {list(self.__available_optimizers)}")
-
-        # Generate timestamp and signature.
-        timestamp = int(time.time() * 1e3)
-        signature = self.generate_signature(timestamp, conversation_prompt)
-
-        payload = {
-            "messages": [
-                {"role": "system", "content": self.system_prompt},
-                {"role": "user", "content": conversation_prompt},
-            ],
-            "time": timestamp,
-            "pass": None,
-            "sign": signature,
-        }
-
-        try:
-            response = requests.post(
-                self.api_endpoint,
-                headers=self.headers,
-                data=json.dumps(payload),
-                timeout=self.timeout
-            )
-            response.raise_for_status()
-            full_response = response.text
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-            return self.last_response
-        except requests.exceptions.RequestException as e:
-            raise exceptions.FailedToGenerateResponseError(f"An error occurred: {e}")
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,  # Ignored; always non-streaming.
-        optimizer: Optional[str] = None,
-        conversationally: bool = False,
-    ) -> str:
-        """
-        Sends a prompt and returns the generated response as a string.
-
-        Args:
-            prompt (str): The prompt to send.
-            stream (bool): Ignored; response is always non-streamed.
-            optimizer (str, optional): Optimizer name. Defaults to None.
-            conversationally (bool): Whether to use conversational optimization. Defaults to False.
-
-        Returns:
-            str: Generated response.
-        """
-        response = self.ask(
-            prompt,
-            stream=False,
-            optimizer=optimizer,
-            conversationally=conversationally,
-        )
-        return self.get_message(response)
-
-    def get_message(self, response: Dict[str, any]) -> str:
-        """
-        Extracts the message text from the API response.
-
-        Args:
-            response (dict): The API response.
-
-        Returns:
-            str: Extracted message text.
-        """
-        assert isinstance(response, dict), "Response should be a dictionary"
-        return response["text"].replace('\\n', '\n').replace('\\n\\n', '\n\n')
-
-
-if __name__ == "__main__":
-    from rich import print
-    prompt_input = input(">>> ")
-    # Choose variant: "claude" or "gpt"
-    client = Free2GPT(variant="gpt")
-    result = client.chat(prompt_input)
-    print(result)
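The deleted generate_signature method documents the whole client-side protocol: the request body carried a millisecond timestamp plus a SHA-256 hex digest of "time:text:secret" with an empty secret. A minimal standalone sketch of that signing step, reconstructed from the deleted code above (the helper name sign is ours for illustration, and whether the upstream endpoints still accept this scheme is not guaranteed):

import time
from hashlib import sha256

def sign(timestamp_ms: int, text: str, secret: str = "") -> str:
    # Mirrors the deleted generate_signature(): SHA-256 over "time:text:secret".
    return sha256(f"{timestamp_ms}:{text}:{secret}".encode()).hexdigest()

timestamp = int(time.time() * 1000)  # milliseconds, as in the deleted ask()
payload = {
    "messages": [{"role": "user", "content": "Hello"}],
    "time": timestamp,
    "pass": None,
    "sign": sign(timestamp, "Hello"),  # signed over the prompt text
}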
webscout/Provider/GPTWeb.py
DELETED
@@ -1,193 +0,0 @@
-import requests
-import json
-
-from webscout.AIutel import Optimizers
-from webscout.AIutel import Conversation
-from webscout.AIutel import AwesomePrompts
-from webscout.AIbase import Provider
-
-class GPTWeb(Provider):
-    """
-    A class to interact with the Nexra GPTWeb API.
-    """
-
-    def __init__(
-        self,
-        is_conversation: bool = True,
-        max_tokens: int = 600,
-        timeout: int = 30,
-        intro: str = None,
-        filepath: str = None,
-        update_file: bool = True,
-        proxies: dict = {},
-        history_offset: int = 10250,
-        act: str = None,
-
-    ):
-        """
-        Initializes the Nexra GPTWeb API with given parameters.
-
-        Args:
-            is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
-            max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
-            timeout (int, optional): Http request timeout. Defaults to 30.
-            intro (str, optional): Conversation introductory prompt. Defaults to None.
-            filepath (str, optional): Path to file containing conversation history. Defaults to None.
-            update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
-            proxies (dict, optional): Http request proxies. Defaults to {}.
-            history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
-            act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
-            system_prompt (str, optional): System prompt for GPTWeb. Defaults to "You are a helpful AI assistant.".
-        """
-        self.session = requests.Session()
-        self.is_conversation = is_conversation
-        self.max_tokens_to_sample = max_tokens
-        self.api_endpoint = 'https://nexra.aryahcr.cc/api/chat/gptweb'
-        self.stream_chunk_size = 64
-        self.timeout = timeout
-        self.last_response = {}
-        self.headers = {
-            "Content-Type": "application/json"
-        }
-
-        self.__available_optimizers = (
-            method
-            for method in dir(Optimizers)
-            if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        self.session.headers.update(self.headers)
-        Conversation.intro = (
-            AwesomePrompts().get_act(
-                act, raise_not_found=True, default=None, case_insensitive=True
-            )
-            if act
-            else intro or Conversation.intro
-        )
-        self.conversation = Conversation(
-            is_conversation, self.max_tokens_to_sample, filepath, update_file
-        )
-        self.conversation.history_offset = history_offset
-        self.session.proxies = proxies
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with GPTWeb
-
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-           dict : {}
-        ```json
-        {
-            "text" : "How may I assist you today?"
-        }
-        ```
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(
-                    f"Optimizer is not one of {self.__available_optimizers}"
-                )
-
-        data = {
-            "prompt": conversation_prompt,
-            "markdown": False
-        }
-
-        def for_stream():
-            response = self.session.post(self.api_endpoint, headers=self.headers, data=json.dumps(data), stream=True, timeout=self.timeout)
-            if not response.ok:
-                raise Exception(
-                    f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
-                )
-
-            full_response = ''
-            for line in response.iter_lines(decode_unicode=True):
-                if line:
-                    line = line.lstrip('_')  # Remove "_"
-                    try:
-                        # Attempt to parse the entire line as JSON
-                        json_data = json.loads(line)
-                        full_response = json_data.get("gpt", "")
-                        yield full_response if raw else dict(text=full_response)
-                    except json.JSONDecodeError:
-                        print(f"Skipping invalid JSON line: {line}")
-            self.last_response.update(dict(text=full_response))
-            self.conversation.update_chat_history(
-                prompt, self.get_message(self.last_response)
-            )
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response `str`
-        Args:
-            prompt (str): Prompt to be send.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-        Returns:
-            str: Response generated
-        """
-
-        def for_stream():
-            for response in self.ask(
-                prompt, True, optimizer=optimizer, conversationally=conversationally
-            ):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(
-                self.ask(
-                    prompt,
-                    False,
-                    optimizer=optimizer,
-                    conversationally=conversationally,
-                )
-            )
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message only from response
-
-        Args:
-            response (dict): Response generated by `self.ask`
-
-        Returns:
-            str: Message extracted
-        """
-        assert isinstance(response, dict), "Response should be of dict data-type only"
-        return response["text"]
-
-if __name__ == '__main__':
-    from rich import print
-    ai = GPTWeb()
-    response = ai.chat("tell me about Abhay koul, HelpingAI", stream=True)
-    for chunk in response:
-        print(chunk, end='', flush=True)
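The streaming loop above is the notable part of this file: the Nexra endpoint emits JSON lines that may be left-padded with underscores, and each parsed object carries the cumulative reply under the "gpt" key. A self-contained sketch of just that parsing step, assuming response is a streaming requests response (the function name iter_gptweb_chunks is ours; the padding behavior and key name come from the deleted code, and the service's current behavior is unverified):

import json

def iter_gptweb_chunks(response):
    # Each streamed line holds the full reply accumulated so far under "gpt";
    # leading underscores are padding and must be stripped before parsing.
    for line in response.iter_lines(decode_unicode=True):
        if not line:
            continue
        try:
            yield json.loads(line.lstrip("_")).get("gpt", "")
        except json.JSONDecodeError:
            continue  # tolerate partial or non-JSON lines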
webscout/Provider/Gemini.py
DELETED
@@ -1,169 +0,0 @@
-from os import path
-from json import load, dumps
-import warnings
-from typing import Union, Any, Dict
-
-# Import internal modules and dependencies
-from ..AIutel import Optimizers, Conversation, AwesomePrompts, sanitize_stream
-from ..AIbase import Provider, AsyncProvider
-from ..Bard import Chatbot, Model
-
-warnings.simplefilter("ignore", category=UserWarning)
-
-# Define model aliases for easy usage
-MODEL_ALIASES: Dict[str, Model] = {
-    "unspecified": Model.UNSPECIFIED,
-    "gemini-2.0-flash": Model.G_2_0_FLASH,
-    "gemini-2.0-flash-thinking": Model.G_2_0_FLASH_THINKING,
-    "gemini-2.5-pro": Model.G_2_5_PRO,
-    "gemini-2.0-exp-advanced": Model.G_2_0_EXP_ADVANCED,
-    "gemini-2.5-exp-advanced": Model.G_2_5_EXP_ADVANCED,
-    "gemini-2.5-flash": Model.G_2_5_FLASH,
-    # Add shorter aliases for convenience
-    "flash": Model.G_2_0_FLASH,
-    "flash-2.5": Model.G_2_5_FLASH,
-    "thinking": Model.G_2_0_FLASH_THINKING,
-    "pro": Model.G_2_5_PRO,
-    "advanced": Model.G_2_0_EXP_ADVANCED,
-    "advanced-2.5": Model.G_2_5_EXP_ADVANCED,
-}
-
-# List of available models (friendly names)
-AVAILABLE_MODELS = list(MODEL_ALIASES.keys())
-
-class GEMINI(Provider):
-    def __init__(
-        self,
-        cookie_file: str,
-        model: str = "flash",  # Accepts either a Model enum or a str alias.
-        proxy: dict = {},
-        timeout: int = 30,
-    ):
-        """
-        Initializes GEMINI with model support.
-
-        Args:
-            cookie_file (str): Path to the cookies JSON file.
-            model (Model or str): Selected model for the session. Can be a Model enum
-                or a string alias. Available aliases: flash, flash-exp, thinking, thinking-with-apps,
-                exp-advanced, 2.5-exp-advanced, 2.5-pro, 1.5-flash, 1.5-pro, 1.5-pro-research.
-            proxy (dict, optional): HTTP request proxy. Defaults to {}.
-            timeout (int, optional): HTTP request timeout in seconds. Defaults to 30.
-        """
-        self.conversation = Conversation(False)
-
-        # Ensure cookie_file existence.
-        if not isinstance(cookie_file, str):
-            raise TypeError(f"cookie_file should be of type str, not '{type(cookie_file)}'")
-        if not path.isfile(cookie_file):
-            raise Exception(f"{cookie_file} is not a valid file path")
-
-        # If model is provided as alias (str), convert to Model enum.
-        if isinstance(model, str):
-            alias = model.lower()
-            if alias in MODEL_ALIASES:
-                selected_model = MODEL_ALIASES[alias]
-            else:
-                raise Exception(f"Unknown model alias: '{model}'. Available aliases: {', '.join(AVAILABLE_MODELS)}")
-        elif isinstance(model, Model):
-            selected_model = model
-        else:
-            raise TypeError("model must be a string alias or an instance of Model")
-
-        # Initialize the Chatbot session using the cookie file.
-        self.session = Chatbot(cookie_file, proxy, timeout, selected_model)
-        self.last_response = {}
-        self.__available_optimizers = (
-            method for method in dir(Optimizers) if callable(getattr(Optimizers, method)) and not method.startswith("__")
-        )
-        # Store cookies from Chatbot for later use (e.g. image generation)
-        self.session_auth1 = self.session.secure_1psid
-        self.session_auth2 = self.session.secure_1psidts
-
-    def ask(
-        self,
-        prompt: str,
-        stream: bool = False,
-        raw: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> dict:
-        """Chat with AI.
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            raw (bool, optional): Stream back raw response as received. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name (e.g., 'code', 'shell_command'). Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-        Returns:
-            dict: Response generated by the underlying Chatbot.
-        """
-        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
-        if optimizer:
-            if optimizer in self.__available_optimizers:
-                conversation_prompt = getattr(Optimizers, optimizer)(
-                    conversation_prompt if conversationally else prompt
-                )
-            else:
-                raise Exception(f"Optimizer is not one of {', '.join(self.__available_optimizers)}")
-
-        def for_stream():
-            response = self.session.ask(prompt)
-            self.last_response.update(response)
-            self.conversation.update_chat_history(prompt, self.get_message(self.last_response))
-            yield dumps(response) if raw else response
-
-        def for_non_stream():
-            for _ in for_stream():
-                pass
-            return self.last_response
-
-        return for_stream() if stream else for_non_stream()
-
-    def chat(
-        self,
-        prompt: str,
-        stream: bool = False,
-        optimizer: str = None,
-        conversationally: bool = False,
-    ) -> str:
-        """Generate response text.
-
-        Args:
-            prompt (str): Prompt to be sent.
-            stream (bool, optional): Flag for streaming response. Defaults to False.
-            optimizer (str, optional): Prompt optimizer name. Defaults to None.
-            conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
-
-        Returns:
-            str: Response generated.
-        """
-        def for_stream():
-            for response in self.ask(prompt, True, optimizer=optimizer, conversationally=conversationally):
-                yield self.get_message(response)
-
-        def for_non_stream():
-            return self.get_message(self.ask(prompt, False, optimizer=optimizer, conversationally=conversationally))
-
-        return for_stream() if stream else for_non_stream()
-
-    def get_message(self, response: dict) -> str:
-        """Retrieves message content from the response.
-
-        Args:
-            response (dict): Response generated by `self.ask`.
-
-        Returns:
-            str: Extracted message content.
-        """
-        if not isinstance(response, dict):
-            raise TypeError("Response should be of type dict")
-        return response["content"]
-
-    def reset(self):
-        """Reset the current conversation."""
-        self.session.async_chatbot.conversation_id = ""
-        self.session.async_chatbot.response_id = ""
-        self.session.async_chatbot.choice_id = ""
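For anyone still pinned to the old release, the alias table above resolves friendly names to Model enum members at construction time. A minimal usage sketch against webscout 8.2.2 (the cookies.json path is a placeholder for an exported Google cookie file; this import no longer exists in 8.2.7):

# Requires webscout==8.2.2; the GEMINI provider was removed in later releases.
from webscout.Provider.Gemini import GEMINI

bot = GEMINI(cookie_file="cookies.json", model="flash")  # "flash" -> Model.G_2_0_FLASH
print(bot.chat("Hello"))
bot.reset()  # clears conversation/response/choice ids on the underlying chatbot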