webscout 8.2.2__py3-none-any.whl → 8.2.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic. Click here for more details.
- webscout/AIauto.py +112 -22
- webscout/AIbase.py +144 -7
- webscout/AIutel.py +249 -131
- webscout/Bard.py +579 -206
- webscout/DWEBS.py +78 -35
- webscout/__init__.py +0 -1
- webscout/cli.py +256 -0
- webscout/conversation.py +307 -436
- webscout/exceptions.py +23 -0
- webscout/prompt_manager.py +56 -42
- webscout/version.py +1 -1
- webscout/webscout_search.py +65 -47
- webscout/webscout_search_async.py +81 -126
- webscout/yep_search.py +93 -43
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/METADATA +172 -52
- webscout-8.2.7.dist-info/RECORD +26 -0
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info}/WHEEL +1 -1
- webscout-8.2.7.dist-info/entry_points.txt +3 -0
- webscout-8.2.7.dist-info/top_level.txt +1 -0
- inferno/__init__.py +0 -6
- inferno/__main__.py +0 -9
- inferno/cli.py +0 -6
- webscout/Extra/GitToolkit/__init__.py +0 -10
- webscout/Extra/GitToolkit/gitapi/__init__.py +0 -12
- webscout/Extra/GitToolkit/gitapi/repository.py +0 -195
- webscout/Extra/GitToolkit/gitapi/user.py +0 -96
- webscout/Extra/GitToolkit/gitapi/utils.py +0 -62
- webscout/Extra/YTToolkit/YTdownloader.py +0 -957
- webscout/Extra/YTToolkit/__init__.py +0 -3
- webscout/Extra/YTToolkit/transcriber.py +0 -476
- webscout/Extra/YTToolkit/ytapi/__init__.py +0 -6
- webscout/Extra/YTToolkit/ytapi/channel.py +0 -307
- webscout/Extra/YTToolkit/ytapi/errors.py +0 -13
- webscout/Extra/YTToolkit/ytapi/extras.py +0 -45
- webscout/Extra/YTToolkit/ytapi/https.py +0 -88
- webscout/Extra/YTToolkit/ytapi/patterns.py +0 -61
- webscout/Extra/YTToolkit/ytapi/playlist.py +0 -59
- webscout/Extra/YTToolkit/ytapi/pool.py +0 -8
- webscout/Extra/YTToolkit/ytapi/query.py +0 -40
- webscout/Extra/YTToolkit/ytapi/stream.py +0 -63
- webscout/Extra/YTToolkit/ytapi/utils.py +0 -62
- webscout/Extra/YTToolkit/ytapi/video.py +0 -232
- webscout/Extra/__init__.py +0 -7
- webscout/Extra/autocoder/__init__.py +0 -9
- webscout/Extra/autocoder/autocoder.py +0 -849
- webscout/Extra/autocoder/autocoder_utiles.py +0 -332
- webscout/Extra/gguf.py +0 -682
- webscout/Extra/tempmail/__init__.py +0 -28
- webscout/Extra/tempmail/async_utils.py +0 -141
- webscout/Extra/tempmail/base.py +0 -161
- webscout/Extra/tempmail/cli.py +0 -187
- webscout/Extra/tempmail/emailnator.py +0 -84
- webscout/Extra/tempmail/mail_tm.py +0 -361
- webscout/Extra/tempmail/temp_mail_io.py +0 -292
- webscout/Extra/weather.py +0 -194
- webscout/Extra/weather_ascii.py +0 -76
- webscout/LLM.py +0 -442
- webscout/Litlogger/__init__.py +0 -67
- webscout/Litlogger/core/__init__.py +0 -6
- webscout/Litlogger/core/level.py +0 -23
- webscout/Litlogger/core/logger.py +0 -165
- webscout/Litlogger/handlers/__init__.py +0 -12
- webscout/Litlogger/handlers/console.py +0 -33
- webscout/Litlogger/handlers/file.py +0 -143
- webscout/Litlogger/handlers/network.py +0 -173
- webscout/Litlogger/styles/__init__.py +0 -7
- webscout/Litlogger/styles/colors.py +0 -249
- webscout/Litlogger/styles/formats.py +0 -458
- webscout/Litlogger/styles/text.py +0 -87
- webscout/Litlogger/utils/__init__.py +0 -6
- webscout/Litlogger/utils/detectors.py +0 -153
- webscout/Litlogger/utils/formatters.py +0 -200
- webscout/Local/__init__.py +0 -12
- webscout/Local/__main__.py +0 -9
- webscout/Local/api.py +0 -576
- webscout/Local/cli.py +0 -516
- webscout/Local/config.py +0 -75
- webscout/Local/llm.py +0 -287
- webscout/Local/model_manager.py +0 -253
- webscout/Local/server.py +0 -721
- webscout/Local/utils.py +0 -93
- webscout/Provider/AI21.py +0 -177
- webscout/Provider/AISEARCH/DeepFind.py +0 -250
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/AISEARCH/Perplexity.py +0 -359
- webscout/Provider/AISEARCH/__init__.py +0 -10
- webscout/Provider/AISEARCH/felo_search.py +0 -228
- webscout/Provider/AISEARCH/genspark_search.py +0 -208
- webscout/Provider/AISEARCH/hika_search.py +0 -194
- webscout/Provider/AISEARCH/iask_search.py +0 -436
- webscout/Provider/AISEARCH/monica_search.py +0 -246
- webscout/Provider/AISEARCH/scira_search.py +0 -324
- webscout/Provider/AISEARCH/webpilotai_search.py +0 -281
- webscout/Provider/Aitopia.py +0 -292
- webscout/Provider/AllenAI.py +0 -413
- webscout/Provider/Andi.py +0 -228
- webscout/Provider/Blackboxai.py +0 -229
- webscout/Provider/C4ai.py +0 -432
- webscout/Provider/ChatGPTClone.py +0 -226
- webscout/Provider/ChatGPTES.py +0 -237
- webscout/Provider/ChatGPTGratis.py +0 -194
- webscout/Provider/Chatify.py +0 -175
- webscout/Provider/Cloudflare.py +0 -273
- webscout/Provider/Cohere.py +0 -208
- webscout/Provider/DeepSeek.py +0 -196
- webscout/Provider/Deepinfra.py +0 -297
- webscout/Provider/ElectronHub.py +0 -709
- webscout/Provider/ExaAI.py +0 -261
- webscout/Provider/ExaChat.py +0 -342
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -193
- webscout/Provider/Gemini.py +0 -169
- webscout/Provider/GithubChat.py +0 -367
- webscout/Provider/Glider.py +0 -211
- webscout/Provider/Groq.py +0 -670
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +0 -206
- webscout/Provider/HeckAI.py +0 -233
- webscout/Provider/HuggingFaceChat.py +0 -462
- webscout/Provider/Hunyuan.py +0 -272
- webscout/Provider/Jadve.py +0 -266
- webscout/Provider/Koboldai.py +0 -381
- webscout/Provider/LambdaChat.py +0 -392
- webscout/Provider/Llama.py +0 -200
- webscout/Provider/Llama3.py +0 -204
- webscout/Provider/Marcus.py +0 -148
- webscout/Provider/Netwrck.py +0 -228
- webscout/Provider/OLLAMA.py +0 -396
- webscout/Provider/OPENAI/__init__.py +0 -25
- webscout/Provider/OPENAI/base.py +0 -46
- webscout/Provider/OPENAI/c4ai.py +0 -367
- webscout/Provider/OPENAI/chatgpt.py +0 -549
- webscout/Provider/OPENAI/chatgptclone.py +0 -460
- webscout/Provider/OPENAI/deepinfra.py +0 -272
- webscout/Provider/OPENAI/e2b.py +0 -1350
- webscout/Provider/OPENAI/exaai.py +0 -404
- webscout/Provider/OPENAI/exachat.py +0 -433
- webscout/Provider/OPENAI/freeaichat.py +0 -352
- webscout/Provider/OPENAI/glider.py +0 -316
- webscout/Provider/OPENAI/heckai.py +0 -337
- webscout/Provider/OPENAI/llmchatco.py +0 -327
- webscout/Provider/OPENAI/netwrck.py +0 -348
- webscout/Provider/OPENAI/opkfc.py +0 -488
- webscout/Provider/OPENAI/scirachat.py +0 -463
- webscout/Provider/OPENAI/sonus.py +0 -294
- webscout/Provider/OPENAI/standardinput.py +0 -425
- webscout/Provider/OPENAI/textpollinations.py +0 -285
- webscout/Provider/OPENAI/toolbaz.py +0 -405
- webscout/Provider/OPENAI/typegpt.py +0 -346
- webscout/Provider/OPENAI/uncovrAI.py +0 -455
- webscout/Provider/OPENAI/utils.py +0 -211
- webscout/Provider/OPENAI/venice.py +0 -413
- webscout/Provider/OPENAI/wisecat.py +0 -381
- webscout/Provider/OPENAI/writecream.py +0 -156
- webscout/Provider/OPENAI/x0gpt.py +0 -371
- webscout/Provider/OPENAI/yep.py +0 -327
- webscout/Provider/OpenGPT.py +0 -199
- webscout/Provider/Openai.py +0 -496
- webscout/Provider/PI.py +0 -344
- webscout/Provider/Perplexitylabs.py +0 -415
- webscout/Provider/Phind.py +0 -535
- webscout/Provider/PizzaGPT.py +0 -198
- webscout/Provider/QwenLM.py +0 -254
- webscout/Provider/Reka.py +0 -214
- webscout/Provider/StandardInput.py +0 -278
- webscout/Provider/TTI/AiForce/__init__.py +0 -22
- webscout/Provider/TTI/AiForce/async_aiforce.py +0 -224
- webscout/Provider/TTI/AiForce/sync_aiforce.py +0 -245
- webscout/Provider/TTI/FreeAIPlayground/__init__.py +0 -9
- webscout/Provider/TTI/FreeAIPlayground/async_freeaiplayground.py +0 -181
- webscout/Provider/TTI/FreeAIPlayground/sync_freeaiplayground.py +0 -180
- webscout/Provider/TTI/ImgSys/__init__.py +0 -23
- webscout/Provider/TTI/ImgSys/async_imgsys.py +0 -202
- webscout/Provider/TTI/ImgSys/sync_imgsys.py +0 -195
- webscout/Provider/TTI/MagicStudio/__init__.py +0 -2
- webscout/Provider/TTI/MagicStudio/async_magicstudio.py +0 -111
- webscout/Provider/TTI/MagicStudio/sync_magicstudio.py +0 -109
- webscout/Provider/TTI/Nexra/__init__.py +0 -22
- webscout/Provider/TTI/Nexra/async_nexra.py +0 -286
- webscout/Provider/TTI/Nexra/sync_nexra.py +0 -258
- webscout/Provider/TTI/PollinationsAI/__init__.py +0 -23
- webscout/Provider/TTI/PollinationsAI/async_pollinations.py +0 -311
- webscout/Provider/TTI/PollinationsAI/sync_pollinations.py +0 -265
- webscout/Provider/TTI/__init__.py +0 -12
- webscout/Provider/TTI/aiarta/__init__.py +0 -2
- webscout/Provider/TTI/aiarta/async_aiarta.py +0 -482
- webscout/Provider/TTI/aiarta/sync_aiarta.py +0 -440
- webscout/Provider/TTI/artbit/__init__.py +0 -22
- webscout/Provider/TTI/artbit/async_artbit.py +0 -155
- webscout/Provider/TTI/artbit/sync_artbit.py +0 -148
- webscout/Provider/TTI/fastflux/__init__.py +0 -22
- webscout/Provider/TTI/fastflux/async_fastflux.py +0 -261
- webscout/Provider/TTI/fastflux/sync_fastflux.py +0 -252
- webscout/Provider/TTI/huggingface/__init__.py +0 -22
- webscout/Provider/TTI/huggingface/async_huggingface.py +0 -199
- webscout/Provider/TTI/huggingface/sync_huggingface.py +0 -195
- webscout/Provider/TTI/piclumen/__init__.py +0 -23
- webscout/Provider/TTI/piclumen/async_piclumen.py +0 -268
- webscout/Provider/TTI/piclumen/sync_piclumen.py +0 -233
- webscout/Provider/TTI/pixelmuse/__init__.py +0 -4
- webscout/Provider/TTI/pixelmuse/async_pixelmuse.py +0 -249
- webscout/Provider/TTI/pixelmuse/sync_pixelmuse.py +0 -182
- webscout/Provider/TTI/talkai/__init__.py +0 -4
- webscout/Provider/TTI/talkai/async_talkai.py +0 -229
- webscout/Provider/TTI/talkai/sync_talkai.py +0 -207
- webscout/Provider/TTS/__init__.py +0 -7
- webscout/Provider/TTS/deepgram.py +0 -156
- webscout/Provider/TTS/elevenlabs.py +0 -111
- webscout/Provider/TTS/gesserit.py +0 -127
- webscout/Provider/TTS/murfai.py +0 -113
- webscout/Provider/TTS/parler.py +0 -111
- webscout/Provider/TTS/speechma.py +0 -180
- webscout/Provider/TTS/streamElements.py +0 -333
- webscout/Provider/TTS/utils.py +0 -280
- webscout/Provider/TeachAnything.py +0 -187
- webscout/Provider/TextPollinationsAI.py +0 -231
- webscout/Provider/TwoAI.py +0 -199
- webscout/Provider/Venice.py +0 -219
- webscout/Provider/VercelAI.py +0 -234
- webscout/Provider/WebSim.py +0 -228
- webscout/Provider/WiseCat.py +0 -196
- webscout/Provider/Writecream.py +0 -211
- webscout/Provider/WritingMate.py +0 -197
- webscout/Provider/Youchat.py +0 -330
- webscout/Provider/__init__.py +0 -198
- webscout/Provider/ai4chat.py +0 -202
- webscout/Provider/aimathgpt.py +0 -189
- webscout/Provider/akashgpt.py +0 -342
- webscout/Provider/askmyai.py +0 -158
- webscout/Provider/asksteve.py +0 -203
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/cerebras.py +0 -242
- webscout/Provider/chatglm.py +0 -205
- webscout/Provider/cleeai.py +0 -213
- webscout/Provider/copilot.py +0 -428
- webscout/Provider/elmo.py +0 -234
- webscout/Provider/freeaichat.py +0 -271
- webscout/Provider/gaurish.py +0 -244
- webscout/Provider/geminiapi.py +0 -208
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/Provider/granite.py +0 -187
- webscout/Provider/hermes.py +0 -219
- webscout/Provider/julius.py +0 -223
- webscout/Provider/koala.py +0 -268
- webscout/Provider/labyrinth.py +0 -340
- webscout/Provider/learnfastai.py +0 -266
- webscout/Provider/lepton.py +0 -194
- webscout/Provider/llama3mitril.py +0 -180
- webscout/Provider/llamatutor.py +0 -192
- webscout/Provider/llmchat.py +0 -213
- webscout/Provider/llmchatco.py +0 -311
- webscout/Provider/meta.py +0 -794
- webscout/Provider/multichat.py +0 -325
- webscout/Provider/promptrefine.py +0 -193
- webscout/Provider/scira_chat.py +0 -277
- webscout/Provider/scnet.py +0 -187
- webscout/Provider/searchchat.py +0 -293
- webscout/Provider/sonus.py +0 -208
- webscout/Provider/talkai.py +0 -194
- webscout/Provider/toolbaz.py +0 -320
- webscout/Provider/turboseek.py +0 -219
- webscout/Provider/tutorai.py +0 -252
- webscout/Provider/typefully.py +0 -280
- webscout/Provider/typegpt.py +0 -232
- webscout/Provider/uncovr.py +0 -312
- webscout/Provider/x0gpt.py +0 -256
- webscout/Provider/yep.py +0 -376
- webscout/litagent/__init__.py +0 -29
- webscout/litagent/agent.py +0 -455
- webscout/litagent/constants.py +0 -60
- webscout/litprinter/__init__.py +0 -59
- webscout/scout/__init__.py +0 -8
- webscout/scout/core/__init__.py +0 -7
- webscout/scout/core/crawler.py +0 -140
- webscout/scout/core/scout.py +0 -568
- webscout/scout/core/search_result.py +0 -96
- webscout/scout/core/text_analyzer.py +0 -63
- webscout/scout/core/text_utils.py +0 -277
- webscout/scout/core/web_analyzer.py +0 -52
- webscout/scout/core.py +0 -881
- webscout/scout/element.py +0 -460
- webscout/scout/parsers/__init__.py +0 -69
- webscout/scout/parsers/html5lib_parser.py +0 -172
- webscout/scout/parsers/html_parser.py +0 -236
- webscout/scout/parsers/lxml_parser.py +0 -178
- webscout/scout/utils.py +0 -37
- webscout/swiftcli/__init__.py +0 -809
- webscout/zeroart/__init__.py +0 -55
- webscout/zeroart/base.py +0 -60
- webscout/zeroart/effects.py +0 -99
- webscout/zeroart/fonts.py +0 -816
- webscout-8.2.2.dist-info/RECORD +0 -309
- webscout-8.2.2.dist-info/entry_points.txt +0 -5
- webscout-8.2.2.dist-info/top_level.txt +0 -3
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- {webscout-8.2.2.dist-info → webscout-8.2.7.dist-info/licenses}/LICENSE.md +0 -0
webscout/Provider/AllenAI.py
DELETED
|
@@ -1,413 +0,0 @@
|
|
|
1
|
-
import requests
|
|
2
|
-
import json
|
|
3
|
-
import os
|
|
4
|
-
from uuid import uuid4
|
|
5
|
-
from typing import Any, Dict, Optional, Generator, Union
|
|
6
|
-
|
|
7
|
-
from webscout.AIutel import Optimizers
|
|
8
|
-
from webscout.AIutel import Conversation
|
|
9
|
-
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
10
|
-
from webscout.AIbase import Provider, AsyncProvider
|
|
11
|
-
from webscout import exceptions
|
|
12
|
-
from webscout.litagent import LitAgent
|
|
13
|
-
|
|
14
|
-
class AllenAI(Provider):
|
|
15
|
-
"""
|
|
16
|
-
A class to interact with the AllenAI (Ai2 Playground) API.
|
|
17
|
-
"""
|
|
18
|
-
|
|
19
|
-
AVAILABLE_MODELS = [
|
|
20
|
-
'OLMo-2-1124-13B-Instruct',
|
|
21
|
-
'Llama-3-1-Tulu-3-8B',
|
|
22
|
-
'olmo-2-0325-32b-instruct',
|
|
23
|
-
'Llama-3-1-Tulu-3-70B',
|
|
24
|
-
'OLMoE-1B-7B-0924-Instruct',
|
|
25
|
-
'tulu3-405b',
|
|
26
|
-
'olmo-2-0325-32b-instruct',
|
|
27
|
-
'tulu-3-1-8b',
|
|
28
|
-
'olmoe-0125'
|
|
29
|
-
]
|
|
30
|
-
|
|
31
|
-
# Default model options from JS implementation
|
|
32
|
-
DEFAULT_OPTIONS = {
|
|
33
|
-
"max_tokens": 2048,
|
|
34
|
-
"temperature": 0.7,
|
|
35
|
-
"top_p": 1,
|
|
36
|
-
"n": 1,
|
|
37
|
-
"stop": None,
|
|
38
|
-
"logprobs": None
|
|
39
|
-
}
|
|
40
|
-
|
|
41
|
-
# Host mapping for models - some models work best with specific hosts
|
|
42
|
-
MODEL_HOST_MAP = {
|
|
43
|
-
'tulu3-405b': 'inferd',
|
|
44
|
-
'tulu2': 'inferd',
|
|
45
|
-
'olmo-7b-instruct': 'inferd'
|
|
46
|
-
}
|
|
47
|
-
|
|
48
|
-
def __init__(
|
|
49
|
-
self,
|
|
50
|
-
is_conversation: bool = True,
|
|
51
|
-
max_tokens: int = 2048,
|
|
52
|
-
timeout: int = 30,
|
|
53
|
-
intro: str = None,
|
|
54
|
-
filepath: str = None,
|
|
55
|
-
update_file: bool = True,
|
|
56
|
-
proxies: dict = {},
|
|
57
|
-
history_offset: int = 10250,
|
|
58
|
-
act: str = None,
|
|
59
|
-
model: str = "OLMo-2-1124-13B-Instruct",
|
|
60
|
-
host: str = None # Now optional - will auto-detect if not provided
|
|
61
|
-
):
|
|
62
|
-
"""Initializes the AllenAI API client."""
|
|
63
|
-
if model not in self.AVAILABLE_MODELS:
|
|
64
|
-
raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")
|
|
65
|
-
|
|
66
|
-
self.url = "https://playground.allenai.org"
|
|
67
|
-
# Updated API endpoint to v3 from v4
|
|
68
|
-
self.api_endpoint = "https://olmo-api.allen.ai/v3/message/stream"
|
|
69
|
-
self.whoami_endpoint = "https://olmo-api.allen.ai/v3/whoami"
|
|
70
|
-
|
|
71
|
-
# Updated headers based on JS implementation
|
|
72
|
-
self.headers = {
|
|
73
|
-
'User-Agent': "Mozilla/5.0 (Linux; Android 10; K) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Mobile Safari/537.36",
|
|
74
|
-
'Accept': '*/*',
|
|
75
|
-
'Accept-Language': 'id-ID,id;q=0.9',
|
|
76
|
-
'Origin': self.url,
|
|
77
|
-
'Referer': f"{self.url}/",
|
|
78
|
-
'Connection': 'keep-alive',
|
|
79
|
-
'Cache-Control': 'no-cache',
|
|
80
|
-
'Pragma': 'no-cache',
|
|
81
|
-
'Priority': 'u=1, i',
|
|
82
|
-
'Sec-Fetch-Dest': 'empty',
|
|
83
|
-
'Sec-Fetch-Mode': 'cors',
|
|
84
|
-
'Sec-Fetch-Site': 'cross-site',
|
|
85
|
-
'sec-ch-ua': '"Chromium";v="131", "Not_A Brand";v="24", "Microsoft Edge Simulate";v="131", "Lemur";v="131"',
|
|
86
|
-
'sec-ch-ua-mobile': '?1',
|
|
87
|
-
'sec-ch-ua-platform': '"Android"',
|
|
88
|
-
'Content-Type': 'application/json'
|
|
89
|
-
}
|
|
90
|
-
|
|
91
|
-
self.session = requests.Session()
|
|
92
|
-
self.session.headers.update(self.headers)
|
|
93
|
-
self.session.proxies.update(proxies)
|
|
94
|
-
self.model = model
|
|
95
|
-
|
|
96
|
-
# Auto-detect host if not provided
|
|
97
|
-
if not host:
|
|
98
|
-
# Use the preferred host from the model-host map, or default to modal
|
|
99
|
-
self.host = self.MODEL_HOST_MAP.get(model, 'modal')
|
|
100
|
-
else:
|
|
101
|
-
self.host = host
|
|
102
|
-
|
|
103
|
-
self.is_conversation = is_conversation
|
|
104
|
-
self.max_tokens_to_sample = max_tokens
|
|
105
|
-
self.timeout = timeout
|
|
106
|
-
self.last_response = {}
|
|
107
|
-
# Generate user ID if needed
|
|
108
|
-
self.x_anonymous_user_id = None
|
|
109
|
-
self.parent = None
|
|
110
|
-
|
|
111
|
-
# Default options
|
|
112
|
-
self.options = self.DEFAULT_OPTIONS.copy()
|
|
113
|
-
self.options["max_tokens"] = max_tokens
|
|
114
|
-
|
|
115
|
-
self.__available_optimizers = (
|
|
116
|
-
method
|
|
117
|
-
for method in dir(Optimizers)
|
|
118
|
-
if callable(getattr(Optimizers, method)) and not method.startswith("__")
|
|
119
|
-
)
|
|
120
|
-
Conversation.intro = (
|
|
121
|
-
AwesomePrompts().get_act(
|
|
122
|
-
act, raise_not_found=True, default=None, case_insensitive=True
|
|
123
|
-
)
|
|
124
|
-
if act
|
|
125
|
-
else intro or Conversation.intro
|
|
126
|
-
)
|
|
127
|
-
|
|
128
|
-
self.conversation = Conversation(
|
|
129
|
-
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
130
|
-
)
|
|
131
|
-
self.conversation.history_offset = history_offset
|
|
132
|
-
|
|
133
|
-
def whoami(self):
|
|
134
|
-
"""Gets or creates a user ID for authentication with Allen AI API"""
|
|
135
|
-
temp_id = str(uuid4())
|
|
136
|
-
headers = self.session.headers.copy()
|
|
137
|
-
headers.update({"x-anonymous-user-id": temp_id})
|
|
138
|
-
|
|
139
|
-
try:
|
|
140
|
-
response = self.session.get(
|
|
141
|
-
self.whoami_endpoint,
|
|
142
|
-
headers=headers,
|
|
143
|
-
timeout=self.timeout
|
|
144
|
-
)
|
|
145
|
-
|
|
146
|
-
if response.status_code == 200:
|
|
147
|
-
data = response.json()
|
|
148
|
-
self.x_anonymous_user_id = data.get("client", temp_id)
|
|
149
|
-
return data
|
|
150
|
-
else:
|
|
151
|
-
self.x_anonymous_user_id = temp_id
|
|
152
|
-
return {"client": temp_id}
|
|
153
|
-
|
|
154
|
-
except Exception as e:
|
|
155
|
-
self.x_anonymous_user_id = temp_id
|
|
156
|
-
return {"client": temp_id, "error": str(e)}
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
def parse_stream(self, raw_data):
|
|
160
|
-
"""Parse the raw streaming data according to the JS implementation"""
|
|
161
|
-
result = ""
|
|
162
|
-
for line in raw_data.splitlines():
|
|
163
|
-
try:
|
|
164
|
-
parsed = json.loads(line)
|
|
165
|
-
# Check if message starts with msg_ pattern
|
|
166
|
-
if parsed.get("message", "").startswith("msg_"):
|
|
167
|
-
result += parsed.get("content", "")
|
|
168
|
-
except:
|
|
169
|
-
continue
|
|
170
|
-
return result
|
|
171
|
-
|
|
172
|
-
def ask(
|
|
173
|
-
self,
|
|
174
|
-
prompt: str,
|
|
175
|
-
stream: bool = False,
|
|
176
|
-
raw: bool = False,
|
|
177
|
-
optimizer: str = None,
|
|
178
|
-
conversationally: bool = False,
|
|
179
|
-
host: str = None,
|
|
180
|
-
private: bool = False,
|
|
181
|
-
top_p: float = None,
|
|
182
|
-
temperature: float = None,
|
|
183
|
-
options: dict = None,
|
|
184
|
-
) -> Union[Dict[str, Any], Generator]:
|
|
185
|
-
conversation_prompt = self.conversation.gen_complete_prompt(prompt)
|
|
186
|
-
if optimizer:
|
|
187
|
-
if optimizer in self.__available_optimizers:
|
|
188
|
-
conversation_prompt = getattr(Optimizers, optimizer)(
|
|
189
|
-
conversation_prompt if conversationally else prompt
|
|
190
|
-
)
|
|
191
|
-
else:
|
|
192
|
-
raise Exception(f"Optimizer is not one of {self.__available_optimizers}")
|
|
193
|
-
|
|
194
|
-
# Ensure we have a user ID
|
|
195
|
-
if not self.x_anonymous_user_id:
|
|
196
|
-
self.whoami()
|
|
197
|
-
|
|
198
|
-
# Prepare the API request
|
|
199
|
-
self.session.headers.update({
|
|
200
|
-
"x-anonymous-user-id": self.x_anonymous_user_id,
|
|
201
|
-
"Content-Type": "application/json"
|
|
202
|
-
})
|
|
203
|
-
|
|
204
|
-
# Create options dictionary
|
|
205
|
-
opts = self.options.copy()
|
|
206
|
-
if temperature is not None:
|
|
207
|
-
opts["temperature"] = temperature
|
|
208
|
-
if top_p is not None:
|
|
209
|
-
opts["top_p"] = top_p
|
|
210
|
-
if options:
|
|
211
|
-
opts.update(options)
|
|
212
|
-
|
|
213
|
-
# Use the host param or the default host
|
|
214
|
-
use_host = host or self.host
|
|
215
|
-
|
|
216
|
-
# List of hosts to try - start with provided host, then try alternative hosts
|
|
217
|
-
hosts_to_try = [use_host]
|
|
218
|
-
if use_host == 'modal':
|
|
219
|
-
hosts_to_try.append('inferd')
|
|
220
|
-
else:
|
|
221
|
-
hosts_to_try.append('modal')
|
|
222
|
-
|
|
223
|
-
last_error = None
|
|
224
|
-
|
|
225
|
-
# Try each host until one works
|
|
226
|
-
for current_host in hosts_to_try:
|
|
227
|
-
# Create the JSON payload as per the JS implementation
|
|
228
|
-
payload = {
|
|
229
|
-
"content": conversation_prompt,
|
|
230
|
-
"private": private,
|
|
231
|
-
"model": self.model,
|
|
232
|
-
"host": current_host,
|
|
233
|
-
"opts": opts
|
|
234
|
-
}
|
|
235
|
-
|
|
236
|
-
# Add parent if exists
|
|
237
|
-
if self.parent:
|
|
238
|
-
payload["parent"] = self.parent
|
|
239
|
-
|
|
240
|
-
try:
|
|
241
|
-
if stream:
|
|
242
|
-
return self._stream_request(payload, prompt, raw)
|
|
243
|
-
else:
|
|
244
|
-
return self._non_stream_request(payload, prompt)
|
|
245
|
-
except exceptions.FailedToGenerateResponseError as e:
|
|
246
|
-
last_error = e
|
|
247
|
-
# Log the error but continue to try other hosts
|
|
248
|
-
print(f"Host '{current_host}' failed for model '{self.model}', trying next host...")
|
|
249
|
-
continue
|
|
250
|
-
|
|
251
|
-
# If we've tried all hosts and none worked, raise the last error
|
|
252
|
-
raise last_error or exceptions.FailedToGenerateResponseError("All hosts failed. Unable to complete request.")
|
|
253
|
-
|
|
254
|
-
def _stream_request(self, payload, prompt, raw=False):
|
|
255
|
-
"""Handle streaming requests with the given payload"""
|
|
256
|
-
try:
|
|
257
|
-
response = self.session.post(
|
|
258
|
-
self.api_endpoint,
|
|
259
|
-
json=payload,
|
|
260
|
-
stream=True,
|
|
261
|
-
timeout=self.timeout
|
|
262
|
-
)
|
|
263
|
-
|
|
264
|
-
if response.status_code != 200:
|
|
265
|
-
raise exceptions.FailedToGenerateResponseError(
|
|
266
|
-
f"Request failed with status code {response.status_code}: {response.text}"
|
|
267
|
-
)
|
|
268
|
-
|
|
269
|
-
streaming_text = ""
|
|
270
|
-
current_parent = None
|
|
271
|
-
|
|
272
|
-
for chunk in response.iter_content(chunk_size=1024, decode_unicode=False):
|
|
273
|
-
if not chunk:
|
|
274
|
-
continue
|
|
275
|
-
|
|
276
|
-
decoded = chunk.decode(errors="ignore")
|
|
277
|
-
for line in decoded.splitlines():
|
|
278
|
-
line = line.strip()
|
|
279
|
-
if not line:
|
|
280
|
-
continue
|
|
281
|
-
|
|
282
|
-
try:
|
|
283
|
-
data = json.loads(line)
|
|
284
|
-
except json.JSONDecodeError:
|
|
285
|
-
continue
|
|
286
|
-
|
|
287
|
-
if isinstance(data, dict):
|
|
288
|
-
# Check for message pattern from JS implementation
|
|
289
|
-
if data.get("message", "").startswith("msg_") and "content" in data:
|
|
290
|
-
content = data.get("content", "")
|
|
291
|
-
if content:
|
|
292
|
-
streaming_text += content
|
|
293
|
-
resp = dict(text=content)
|
|
294
|
-
yield resp if raw else resp
|
|
295
|
-
|
|
296
|
-
# Legacy handling for older API
|
|
297
|
-
elif "message" in data and data.get("content"):
|
|
298
|
-
content = data.get("content")
|
|
299
|
-
if content.strip():
|
|
300
|
-
streaming_text += content
|
|
301
|
-
resp = dict(text=content)
|
|
302
|
-
yield resp if raw else resp
|
|
303
|
-
|
|
304
|
-
# Update parent ID if present
|
|
305
|
-
if data.get("id"):
|
|
306
|
-
current_parent = data.get("id")
|
|
307
|
-
elif data.get("children"):
|
|
308
|
-
for child in data["children"]:
|
|
309
|
-
if child.get("role") == "assistant":
|
|
310
|
-
current_parent = child.get("id")
|
|
311
|
-
break
|
|
312
|
-
|
|
313
|
-
# Handle completion
|
|
314
|
-
if data.get("final") or data.get("finish_reason") == "stop":
|
|
315
|
-
if current_parent:
|
|
316
|
-
self.parent = current_parent
|
|
317
|
-
|
|
318
|
-
# Update conversation history
|
|
319
|
-
self.conversation.update_chat_history(prompt, streaming_text)
|
|
320
|
-
self.last_response = {"text": streaming_text}
|
|
321
|
-
return
|
|
322
|
-
|
|
323
|
-
except requests.RequestException as e:
|
|
324
|
-
raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
|
|
325
|
-
|
|
326
|
-
def _non_stream_request(self, payload, prompt):
|
|
327
|
-
"""Handle non-streaming requests with the given payload"""
|
|
328
|
-
try:
|
|
329
|
-
# For non-streaming requests, we can directly send without stream=True
|
|
330
|
-
response = self.session.post(
|
|
331
|
-
self.api_endpoint,
|
|
332
|
-
json=payload,
|
|
333
|
-
stream=False,
|
|
334
|
-
timeout=self.timeout
|
|
335
|
-
)
|
|
336
|
-
|
|
337
|
-
if response.status_code != 200:
|
|
338
|
-
raise exceptions.FailedToGenerateResponseError(
|
|
339
|
-
f"Request failed with status code {response.status_code}: {response.text}"
|
|
340
|
-
)
|
|
341
|
-
|
|
342
|
-
# Parse the response as per JS implementation
|
|
343
|
-
raw_response = response.text
|
|
344
|
-
parsed_response = self.parse_stream(raw_response)
|
|
345
|
-
self.conversation.update_chat_history(prompt, parsed_response)
|
|
346
|
-
self.last_response = {"text": parsed_response}
|
|
347
|
-
return self.last_response
|
|
348
|
-
|
|
349
|
-
except Exception as e:
|
|
350
|
-
raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
|
|
351
|
-
|
|
352
|
-
def chat(
|
|
353
|
-
self,
|
|
354
|
-
prompt: str,
|
|
355
|
-
stream: bool = False,
|
|
356
|
-
optimizer: str = None,
|
|
357
|
-
conversationally: bool = False,
|
|
358
|
-
host: str = None,
|
|
359
|
-
options: dict = None,
|
|
360
|
-
) -> str:
|
|
361
|
-
def for_stream():
|
|
362
|
-
for response in self.ask(
|
|
363
|
-
prompt,
|
|
364
|
-
True,
|
|
365
|
-
optimizer=optimizer,
|
|
366
|
-
conversationally=conversationally,
|
|
367
|
-
host=host,
|
|
368
|
-
options=options
|
|
369
|
-
):
|
|
370
|
-
yield self.get_message(response)
|
|
371
|
-
def for_non_stream():
|
|
372
|
-
return self.get_message(
|
|
373
|
-
self.ask(
|
|
374
|
-
prompt,
|
|
375
|
-
False,
|
|
376
|
-
optimizer=optimizer,
|
|
377
|
-
conversationally=conversationally,
|
|
378
|
-
host=host,
|
|
379
|
-
options=options
|
|
380
|
-
)
|
|
381
|
-
)
|
|
382
|
-
return for_stream() if stream else for_non_stream()
|
|
383
|
-
|
|
384
|
-
def get_message(self, response: dict) -> str:
|
|
385
|
-
assert isinstance(response, dict), "Response should be of dict data-type only"
|
|
386
|
-
return response["text"]
|
|
387
|
-
|
|
388
|
-
|
|
389
|
-
|
|
390
|
-
if __name__ == "__main__":
|
|
391
|
-
print("-" * 80)
|
|
392
|
-
print(f"{'Model':<50} {'Status':<10} {'Response'}")
|
|
393
|
-
print("-" * 80)
|
|
394
|
-
|
|
395
|
-
for model in AllenAI.AVAILABLE_MODELS:
|
|
396
|
-
try:
|
|
397
|
-
# Auto-detect host
|
|
398
|
-
test_ai = AllenAI(model=model, timeout=60)
|
|
399
|
-
# Pass the host explicitly to display accurate error messages
|
|
400
|
-
response = test_ai.chat("Say 'Hello' in one word")
|
|
401
|
-
response_text = response
|
|
402
|
-
|
|
403
|
-
if response_text and len(response_text.strip()) > 0:
|
|
404
|
-
status = "✓"
|
|
405
|
-
# Truncate response if too long
|
|
406
|
-
display_text = response_text.strip()[:50] + "..." if len(response_text.strip()) > 50 else response_text.strip()
|
|
407
|
-
print(f"{model:<50} {status:<10} {display_text} (host: {test_ai.host})")
|
|
408
|
-
else:
|
|
409
|
-
status = "✗"
|
|
410
|
-
display_text = "Empty or invalid response"
|
|
411
|
-
print(f"{model:<50} {status:<10} {display_text}")
|
|
412
|
-
except Exception as e:
|
|
413
|
-
print(f"{model:<50} {'✗':<10} {str(e)}")
|
webscout/Provider/Andi.py
DELETED
|
@@ -1,228 +0,0 @@
|
|
|
1
|
-
from uuid import uuid4
|
|
2
|
-
import requests
|
|
3
|
-
import json
|
|
4
|
-
from webscout.AIutel import Optimizers
|
|
5
|
-
from webscout.AIutel import Conversation
|
|
6
|
-
from webscout.AIutel import AwesomePrompts, sanitize_stream
|
|
7
|
-
from webscout.AIbase import Provider, AsyncProvider
|
|
8
|
-
from webscout import exceptions
|
|
9
|
-
from typing import Union, Any, AsyncGenerator, Dict
|
|
10
|
-
from webscout import WEBS
|
|
11
|
-
from webscout.litagent import LitAgent
|
|
12
|
-
|
|
13
|
-
class AndiSearch(Provider):
|
|
14
|
-
def __init__(
|
|
15
|
-
self,
|
|
16
|
-
is_conversation: bool = True,
|
|
17
|
-
max_tokens: int = 600,
|
|
18
|
-
timeout: int = 30,
|
|
19
|
-
intro: str = None,
|
|
20
|
-
filepath: str = None,
|
|
21
|
-
update_file: bool = True,
|
|
22
|
-
proxies: dict = {},
|
|
23
|
-
history_offset: int = 10250,
|
|
24
|
-
act: str = None,
|
|
25
|
-
):
|
|
26
|
-
"""Instantiates AndiSearch
|
|
27
|
-
|
|
28
|
-
Args:
|
|
29
|
-
is_conversation (bool, optional): Flag for chatting conversationally. Defaults to True.
|
|
30
|
-
max_tokens (int, optional): Maximum number of tokens to be generated upon completion. Defaults to 600.
|
|
31
|
-
timeout (int, optional): Http request timeout. Defaults to 30.
|
|
32
|
-
intro (str, optional): Conversation introductory prompt. Defaults to None.
|
|
33
|
-
filepath (str, optional): Path to file containing conversation history. Defaults to None.
|
|
34
|
-
update_file (bool, optional): Add new prompts and responses to the file. Defaults to True.
|
|
35
|
-
proxies (dict, optional): Http request proxies. Defaults to {}.
|
|
36
|
-
history_offset (int, optional): Limit conversation history to this number of last texts. Defaults to 10250.
|
|
37
|
-
act (str|int, optional): Awesome prompt key or index. (Used as intro). Defaults to None.
|
|
38
|
-
"""
|
|
39
|
-
self.session = requests.Session()
|
|
40
|
-
self.is_conversation = is_conversation
|
|
41
|
-
self.max_tokens_to_sample = max_tokens
|
|
42
|
-
self.chat_endpoint = "https://write.andisearch.com/v1/write_streaming"
|
|
43
|
-
self.stream_chunk_size = 64
|
|
44
|
-
self.timeout = timeout
|
|
45
|
-
self.last_response = {}
|
|
46
|
-
self.headers = {
|
|
47
|
-
"accept": "text/event-stream",
|
|
48
|
-
"accept-encoding": "gzip, deflate, br, zstd",
|
|
49
|
-
"accept-language": "en-US,en;q=0.9,en-IN;q=0.8",
|
|
50
|
-
"andi-auth-key": "andi-summarizer",
|
|
51
|
-
"andi-origin": "x-andi-origin",
|
|
52
|
-
"authorization": str(uuid4()),
|
|
53
|
-
"content-type": "application/json",
|
|
54
|
-
"dnt": "1",
|
|
55
|
-
"origin": "https://andisearch.com",
|
|
56
|
-
"priority": "u=1, i",
|
|
57
|
-
"sec-ch-ua": '"Not)A;Brand";v="99", "Microsoft Edge";v="127", "Chromium";v="127"',
|
|
58
|
-
"sec-ch-ua-mobile": "?0",
|
|
59
|
-
"sec-ch-ua-platform": '"Windows"',
|
|
60
|
-
"sec-fetch-dest": "empty",
|
|
61
|
-
"sec-fetch-mode": "cors",
|
|
62
|
-
"sec-fetch-site": "same-site",
|
|
63
|
-
"user-agent": LitAgent().random(),
|
|
64
|
-
"x-amz-date": "20240730T031106Z",
|
|
65
|
-
"x-amz-security-token": str(uuid4()),
|
|
66
|
-
}
|
|
67
|
-
|
|
68
|
-
self.__available_optimizers = (
|
|
69
|
-
method
|
|
70
|
-
for method in dir(Optimizers)
|
|
71
|
-
if callable(getattr(Optimizers, method)) and not method.startswith("__")
|
|
72
|
-
)
|
|
73
|
-
self.session.headers.update(self.headers)
|
|
74
|
-
Conversation.intro = (
|
|
75
|
-
AwesomePrompts().get_act(
|
|
76
|
-
act, raise_not_found=True, default=None, case_insensitive=True
|
|
77
|
-
)
|
|
78
|
-
if act
|
|
79
|
-
else intro or Conversation.intro
|
|
80
|
-
)
|
|
81
|
-
self.conversation = Conversation(
|
|
82
|
-
is_conversation, self.max_tokens_to_sample, filepath, update_file
|
|
83
|
-
)
|
|
84
|
-
self.conversation.history_offset = history_offset
|
|
85
|
-
self.session.proxies = proxies
|
|
86
|
-
|
|
87
|
-
def ask(
    self,
    prompt: str,
    stream: bool = False,
    raw: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> dict:
    """Send ``prompt`` (grounded with fresh web search results) to AndiSearch.

    Args:
        prompt (str): Prompt to be sent.
        stream (bool, optional): Yield response chunks as they arrive. Defaults to False.
        raw (bool, optional): Yield raw text lines instead of ``{"text": ...}`` dicts.
            Defaults to False.
        optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`.
            Defaults to None.
        conversationally (bool, optional): Chat conversationally when using optimizer.
            Defaults to False.

    Returns:
        dict: The accumulated last response ``{"text": ...}`` (non-stream),
        or a generator of such dicts (stream).

    Raises:
        Exception: If ``optimizer`` is not a known optimizer name.
        exceptions.FailedToGenerateResponseError: If the HTTP request fails.
    """
    from urllib.parse import urlsplit

    conversation_prompt = self.conversation.gen_complete_prompt(prompt)
    if optimizer:
        if optimizer in self.__available_optimizers:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
        else:
            raise Exception(
                f"Optimizer is not one of {self.__available_optimizers}"
            )

    # Initialize the webscout instance and fetch search results to ground the answer.
    webs = WEBS()
    search_query = prompt
    # Guard against a None return so the comprehension below never crashes.
    search_results = webs.text(search_query, max_results=7) or []

    def _domain(url: str) -> str:
        # Robust host extraction; the previous split("//")[1] crashed on
        # relative or malformed URLs. Fall back to the raw string.
        return urlsplit(url).netloc or url

    # Format the search results into the serp payload structure the API expects.
    serp_payload = {
        "query": search_query,
        "serp": {
            "results_type": "Search",
            "answer": "",
            "type": "navigation",
            "title": "",
            "description": "",
            "image": "",
            "link": "",
            "source": "liftndrift.com",
            "engine": "andi-b",
            "results": [
                {
                    # .get() keeps one malformed search hit from aborting the call.
                    "title": result.get("title", ""),
                    "link": result.get("href", ""),
                    "desc": result.get("body", ""),
                    "image": "",
                    "type": "website",
                    "source": _domain(result.get("href", "")),
                }
                for result in search_results
            ],
        },
    }
    self.session.headers.update(self.headers)
    payload = serp_payload

    def for_stream():
        response = self.session.post(
            self.chat_endpoint, json=payload, stream=True, timeout=self.timeout
        )
        if not response.ok:
            raise exceptions.FailedToGenerateResponseError(
                f"Failed to generate response - ({response.status_code}, {response.reason}) - {response.text}"
            )

        streaming_text = ""
        for value in response.iter_lines(
            decode_unicode=True,
            chunk_size=self.stream_chunk_size,
            delimiter="\n",
        ):
            # NOTE: the old try/except json.decoder.JSONDecodeError was dead
            # code — nothing here parses JSON — so it has been removed.
            if value:  # skip keep-alive blank lines from the event stream
                streaming_text += value + ("\n" if stream else "")
                resp = dict(text=streaming_text)
                self.last_response.update(resp)
                yield value if raw else resp
        self.conversation.update_chat_history(
            prompt, self.get_message(self.last_response)
        )

    def for_non_stream():
        # Drain the stream fully, then hand back the accumulated response.
        for _ in for_stream():
            pass
        return self.last_response

    return for_stream() if stream else for_non_stream()
|
|
176
|
-
|
|
177
|
-
def chat(
    self,
    prompt: str,
    stream: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> str:
    """Generate response `str`
    Args:
        prompt (str): Prompt to be send.
        stream (bool, optional): Flag for streaming response. Defaults to False.
        optimizer (str, optional): Prompt optimizer name - `[code, shell_command]`. Defaults to None.
        conversationally (bool, optional): Chat conversationally when using optimizer. Defaults to False.
    Returns:
        str: Response generated
    """
    if stream:
        # Lazily translate each streamed chunk dict into its message text.
        def _streamed():
            for chunk in self.ask(
                prompt,
                True,
                optimizer=optimizer,
                conversationally=conversationally,
            ):
                yield self.get_message(chunk)

        return _streamed()

    # Non-streaming: one blocking call, one extracted message.
    full_response = self.ask(
        prompt,
        False,
        optimizer=optimizer,
        conversationally=conversationally,
    )
    return self.get_message(full_response)
|
|
211
|
-
|
|
212
|
-
def get_message(self, response: dict) -> str:
    """Retrieves message only from response

    Args:
        response (dict): Response generated by `self.ask`

    Returns:
        str: Message extracted

    Raises:
        TypeError: If ``response`` is not a dict.
    """
    # Validate explicitly: `assert` statements are stripped under `python -O`,
    # which would silently disable this check.
    if not isinstance(response, dict):
        raise TypeError("Response should be of dict data-type only")
    return response["text"]
|
|
223
|
-
if __name__ == '__main__':
    from rich import print
    ai = AndiSearch()
    # Pass stream=True so chat() returns a chunk generator. Without it,
    # chat() returns a plain str and the loop below would iterate it
    # character-by-character, defeating the streaming intent.
    response = ai.chat("tell me about india", stream=True)
    for chunk in response:
        print(chunk, end="", flush=True)
|