webscout 8.2.7__py3-none-any.whl → 8.2.9__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- webscout/AIauto.py +33 -15
- webscout/AIbase.py +96 -37
- webscout/AIutel.py +703 -250
- webscout/Bard.py +441 -323
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/__init__.py +10 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/GitToolkit/gitapi/__init__.py +12 -0
- webscout/Extra/GitToolkit/gitapi/repository.py +195 -0
- webscout/Extra/GitToolkit/gitapi/user.py +96 -0
- webscout/Extra/GitToolkit/gitapi/utils.py +62 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/YTdownloader.py +957 -0
- webscout/Extra/YTToolkit/__init__.py +3 -0
- webscout/Extra/YTToolkit/transcriber.py +476 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/__init__.py +6 -0
- webscout/Extra/YTToolkit/ytapi/channel.py +307 -0
- webscout/Extra/YTToolkit/ytapi/errors.py +13 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +118 -0
- webscout/Extra/YTToolkit/ytapi/https.py +88 -0
- webscout/Extra/YTToolkit/ytapi/patterns.py +61 -0
- webscout/Extra/YTToolkit/ytapi/playlist.py +59 -0
- webscout/Extra/YTToolkit/ytapi/pool.py +8 -0
- webscout/Extra/YTToolkit/ytapi/query.py +40 -0
- webscout/Extra/YTToolkit/ytapi/stream.py +63 -0
- webscout/Extra/YTToolkit/ytapi/utils.py +62 -0
- webscout/Extra/YTToolkit/ytapi/video.py +232 -0
- webscout/Extra/__init__.py +7 -0
- webscout/Extra/autocoder/__init__.py +9 -0
- webscout/Extra/autocoder/autocoder.py +1105 -0
- webscout/Extra/autocoder/autocoder_utiles.py +332 -0
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/gguf.py +684 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/tempmail/__init__.py +28 -0
- webscout/Extra/tempmail/async_utils.py +141 -0
- webscout/Extra/tempmail/base.py +161 -0
- webscout/Extra/tempmail/cli.py +187 -0
- webscout/Extra/tempmail/emailnator.py +84 -0
- webscout/Extra/tempmail/mail_tm.py +361 -0
- webscout/Extra/tempmail/temp_mail_io.py +292 -0
- webscout/Extra/weather.md +281 -0
- webscout/Extra/weather.py +194 -0
- webscout/Extra/weather_ascii.py +76 -0
- webscout/Litlogger/README.md +10 -0
- webscout/Litlogger/__init__.py +15 -0
- webscout/Litlogger/formats.py +4 -0
- webscout/Litlogger/handlers.py +103 -0
- webscout/Litlogger/levels.py +13 -0
- webscout/Litlogger/logger.py +92 -0
- webscout/Provider/AI21.py +177 -0
- webscout/Provider/AISEARCH/DeepFind.py +254 -0
- webscout/Provider/AISEARCH/Perplexity.py +333 -0
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +9 -0
- webscout/Provider/AISEARCH/felo_search.py +202 -0
- webscout/Provider/AISEARCH/genspark_search.py +324 -0
- webscout/Provider/AISEARCH/hika_search.py +186 -0
- webscout/Provider/AISEARCH/iask_search.py +410 -0
- webscout/Provider/AISEARCH/monica_search.py +220 -0
- webscout/Provider/AISEARCH/scira_search.py +298 -0
- webscout/Provider/AISEARCH/webpilotai_search.py +255 -0
- webscout/Provider/Aitopia.py +316 -0
- webscout/Provider/AllenAI.py +440 -0
- webscout/Provider/Andi.py +228 -0
- webscout/Provider/Blackboxai.py +791 -0
- webscout/Provider/ChatGPTClone.py +237 -0
- webscout/Provider/ChatGPTGratis.py +194 -0
- webscout/Provider/ChatSandbox.py +342 -0
- webscout/Provider/Cloudflare.py +324 -0
- webscout/Provider/Cohere.py +208 -0
- webscout/Provider/Deepinfra.py +340 -0
- webscout/Provider/ExaAI.py +261 -0
- webscout/Provider/ExaChat.py +358 -0
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/Gemini.py +169 -0
- webscout/Provider/GithubChat.py +369 -0
- webscout/Provider/GizAI.py +295 -0
- webscout/Provider/Glider.py +225 -0
- webscout/Provider/Groq.py +801 -0
- webscout/Provider/HF_space/__init__.py +0 -0
- webscout/Provider/HF_space/qwen_qwen2.py +206 -0
- webscout/Provider/HeckAI.py +375 -0
- webscout/Provider/HuggingFaceChat.py +469 -0
- webscout/Provider/Hunyuan.py +283 -0
- webscout/Provider/Jadve.py +291 -0
- webscout/Provider/Koboldai.py +384 -0
- webscout/Provider/LambdaChat.py +411 -0
- webscout/Provider/Llama3.py +259 -0
- webscout/Provider/MCPCore.py +315 -0
- webscout/Provider/Marcus.py +198 -0
- webscout/Provider/Nemotron.py +218 -0
- webscout/Provider/Netwrck.py +270 -0
- webscout/Provider/OLLAMA.py +396 -0
- webscout/Provider/OPENAI/BLACKBOXAI.py +766 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +283 -0
- webscout/Provider/OPENAI/NEMOTRON.py +232 -0
- webscout/Provider/OPENAI/Qwen3.py +283 -0
- webscout/Provider/OPENAI/README.md +952 -0
- webscout/Provider/OPENAI/TwoAI.py +357 -0
- webscout/Provider/OPENAI/__init__.py +40 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -0
- webscout/Provider/OPENAI/api.py +969 -0
- webscout/Provider/OPENAI/base.py +249 -0
- webscout/Provider/OPENAI/c4ai.py +373 -0
- webscout/Provider/OPENAI/chatgpt.py +556 -0
- webscout/Provider/OPENAI/chatgptclone.py +494 -0
- webscout/Provider/OPENAI/chatsandbox.py +173 -0
- webscout/Provider/OPENAI/copilot.py +242 -0
- webscout/Provider/OPENAI/deepinfra.py +322 -0
- webscout/Provider/OPENAI/e2b.py +1414 -0
- webscout/Provider/OPENAI/exaai.py +417 -0
- webscout/Provider/OPENAI/exachat.py +444 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -0
- webscout/Provider/OPENAI/glider.py +326 -0
- webscout/Provider/OPENAI/groq.py +364 -0
- webscout/Provider/OPENAI/heckai.py +308 -0
- webscout/Provider/OPENAI/llmchatco.py +335 -0
- webscout/Provider/OPENAI/mcpcore.py +389 -0
- webscout/Provider/OPENAI/multichat.py +376 -0
- webscout/Provider/OPENAI/netwrck.py +357 -0
- webscout/Provider/OPENAI/oivscode.py +287 -0
- webscout/Provider/OPENAI/opkfc.py +496 -0
- webscout/Provider/OPENAI/pydantic_imports.py +172 -0
- webscout/Provider/OPENAI/scirachat.py +477 -0
- webscout/Provider/OPENAI/sonus.py +304 -0
- webscout/Provider/OPENAI/standardinput.py +433 -0
- webscout/Provider/OPENAI/textpollinations.py +339 -0
- webscout/Provider/OPENAI/toolbaz.py +413 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +364 -0
- webscout/Provider/OPENAI/uncovrAI.py +463 -0
- webscout/Provider/OPENAI/utils.py +318 -0
- webscout/Provider/OPENAI/venice.py +431 -0
- webscout/Provider/OPENAI/wisecat.py +387 -0
- webscout/Provider/OPENAI/writecream.py +163 -0
- webscout/Provider/OPENAI/x0gpt.py +365 -0
- webscout/Provider/OPENAI/yep.py +382 -0
- webscout/Provider/OpenGPT.py +209 -0
- webscout/Provider/Openai.py +496 -0
- webscout/Provider/PI.py +429 -0
- webscout/Provider/Perplexitylabs.py +415 -0
- webscout/Provider/QwenLM.py +254 -0
- webscout/Provider/Reka.py +214 -0
- webscout/Provider/StandardInput.py +290 -0
- webscout/Provider/TTI/README.md +82 -0
- webscout/Provider/TTI/__init__.py +7 -0
- webscout/Provider/TTI/aiarta.py +365 -0
- webscout/Provider/TTI/artbit.py +0 -0
- webscout/Provider/TTI/base.py +64 -0
- webscout/Provider/TTI/fastflux.py +200 -0
- webscout/Provider/TTI/magicstudio.py +201 -0
- webscout/Provider/TTI/piclumen.py +203 -0
- webscout/Provider/TTI/pixelmuse.py +225 -0
- webscout/Provider/TTI/pollinations.py +221 -0
- webscout/Provider/TTI/utils.py +11 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +10 -0
- webscout/Provider/TTS/base.py +159 -0
- webscout/Provider/TTS/deepgram.py +156 -0
- webscout/Provider/TTS/elevenlabs.py +111 -0
- webscout/Provider/TTS/gesserit.py +128 -0
- webscout/Provider/TTS/murfai.py +113 -0
- webscout/Provider/TTS/openai_fm.py +129 -0
- webscout/Provider/TTS/parler.py +111 -0
- webscout/Provider/TTS/speechma.py +580 -0
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TTS/streamElements.py +333 -0
- webscout/Provider/TTS/utils.py +280 -0
- webscout/Provider/TeachAnything.py +229 -0
- webscout/Provider/TextPollinationsAI.py +308 -0
- webscout/Provider/TwoAI.py +475 -0
- webscout/Provider/TypliAI.py +305 -0
- webscout/Provider/UNFINISHED/ChatHub.py +209 -0
- webscout/Provider/UNFINISHED/Youchat.py +330 -0
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/puterjs.py +635 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Venice.py +258 -0
- webscout/Provider/VercelAI.py +253 -0
- webscout/Provider/WiseCat.py +233 -0
- webscout/Provider/WrDoChat.py +370 -0
- webscout/Provider/Writecream.py +246 -0
- webscout/Provider/WritingMate.py +269 -0
- webscout/Provider/__init__.py +174 -0
- webscout/Provider/ai4chat.py +174 -0
- webscout/Provider/akashgpt.py +335 -0
- webscout/Provider/asksteve.py +220 -0
- webscout/Provider/cerebras.py +290 -0
- webscout/Provider/chatglm.py +215 -0
- webscout/Provider/cleeai.py +213 -0
- webscout/Provider/copilot.py +425 -0
- webscout/Provider/elmo.py +283 -0
- webscout/Provider/freeaichat.py +285 -0
- webscout/Provider/geminiapi.py +208 -0
- webscout/Provider/granite.py +235 -0
- webscout/Provider/hermes.py +266 -0
- webscout/Provider/julius.py +223 -0
- webscout/Provider/koala.py +170 -0
- webscout/Provider/learnfastai.py +325 -0
- webscout/Provider/llama3mitril.py +215 -0
- webscout/Provider/llmchat.py +258 -0
- webscout/Provider/llmchatco.py +306 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +801 -0
- webscout/Provider/multichat.py +364 -0
- webscout/Provider/oivscode.py +309 -0
- webscout/Provider/samurai.py +224 -0
- webscout/Provider/scira_chat.py +299 -0
- webscout/Provider/scnet.py +243 -0
- webscout/Provider/searchchat.py +292 -0
- webscout/Provider/sonus.py +258 -0
- webscout/Provider/talkai.py +194 -0
- webscout/Provider/toolbaz.py +353 -0
- webscout/Provider/turboseek.py +266 -0
- webscout/Provider/typefully.py +202 -0
- webscout/Provider/typegpt.py +289 -0
- webscout/Provider/uncovr.py +368 -0
- webscout/Provider/x0gpt.py +299 -0
- webscout/Provider/yep.py +389 -0
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/client.py +70 -0
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/litagent/__init__.py +29 -0
- webscout/litagent/agent.py +455 -0
- webscout/litagent/constants.py +60 -0
- webscout/litprinter/__init__.py +59 -0
- webscout/optimizers.py +419 -419
- webscout/scout/README.md +404 -0
- webscout/scout/__init__.py +8 -0
- webscout/scout/core/__init__.py +7 -0
- webscout/scout/core/crawler.py +210 -0
- webscout/scout/core/scout.py +607 -0
- webscout/scout/core/search_result.py +96 -0
- webscout/scout/core/text_analyzer.py +63 -0
- webscout/scout/core/text_utils.py +277 -0
- webscout/scout/core/web_analyzer.py +52 -0
- webscout/scout/element.py +478 -0
- webscout/scout/parsers/__init__.py +69 -0
- webscout/scout/parsers/html5lib_parser.py +172 -0
- webscout/scout/parsers/html_parser.py +236 -0
- webscout/scout/parsers/lxml_parser.py +178 -0
- webscout/scout/utils.py +37 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/swiftcli/__init__.py +95 -0
- webscout/swiftcli/core/__init__.py +7 -0
- webscout/swiftcli/core/cli.py +297 -0
- webscout/swiftcli/core/context.py +104 -0
- webscout/swiftcli/core/group.py +241 -0
- webscout/swiftcli/decorators/__init__.py +28 -0
- webscout/swiftcli/decorators/command.py +221 -0
- webscout/swiftcli/decorators/options.py +220 -0
- webscout/swiftcli/decorators/output.py +252 -0
- webscout/swiftcli/exceptions.py +21 -0
- webscout/swiftcli/plugins/__init__.py +9 -0
- webscout/swiftcli/plugins/base.py +135 -0
- webscout/swiftcli/plugins/manager.py +269 -0
- webscout/swiftcli/utils/__init__.py +59 -0
- webscout/swiftcli/utils/formatting.py +252 -0
- webscout/swiftcli/utils/parsing.py +267 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +135 -0
- webscout/zeroart/base.py +66 -0
- webscout/zeroart/effects.py +101 -0
- webscout/zeroart/fonts.py +1239 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/METADATA +262 -83
- webscout-8.2.9.dist-info/RECORD +289 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/WHEEL +1 -1
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/entry_points.txt +1 -0
- webscout-8.2.7.dist-info/RECORD +0 -26
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/licenses/LICENSE.md +0 -0
- {webscout-8.2.7.dist-info → webscout-8.2.9.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,369 @@
|
|
|
1
|
+
from curl_cffi import CurlError
|
|
2
|
+
from curl_cffi.requests import Session
|
|
3
|
+
import json
|
|
4
|
+
import time
|
|
5
|
+
from typing import Any, Dict, List, Optional, Union, Generator
|
|
6
|
+
|
|
7
|
+
from webscout.AIutel import Conversation
|
|
8
|
+
from webscout.AIutel import Optimizers
|
|
9
|
+
from webscout.AIutel import AwesomePrompts, sanitize_stream # Import sanitize_stream
|
|
10
|
+
from webscout.AIbase import Provider
|
|
11
|
+
from webscout import exceptions
|
|
12
|
+
from webscout.litagent import LitAgent
|
|
13
|
+
|
|
14
|
+
class GithubChat(Provider):
    """
    Provider for the GitHub Copilot Chat API.

    Authenticates with browser cookies exported to a JSON file and talks to
    the Copilot "threads" endpoints. Supports streaming and non-streaming
    responses.
    """

    # Models accepted by the Copilot backend.
    AVAILABLE_MODELS = [
        "gpt-4o",
        "o3-mini",
        "o1",
        "claude-3.5-sonnet",
        "claude-3.7-sonnet",
        "claude-3.7-sonnet-thought",
        "claude-sonnet-4",
        "gemini-2.0-flash-001",
        "gemini-2.5-pro",
        "gpt-4.1",
        "o4-mini"
    ]

    def __init__(
        self,
        is_conversation: bool = True,
        max_tokens: int = 2000,
        timeout: int = 60,
        intro: str = None,
        filepath: str = None,
        update_file: bool = True,
        proxies: dict = None,
        history_offset: int = 10250,
        act: str = None,
        model: str = "gpt-4o",
        cookie_path: str = "cookies.json"
    ):
        """Initialize the GithubChat client.

        Args:
            is_conversation: Keep multi-turn history between calls.
            max_tokens: Token budget recorded on the Conversation object.
            timeout: Per-request timeout in seconds.
            intro: Optional intro text for the conversation.
            filepath: Optional path used to persist chat history.
            update_file: Whether to write history back to ``filepath``.
            proxies: Optional proxy mapping for the HTTP session.
            history_offset: Maximum history length kept in the prompt.
            act: Optional AwesomePrompts persona key.
            model: One of ``AVAILABLE_MODELS``.
            cookie_path: Path to the exported-cookies JSON file.

        Raises:
            ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
        """
        if model not in self.AVAILABLE_MODELS:
            raise ValueError(f"Invalid model: {model}. Choose from: {', '.join(self.AVAILABLE_MODELS)}")

        self.url = "https://github.com/copilot"
        self.api_url = "https://api.individual.githubcopilot.com"
        self.cookie_path = cookie_path
        self.session = Session()  # curl_cffi session (supports impersonation)
        # FIX: ``proxies`` previously used a shared mutable default ({}).
        self.session.proxies.update(proxies or {})

        # Load cookies for authentication.
        self.cookies = self.load_cookies()

        # Headers sent with every request.
        self.headers = {
            "Content-Type": "application/json",
            "User-Agent": LitAgent().random(),
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "en-US,en;q=0.5",
            "Origin": "https://github.com",
            "Referer": "https://github.com/copilot",
            "GitHub-Verified-Fetch": "true",
            "X-Requested-With": "XMLHttpRequest",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
        }

        # Apply cookies to the session.
        if self.cookies:
            self.session.cookies.update(self.cookies)

        # Default model for this client instance.
        self.model = model

        # Provider settings.
        self.is_conversation = is_conversation
        self.max_tokens_to_sample = max_tokens
        self.timeout = timeout
        self.last_response = {}

        # FIX: materialize as a tuple. The original generator expression was
        # exhausted after the first ``optimizer in ...`` membership test, so a
        # second call to ``ask`` with an optimizer would always fail.
        self.__available_optimizers = tuple(
            method
            for method in dir(Optimizers)
            if callable(getattr(Optimizers, method)) and not method.startswith("__")
        )

        # Set up conversation intro (persona prompt wins over plain intro).
        Conversation.intro = (
            AwesomePrompts().get_act(
                act, raise_not_found=True, default=None, case_insensitive=True
            )
            if act
            else intro or Conversation.intro
        )

        self.conversation = Conversation(
            is_conversation, self.max_tokens_to_sample, filepath, update_file
        )
        self.conversation.history_offset = history_offset

        # Lazily-populated conversation state.
        self._conversation_id = None
        self._access_token = None

    def load_cookies(self):
        """Load cookies from the JSON file at ``self.cookie_path``.

        Returns:
            dict: ``{name: value}`` for every non-expired cookie; an empty
            dict if the file is missing or unparsable (best effort by design).
        """
        try:
            with open(self.cookie_path, 'r') as f:
                cookies_data = json.load(f)

            # Convert the cookie list to a dict usable by the session.
            cookies = {}
            for cookie in cookies_data:
                # FIX: the original tested the literal string 'value' (always
                # truthy) instead of key membership in the cookie dict.
                if 'name' in cookie and 'value' in cookie:
                    # Skip cookies that have already expired.
                    if 'expirationDate' not in cookie or cookie['expirationDate'] > time.time():
                        cookies[cookie['name']] = cookie['value']

            return cookies
        except Exception:
            return {}

    def get_access_token(self):
        """Fetch (and cache) a GitHub Copilot chat token.

        Returns:
            str: The bearer token used for the Copilot API.

        Raises:
            exceptions.AuthenticationError: On a 401 from GitHub.
            exceptions.FailedToGenerateResponseError: On any other failure.
        """
        if self._access_token:
            return self._access_token

        url = "https://github.com/github-copilot/chat/token"

        try:
            response = self.session.post(url, headers=self.headers)
        except CurlError as e:
            # FIX: the original ended in a bare ``except: pass`` that silently
            # returned None, which later produced an invalid header
            # "Authorization: GitHub-Bearer None".
            raise exceptions.FailedToGenerateResponseError(f"Token request failed: {e}")

        if response.status_code == 401:
            raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")

        if response.status_code != 200:
            raise exceptions.FailedToGenerateResponseError(f"Failed to get access token: {response.status_code}")

        data = response.json()
        self._access_token = data.get("token")

        if not self._access_token:
            raise exceptions.FailedToGenerateResponseError("Failed to extract access token from response")

        return self._access_token

    @staticmethod
    def _github_extractor(chunk: Union[str, Dict[str, Any]]) -> Optional[str]:
        """Extract text from a Copilot stream event; None for non-content events."""
        if isinstance(chunk, dict) and chunk.get("type") == "content":
            return chunk.get("body")
        return None

    def create_conversation(self):
        """Create (or reuse) a Copilot chat thread and return its ID.

        Raises:
            exceptions.FailedToGenerateResponseError: If the thread cannot be created.
        """
        if self._conversation_id:
            return self._conversation_id

        access_token = self.get_access_token()
        url = f"{self.api_url}/github/chat/threads"

        headers = self.headers.copy()
        headers["Authorization"] = f"GitHub-Bearer {access_token}"

        try:
            response = self.session.post(
                url, headers=headers,
                impersonate="chrome120"  # mimic a real browser TLS fingerprint
            )

            if response.status_code == 401:
                # Token may have expired -- refresh once and retry.
                self._access_token = None
                access_token = self.get_access_token()
                headers["Authorization"] = f"GitHub-Bearer {access_token}"
                # FIX: keep ``impersonate`` on the retry for consistency with
                # the first attempt.
                response = self.session.post(url, headers=headers, impersonate="chrome120")

            # Raise HTTPError for bad responses (4xx or 5xx).
            response.raise_for_status()

            # Guard against unexpected 2xx codes without a body we can use.
            if response.status_code not in [200, 201]:
                raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation: {response.status_code}")

            data = response.json()
            self._conversation_id = data.get("thread_id")

            if not self._conversation_id:
                raise exceptions.FailedToGenerateResponseError("Failed to extract conversation ID from response")

            return self._conversation_id
        except Exception as e:
            raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation: {str(e)}")

    def ask(
        self,
        prompt: str,
        stream: bool = False,
        raw: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[Dict[str, Any], Generator]:
        """Send a message to the GitHub Copilot Chat API.

        Args:
            prompt: User message to send.
            stream: Yield chunks as they arrive instead of a single dict.
            raw: When streaming, yield plain text instead of ``{"text": ...}``.
            optimizer: Optional name of an ``Optimizers`` method to apply.
            conversationally: Apply the optimizer to the full history prompt.

        Returns:
            A response dict, or a generator of chunks when ``stream`` is True.
        """
        # Build the full prompt (history + current message) and optionally
        # run it through an optimizer.
        conversation_prompt = self.conversation.gen_complete_prompt(prompt)
        if optimizer:
            if optimizer in self.__available_optimizers:
                conversation_prompt = getattr(Optimizers, optimizer)(
                    conversation_prompt if conversationally else prompt
                )
            else:
                raise Exception(f"Optimizer is not one of {self.__available_optimizers}")

        # Make sure we have a conversation thread to post into.
        try:
            conversation_id = self.create_conversation()
        except exceptions.FailedToGenerateResponseError as e:
            raise exceptions.FailedToGenerateResponseError(f"Failed to create conversation: {e}")

        access_token = self.get_access_token()

        url = f"{self.api_url}/github/chat/threads/{conversation_id}/messages"

        # Per-request headers with the bearer token attached.
        headers = self.headers.copy()
        headers["Authorization"] = f"GitHub-Bearer {access_token}"

        # Payload for the Copilot message endpoint.
        request_data = {
            "content": conversation_prompt,
            "intent": "conversation",
            "references": [],
            "context": [],
            "currentURL": f"https://github.com/copilot/c/{conversation_id}",
            "streaming": True,
            "confirmations": [],
            "customInstructions": [],
            "model": self.model,
            "mode": "immersive"
        }

        streaming_text = ""  # Accumulated text, used for the history update.

        def for_stream():
            nonlocal streaming_text  # Allow modification of outer scope variable.
            try:
                response = self.session.post(
                    url,
                    json=request_data,
                    headers=headers,  # Includes Authorization
                    stream=True,
                    timeout=self.timeout
                )

                if response.status_code == 401:
                    # Token may have expired -- refresh once and retry.
                    self._access_token = None
                    new_token = self.get_access_token()
                    headers["Authorization"] = f"GitHub-Bearer {new_token}"
                    response = self.session.post(
                        url,
                        json=request_data,  # Same payload as before
                        headers=headers,
                        stream=True,
                        timeout=self.timeout
                    )

                # Raise HTTPError for bad responses (4xx or 5xx).
                response.raise_for_status()

                # Decode the SSE-style stream: strip "data:" prefixes, parse
                # JSON, and pull out only "content" events.
                processed_stream = sanitize_stream(
                    data=response.iter_content(chunk_size=None),  # Byte iterator
                    intro_value="data:",
                    to_json=True,  # Stream sends JSON
                    skip_markers=["[DONE]"],
                    content_extractor=self._github_extractor,
                    yield_raw_on_error=False  # Skip non-JSON / non-content lines
                )

                for content_chunk in processed_stream:
                    # content_chunk is the string extracted by _github_extractor.
                    if content_chunk and isinstance(content_chunk, str):
                        streaming_text += content_chunk
                        resp = {"text": content_chunk}
                        yield resp if not raw else content_chunk

            except Exception as e:
                # Surface authentication failures distinctly when the error
                # carries a response with a 401 status.
                if isinstance(e, CurlError):
                    if hasattr(e, 'response') and e.response is not None:
                        if e.response.status_code == 401:
                            raise exceptions.AuthenticationError("Authentication failed. Please check your cookies.")
                raise exceptions.FailedToGenerateResponseError(f"Request failed: {str(e)}")
            finally:
                # Update history after the stream finishes or fails, provided
                # some text was produced.
                if streaming_text:
                    self.last_response = {"text": streaming_text}
                    self.conversation.update_chat_history(prompt, streaming_text)

        def for_non_stream():
            # Drain the stream; last_response/history are updated in
            # for_stream's ``finally`` block.
            response_text = ""
            for response in for_stream():
                if "text" in response:
                    response_text += response["text"]
            return self.last_response

        return for_stream() if stream else for_non_stream()

    def chat(
        self,
        prompt: str,
        stream: bool = False,
        optimizer: str = None,
        conversationally: bool = False,
    ) -> Union[str, Generator]:
        """Generate a text response to *prompt*.

        Returns a string, or a generator of strings when ``stream`` is True.
        """
        def for_stream():
            for response in self.ask(
                prompt, True, optimizer=optimizer, conversationally=conversationally
            ):
                yield self.get_message(response)

        def for_non_stream():
            return self.get_message(
                self.ask(
                    prompt, False, optimizer=optimizer, conversationally=conversationally
                )
            )

        return for_stream() if stream else for_non_stream()

    def get_message(self, response: dict) -> str:
        """Extract the message text from a response dict."""
        assert isinstance(response, dict), "Response should be of dict data-type only"
        return response.get("text", "")
|
|
357
|
+
|
|
358
|
+
if __name__ == "__main__":
    # Simple smoke test: stream a single chat completion to stdout.
    from rich import print

    try:
        # FIX: the first positional parameter of GithubChat is
        # ``is_conversation`` -- the cookie file path must be passed by
        # keyword, otherwise "cookies.json" is silently treated as a truthy
        # conversation flag and the default cookie path is used instead.
        ai = GithubChat(cookie_path="cookies.json")
        response = ai.chat("Python code to count r in strawberry", stream=True)
        for chunk in response:
            print(chunk, end="", flush=True)
        print()
    except Exception as e:
        print(f"An error occurred: {e}")
|
|
@@ -0,0 +1,295 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import base64
|
|
3
|
+
import random
|
|
4
|
+
import json
|
|
5
|
+
from typing import Union, Dict, Any, Optional, Generator
|
|
6
|
+
from urllib import response
|
|
7
|
+
|
|
8
|
+
from curl_cffi import CurlError
|
|
9
|
+
from curl_cffi.requests import Session
|
|
10
|
+
from curl_cffi.const import CurlHttpVersion
|
|
11
|
+
|
|
12
|
+
from webscout.AIutel import Optimizers
|
|
13
|
+
from webscout.AIutel import Conversation
|
|
14
|
+
from webscout.AIutel import AwesomePrompts
|
|
15
|
+
from webscout.AIbase import Provider
|
|
16
|
+
from webscout import exceptions
|
|
17
|
+
from webscout.litagent import LitAgent
|
|
18
|
+
|
|
19
|
+
class GizAI(Provider):
|
|
20
|
+
"""
|
|
21
|
+
A class to interact with the GizAI API.
|
|
22
|
+
|
|
23
|
+
Attributes:
|
|
24
|
+
system_prompt (str): The system prompt to define the assistant's role.
|
|
25
|
+
|
|
26
|
+
Examples:
|
|
27
|
+
>>> from webscout.Provider.GizAI import GizAI
|
|
28
|
+
>>> ai = GizAI()
|
|
29
|
+
>>> response = ai.chat("What's the weather today?")
|
|
30
|
+
>>> print(response)
|
|
31
|
+
"""
|
|
32
|
+
|
|
33
|
+
AVAILABLE_MODELS = [
|
|
34
|
+
"azure-gpt-4-1",
|
|
35
|
+
"chat-gpt4",
|
|
36
|
+
"chat-grok-2",
|
|
37
|
+
"chat-o4-mini",
|
|
38
|
+
"chat-o4-mini-high",
|
|
39
|
+
"chat-o4-mini-medium",
|
|
40
|
+
"claude-haiku",
|
|
41
|
+
"claude-sonnet",
|
|
42
|
+
"deepinfra-llama-4-maverick",
|
|
43
|
+
"deepseek",
|
|
44
|
+
"deepseek-r1-distill-llama-70b",
|
|
45
|
+
"gemini-2.0-flash-lite",
|
|
46
|
+
"gemini-2.5-flash",
|
|
47
|
+
"gemini-2.5-pro",
|
|
48
|
+
"gpt-4-1-mini",
|
|
49
|
+
"gpt-4-1-nano",
|
|
50
|
+
"gpt-4o-image",
|
|
51
|
+
"hyperbolic-deepseek-r1",
|
|
52
|
+
"llama-3-70b",
|
|
53
|
+
"llama-4-scout",
|
|
54
|
+
"o3",
|
|
55
|
+
"phi-4",
|
|
56
|
+
"qwq-32b"
|
|
57
|
+
]
|
|
58
|
+
|
|
59
|
+
def __init__(
    self,
    is_conversation: bool = True,
    max_tokens: int = 2049,
    timeout: int = 30,
    intro: str = None,
    filepath: str = None,
    update_file: bool = True,
    proxies: dict = None,
    history_offset: int = 10250,
    act: str = None,
    model: str = "gemini-2.0-flash-lite",
    system_prompt: str = "You are a helpful assistant."
):
    """Initializes the GizAI API client.

    Args:
        is_conversation: Keep multi-turn history between calls.
        max_tokens: Token budget recorded on the Conversation object.
        timeout: Per-request timeout in seconds.
        intro: Optional intro text for the conversation.
        filepath: Optional path used to persist chat history.
        update_file: Whether to write history back to ``filepath``.
        proxies: Optional proxy mapping for the HTTP session.
        history_offset: Maximum history length kept in the prompt.
        act: Optional AwesomePrompts persona key.
        model: One of ``AVAILABLE_MODELS``.
        system_prompt: System prompt defining the assistant's role.

    Raises:
        ValueError: If ``model`` is not in ``AVAILABLE_MODELS``.
    """
    if model not in self.AVAILABLE_MODELS:
        raise ValueError(f"Invalid model: {model}. Choose from: {self.AVAILABLE_MODELS}")

    self.api_url = "https://app.giz.ai/api/data/users/inferenceServer.infer"

    # LitAgent supplies a randomized browser user-agent string.
    self.agent = LitAgent()

    # curl_cffi session for all HTTP traffic.
    self.session = Session()

    # Headers sent with every request.
    self.headers = {
        "accept": "application/json, text/plain, */*",
        "content-type": "application/json",
        "user-agent": self.agent.random(),
        "origin": "https://app.giz.ai",
        "referer": "https://app.giz.ai/",
        "sec-fetch-dest": "empty",
        "sec-fetch-mode": "cors",
        "sec-fetch-site": "same-origin"
    }

    # Apply headers and proxies to the session.
    self.session.headers.update(self.headers)
    # FIX: ``proxies`` previously used a shared mutable default ({}).
    self.session.proxies = proxies if proxies is not None else {}

    # Store configuration.
    self.system_prompt = system_prompt
    self.is_conversation = is_conversation
    self.max_tokens_to_sample = max_tokens
    self.timeout = timeout
    self.last_response = {}
    self.model = model

    # FIX: materialize as a tuple. The original generator expression was
    # exhausted after the first ``optimizer in ...`` membership test in
    # ``ask``, so a second optimized call would always fail.
    self.__available_optimizers = tuple(
        method
        for method in dir(Optimizers)
        if callable(getattr(Optimizers, method)) and not method.startswith("__")
    )

    # Set up conversation intro (persona prompt wins over plain intro).
    Conversation.intro = (
        AwesomePrompts().get_act(
            act, raise_not_found=True, default=None, case_insensitive=True
        )
        if act
        else intro or Conversation.intro
    )

    self.conversation = Conversation(
        is_conversation, self.max_tokens_to_sample, filepath, update_file
    )
    self.conversation.history_offset = history_offset
|
|
127
|
+
|
|
128
|
+
def _generate_id(self, length: int = 21) -> str:
|
|
129
|
+
"""Generates a random URL-safe base64 string."""
|
|
130
|
+
random_bytes = os.urandom(length * 2) # Generate more bytes initially
|
|
131
|
+
b64_encoded = base64.urlsafe_b64encode(random_bytes).decode('utf-8')
|
|
132
|
+
return b64_encoded[:length]
|
|
133
|
+
|
|
134
|
+
def _get_random_ip(self) -> str:
|
|
135
|
+
"""Generates a random IPv4 address string."""
|
|
136
|
+
return f"{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}.{random.randint(0, 255)}"
|
|
137
|
+
|
|
138
|
+
def ask(
    self,
    prompt: str,
    stream: bool = False,  # Parameter kept for compatibility but not used
    raw: bool = False,
    optimizer: str = None,
    conversationally: bool = False,
) -> Dict[str, Any]:
    """
    Sends a prompt to the GizAI API and returns the response.

    Args:
        prompt (str): The prompt to send to the API.
        stream (bool): Not supported by GizAI, kept for compatibility.
        raw (bool): When True, return the extracted text instead of the
            ``{"text": ...}`` dict.
        optimizer (str): Optimizer to use for the prompt.
        conversationally (bool): Whether to generate the prompt conversationally.

    Returns:
        Dict[str, Any]: The API response as ``{"text": content}``
        (or the bare string when ``raw`` is True).

    Raises:
        exceptions.FailedToGenerateResponseError: On an unknown optimizer
            or any request/transport failure.

    Examples:
        >>> ai = GizAI()
        >>> response = ai.ask("Tell me a joke!")
    """
    conversation_prompt = self.conversation.gen_complete_prompt(prompt)
    if optimizer:
        if optimizer in self.__available_optimizers:
            conversation_prompt = getattr(Optimizers, optimizer)(
                conversation_prompt if conversationally else prompt
            )
        else:
            # Use the project exception type for consistency with the other
            # failure paths in this method (was a bare `Exception`).
            raise exceptions.FailedToGenerateResponseError(
                f"Optimizer is not one of {self.__available_optimizers}"
            )

    # Generate random IDs for request
    instance_id = self._generate_id()
    subscribe_id = self._generate_id()
    x_forwarded_for = self._get_random_ip()

    # Set up request body - GizAI doesn't support streaming
    request_body = {
        "model": "chat",
        "baseModel": self.model,  # Use the specific model ID here
        "input": {
            "messages": [{
                "type": "human",
                "content": conversation_prompt
            }],
            "mode": "plan"
        },
        "noStream": True,
        "instanceId": instance_id,
        "subscribeId": subscribe_id
    }

    # Combine default headers with the dynamic x-forwarded-for header
    request_headers = {**self.headers, "x-forwarded-for": x_forwarded_for}

    try:
        # Use curl_cffi session post with impersonate
        response = self.session.post(
            self.api_url,
            headers=request_headers,
            json=request_body,
            timeout=self.timeout,
            impersonate="chrome120",  # Use a common impersonation profile
            http_version=CurlHttpVersion.V2_0  # Use HTTP/2
        )
        response.raise_for_status()  # Check for HTTP errors

        # Process the response
        try:
            response_json = response.json()
            # GizAI responses have "status" and "output" fields
            if response_json.get("status") == "completed" and "output" in response_json:
                content = response_json["output"]
            else:
                content = ""
                # Try to extract content from any available field that might contain the response
                for key, value in response_json.items():
                    if isinstance(value, str) and len(value) > 10:
                        content = value
                        break
        except json.JSONDecodeError:
            # Handle case where response is not valid JSON
            content = response.text

        # Update conversation history
        self.last_response = {"text": content}
        self.conversation.update_chat_history(prompt, content)

        return self.last_response if not raw else content

    except CurlError as e:
        raise exceptions.FailedToGenerateResponseError(f"Request failed (CurlError): {str(e)}")
    except Exception as e:
        # Nested getattr avoids touching `e.response.text` when there is no
        # response, and `or ''` keeps a literal None out of the message
        # (the old `and`-chain could leave error_text as None).
        error_text = getattr(getattr(e, 'response', None), 'text', '') or ''
        raise exceptions.FailedToGenerateResponseError(f"Request failed ({type(e).__name__}): {str(e)} - {error_text}")
|
|
236
|
+
|
|
237
|
+
def chat(
    self,
    prompt: str,
    stream: bool = False,  # Parameter kept for compatibility but not used
    optimizer: str = None,
    conversationally: bool = False,
) -> 'Union[str, Generator[str, None, None]]':
    """
    Generates a response from the GizAI API.

    Args:
        prompt (str): The prompt to send to the API.
        stream (bool): Not supported by GizAI; when True the single complete
            response is yielded from a one-shot generator for compatibility.
        optimizer (str): Optimizer to use for the prompt.
        conversationally (bool): Whether to generate the prompt conversationally.

    Returns:
        Union[str, Generator[str, None, None]]: The response text, or a
        generator yielding it once when ``stream`` is True.

    Examples:
        >>> ai = GizAI()
        >>> response = ai.chat("What's the weather today?")
    """
    # GizAI doesn't support streaming, so the request itself is never streamed
    response_data = self.ask(
        prompt, stream=False, raw=False,
        optimizer=optimizer, conversationally=conversationally
    )
    result = self.get_message(response_data)
    if stream:
        # One-shot generator so `for chunk in chat(..., stream=True)` works.
        def _single_chunk():
            yield result
        return _single_chunk()
    # BUGFIX: the previous version had `yield` directly in this body, which
    # made the whole function a generator — with stream=False the
    # `return result` became an invisible StopIteration value and callers
    # got an empty generator instead of the documented string.
    return result
|
|
270
|
+
|
|
271
|
+
def get_message(self, response: Union[dict, str]) -> str:
    """
    Extracts the message from the API response.

    Args:
        response (Union[dict, str]): The API response.

    Returns:
        str: The message content ("" when the dict has no "text" key).

    Examples:
        >>> ai = GizAI()
        >>> response = ai.ask("Tell me a joke!")
        >>> message = ai.get_message(response)
    """
    if not isinstance(response, str):
        assert isinstance(response, dict), "Response should be either dict or str"
        return response.get("text", "")
    return response
|
|
290
|
+
|
|
291
|
+
if __name__ == "__main__":
    # Quick manual smoke test: stream the reply to stdout.
    client = GizAI()
    for piece in client.chat("Hello, how are you?", stream=True):
        print(piece, end="", flush=True)
|