webscout-8.2.6-py3-none-any.whl → webscout-8.2.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of webscout might be problematic.
- webscout/AIauto.py +1 -1
- webscout/AIutel.py +298 -239
- webscout/Extra/Act.md +309 -0
- webscout/Extra/GitToolkit/gitapi/README.md +110 -0
- webscout/Extra/YTToolkit/README.md +375 -0
- webscout/Extra/YTToolkit/ytapi/README.md +44 -0
- webscout/Extra/YTToolkit/ytapi/extras.py +92 -19
- webscout/Extra/autocoder/autocoder.py +309 -114
- webscout/Extra/autocoder/autocoder_utiles.py +15 -15
- webscout/Extra/gguf.md +430 -0
- webscout/Extra/tempmail/README.md +488 -0
- webscout/Extra/weather.md +281 -0
- webscout/Litlogger/Readme.md +175 -0
- webscout/Provider/AISEARCH/DeepFind.py +41 -37
- webscout/Provider/AISEARCH/README.md +279 -0
- webscout/Provider/AISEARCH/__init__.py +0 -1
- webscout/Provider/AISEARCH/genspark_search.py +228 -86
- webscout/Provider/AISEARCH/hika_search.py +11 -11
- webscout/Provider/AISEARCH/scira_search.py +324 -322
- webscout/Provider/AllenAI.py +7 -14
- webscout/Provider/Blackboxai.py +518 -74
- webscout/Provider/Cloudflare.py +0 -1
- webscout/Provider/Deepinfra.py +23 -21
- webscout/Provider/Flowith.py +217 -0
- webscout/Provider/FreeGemini.py +250 -0
- webscout/Provider/GizAI.py +15 -5
- webscout/Provider/Glider.py +11 -8
- webscout/Provider/HeckAI.py +80 -52
- webscout/Provider/Koboldai.py +7 -4
- webscout/Provider/LambdaChat.py +2 -2
- webscout/Provider/Marcus.py +10 -18
- webscout/Provider/OPENAI/BLACKBOXAI.py +735 -0
- webscout/Provider/OPENAI/Cloudflare.py +378 -0
- webscout/Provider/OPENAI/FreeGemini.py +282 -0
- webscout/Provider/OPENAI/NEMOTRON.py +244 -0
- webscout/Provider/OPENAI/README.md +1253 -0
- webscout/Provider/OPENAI/__init__.py +8 -0
- webscout/Provider/OPENAI/ai4chat.py +293 -286
- webscout/Provider/OPENAI/api.py +810 -0
- webscout/Provider/OPENAI/base.py +217 -14
- webscout/Provider/OPENAI/c4ai.py +373 -367
- webscout/Provider/OPENAI/chatgpt.py +7 -0
- webscout/Provider/OPENAI/chatgptclone.py +7 -0
- webscout/Provider/OPENAI/chatsandbox.py +172 -0
- webscout/Provider/OPENAI/deepinfra.py +30 -20
- webscout/Provider/OPENAI/e2b.py +6 -0
- webscout/Provider/OPENAI/exaai.py +7 -0
- webscout/Provider/OPENAI/exachat.py +6 -0
- webscout/Provider/OPENAI/flowith.py +162 -0
- webscout/Provider/OPENAI/freeaichat.py +359 -352
- webscout/Provider/OPENAI/glider.py +323 -316
- webscout/Provider/OPENAI/groq.py +361 -354
- webscout/Provider/OPENAI/heckai.py +30 -64
- webscout/Provider/OPENAI/llmchatco.py +8 -0
- webscout/Provider/OPENAI/mcpcore.py +7 -0
- webscout/Provider/OPENAI/multichat.py +8 -0
- webscout/Provider/OPENAI/netwrck.py +356 -350
- webscout/Provider/OPENAI/opkfc.py +8 -0
- webscout/Provider/OPENAI/scirachat.py +471 -462
- webscout/Provider/OPENAI/sonus.py +9 -0
- webscout/Provider/OPENAI/standardinput.py +9 -1
- webscout/Provider/OPENAI/textpollinations.py +339 -329
- webscout/Provider/OPENAI/toolbaz.py +7 -0
- webscout/Provider/OPENAI/typefully.py +355 -0
- webscout/Provider/OPENAI/typegpt.py +358 -346
- webscout/Provider/OPENAI/uncovrAI.py +7 -0
- webscout/Provider/OPENAI/utils.py +103 -7
- webscout/Provider/OPENAI/venice.py +12 -0
- webscout/Provider/OPENAI/wisecat.py +19 -19
- webscout/Provider/OPENAI/writecream.py +7 -0
- webscout/Provider/OPENAI/x0gpt.py +7 -0
- webscout/Provider/OPENAI/yep.py +50 -21
- webscout/Provider/OpenGPT.py +1 -1
- webscout/Provider/TTI/AiForce/README.md +159 -0
- webscout/Provider/TTI/FreeAIPlayground/README.md +99 -0
- webscout/Provider/TTI/ImgSys/README.md +174 -0
- webscout/Provider/TTI/MagicStudio/README.md +101 -0
- webscout/Provider/TTI/Nexra/README.md +155 -0
- webscout/Provider/TTI/PollinationsAI/README.md +146 -0
- webscout/Provider/TTI/README.md +128 -0
- webscout/Provider/TTI/aiarta/README.md +134 -0
- webscout/Provider/TTI/artbit/README.md +100 -0
- webscout/Provider/TTI/fastflux/README.md +129 -0
- webscout/Provider/TTI/huggingface/README.md +114 -0
- webscout/Provider/TTI/piclumen/README.md +161 -0
- webscout/Provider/TTI/pixelmuse/README.md +79 -0
- webscout/Provider/TTI/talkai/README.md +139 -0
- webscout/Provider/TTS/README.md +192 -0
- webscout/Provider/TTS/__init__.py +2 -1
- webscout/Provider/TTS/speechma.py +500 -100
- webscout/Provider/TTS/sthir.py +94 -0
- webscout/Provider/TeachAnything.py +3 -7
- webscout/Provider/TextPollinationsAI.py +4 -2
- webscout/Provider/{aimathgpt.py → UNFINISHED/ChatHub.py} +88 -68
- webscout/Provider/UNFINISHED/liner_api_request.py +263 -0
- webscout/Provider/UNFINISHED/oivscode.py +351 -0
- webscout/Provider/UNFINISHED/test_lmarena.py +119 -0
- webscout/Provider/Writecream.py +11 -2
- webscout/Provider/__init__.py +8 -14
- webscout/Provider/ai4chat.py +4 -58
- webscout/Provider/asksteve.py +17 -9
- webscout/Provider/cerebras.py +3 -1
- webscout/Provider/koala.py +170 -268
- webscout/Provider/llmchat.py +3 -0
- webscout/Provider/lmarena.py +198 -0
- webscout/Provider/meta.py +7 -4
- webscout/Provider/samurai.py +223 -0
- webscout/Provider/scira_chat.py +4 -2
- webscout/Provider/typefully.py +23 -151
- webscout/__init__.py +4 -2
- webscout/cli.py +3 -28
- webscout/conversation.py +35 -35
- webscout/litagent/Readme.md +276 -0
- webscout/scout/README.md +402 -0
- webscout/swiftcli/Readme.md +323 -0
- webscout/version.py +1 -1
- webscout/webscout_search.py +2 -182
- webscout/webscout_search_async.py +1 -179
- webscout/zeroart/README.md +89 -0
- webscout/zeroart/__init__.py +134 -54
- webscout/zeroart/base.py +19 -13
- webscout/zeroart/effects.py +101 -99
- webscout/zeroart/fonts.py +1239 -816
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA +116 -74
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/RECORD +130 -103
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/WHEEL +1 -1
- webscout-8.2.8.dist-info/entry_points.txt +3 -0
- webscout-8.2.8.dist-info/top_level.txt +1 -0
- webscout/Provider/AISEARCH/ISou.py +0 -256
- webscout/Provider/ElectronHub.py +0 -773
- webscout/Provider/Free2GPT.py +0 -241
- webscout/Provider/GPTWeb.py +0 -249
- webscout/Provider/bagoodex.py +0 -145
- webscout/Provider/geminiprorealtime.py +0 -160
- webscout/scout/core.py +0 -881
- webscout-8.2.6.dist-info/entry_points.txt +0 -3
- webscout-8.2.6.dist-info/top_level.txt +0 -2
- webstoken/__init__.py +0 -30
- webstoken/classifier.py +0 -189
- webstoken/keywords.py +0 -216
- webstoken/language.py +0 -128
- webstoken/ner.py +0 -164
- webstoken/normalizer.py +0 -35
- webstoken/processor.py +0 -77
- webstoken/sentiment.py +0 -206
- webstoken/stemmer.py +0 -73
- webstoken/tagger.py +0 -60
- webstoken/tokenizer.py +0 -158
- /webscout/Provider/{Youchat.py → UNFINISHED/Youchat.py} +0 -0
- {webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/licenses/LICENSE.md +0 -0
{webscout-8.2.6.dist-info → webscout-8.2.8.dist-info}/METADATA

@@ -1,9 +1,8 @@
 Metadata-Version: 2.4
 Name: webscout
-Version: 8.2.6
+Version: 8.2.8
 Summary: Search for anything using Google, DuckDuckGo, phind.com, Contains AI models, can transcribe yt videos, temporary email and phone number generation, has TTS support, webai (terminal gpt and open interpreter) and offline LLMs and more
-Author: OEvortex
-Author-email: helpingai5@gmail.com
+Author-email: OEvortex <helpingai5@gmail.com>
 License: HelpingAI
 Project-URL: Source, https://github.com/OE-LUCIFER/Webscout
 Project-URL: Tracker, https://github.com/OE-LUCIFER/Webscout/issues
@@ -40,7 +39,6 @@ Requires-Dist: wheel
 Requires-Dist: pip
 Requires-Dist: nodriver
 Requires-Dist: mistune
-Requires-Dist: tenacity
 Requires-Dist: curl_cffi
 Requires-Dist: nest-asyncio
 Requires-Dist: websocket-client
@@ -50,7 +48,6 @@ Requires-Dist: markdownify
 Requires-Dist: requests
 Requires-Dist: google-generativeai
 Requires-Dist: lxml>=5.2.2
-Requires-Dist: termcolor
 Requires-Dist: orjson
 Requires-Dist: PyYAML
 Requires-Dist: ollama
@@ -60,27 +57,18 @@ Requires-Dist: cloudscraper
 Requires-Dist: html5lib
 Requires-Dist: aiofiles
 Requires-Dist: openai
-Requires-Dist: prompt-toolkit
-Requires-Dist: pyreqwest_impersonate
 Requires-Dist: gradio_client
 Requires-Dist: psutil
 Requires-Dist: aiohttp
 Provides-Extra: dev
 Requires-Dist: ruff>=0.1.6; extra == "dev"
 Requires-Dist: pytest>=7.4.2; extra == "dev"
-
-
-
-
-
-Dynamic: keywords
-Dynamic: license
+Provides-Extra: api
+Requires-Dist: fastapi; extra == "api"
+Requires-Dist: uvicorn[standard]; extra == "api"
+Requires-Dist: pydantic; extra == "api"
+Requires-Dist: python-multipart; extra == "api"
 Dynamic: license-file
-Dynamic: project-url
-Dynamic: provides-extra
-Dynamic: requires-dist
-Dynamic: requires-python
-Dynamic: summary

 <div align="center">
 <a href="https://github.com/OEvortex/Webscout">
@@ -101,6 +89,7 @@ Dynamic: summary
 <a href="https://pepy.tech/project/webscout"><img src="https://static.pepy.tech/badge/webscout/month?style=flat-square" alt="Monthly Downloads"></a>
 <a href="https://pepy.tech/project/webscout"><img src="https://static.pepy.tech/badge/webscout?style=flat-square" alt="Total Downloads"></a>
 <a href="#"><img src="https://img.shields.io/pypi/pyversions/webscout?style=flat-square&logo=python" alt="Python Version"></a>
+<a href="https://deepwiki.com/OEvortex/Webscout"><img src="https://deepwiki.com/badge.svg" alt="Ask DeepWiki"></a>
 </p>
 </div>

@@ -111,6 +100,7 @@ Dynamic: summary
 - [🌟 Key Features](#-features)
 - [⚙️ Installation](#️-installation)
 - [🖥️ Command Line Interface](#️-command-line-interface)
+- [🔄 OpenAI-Compatible API Server](#-openai-compatible-api-server)
 - [🔍 Search Engines](#-search-engines)
 - [🦆 DuckDuckGo Search](#-duckduckgo-search-with-webs-and-asyncwebs)
 - [💻 WEBS API Reference](#-webs-api-reference)
@@ -128,7 +118,7 @@ Dynamic: summary
 > - **OpenAI Compatibility:** Use providers with OpenAI-compatible interfaces
 > - **Local LLM Compatibility:** Run local models with [Inferno](https://github.com/HelpingAI/inferno), an OpenAI-compatible server (now a standalone package)
 >
-> Choose the approach that best fits your needs! For OpenAI compatibility, check the [OpenAI Providers README](webscout/Provider/OPENAI/README.md).
+> Choose the approach that best fits your needs! For OpenAI compatibility, check the [OpenAI Providers README](webscout/Provider/OPENAI/README.md) or see the [OpenAI-Compatible API Server](#-openai-compatible-api-server) section below.

 > [!NOTE]
 > Webscout supports over 90 AI providers including: LLAMA, C4ai, Venice, Copilot, HuggingFaceChat, PerplexityLabs, DeepSeek, WiseCat, GROQ, OPENAI, GEMINI, DeepInfra, Meta, YEPCHAT, TypeGPT, ChatGPTClone, ExaAI, Claude, Anthropic, Cloudflare, AI21, Cerebras, and many more. All providers follow similar usage patterns with consistent interfaces.
@@ -225,7 +215,6 @@ python -m webscout --help
 | Command | Description |
 |---------|-------------|
 | `python -m webscout answers -k "query"` | Perform an answers search |
-| `python -m webscout chat` | Start an interactive AI chat session |
 | `python -m webscout images -k "query"` | Search for images |
 | `python -m webscout maps -k "query"` | Perform a maps search |
 | `python -m webscout news -k "query"` | Search for news articles |
@@ -282,6 +271,111 @@ For more information, visit the [Inferno GitHub repository](https://github.com/H
 > - 32 GB of RAM for 33B models
 > - GPU acceleration is recommended for better performance

+<details open>
+<summary><b>🔄 OpenAI-Compatible API Server</b></summary>
+<p>
+
+Webscout includes an OpenAI-compatible API server that allows you to use any supported provider with tools and applications designed for OpenAI's API.
+
+### Starting the API Server
+
+#### From Command Line
+
+```bash
+# Start with default settings (port 8000)
+python -m webscout.Provider.OPENAI.api
+
+# Start with custom port
+python -m webscout.Provider.OPENAI.api --port 8080
+
+# Start with API key authentication
+python -m webscout.Provider.OPENAI.api --api-key "your-secret-key"
+
+# Specify a default provider
+python -m webscout.Provider.OPENAI.api --default-provider "Claude"
+
+# Run in debug mode
+python -m webscout.Provider.OPENAI.api --debug
+```
+
+#### From Python Code
+
+```python
+# Method 1: Using the helper function
+from webscout.Provider.OPENAI.api import start_server
+
+# Start with default settings
+start_server()
+
+# Start with custom settings
+start_server(port=8080, api_key="your-secret-key", default_provider="Claude")
+
+# Method 2: Using the run_api function for more control
+from webscout.Provider.OPENAI.api import run_api
+
+run_api(
+    host="0.0.0.0",
+    port=8080,
+    api_key="your-secret-key",
+    default_provider="Claude",
+    debug=True
+)
+```
+
+### Using the API
+
+Once the server is running, you can use it with any OpenAI client library or tool:
+
+```python
+# Using the OpenAI Python client
+from openai import OpenAI
+
+client = OpenAI(
+    api_key="your-secret-key",  # Only needed if you set an API key
+    base_url="http://localhost:8000/v1"  # Point to your local server
+)
+
+# Chat completion
+response = client.chat.completions.create(
+    model="gpt-4",  # This can be any model name registered with Webscout
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "Hello, how are you?"}
+    ]
+)
+
+print(response.choices[0].message.content)
+```
+
+### Using with cURL
+
+```bash
+# Basic chat completion request
+curl http://localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -H "Authorization: Bearer your-secret-key" \
+  -d '{
+    "model": "gpt-4",
+    "messages": [
+      {"role": "system", "content": "You are a helpful assistant."},
+      {"role": "user", "content": "Hello, how are you?"}
+    ]
+  }'

+# List available models
+curl http://localhost:8000/v1/models \
+  -H "Authorization: Bearer your-secret-key"
+```
+
+### Available Endpoints
+
+- `GET /v1/models` - List all available models
+- `GET /v1/models/{model_name}` - Get information about a specific model
+- `POST /v1/chat/completions` - Create a chat completion
+
+</p>
+</details>
+
 <hr/>

 ## 🔍 Search Engines
@@ -618,30 +712,12 @@ Webscout offers a comprehensive collection of AI chat providers, giving you acce
 | `DeepInfra` | Various open models | Multiple model options |
 | `Cohere` | Cohere's language models | Command models |
 | `PerplexityLabs` | Perplexity AI | Web search integration |
-| `Anthropic` | Claude models | Long context windows |
 | `YEPCHAT` | Yep.com's AI | Streaming responses |
 | `ChatGPTClone` | ChatGPT-like interface | Multiple model options |
-| `TypeGPT` | TypeChat models |
+| `TypeGPT` | TypeChat models | Multiple model options |

 </div>

-<details>
-<summary><b>Example: Using Duckchat</b></summary>
-<p>
-
-```python
-from webscout import WEBS
-
-# Initialize and use Duckchat
-with WEBS() as webs:
-    response = webs.chat(
-        "Explain quantum computing in simple terms",
-        model='gpt-4o-mini'  # Options: mixtral-8x7b, llama-3.1-70b, claude-3-haiku, etc.
-    )
-print(response)
-```
-</p>
-</details>

 <details>
 <summary><b>Example: Using Meta AI</b></summary>
@@ -753,41 +829,7 @@ print(response)
 </p>
 </details>

-<hr/>
-
-## 👨‍💻 Advanced AI Interfaces
-
-<details open>
-<summary><b>Direct Model Access with LLM and VLM</b></summary>
-<p>
-
-Webscout provides direct interfaces to language and vision-language models through the `LLM` and `VLM` classes.
-
-```python
-from webscout.LLM import LLM, VLM

-# Text-only model interaction
-llm = LLM("meta-llama/Meta-Llama-3-70B-Instruct")
-response = llm.chat([
-    {"role": "user", "content": "Explain the concept of neural networks"}
-])
-print(response)
-
-# Vision-language model interaction
-vlm = VLM("cogvlm-grounding-generalist")
-response = vlm.chat([
-    {
-        "role": "user",
-        "content": [
-            {"type": "image", "image_url": "path/to/image.jpg"},
-            {"type": "text", "text": "Describe what you see in this image"}
-        ]
-    }
-])
-print(response)
-```
-</p>
-</details>

 <details open>
 <summary><b>GGUF Model Conversion</b></summary>