webscout-8.2.4-py3-none-any.whl → webscout-8.2.6-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.



Files changed (110)
  1. webscout/AIauto.py +112 -22
  2. webscout/AIutel.py +240 -344
  3. webscout/Extra/autocoder/autocoder.py +66 -5
  4. webscout/Extra/gguf.py +2 -0
  5. webscout/Provider/AISEARCH/scira_search.py +3 -5
  6. webscout/Provider/Aitopia.py +75 -51
  7. webscout/Provider/AllenAI.py +64 -67
  8. webscout/Provider/ChatGPTClone.py +33 -34
  9. webscout/Provider/ChatSandbox.py +342 -0
  10. webscout/Provider/Cloudflare.py +79 -32
  11. webscout/Provider/Deepinfra.py +69 -56
  12. webscout/Provider/ElectronHub.py +48 -39
  13. webscout/Provider/ExaChat.py +36 -20
  14. webscout/Provider/GPTWeb.py +24 -18
  15. webscout/Provider/GithubChat.py +52 -49
  16. webscout/Provider/GizAI.py +285 -0
  17. webscout/Provider/Glider.py +39 -28
  18. webscout/Provider/Groq.py +48 -20
  19. webscout/Provider/HeckAI.py +18 -36
  20. webscout/Provider/Jadve.py +30 -37
  21. webscout/Provider/LambdaChat.py +36 -59
  22. webscout/Provider/MCPCore.py +18 -21
  23. webscout/Provider/Marcus.py +23 -14
  24. webscout/Provider/Nemotron.py +218 -0
  25. webscout/Provider/Netwrck.py +35 -26
  26. webscout/Provider/OPENAI/__init__.py +1 -1
  27. webscout/Provider/OPENAI/exachat.py +4 -0
  28. webscout/Provider/OPENAI/scirachat.py +3 -4
  29. webscout/Provider/OPENAI/textpollinations.py +20 -22
  30. webscout/Provider/OPENAI/toolbaz.py +1 -0
  31. webscout/Provider/PI.py +22 -13
  32. webscout/Provider/StandardInput.py +42 -30
  33. webscout/Provider/TeachAnything.py +24 -12
  34. webscout/Provider/TextPollinationsAI.py +78 -76
  35. webscout/Provider/TwoAI.py +120 -88
  36. webscout/Provider/TypliAI.py +305 -0
  37. webscout/Provider/Venice.py +24 -22
  38. webscout/Provider/VercelAI.py +31 -12
  39. webscout/Provider/WiseCat.py +1 -1
  40. webscout/Provider/WrDoChat.py +370 -0
  41. webscout/Provider/__init__.py +11 -13
  42. webscout/Provider/ai4chat.py +5 -3
  43. webscout/Provider/akashgpt.py +59 -66
  44. webscout/Provider/asksteve.py +53 -44
  45. webscout/Provider/cerebras.py +77 -31
  46. webscout/Provider/chatglm.py +47 -37
  47. webscout/Provider/elmo.py +38 -32
  48. webscout/Provider/freeaichat.py +57 -43
  49. webscout/Provider/granite.py +24 -21
  50. webscout/Provider/hermes.py +27 -20
  51. webscout/Provider/learnfastai.py +25 -20
  52. webscout/Provider/llmchatco.py +48 -78
  53. webscout/Provider/multichat.py +13 -3
  54. webscout/Provider/scira_chat.py +50 -30
  55. webscout/Provider/scnet.py +27 -21
  56. webscout/Provider/searchchat.py +16 -24
  57. webscout/Provider/sonus.py +37 -39
  58. webscout/Provider/toolbaz.py +24 -46
  59. webscout/Provider/turboseek.py +37 -41
  60. webscout/Provider/typefully.py +30 -22
  61. webscout/Provider/typegpt.py +47 -51
  62. webscout/Provider/uncovr.py +46 -40
  63. webscout/__init__.py +0 -1
  64. webscout/cli.py +256 -0
  65. webscout/conversation.py +305 -448
  66. webscout/exceptions.py +3 -0
  67. webscout/swiftcli/__init__.py +80 -794
  68. webscout/swiftcli/core/__init__.py +7 -0
  69. webscout/swiftcli/core/cli.py +297 -0
  70. webscout/swiftcli/core/context.py +104 -0
  71. webscout/swiftcli/core/group.py +241 -0
  72. webscout/swiftcli/decorators/__init__.py +28 -0
  73. webscout/swiftcli/decorators/command.py +221 -0
  74. webscout/swiftcli/decorators/options.py +220 -0
  75. webscout/swiftcli/decorators/output.py +252 -0
  76. webscout/swiftcli/exceptions.py +21 -0
  77. webscout/swiftcli/plugins/__init__.py +9 -0
  78. webscout/swiftcli/plugins/base.py +135 -0
  79. webscout/swiftcli/plugins/manager.py +262 -0
  80. webscout/swiftcli/utils/__init__.py +59 -0
  81. webscout/swiftcli/utils/formatting.py +252 -0
  82. webscout/swiftcli/utils/parsing.py +267 -0
  83. webscout/version.py +1 -1
  84. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/METADATA +166 -45
  85. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/RECORD +89 -89
  86. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/WHEEL +1 -1
  87. webscout-8.2.6.dist-info/entry_points.txt +3 -0
  88. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/top_level.txt +0 -1
  89. inferno/__init__.py +0 -6
  90. inferno/__main__.py +0 -9
  91. inferno/cli.py +0 -6
  92. inferno/lol.py +0 -589
  93. webscout/LLM.py +0 -442
  94. webscout/Local/__init__.py +0 -12
  95. webscout/Local/__main__.py +0 -9
  96. webscout/Local/api.py +0 -576
  97. webscout/Local/cli.py +0 -516
  98. webscout/Local/config.py +0 -75
  99. webscout/Local/llm.py +0 -287
  100. webscout/Local/model_manager.py +0 -253
  101. webscout/Local/server.py +0 -721
  102. webscout/Local/utils.py +0 -93
  103. webscout/Provider/Chatify.py +0 -175
  104. webscout/Provider/PizzaGPT.py +0 -228
  105. webscout/Provider/askmyai.py +0 -158
  106. webscout/Provider/gaurish.py +0 -244
  107. webscout/Provider/promptrefine.py +0 -193
  108. webscout/Provider/tutorai.py +0 -270
  109. webscout-8.2.4.dist-info/entry_points.txt +0 -5
  110. {webscout-8.2.4.dist-info → webscout-8.2.6.dist-info}/licenses/LICENSE.md +0 -0
webscout/Local/cli.py DELETED
@@ -1,516 +0,0 @@
-"""
-Command-line interface for webscout.Local
-"""
-
-import typer
-from rich.console import Console
-from rich.table import Table
-from rich.prompt import Prompt
-from typing import Optional
-
-from .model_manager import ModelManager
-from .llm import LLMInterface
-from .server import start_server
-
-app: typer.Typer = typer.Typer(help="webscout.Local - A llama-cpp-python based LLM serving tool")
-console: Console = Console()
-
-model_manager: ModelManager = ModelManager()
-
-# RAM requirements for different model sizes
-RAM_REQUIREMENTS = {
-    "1B": "2 GB",
-    "3B": "4 GB",
-    "7B": "8 GB",
-    "13B": "16 GB",
-    "33B": "32 GB",
-    "70B": "64 GB",
-}
-
-@app.command("serve")
-def run_model(
-    model_string: str = typer.Argument(..., help="Model to run (format: 'name', 'repo_id' or 'repo_id:filename')"),
-    host: Optional[str] = typer.Option(None, help="Host to bind the server to"),
-    port: Optional[int] = typer.Option(None, help="Port to bind the server to"),
-) -> None:
-    """
-    Start a model server (downloads if needed).
-    """
-    # First check if this is a filename that already exists
-    model_path = model_manager.get_model_path(model_string)
-    if model_path:
-        # This is a filename that exists, find the model name
-        for model_info in model_manager.list_models():
-            if model_info.get("filename") == model_string or model_info.get("path") == model_path:
-                model_name = model_info.get("name")
-                break
-        else:
-            # Fallback to using the string as model name
-            model_name = model_string
-    else:
-        # Parse the model string to see if it's a repo_id:filename format
-        repo_id, _ = model_manager.parse_model_string(model_string)
-        model_name = repo_id.split("/")[-1] if "/" in repo_id else repo_id
-
-    # Check if model exists, if not try to download it
-    if not model_manager.get_model_path(model_name):
-        console.print(f"[yellow]Model {model_name} not found locally. Attempting to download...[/yellow]")
-        try:
-            # We don't need to use the parsed values directly as download_model handles this
-            _ = model_manager.parse_model_string(model_string)  # Just to validate the format
-            # Download the model
-            model_name, _ = model_manager.download_model(model_string)
-            console.print(f"[bold green]Model {model_name} downloaded successfully[/bold green]")
-        except Exception as e:
-            console.print(f"[bold red]Error downloading model: {str(e)}[/bold red]")
-            return
-
-    # Check RAM requirements
-    ram_requirement = "Unknown"
-    for size, ram in RAM_REQUIREMENTS.items():
-        if size in model_name:
-            ram_requirement = ram
-            break
-
-    if ram_requirement != "Unknown":
-        console.print(f"[yellow]This model requires approximately {ram_requirement} of RAM[/yellow]")
-
-    # Try to load the model to verify it works
-    try:
-        llm = LLMInterface(model_name)
-        llm.load_model(verbose=False)
-        console.print(f"[bold green]Model {model_name} loaded successfully[/bold green]")
-    except Exception as e:
-        console.print(f"[bold red]Error loading model: {str(e)}[/bold red]")
-        return
-
-    # Start the server
-    console.print(f"[bold blue]Starting webscout.Local server with model {model_name}...[/bold blue]")
-    start_server(host=host, port=port)
-
-@app.command("pull")
-def pull_model(
-    model_string: str = typer.Argument(..., help="Model to download (format: 'repo_id' or 'repo_id:filename')"),
-) -> None:
-    """
-    Download a model from Hugging Face without running it.
-    """
-    try:
-        model_name, model_path = model_manager.download_model(model_string)
-        console.print(f"[bold green]Model {model_name} downloaded successfully to {model_path}[/bold green]")
-    except Exception as e:
-        console.print(f"[bold red]Error downloading model: {str(e)}[/bold red]")
-
-@app.command("list")
-def list_models() -> None:
-    """
-    List downloaded models.
-    """
-    models = model_manager.list_models()
-
-    if not models:
-        console.print("[yellow]No models found. Use 'webscout.Local pull' to download a model.[/yellow]")
-        return
-
-    table = Table(title="Downloaded Models")
-    table.add_column("Name", style="cyan")
-    table.add_column("Repository", style="green")
-    table.add_column("Filename", style="blue")
-    table.add_column("Size", style="magenta")
-    table.add_column("Downloaded", style="yellow")
-
-    for model in models:
-        # Get file size in human-readable format
-        file_path = model.get("path")
-        file_size = "Unknown"
-        if file_path:
-            try:
-                import os
-                size_bytes = os.path.getsize(file_path)
-                # Convert to human-readable format
-                for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
-                    if size_bytes < 1024.0 or unit == 'TB':
-                        file_size = f"{size_bytes:.2f} {unit}"
-                        break
-                    size_bytes /= 1024.0
-            except Exception:
-                pass
-
-        # Format downloaded date
-        downloaded_at = model.get("downloaded_at", "Unknown")
-        if downloaded_at != "Unknown":
-            try:
-                import datetime
-                dt = datetime.datetime.fromisoformat(downloaded_at)
-                downloaded_at = dt.strftime("%Y-%m-%d %H:%M")
-            except Exception:
-                pass
-
-        table.add_row(
-            model["name"],
-            model.get("repo_id", "Unknown"),
-            model.get("filename", "Unknown"),
-            file_size,
-            downloaded_at,
-        )
-
-    console.print(table)
-
-@app.command(name="remove", help="Remove a downloaded model")
-def remove_model(
-    model_string: str = typer.Argument(..., help="Name or filename of the model to remove"),
-    force: bool = typer.Option(False, "--force", "-f", help="Force removal without confirmation"),
-) -> None:
-    """
-    Remove a downloaded model.
-    """
-    # First check if this is a model name
-    model_info = model_manager.get_model_info(model_string)
-
-    # If not found by name, check if it's a filename
-    if not model_info:
-        for info in model_manager.list_models():
-            if info.get("filename") == model_string:
-                model_info = info
-                model_string = info["name"]
-                break
-
-    if not model_info:
-        console.print(f"[yellow]Model {model_string} not found.[/yellow]")
-        return
-
-    if not force:
-        confirm = Prompt.ask(
-            f"Are you sure you want to remove model {model_string}?",
-            choices=["y", "n"],
-            default="n",
-        )
-
-        if confirm.lower() != "y":
-            console.print("[yellow]Operation cancelled.[/yellow]")
-            return
-
-    if model_manager.remove_model(model_string):
-        console.print(f"[bold green]Model {model_string} removed successfully[/bold green]")
-    else:
-        console.print(f"[bold red]Error removing model {model_string}[/bold red]")
-
-@app.command("run")
-def chat(
-    model_string: str = typer.Argument(..., help="Name or filename of the model to chat with"),
-) -> None:
-    """
-    Interactive chat with a model.
-    """
-    # First check if this is a filename that already exists
-    model_path = model_manager.get_model_path(model_string)
-    if model_path:
-        # This is a filename that exists, find the model name
-        for model_info in model_manager.list_models():
-            if model_info.get("filename") == model_string or model_info.get("path") == model_path:
-                model_name = model_info.get("name")
-                break
-        else:
-            # Fallback to using the string as model name
-            model_name = model_string
-    else:
-        # Use the string as model name
-        model_name = model_string
-
-    # Check if model exists, if not try to download it
-    if not model_manager.get_model_path(model_name):
-        console.print(f"[yellow]Model {model_name} not found locally. Attempting to download...[/yellow]")
-        try:
-            # Parse the model string to see if it's a repo_id:filename format
-            # We don't need to use the parsed values directly as download_model handles this
-            _ = model_manager.parse_model_string(model_string)  # Just to validate the format
-            # Download the model
-            model_name, _ = model_manager.download_model(model_string)
-            console.print(f"[bold green]Model {model_name} downloaded successfully[/bold green]")
-        except Exception as e:
-            console.print(f"[bold red]Error downloading model: {str(e)}[/bold red]")
-            return
-
-    # Check RAM requirements
-    ram_requirement = "Unknown"
-    for size, ram in RAM_REQUIREMENTS.items():
-        if size in model_name:
-            ram_requirement = ram
-            break
-
-    if ram_requirement != "Unknown":
-        console.print(f"[yellow]This model requires approximately {ram_requirement} of RAM[/yellow]")
-
-    # Load the model
-    try:
-        llm = LLMInterface(model_name)
-        llm.load_model(verbose=False)
-    except Exception as e:
-        console.print(f"[bold red]Error loading model: {str(e)}[/bold red]")
-        return
-
-    console.print(f"[bold green]Chat with {model_name}. Type '/help' for available commands or '/bye' to exit.[/bold green]")
-
-    # Chat history
-    messages = []
-    system_prompt = None
-
-    # Initialize with empty system prompt
-    messages.append({"role": "system", "content": ""})
-
-    # Define help text
-    help_text = """
-Available commands:
-  /help or /? - Show this help message
-  /bye - Exit the chat
-  /set system <prompt> - Set the system prompt
-  /set context <size> - Set context window size (default: 4096)
-  /clear or /cls - Clear the terminal screen
-  /reset - Reset all settings
-"""
-
-    while True:
-        # Get user input
-        user_input = input("\n> ")
-
-        # Handle commands
-        if user_input.startswith("/"):
-            cmd_parts = user_input.split(maxsplit=2)
-            cmd = cmd_parts[0].lower()
-
-            if cmd == "/bye" or user_input.lower() in ["exit", "quit"]:
-                console.print("[yellow]Goodbye![/yellow]")
-                break
-
-            elif cmd == "/help" or cmd == "/?":
-                console.print(help_text)
-                continue
-
-            elif cmd == "/clear" or cmd == "/cls":
-                # Do not clear history, just clear the terminal screen
-                import os
-                os.system('cls' if os.name == 'nt' else 'clear')
-                console.print(f"[bold green]Chat with {model_name}. Type '/help' for available commands or '/bye' to exit.[/bold green]")
-                console.print("[yellow]Screen cleared. Chat history preserved.[/yellow]")
-                continue
-
-            elif cmd == "/reset":
-                messages = [{"role": "system", "content": ""}]
-                system_prompt = None
-                console.print("[yellow]All settings reset.[/yellow]")
-                continue
-
-            elif cmd == "/set" and len(cmd_parts) >= 2:
-                if len(cmd_parts) < 3:
-                    console.print("[red]Error: Missing value for setting[/red]")
-                    continue
-
-                setting = cmd_parts[1].lower()
-                value = cmd_parts[2]
-
-                if setting == "system":
-                    # Remove quotes if present
-                    if value.startswith('"') and value.endswith('"'):
-                        value = value[1:-1]
-
-                    system_prompt = value
-                    # Update system message
-                    if messages and messages[0].get("role") == "system":
-                        messages[0]["content"] = system_prompt
-                    else:
-                        # Clear messages and add system prompt
-                        messages = [{"role": "system", "content": system_prompt}]
-
-                    # Print confirmation that it's been applied
-                    console.print(f"[yellow]System prompt set to:[/yellow]")
-                    console.print(f"[cyan]\"{system_prompt}\"[/cyan]")
-                    console.print(f"[green]System prompt applied. Next responses will follow this instruction.[/green]")
-
-                    # Force a test message to ensure the system prompt is applied
-                    test_messages = messages.copy()
-                    test_messages.append({"role": "user", "content": "Say 'System prompt active'."})
-
-                    # Test if the system prompt is working
-                    console.print("[dim]Testing system prompt...[/dim]")
-                    response = llm.create_chat_completion(
-                        messages=test_messages,
-                        stream=False,
-                        max_tokens=20
-                    )
-                    console.print("[dim]System prompt test complete.[/dim]")
-                elif setting == "context":
-                    try:
-                        context_size = int(value)
-                        # Reload the model with new context size
-                        console.print(f"[yellow]Reloading model with context size: {context_size}...[/yellow]")
-                        llm.load_model(n_ctx=context_size, verbose=False)
-                        console.print(f"[green]Context size set to: {context_size}[/green]")
-                    except ValueError:
-                        console.print(f"[red]Invalid context size: {value}. Must be an integer.[/red]")
-                else:
-                    console.print(f"[red]Unknown setting: {setting}[/red]")
-                continue
-            else:
-                console.print(f"[red]Unknown command: {cmd}[/red]")
-                continue
-
-        # Add user message to history
-        messages.append({"role": "user", "content": user_input})
-
-        # Generate response
-        console.print("\n")  # Add extra spacing between user input and response
-
-        # Use a buffer to collect the response
-        response_buffer = ""
-
-        def print_token(token):
-            nonlocal response_buffer
-            response_buffer += token
-            console.print(token, end="", highlight=False)
-
-        llm.stream_chat_completion(
-            messages=messages,
-            callback=print_token,
-        )
-
-        # Get the full response to add to history
-        response = llm.create_chat_completion(
-            messages=messages,
-            stream=False,
-        )
-
-        assistant_message = response["choices"][0]["message"]["content"]
-        messages.append({"role": "assistant", "content": assistant_message})
-
-        # Add extra spacing after the response
-        console.print("")
-
-@app.command("copy")
-def copy_model(
-    source: str = typer.Argument(..., help="Name of the source model"),
-    destination: str = typer.Argument(..., help="Name for the destination model"),
-) -> None:
-    """
-    Copy a model to a new name.
-    """
-    try:
-        if model_manager.copy_model(source, destination):
-            console.print(f"[bold green]Model {source} copied to {destination} successfully[/bold green]")
-        else:
-            console.print(f"[bold red]Failed to copy model {source} to {destination}[/bold red]")
-    except Exception as e:
-        console.print(f"[bold red]Error copying model: {str(e)}[/bold red]")
-
-@app.command("show")
-def show_model(
-    model_name: str = typer.Argument(..., help="Name of the model to show information for"),
-    verbose: bool = typer.Option(False, "--verbose", "-v", help="Show detailed information"),
-) -> None:
-    """
-    Show detailed information about a model.
-    """
-    model_info = model_manager.get_model_info(model_name)
-
-    if not model_info:
-        console.print(f"[yellow]Model {model_name} not found.[/yellow]")
-        return
-
-    # Create a table for basic information
-    table = Table(title=f"Model Information: {model_name}")
-    table.add_column("Property", style="cyan")
-    table.add_column("Value", style="green")
-
-    # Add basic properties
-    table.add_row("Name", model_info["name"])
-    table.add_row("Repository", model_info.get("repo_id", "Unknown"))
-    table.add_row("Filename", model_info.get("filename", "Unknown"))
-
-    # Get file size in human-readable format
-    file_path = model_info.get("path")
-    if file_path:
-        try:
-            import os
-            size_bytes = os.path.getsize(file_path)
-            # Convert to human-readable format
-            for unit in ['B', 'KB', 'MB', 'GB', 'TB']:
-                if size_bytes < 1024.0 or unit == 'TB':
-                    file_size = f"{size_bytes:.2f} {unit}"
-                    break
-                size_bytes /= 1024.0
-            table.add_row("Size", file_size)
-        except Exception:
-            table.add_row("Size", "Unknown")
-
-    # Format downloaded date
-    downloaded_at = model_info.get("downloaded_at", "Unknown")
-    if downloaded_at != "Unknown":
-        try:
-            import datetime
-            dt = datetime.datetime.fromisoformat(downloaded_at)
-            downloaded_at = dt.strftime("%Y-%m-%d %H:%M")
-        except Exception:
-            pass
-    table.add_row("Downloaded", downloaded_at)
-
-    # Add copied information if available
-    if "copied_from" in model_info:
-        table.add_row("Copied From", model_info["copied_from"])
-        copied_at = model_info.get("copied_at", "Unknown")
-        if copied_at != "Unknown":
-            try:
-                import datetime
-                dt = datetime.datetime.fromisoformat(copied_at)
-                copied_at = dt.strftime("%Y-%m-%d %H:%M")
-            except Exception:
-                pass
-        table.add_row("Copied At", copied_at)
-
-    # Estimate RAM requirements based on model name
-    ram_requirement = "Unknown"
-    for size, ram in RAM_REQUIREMENTS.items():
-        if size in model_name:
-            ram_requirement = ram
-            break
-    table.add_row("Estimated RAM", ram_requirement)
-
-    # Print the table
-    console.print(table)
-
-    # If verbose, show all properties
-    if verbose:
-        console.print("\n[bold]Detailed Information:[/bold]")
-        for key, value in model_info.items():
-            if key not in ["name", "repo_id", "filename", "path", "downloaded_at", "copied_from", "copied_at"]:
-                console.print(f"[cyan]{key}:[/cyan] {value}")
-
-@app.command("ps")
-def list_running_models() -> None:
-    """
-    List running models.
-    """
-    from .server import loaded_models
-
-    if not loaded_models:
-        console.print("[yellow]No models currently running.[/yellow]")
-        return
-
-    table = Table(title="Running Models")
-    table.add_column("Name", style="cyan")
-    table.add_column("Status", style="green")
-
-    for name in loaded_models.keys():
-        table.add_row(name, "Running")
-
-    console.print(table)
-
-@app.command("version")
-def version() -> None:
-    """
-    Show version information.
-    """
-    from webscout.Local import __version__
-    console.print(f"[bold]webscout.Local[/bold] version [cyan]{__version__}[/cyan]")
-    console.print("A llama-cpp-python based LLM serving tool")
-
-if __name__ == "__main__":
-    app()
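
Note: for anyone pinned to the old release, the typer commands deleted above can still be exercised in-process. A minimal sketch, assuming webscout<=8.2.4 with its Local dependencies (typer, rich, llama-cpp-python) installed; command names come from the deleted cli.py, while the repo/filename argument is a placeholder, not something from this diff:

    # Sketch: drive the removed webscout.Local CLI without a subprocess.
    # typer.testing.CliRunner wraps click's test runner.
    from typer.testing import CliRunner
    from webscout.Local.cli import app

    runner = CliRunner()

    # Equivalent to running the "list" command: prints the "Downloaded Models" table.
    result = runner.invoke(app, ["list"])
    print(result.output)

    # Equivalent to the "pull" command with a 'repo_id:filename' argument
    # (placeholder model reference, for illustration only).
    result = runner.invoke(app, ["pull", "TheBloke/some-model-GGUF:model.Q4_K_M.gguf"])
    print(result.output)
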
webscout/Local/config.py DELETED
@@ -1,75 +0,0 @@
-"""
-Configuration management for webscout
-"""
-
-import os
-import json
-from pathlib import Path
-from typing import Dict, Any, Optional, List
-
-# Default configuration
-default_config: Dict[str, Any] = {
-    "models_dir": "~/.webscout/models",
-    "api_host": "127.0.0.1",
-    "api_port": 8000,
-    "default_context_length": 4096,
-    "default_gpu_layers": -1,  # -1 means use all available GPU layers
-}
-
-class Config:
-    """
-    Configuration manager for webscout.
-    Handles loading, saving, and accessing configuration values.
-    """
-    config_dir: Path
-    config_file: Path
-    models_dir: Path
-    config: Dict[str, Any]
-
-    def __init__(self) -> None:
-        self.config_dir = Path(os.path.expanduser("~/.webscout"))
-        self.config_file = self.config_dir / "config.json"
-        self.models_dir = Path(os.path.expanduser(default_config["models_dir"]))
-        self._ensure_dirs()
-        self._load_config()
-
-    def _ensure_dirs(self) -> None:
-        """Ensure configuration and models directories exist."""
-        self.config_dir.mkdir(exist_ok=True, parents=True)
-        self.models_dir.mkdir(exist_ok=True, parents=True)
-
-    def _load_config(self) -> None:
-        """Load configuration from file or create default."""
-        if not self.config_file.exists():
-            self._save_config(default_config)
-            self.config = default_config.copy()
-        else:
-            with open(self.config_file, "r") as f:
-                self.config = json.load(f)
-
-    def _save_config(self, config: Dict[str, Any]) -> None:
-        """Save configuration to file."""
-        with open(self.config_file, "w") as f:
-            json.dump(config, f, indent=2)
-
-    def get(self, key: str, default: Any = None) -> Any:
-        """Get configuration value by key."""
-        return self.config.get(key, default)
-
-    def set(self, key: str, value: Any) -> None:
-        """Set configuration value by key."""
-        self.config[key] = value
-        self._save_config(self.config)
-
-    def get_model_path(self, model_name: str) -> Path:
-        """Get the path to a model directory by model name."""
-        return self.models_dir / model_name
-
-    def list_models(self) -> List[str]:
-        """List all downloaded model names."""
-        if not self.models_dir.exists():
-            return []
-        return [d.name for d in self.models_dir.iterdir() if d.is_dir() and ":" not in d.name]
-
-# Global configuration instance
-config: Config = Config()
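
Note: the removed Config class was self-initializing: constructing it created ~/.webscout and ~/.webscout/models, and get/set round-tripped values through ~/.webscout/config.json. A minimal usage sketch against the module-level config instance shown above (webscout<=8.2.4; the model name is a placeholder):

    from webscout.Local.config import config

    print(config.get("api_port"))               # 8000, from default_config
    config.set("api_port", 8080)                # persisted to ~/.webscout/config.json
    print(config.get_model_path("llama-7b"))    # ~/.webscout/models/llama-7b (placeholder name)
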