llms-py 2.0.8__py3-none-any.whl → 2.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. llms.py +144 -13
  2. llms_py-2.0.10.data/data/index.html +80 -0
  3. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/llms.json +16 -10
  4. llms_py-2.0.10.data/data/ui/Avatar.mjs +28 -0
  5. llms_py-2.0.10.data/data/ui/Brand.mjs +23 -0
  6. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/ChatPrompt.mjs +101 -69
  7. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/Main.mjs +43 -183
  8. llms_py-2.0.10.data/data/ui/ModelSelector.mjs +29 -0
  9. llms_py-2.0.10.data/data/ui/ProviderStatus.mjs +105 -0
  10. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/Recents.mjs +2 -1
  11. llms_py-2.0.10.data/data/ui/SettingsDialog.mjs +374 -0
  12. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/Sidebar.mjs +11 -27
  13. llms_py-2.0.10.data/data/ui/SignIn.mjs +64 -0
  14. llms_py-2.0.10.data/data/ui/SystemPromptEditor.mjs +31 -0
  15. llms_py-2.0.10.data/data/ui/SystemPromptSelector.mjs +36 -0
  16. llms_py-2.0.10.data/data/ui/Welcome.mjs +8 -0
  17. llms_py-2.0.10.data/data/ui/ai.mjs +80 -0
  18. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/app.css +76 -10
  19. llms_py-2.0.10.data/data/ui/lib/servicestack-vue.mjs +37 -0
  20. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/markdown.mjs +9 -2
  21. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/tailwind.input.css +13 -4
  22. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/threadStore.mjs +2 -2
  23. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/typography.css +109 -1
  24. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/utils.mjs +8 -2
  25. {llms_py-2.0.8.dist-info → llms_py-2.0.10.dist-info}/METADATA +124 -39
  26. llms_py-2.0.10.dist-info/RECORD +40 -0
  27. llms_py-2.0.8.data/data/index.html +0 -64
  28. llms_py-2.0.8.data/data/ui/lib/servicestack-vue.min.mjs +0 -37
  29. llms_py-2.0.8.dist-info/RECORD +0 -30
  30. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/requirements.txt +0 -0
  31. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/App.mjs +0 -0
  32. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/fav.svg +0 -0
  33. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/lib/highlight.min.mjs +0 -0
  34. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/lib/idb.min.mjs +0 -0
  35. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/lib/marked.min.mjs +0 -0
  36. /llms_py-2.0.8.data/data/ui/lib/servicestack-client.min.mjs → /llms_py-2.0.10.data/data/ui/lib/servicestack-client.mjs +0 -0
  37. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/lib/vue-router.min.mjs +0 -0
  38. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/lib/vue.min.mjs +0 -0
  39. {llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui.json +0 -0
  40. {llms_py-2.0.8.dist-info → llms_py-2.0.10.dist-info}/WHEEL +0 -0
  41. {llms_py-2.0.8.dist-info → llms_py-2.0.10.dist-info}/entry_points.txt +0 -0
  42. {llms_py-2.0.8.dist-info → llms_py-2.0.10.dist-info}/licenses/LICENSE +0 -0
  43. {llms_py-2.0.8.dist-info → llms_py-2.0.10.dist-info}/top_level.txt +0 -0

{llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/markdown.mjs
@@ -1,11 +1,18 @@
-import { Marked } from "./lib/marked.min.mjs"
-import hljs from "./lib/highlight.min.mjs"
+import { Marked } from "marked"
+import hljs from "highlight.js"
 
 export const marked = (() => {
+    const aliases = {
+        vue: 'html',
+    }
+
     const ret = new Marked(
         markedHighlight({
             langPrefix: 'hljs language-',
             highlight(code, lang, info) {
+                if (aliases[lang]) {
+                    lang = aliases[lang]
+                }
                 const language = hljs.getLanguage(lang) ? lang : 'plaintext'
                 return hljs.highlight(code, { language }).value
             }
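
The `aliases` map added above lets fenced blocks tagged with a language that highlight.js has no grammar for (currently `vue`) be tokenized with a related grammar instead of falling back to `plaintext`. A minimal usage sketch, assuming the module returns the configured `Marked` instance shown above:

```js
import { marked } from './markdown.mjs'

// Build a fenced vue code block without embedding a literal fence in this example
const fence = '``' + '`'
const md = `${fence}vue\n<template><p>{{ message }}</p></template>\n${fence}`

// 'vue' has no dedicated highlight.js grammar, so the alias map rewrites it to
// 'html' before hljs.highlight() runs and the snippet gets HTML token colors.
const html = marked.parse(md)
```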

{llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/tailwind.input.css
@@ -183,6 +183,9 @@
 /* @tailwindcss/forms css */
 [type='text'],[type='email'],[type='url'],[type='password'],[type='number'],[type='date'],[type='datetime-local'],[type='month'],[type='week'],[type='search'],[type='tel'],[type='time'],[type='color'],[multiple],textarea,select
 {-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:#fff;border-width:1px;padding:0.5rem 0.75rem;font-size:1rem}
+.dark [type='text'],.dark [type='email'],.dark [type='url'],.dark [type='password'],.dark [type='number'],.dark [type='date'],.dark [type='datetime-local'],.dark [type='month'],.dark [type='week'],.dark [type='search'],.dark [type='tel'],.dark [type='time'],.dark [type='color'],.dark [multiple],.dark textarea,.dark select
+{background-color:var(--color-gray-800)}
+
 [type='text']:focus,[type='email']:focus,[type='url']:focus,[type='password']:focus,[type='number']:focus,[type='date']:focus,[type='datetime-local']:focus,[type='month']:focus,[type='week']:focus,[type='search']:focus,[type='tel']:focus,[type='time']:focus,[type='color']:focus,[multiple]:focus,textarea:focus,select:focus{
 outline:2px solid transparent;outline-offset:2px;--tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);
 --tw-ring-offset-width:0px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;
@@ -197,23 +200,29 @@
 background-position:right 0.5rem center;background-repeat:no-repeat;background-size:1.5em 1.5em;padding-right:2.5rem;-webkit-print-color-adjust:exact;color-adjust:exact}
 [multiple]{
 background-image:initial;background-position:initial;background-repeat:unset;background-size:initial;padding-right:0.75rem;-webkit-print-color-adjust:unset;color-adjust:unset;}
-[type='radio']{
+[type='checkbox'],[type='radio']{
 -webkit-appearance:none;-moz-appearance:none;appearance:none;padding:0;-webkit-print-color-adjust:exact;color-adjust:exact;display:inline-block;
 vertical-align:middle;background-origin:border-box;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;
 flex-shrink:0;height:1rem;width:1rem;color:#2563eb;background-color:#fff;border-width:1px}
 [type='radio']{border-radius:100%}
-[type='radio']:focus{
+[type='checkbox']:focus,[type='radio']:focus{
 outline:2px solid transparent;outline-offset:2px;
 --tw-ring-inset:var(--tw-empty,/*!*/ /*!*/);--tw-ring-offset-width:2px;--tw-ring-offset-color:#fff;--tw-ring-color:#2563eb;
 --tw-ring-offset-shadow:var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
 --tw-ring-shadow:var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);
 box-shadow:var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000)}
-[type='radio']:checked{
+[type='checkbox']:checked,[type='radio']:checked{
 border-color:transparent;background-color:currentColor;background-size:100% 100%;background-position:center;background-repeat:no-repeat}
+[type='checkbox']:checked{
+background-image:url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3cpath d='M12.207 4.793a1 1 0 010 1.414l-5 5a1 1 0 01-1.414 0l-2-2a1 1 0 011.414-1.414L6.5 9.086l4.293-4.293a1 1 0 011.414 0z'/%3e%3c/svg%3e")}
 [type='radio']:checked{
 background-image:url("data:image/svg+xml,%3csvg viewBox='0 0 16 16' fill='white' xmlns='http://www.w3.org/2000/svg'%3e%3ccircle cx='8' cy='8' r='3'/%3e%3c/svg%3e")}
-[type='radio']:checked:hover,[type='radio']:checked:focus{
+[type='checkbox']:checked:hover,[type='checkbox']:checked:focus,[type='radio']:checked:hover,[type='radio']:checked:focus{
 border-color:transparent;background-color:currentColor}
+[type='checkbox']:indeterminate{
+background-image:url("data:image/svg+xml,%3csvg xmlns='http://www.w3.org/2000/svg' fill='none' viewBox='0 0 16 16'%3e%3cpath stroke='white' stroke-linecap='round' stroke-linejoin='round' stroke-width='2' d='M4 8h8'/%3e%3c/svg%3e");
+border-color:transparent;background-color:currentColor;background-size:100% 100%;background-position:center;background-repeat:no-repeat}
+[type='checkbox']:indeterminate:hover,[type='checkbox']:indeterminate:focus{border-color:transparent;background-color:currentColor}
 [type='file']{background:unset;border-color:inherit;border-width:0;border-radius:0;padding:0;font-size:unset;line-height:inherit}
 [type='file']:focus{outline:1px auto -webkit-focus-ring-color;}
 [type='color']{height:2.4rem;padding:2px 3px}

{llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/threadStore.mjs
@@ -1,5 +1,5 @@
 import { ref, computed } from 'vue'
-import { openDB } from './lib/idb.min.mjs'
+import { openDB } from 'idb'
 import { nextId } from './utils.mjs'
 
 // Thread store for managing chat threads with IndexedDB
@@ -193,7 +193,7 @@ async function setCurrentThreadFromRoute(threadId, router) {
     } else {
         // Thread not found, redirect to home
         if (router) {
-            router.push('/')
+            router.push((globalThis.ai?.base || '') + '/')
         }
         currentThread.value = null
         return null
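
The `router.push` change above makes the "thread not found" redirect respect an optional base path instead of always going to the site root. A small sketch of the assumption behind it (the `globalThis.ai` settings object is provided by the host page; the `/llms` prefix is illustrative):

```js
// Host page mounts the UI under a sub-path and exposes it on a global object:
globalThis.ai = { base: '/llms' }

// The redirect target then resolves against that prefix:
const home = (globalThis.ai?.base || '') + '/'
console.log(home) // '/llms/' (or '/' when no base is configured)
// router.push(home)
```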

{llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/typography.css
@@ -31,7 +31,7 @@
 .prose :where(h4 strong):not(:where([class~="not-prose"] *)) {font-weight: 700 }
 .prose :where(figure > *):not(:where([class~="not-prose"] *)) {margin-top: 0;margin-bottom: 0 }
 .prose :where(figcaption):not(:where([class~="not-prose"] *)) {color: var(--tw-prose-captions);font-size: .875em;line-height: 1.4285714;margin-top: .8571429em }
-.prose :where(code):not(:where([class~="not-prose"] *)) {color: #3b82f6;font-weight: 400;font-size: .875em;background-color: #eff6ff;border:1px solid #3b83f680;border-radius: .25rem;padding: .2rem .25rem .1rem .25rem }
+.prose :where(code):not(:where([class~="not-prose"] *)) {color: #3b82f6;font-weight: 400;font-size: .875em;background-color: #eff6ff;border:1px solid #3b83f680;border-radius: .25rem;padding: .2rem .25rem .1rem .25rem}
 .prose :where(code):not(:where([class~="not-prose"] *)):before {content: "" }
 .prose :where(code):not(:where([class~="not-prose"] *)):after {content: "" }
 .prose :where(a code):not(:where([class~="not-prose"] *)) {color: var(--tw-prose-links) }
@@ -340,3 +340,111 @@ h1:hover .header-anchor, h1 .header-anchor:focus, h2:hover .header-anchor, h2 .h
 margin: 1rem 0rem;
 line-height: 1;
 }
+
+
+/* Chat-specific styles */
+.prose {
+    max-width: none;
+}
+.prose > pre {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+.prose pre {
+    background-color: #1f2937;
+    color: #f9fafb;
+    border-radius: 0.5rem;
+    padding: 1rem;
+    overflow-x: auto;
+}
+.prose code {
+    /*background-color: #f3f4f6;*/
+    /*color: #1f2937;*/
+    padding: 0.125rem 0.25rem;
+    border-radius: 0.25rem;
+    font-size: 0.875em;
+}
+.prose pre code {
+    background-color: transparent;
+    color: inherit;
+    padding: 0;
+}
+.prose blockquote {
+    border-left: 4px solid #e5e7eb;
+    padding-left: 1rem;
+    font-style: italic;
+    color: #6b7280;
+}
+.prose table {
+    border-collapse: collapse;
+    width: 100%;
+}
+.prose th,
+.prose td {
+    border: 1px solid #e5e7eb;
+    padding: 0.5rem;
+    text-align: left;
+}
+.prose th {
+    background-color: #f9fafb;
+    font-weight: 600;
+}
+
+/* highlight.js - vs.css */
+.hljs {background:white;color:black}
+.hljs-comment,.hljs-quote,.hljs-variable{color:#008000}
+.hljs-keyword,.hljs-selector-tag,.hljs-built_in,.hljs-name,.hljs-tag{color:#00f}
+.hljs-string,.hljs-title,.hljs-section,.hljs-attribute,.hljs-literal,.hljs-template-tag,.hljs-template-variable,.hljs-type,.hljs-addition{color:#a31515}
+.hljs-deletion,.hljs-selector-attr,.hljs-selector-pseudo,.hljs-meta{color:#2b91af}
+.hljs-doctag{color:#808080}
+.hljs-attr{color: #f00}
+.hljs-symbol,.hljs-bullet,.hljs-link{color:#00b0e8}
+.hljs-emphasis{font-style:italic}
+.hljs-strong{font-weight:bold}
+
+/* https://unpkg.com/@highlightjs/cdn-assets/styles/atom-one-dark.min.css */
+pre code.hljs{display:block;overflow-x:auto;padding:1em}code.hljs{padding:3px 5px}.hljs{color:#abb2bf;background:#282c34}
+.hljs-comment,.hljs-quote{color:#5c6370;font-style:italic}.hljs-doctag,.hljs-formula,.hljs-keyword{color:#c678dd}
+.hljs-deletion,.hljs-name,.hljs-section,.hljs-selector-tag,.hljs-subst,.hljs-tag{color:#e06c75}
+.hljs-literal{color:#56b6c2}
+.hljs-addition,.hljs-attribute,.hljs-meta .hljs-string,.hljs-regexp,.hljs-string{color:#98c379}
+.hljs-attr,.hljs-number,.hljs-selector-attr,.hljs-selector-class,.hljs-selector-pseudo,.hljs-template-variable,.hljs-type,.hljs-variable{color:#d19a66}
+.hljs-bullet,.hljs-link,.hljs-meta,.hljs-selector-id,.hljs-symbol,.hljs-title{color:#61aeee}
+.hljs-built_in,.hljs-class .hljs-title,.hljs-title.class_{color:#e6c07b}.hljs-emphasis{font-style:italic}.hljs-strong{font-weight:700}
+.hljs-link{text-decoration:underline}
+
+/*highlightjs*/
+.hljs, .prose :where(pre):not(:where([class~="not-prose"] *)) .hljs {
+    color: #e5e7eb !important;
+    background-color: #282c34 !important;
+}
+.hljs-comment, .hljs-quote {
+    color: rgb(148 163 184); /*text-slate-400*/
+}
+
+pre {
+    overflow-x: auto;
+    font-weight: 400;
+    font-size: .875em;
+    line-height: 1.7142857;
+    margin-top: 1.7142857em;
+    margin-bottom: 1.7142857em;
+    border-radius: .375rem;
+    padding: .8571429em 1.1428571em;
+    max-width: calc(100vw - 1rem);
+    min-width: fit-content;
+    background-color: #282c34 !important;
+}
+pre code.hljs {
+    display: block;
+    overflow-x: auto;
+    padding: 1em;
+}
+.message pre {
+    max-width: 100%;
+    min-width: auto;
+}
+.message pre code.hljs {
+    overflow-x: unset;
+    width: 100%;
+}

{llms_py-2.0.8.data → llms_py-2.0.10.data}/data/ui/utils.mjs
@@ -16,11 +16,17 @@ export function toJsonObject(json) {
     }
 }
 
-export function storageArray(key) {
+export function storageArray(key, save) {
+    if (save && Array.isArray(save)) {
+        localStorage.setItem(key, JSON.stringify(save))
+    }
     return toJsonArray(localStorage.getItem(key)) ?? []
 }
 
-export function storageObject(key) {
+export function storageObject(key, save) {
+    if (typeof save == 'object') {
+        localStorage.setItem(key, JSON.stringify(save))
+    }
     return toJsonObject(localStorage.getItem(key)) ?? {}
 }
 
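
A brief usage sketch of the extended helpers above (the storage keys and values are illustrative):

```js
import { storageArray, storageObject } from './utils.mjs'

// Read-only call: returns the parsed value, or [] / {} when nothing is stored.
const recentModels = storageArray('llms.recentModels')

// Passing a second argument persists it to localStorage first,
// then returns the freshly stored value.
const settings = storageObject('llms.settings', { theme: 'dark' })
storageArray('llms.recentModels', [...recentModels, 'gpt-4o-mini'])
```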

{llms_py-2.0.8.dist-info → llms_py-2.0.10.dist-info}/METADATA
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: llms-py
-Version: 2.0.8
+Version: 2.0.10
 Summary: A lightweight CLI tool and OpenAI-compatible server for querying multiple Large Language Model (LLM) providers
 Home-page: https://github.com/ServiceStack/llms
 Author: ServiceStack
@@ -51,7 +51,7 @@ Configure additional providers and models in [llms.json](llms.json)
 ## Features
 
 - **Lightweight**: Single [llms.py](llms.py) Python file with single `aiohttp` dependency
-- **Multi-Provider Support**: OpenRouter, Ollama, Anthropic, Google, OpenAI, Grok, Groq, Qwen, Mistral
+- **Multi-Provider Support**: OpenRouter, Ollama, Anthropic, Google, OpenAI, Grok, Groq, Qwen, Z.ai, Mistral
 - **OpenAI-Compatible API**: Works with any client that supports OpenAI's chat completion API
 - **Configuration Management**: Easy provider enable/disable and configuration management
 - **CLI Interface**: Simple command-line interface for quick interactions
@@ -510,7 +510,52 @@
 
 # Update llms.py to latest version
 llms --update
-```
+
+# Pass custom parameters to chat request (URL-encoded)
+llms --args "temperature=0.7&seed=111" "What is 2+2?"
+
+# Multiple parameters with different types
+llms --args "temperature=0.5&max_completion_tokens=50" "Tell me a joke"
+
+# URL-encoded special characters (stop sequences)
+llms --args "stop=Two,Words" "Count to 5"
+
+# Combine with other options
+llms --system "You are helpful" --args "temperature=0.3" --raw "Hello"
+```
+
+#### Custom Parameters with `--args`
+
+The `--args` option allows you to pass URL-encoded parameters to customize the chat request sent to LLM providers:
+
+**Parameter Types:**
+- **Floats**: `temperature=0.7`, `frequency_penalty=0.2`
+- **Integers**: `max_completion_tokens=100`
+- **Booleans**: `store=true`, `verbose=false`, `logprobs=true`
+- **Strings**: `stop=one`
+- **Lists**: `stop=two,words`
+
+**Common Parameters:**
+- `temperature`: Controls randomness (0.0 to 2.0)
+- `max_completion_tokens`: Maximum tokens in response
+- `seed`: For reproducible outputs
+- `top_p`: Nucleus sampling parameter
+- `stop`: Stop sequences (URL-encode special chars)
+- `store`: Whether or not to store the output
+- `frequency_penalty`: Penalize new tokens based on frequency
+- `presence_penalty`: Penalize new tokens based on presence
+- `logprobs`: Include log probabilities in response
+- `parallel_tool_calls`: Enable parallel tool calls
+- `prompt_cache_key`: Cache key for prompt
+- `reasoning_effort`: Reasoning effort (low, medium, high, *minimal, *none, *default)
+- `safety_identifier`: A string that uniquely identifies each user
+- `seed`: For reproducible outputs
+- `service_tier`: Service tier (free, standard, premium, *default)
+- `top_logprobs`: Number of top logprobs to return
+- `top_p`: Nucleus sampling parameter
+- `verbosity`: Verbosity level (0, 1, 2, 3, *default)
+- `enable_thinking`: Enable thinking mode (Qwen)
+- `stream`: Enable streaming responses
 
 ### Default Model Configuration
 
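
For reference, a sketch of how the URL-encoded `--args` pairs documented above would land in the OpenAI-compatible chat request body (the model id and values are illustrative; the actual parsing and type coercion happen inside llms.py):

```js
// llms --args "temperature=0.7&seed=111&stop=Two,Words&store=true" "What is 2+2?"
const body = {
    model: 'kimi-k2',                       // whichever model was selected
    messages: [{ role: 'user', content: 'What is 2+2?' }],
    temperature: 0.7,       // float
    seed: 111,              // integer
    stop: ['Two', 'Words'], // comma-separated value becomes a list
    store: true,            // boolean
}
```
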
@@ -558,6 +603,42 @@ llms "Explain quantum computing" | glow
 
 ## Supported Providers
 
+Any OpenAI-compatible providers and their models can be added by configuring them in [llms.json](./llms.json). By default, only AI Providers with free tiers are enabled, and they will only be "available" once their API Key is set.
+
+You can list the available providers, their models, and which are enabled or disabled with:
+
+```bash
+llms ls
+```
+
+They can be enabled/disabled in your `llms.json` file or with:
+
+```bash
+llms --enable <provider>
+llms --disable <provider>
+```
+
+For a provider to be available, its API Key must also be configured, either in your Environment Variables
+or directly in your `llms.json`.
+
+### Environment Variables
+
+| Provider | Variable | Description | Example |
+|-----------------|---------------------------|---------------------|---------|
+| openrouter_free | `OPENROUTER_FREE_API_KEY` | OpenRouter FREE models API key | `sk-or-...` |
+| groq | `GROQ_API_KEY` | Groq API key | `gsk_...` |
+| google_free | `GOOGLE_FREE_API_KEY` | Google FREE API key | `AIza...` |
+| codestral | `CODESTRAL_API_KEY` | Codestral API key | `...` |
+| ollama | N/A | No API key required | |
+| openrouter | `OPENROUTER_API_KEY` | OpenRouter API key | `sk-or-...` |
+| google | `GOOGLE_API_KEY` | Google API key | `AIza...` |
+| anthropic | `ANTHROPIC_API_KEY` | Anthropic API key | `sk-ant-...` |
+| openai | `OPENAI_API_KEY` | OpenAI API key | `sk-...` |
+| grok | `GROK_API_KEY` | Grok (X.AI) API key | `xai-...` |
+| qwen | `DASHSCOPE_API_KEY` | Qwen (Alibaba) API key | `sk-...` |
+| z.ai | `ZAI_API_KEY` | Z.ai API key | `sk-...` |
+| mistral | `MISTRAL_API_KEY` | Mistral API key | `...` |
+
 ### OpenAI
 - **Type**: `OpenAiProvider`
 - **Models**: GPT-5, GPT-5 Codex, GPT-4o, GPT-4o-mini, o3, etc.
@@ -588,6 +669,26 @@ export GOOGLE_API_KEY="your-key"
 llms --enable google_free
 ```
 
+### OpenRouter
+- **Type**: `OpenAiProvider`
+- **Models**: 100+ models from various providers
+- **Features**: Access to latest models, free tier available
+
+```bash
+export OPENROUTER_API_KEY="your-key"
+llms --enable openrouter
+```
+
+### Grok (X.AI)
+- **Type**: `OpenAiProvider`
+- **Models**: Grok-4, Grok-3, Grok-3-mini, Grok-code-fast-1, etc.
+- **Features**: Real-time information, humor, uncensored responses
+
+```bash
+export GROK_API_KEY="your-key"
+llms --enable grok
+```
+
 ### Groq
 - **Type**: `OpenAiProvider`
 - **Models**: Llama 3.3, Gemma 2, Kimi K2, etc.
@@ -608,44 +709,44 @@ llms --enable groq
 llms --enable ollama
 ```
 
-### OpenRouter
+### Qwen (Alibaba Cloud)
 - **Type**: `OpenAiProvider`
-- **Models**: 100+ models from various providers
-- **Features**: Access to latest models, free tier available
+- **Models**: Qwen3-max, Qwen-max, Qwen-plus, Qwen2.5-VL, QwQ-plus, etc.
+- **Features**: Multilingual, vision models, coding, reasoning, audio processing
 
 ```bash
-export OPENROUTER_API_KEY="your-key"
-llms --enable openrouter
+export DASHSCOPE_API_KEY="your-key"
+llms --enable qwen
 ```
 
-### Mistral
+### Z.ai
 - **Type**: `OpenAiProvider`
-- **Models**: Mistral Large, Codestral, Pixtral, etc.
-- **Features**: Code generation, multilingual
+- **Models**: GLM-4.6, GLM-4.5, GLM-4.5-air, GLM-4.5-x, GLM-4.5-airx, GLM-4.5-flash, GLM-4:32b
+- **Features**: Advanced language models with strong reasoning capabilities
 
 ```bash
-export MISTRAL_API_KEY="your-key"
-llms --enable mistral
+export ZAI_API_KEY="your-key"
+llms --enable z.ai
 ```
 
-### Grok (X.AI)
+### Mistral
 - **Type**: `OpenAiProvider`
-- **Models**: Grok-4, Grok-3, Grok-3-mini, Grok-code-fast-1, etc.
-- **Features**: Real-time information, humor, uncensored responses
+- **Models**: Mistral Large, Codestral, Pixtral, etc.
+- **Features**: Code generation, multilingual
 
 ```bash
-export GROK_API_KEY="your-key"
-llms --enable grok
+export MISTRAL_API_KEY="your-key"
+llms --enable mistral
 ```
 
-### Qwen (Alibaba Cloud)
+### Codestral
 - **Type**: `OpenAiProvider`
-- **Models**: Qwen3-max, Qwen-max, Qwen-plus, Qwen2.5-VL, QwQ-plus, etc.
-- **Features**: Multilingual, vision models, coding, reasoning, audio processing
+- **Models**: Codestral
+- **Features**: Code generation
 
 ```bash
-export DASHSCOPE_API_KEY="your-key"
-llms --enable qwen
+export CODESTRAL_API_KEY="your-key"
+llms --enable codestral
 ```
 
 ## Model Routing
@@ -654,22 +755,6 @@ The tool automatically routes requests to the first available provider that supp
 
 Example: If both OpenAI and OpenRouter support `kimi-k2`, the request will first try OpenRouter (free), then fall back to Groq, then OpenRouter (Paid) if the request fails.
 
-## Environment Variables
-
-| Variable | Description | Example |
-|----------|-------------|---------|
-| `LLMS_CONFIG_PATH` | Custom config file path | `/path/to/llms.json` |
-| `OPENAI_API_KEY` | OpenAI API key | `sk-...` |
-| `ANTHROPIC_API_KEY` | Anthropic API key | `sk-ant-...` |
-| `GOOGLE_API_KEY` | Google API key | `AIza...` |
-| `GROQ_API_KEY` | Groq API key | `gsk_...` |
-| `MISTRAL_API_KEY` | Mistral API key | `...` |
-| `OPENROUTER_API_KEY` | OpenRouter API key | `sk-or-...` |
-| `OPENROUTER_FREE_API_KEY` | OpenRouter free tier key | `sk-or-...` |
-| `CODESTRAL_API_KEY` | Codestral API key | `...` |
-| `GROK_API_KEY` | Grok (X.AI) API key | `xai-...` |
-| `DASHSCOPE_API_KEY` | Qwen (Alibaba Cloud) API key | `sk-...` |
-
 ## Configuration Examples
 
 ### Minimal Configuration

llms_py-2.0.10.dist-info/RECORD
@@ -0,0 +1,40 @@
+llms.py,sha256=iMwBSI1z17dDaNV-BKE9pldrgBsY4sJwcB8cGWpvmrU,59146
+llms_py-2.0.10.data/data/index.html,sha256=RmvPW7H49tHR3DdQvvSrcR_OeCt1InYd5dirBxfZCpo,2417
+llms_py-2.0.10.data/data/llms.json,sha256=icOeciZqWXon6ZnrfEAN6Q3YRxn2SkX1xxiidf2pazM,19427
+llms_py-2.0.10.data/data/requirements.txt,sha256=iv-JIaacmTl-hSho3QmphcKnbRRYx1st47yjz_178Ro,8
+llms_py-2.0.10.data/data/ui.json,sha256=iBOmpNeD5-o8AgUa51ymS-KemovJ7bm9J1fnL0nf8jk,134025
+llms_py-2.0.10.data/data/ui/App.mjs,sha256=hXtUjaL3GrcIHieEK3BzIG72OVzrorBBS4RkE1DOGc4,439
+llms_py-2.0.10.data/data/ui/Avatar.mjs,sha256=3rHpxe_LuCDiNP895F3FOjWx4j377JA9rD1FLluvtgA,851
+llms_py-2.0.10.data/data/ui/Brand.mjs,sha256=ZkJ1Yd9ZgdTbs7f3ezM0UtsHMcm6v4-L-m3avnSXAXU,1184
+llms_py-2.0.10.data/data/ui/ChatPrompt.mjs,sha256=IbfCx3W0SlX8x9lvaFjB1rqJqNkbeUvfqFHP9iKzZ9s,19776
+llms_py-2.0.10.data/data/ui/Main.mjs,sha256=kLNWyXWUgHAaX3C3kVkAHOLvEI3H4bd5k9yMkUIRQPU,27348
+llms_py-2.0.10.data/data/ui/ModelSelector.mjs,sha256=qiI-7DBwif5ipNZtzgaZQ2o_wHc23dBRMY6zVkMOCak,947
+llms_py-2.0.10.data/data/ui/ProviderStatus.mjs,sha256=qF_rPdhyt9GffKdPCJdU0yanrDJ3cw1HLPygFP_KjEs,5744
+llms_py-2.0.10.data/data/ui/Recents.mjs,sha256=hmj7V-RXVw-DqMXjUr3OhFHTYQTkvkEhuNEDTGBf3Qw,8448
+llms_py-2.0.10.data/data/ui/SettingsDialog.mjs,sha256=Jm21s5CcZT97ZEhPc7c1WgnLOLdfSiC1SDroYsh7zM4,18095
+llms_py-2.0.10.data/data/ui/Sidebar.mjs,sha256=ERm2Q7pftKTHLCjf-Q76joy1dsvtNWLoYH-wO2mS1LI,9781
+llms_py-2.0.10.data/data/ui/SignIn.mjs,sha256=df3b-7L3ZIneDGbJWUk93K9RGo40gVeuR5StzT1ZH9g,2324
+llms_py-2.0.10.data/data/ui/SystemPromptEditor.mjs,sha256=2CyIUvkIubqYPyIp5zC6_I8CMxvYINuYNjDxvMz4VRU,1265
+llms_py-2.0.10.data/data/ui/SystemPromptSelector.mjs,sha256=AuEtRwUf_RkGgene3nVA9bw8AeMb-b5_6ZLJCTWA8KQ,3051
+llms_py-2.0.10.data/data/ui/Welcome.mjs,sha256=QFAxN7sjWlhMvOIJCmHjNFCQcvpM_T-b4ze1ld9Hj7I,912
+llms_py-2.0.10.data/data/ui/ai.mjs,sha256=O7YnQUhrlwjT0nYxxV5P439tFLItwerVzNH9TEon99o,2323
+llms_py-2.0.10.data/data/ui/app.css,sha256=1Z4coREGsjMw7lYabejdaG0ZMwPpP5lmjRHRDYygq-g,94964
+llms_py-2.0.10.data/data/ui/fav.svg,sha256=_R6MFeXl6wBFT0lqcUxYQIDWgm246YH_3hSTW0oO8qw,734
+llms_py-2.0.10.data/data/ui/markdown.mjs,sha256=O5UspOeD8-E23rxOLWcS4eyy2YejMbPwszCYteVtuoU,6221
+llms_py-2.0.10.data/data/ui/tailwind.input.css,sha256=P6QxCwbTBAaG2079ddgtFuuXDLIztEkJDFwj0wmOvkk,11978
+llms_py-2.0.10.data/data/ui/threadStore.mjs,sha256=nM53p1E78Jc7tPOoyS3J3SW9F1njcZ-Fw5FlR9l-3yY,7215
+llms_py-2.0.10.data/data/ui/typography.css,sha256=Z5Fe2IQWnh7bu1CMXniYt0SkaN2fXOFlOuniXUW8oGM,19325
+llms_py-2.0.10.data/data/ui/utils.mjs,sha256=UKyaBsuJYEc-MWoL9b1M5-t3x6h5_HV9HsNBoDVtBRw,4226
+llms_py-2.0.10.data/data/ui/lib/highlight.min.mjs,sha256=sG7wq8bF-IKkfie7S4QSyh5DdHBRf0NqQxMOEH8-MT0,127458
+llms_py-2.0.10.data/data/ui/lib/idb.min.mjs,sha256=CeTXyV4I_pB5vnibvJuyXdMs0iVF2ZL0Z7cdm3w_QaI,3853
+llms_py-2.0.10.data/data/ui/lib/marked.min.mjs,sha256=QRHb_VZugcBJRD2EP6gYlVFEsJw5C2fQ8ImMf_pA2_s,39488
+llms_py-2.0.10.data/data/ui/lib/servicestack-client.mjs,sha256=UVafVbzhJ_0N2lzv7rlzIbzwnWpoqXxGk3N3FSKgOOc,54534
+llms_py-2.0.10.data/data/ui/lib/servicestack-vue.mjs,sha256=r_-khYokisXJAIPDLh8Wq6YtcLAY6HNjtJlCZJjLy74,215181
+llms_py-2.0.10.data/data/ui/lib/vue-router.min.mjs,sha256=fR30GHoXI1u81zyZ26YEU105pZgbbAKSXbpnzFKIxls,30418
+llms_py-2.0.10.data/data/ui/lib/vue.min.mjs,sha256=iXh97m5hotl0eFllb3aoasQTImvp7mQoRJ_0HoxmZkw,163811
+llms_py-2.0.10.dist-info/licenses/LICENSE,sha256=rRryrddGfVftpde-rmAZpW0R8IJihqJ8t8wpfDXoKiQ,1549
+llms_py-2.0.10.dist-info/METADATA,sha256=TbJw3zhUdpdKU65grhe1DTFpob4YSIpBnDawjnHW9o4,27315
+llms_py-2.0.10.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+llms_py-2.0.10.dist-info/entry_points.txt,sha256=svUoTt28bIJeo6Mb1ffDBLyzhD90DWtFbfp0ShyRCgU,35
+llms_py-2.0.10.dist-info/top_level.txt,sha256=gC7hk9BKSeog8gyg-EM_g2gxm1mKHwFRfK-10BxOsa4,5
+llms_py-2.0.10.dist-info/RECORD,,

llms_py-2.0.8.data/data/index.html
@@ -1,64 +0,0 @@
-<html>
-<head>
-    <title>llms.py</title>
-    <link rel="stylesheet" href="/ui/typography.css">
-    <link rel="stylesheet" href="/ui/app.css">
-    <link rel="icon" type="image/svg" href="/ui/fav.svg">
-</head>
-<script type="importmap">
-{
-    "imports": {
-        "vue": "/ui/lib/vue.min.mjs",
-        "vue-router": "/ui/lib/vue-router.min.mjs",
-        "@servicestack/client": "/ui/lib/servicestack-client.min.mjs",
-        "@servicestack/vue": "/ui/lib/servicestack-vue.min.mjs",
-        "highlight.mjs": "/ui/lib/highlight.min.mjs"
-    }
-}
-</script>
-<body>
-<div id="app"></div>
-</body>
-<script type="module">
-import { createApp, defineAsyncComponent } from 'vue'
-import { createWebHistory, createRouter } from "vue-router"
-import ServiceStackVue from "@servicestack/vue"
-import { useThreadStore } from '/ui/threadStore.mjs'
-import App from '/ui/App.mjs'
-
-async function init() {
-    // Load models and prompts
-    const { initDB } = useThreadStore()
-    const [_, configRes, modelsRes] = await Promise.all([
-        await initDB(),
-        fetch('/ui.json'),
-        fetch('/models'),
-    ])
-    const config = await configRes.json()
-    const models = await modelsRes.json()
-    return { config, models }
-}
-
-const { config, models } = await init()
-const MainComponent = defineAsyncComponent(() => import('/ui/Main.mjs'))
-const RecentsComponent = defineAsyncComponent(() => import('/ui/Recents.mjs'))
-
-const routes = [
-    { path: '/', component: MainComponent },
-    { path: '/c/:id', component: MainComponent },
-    { path: '/recents', component: RecentsComponent },
-    { path: '/:fallback(.*)*', component: MainComponent }
-]
-const router = createRouter({
-    history: createWebHistory(),
-    routes,
-})
-const app = createApp(App, { config, models })
-app.use(router)
-app.use(ServiceStackVue)
-app.provide('config', config)
-app.provide('models', models)
-
-app.mount('#app')
-</script>
-</html>