llms-py 2.0.26.tar.gz → 2.0.28.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. {llms_py-2.0.26/llms_py.egg-info → llms_py-2.0.28}/PKG-INFO +32 -9
  2. {llms_py-2.0.26 → llms_py-2.0.28}/README.md +31 -8
  3. {llms_py-2.0.26 → llms_py-2.0.28}/llms/index.html +17 -1
  4. {llms_py-2.0.26 → llms_py-2.0.28}/llms/llms.json +12 -2
  5. {llms_py-2.0.26 → llms_py-2.0.28}/llms/main.py +125 -4
  6. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/Analytics.mjs +85 -77
  7. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/App.mjs +1 -1
  8. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/Brand.mjs +4 -4
  9. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/ChatPrompt.mjs +110 -9
  10. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/Main.mjs +38 -35
  11. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/ModelSelector.mjs +3 -4
  12. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/OAuthSignIn.mjs +4 -4
  13. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/ProviderStatus.mjs +12 -12
  14. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/Recents.mjs +13 -13
  15. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/SettingsDialog.mjs +65 -65
  16. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/Sidebar.mjs +17 -17
  17. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/SystemPromptEditor.mjs +5 -5
  18. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/SystemPromptSelector.mjs +4 -4
  19. llms_py-2.0.28/llms/ui/Welcome.mjs +8 -0
  20. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/ai.mjs +1 -1
  21. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/app.css +343 -27
  22. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/markdown.mjs +8 -8
  23. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/tailwind.input.css +2 -0
  24. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/typography.css +54 -36
  25. {llms_py-2.0.26 → llms_py-2.0.28/llms_py.egg-info}/PKG-INFO +32 -9
  26. {llms_py-2.0.26 → llms_py-2.0.28}/pyproject.toml +1 -1
  27. {llms_py-2.0.26 → llms_py-2.0.28}/requirements.txt +1 -0
  28. {llms_py-2.0.26 → llms_py-2.0.28}/setup.py +1 -1
  29. llms_py-2.0.26/llms/ui/Welcome.mjs +0 -8
  30. {llms_py-2.0.26 → llms_py-2.0.28}/LICENSE +0 -0
  31. {llms_py-2.0.26 → llms_py-2.0.28}/MANIFEST.in +0 -0
  32. {llms_py-2.0.26 → llms_py-2.0.28}/llms/__init__.py +0 -0
  33. {llms_py-2.0.26 → llms_py-2.0.28}/llms/__main__.py +0 -0
  34. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/Avatar.mjs +0 -0
  35. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/ProviderIcon.mjs +0 -0
  36. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/SignIn.mjs +0 -0
  37. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/fav.svg +0 -0
  38. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/chart.js +0 -0
  39. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/charts.mjs +0 -0
  40. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/color.js +0 -0
  41. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/highlight.min.mjs +0 -0
  42. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/idb.min.mjs +0 -0
  43. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/marked.min.mjs +0 -0
  44. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/servicestack-client.mjs +0 -0
  45. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/servicestack-vue.mjs +0 -0
  46. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/vue-router.min.mjs +0 -0
  47. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/vue.min.mjs +0 -0
  48. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/lib/vue.mjs +0 -0
  49. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/threadStore.mjs +0 -0
  50. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui/utils.mjs +0 -0
  51. {llms_py-2.0.26 → llms_py-2.0.28}/llms/ui.json +0 -0
  52. {llms_py-2.0.26 → llms_py-2.0.28}/llms_py.egg-info/SOURCES.txt +0 -0
  53. {llms_py-2.0.26 → llms_py-2.0.28}/llms_py.egg-info/dependency_links.txt +0 -0
  54. {llms_py-2.0.26 → llms_py-2.0.28}/llms_py.egg-info/entry_points.txt +0 -0
  55. {llms_py-2.0.26 → llms_py-2.0.28}/llms_py.egg-info/not-zip-safe +0 -0
  56. {llms_py-2.0.26 → llms_py-2.0.28}/llms_py.egg-info/requires.txt +0 -0
  57. {llms_py-2.0.26 → llms_py-2.0.28}/llms_py.egg-info/top_level.txt +0 -0
  58. {llms_py-2.0.26 → llms_py-2.0.28}/setup.cfg +0 -0
PKG-INFO

@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: llms-py
- Version: 2.0.26
+ Version: 2.0.28
  Summary: A lightweight CLI tool and OpenAI-compatible server for querying multiple Large Language Model (LLM) providers
  Home-page: https://github.com/ServiceStack/llms
  Author: ServiceStack
@@ -50,7 +50,7 @@ Configure additional providers and models in [llms.json](llms/llms.json)
  
  ## Features
  
- - **Lightweight**: Single [llms.py](https://github.com/ServiceStack/llms/blob/main/llms/main.py) Python file with single `aiohttp` dependency
+ - **Lightweight**: Single [llms.py](https://github.com/ServiceStack/llms/blob/main/llms/main.py) Python file with single `aiohttp` dependency (Pillow optional)
  - **Multi-Provider Support**: OpenRouter, Ollama, Anthropic, Google, OpenAI, Grok, Groq, Qwen, Z.ai, Mistral
  - **OpenAI-Compatible API**: Works with any client that supports OpenAI's chat completion API
  - **Built-in Analytics**: Built-in analytics UI to visualize costs, requests, and token usage
@@ -58,6 +58,7 @@ Configure additional providers and models in [llms.json](llms/llms.json)
  - **CLI Interface**: Simple command-line interface for quick interactions
  - **Server Mode**: Run an OpenAI-compatible HTTP server at `http://localhost:{PORT}/v1/chat/completions`
  - **Image Support**: Process images through vision-capable models
+   - Auto resizes and converts to webp if exceeds configured limits
  - **Audio Support**: Process audio through audio-capable models
  - **Custom Chat Templates**: Configurable chat completion request templates for different modalities
  - **Auto-Discovery**: Automatically discover available Ollama models
@@ -68,23 +69,27 @@ Configure additional providers and models in [llms.json](llms/llms.json)
  
  Access all your local all remote LLMs with a single ChatGPT-like UI:
  
- [![](https://servicestack.net/img/posts/llms-py-ui/bg.webp?)](https://servicestack.net/posts/llms-py-ui)
+ [![](https://servicestack.net/img/posts/llms-py-ui/bg.webp)](https://servicestack.net/posts/llms-py-ui)
  
- **Monthly Costs Analysis**
+ #### Dark Mode Support
+
+ [![](https://servicestack.net/img/posts/llms-py-ui/dark-attach-image.webp)](https://servicestack.net/posts/llms-py-ui)
+
+ #### Monthly Costs Analysis
  
  [![](https://servicestack.net/img/posts/llms-py-ui/analytics-costs.webp)](https://servicestack.net/posts/llms-py-ui)
  
- **Monthly Token Usage**
+ #### Monthly Token Usage (Dark Mode)
  
- [![](https://servicestack.net/img/posts/llms-py-ui/analytics-tokens.webp)](https://servicestack.net/posts/llms-py-ui)
+ [![](https://servicestack.net/img/posts/llms-py-ui/dark-analytics-tokens.webp)](https://servicestack.net/posts/llms-py-ui)
  
- **Monthly Activity Log**
+ #### Monthly Activity Log
  
  [![](https://servicestack.net/img/posts/llms-py-ui/analytics-activity.webp)](https://servicestack.net/posts/llms-py-ui)
  
  [More Features and Screenshots](https://servicestack.net/posts/llms-py-ui).
  
- **Check Provider Reliability and Response Times**
+ #### Check Provider Reliability and Response Times
  
  Check the status of configured providers to test if they're configured correctly, reachable and what their response times is for the simplest `1+1=` request:
  
@@ -230,6 +235,22 @@ See [DOCKER.md](DOCKER.md) for detailed instructions on customizing configuratio
  
  llms.py supports optional GitHub OAuth authentication to secure your web UI and API endpoints. When enabled, users must sign in with their GitHub account before accessing the application.
  
+ ```json
+ {
+   "auth": {
+     "enabled": true,
+     "github": {
+       "client_id": "$GITHUB_CLIENT_ID",
+       "client_secret": "$GITHUB_CLIENT_SECRET",
+       "redirect_uri": "http://localhost:8000/auth/github/callback",
+       "restrict_to": "$GITHUB_USERS"
+     }
+   }
+ }
+ ```
+
+ `GITHUB_USERS` is optional but if set will only allow access to the specified users.
+
  See [GITHUB_OAUTH_SETUP.md](GITHUB_OAUTH_SETUP.md) for detailed setup instructions.
  
  ## Configuration
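The `$GITHUB_CLIENT_ID`, `$GITHUB_CLIENT_SECRET` and `$GITHUB_USERS` values are environment-variable placeholders. The `llms/main.py` changes later in this diff show the expansion for `restrict_to` (strip the leading `$`, look the name up in the environment); a minimal sketch of that convention, where `expand_env` is a hypothetical helper name and the other `$`-prefixed auth fields are assumed to follow the same pattern:

```python
import os

def expand_env(value):
    """Resolve "$NAME" placeholders against the environment, as main.py does for restrict_to."""
    if isinstance(value, str) and value.startswith('$'):
        return os.environ.get(value[1:], '')
    return value

# Example: restrict the UI to two GitHub accounts
os.environ['GITHUB_USERS'] = 'alice, bob'
print(expand_env('$GITHUB_USERS'))   # -> 'alice, bob'
print(expand_env('literal-value'))   # unchanged -> 'literal-value'
```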
@@ -243,6 +264,8 @@ The configuration file [llms.json](llms/llms.json) is saved to `~/.llms/llms.jso
  - `audio`: Default chat completion request template for audio prompts
  - `file`: Default chat completion request template for file prompts
  - `check`: Check request template for testing provider connectivity
+ - `limits`: Override Request size limits
+ - `convert`: Max image size and length limits and auto conversion settings
  
  ### Providers
  
@@ -1211,7 +1234,7 @@ This shows:
  - `llms/main.py` - Main script with CLI and server functionality
  - `llms/llms.json` - Default configuration file
  - `llms/ui.json` - UI configuration file
- - `requirements.txt` - Python dependencies (aiohttp)
+ - `requirements.txt` - Python dependencies, required: `aiohttp`, optional: `Pillow`
  
  ### Provider Classes
  
README.md

@@ -10,7 +10,7 @@ Configure additional providers and models in [llms.json](llms/llms.json)
  
  ## Features
  
- - **Lightweight**: Single [llms.py](https://github.com/ServiceStack/llms/blob/main/llms/main.py) Python file with single `aiohttp` dependency
+ - **Lightweight**: Single [llms.py](https://github.com/ServiceStack/llms/blob/main/llms/main.py) Python file with single `aiohttp` dependency (Pillow optional)
  - **Multi-Provider Support**: OpenRouter, Ollama, Anthropic, Google, OpenAI, Grok, Groq, Qwen, Z.ai, Mistral
  - **OpenAI-Compatible API**: Works with any client that supports OpenAI's chat completion API
  - **Built-in Analytics**: Built-in analytics UI to visualize costs, requests, and token usage
@@ -18,6 +18,7 @@ Configure additional providers and models in [llms.json](llms/llms.json)
  - **CLI Interface**: Simple command-line interface for quick interactions
  - **Server Mode**: Run an OpenAI-compatible HTTP server at `http://localhost:{PORT}/v1/chat/completions`
  - **Image Support**: Process images through vision-capable models
+   - Auto resizes and converts to webp if exceeds configured limits
  - **Audio Support**: Process audio through audio-capable models
  - **Custom Chat Templates**: Configurable chat completion request templates for different modalities
  - **Auto-Discovery**: Automatically discover available Ollama models
@@ -28,23 +29,27 @@ Configure additional providers and models in [llms.json](llms/llms.json)
  
  Access all your local all remote LLMs with a single ChatGPT-like UI:
  
- [![](https://servicestack.net/img/posts/llms-py-ui/bg.webp?)](https://servicestack.net/posts/llms-py-ui)
+ [![](https://servicestack.net/img/posts/llms-py-ui/bg.webp)](https://servicestack.net/posts/llms-py-ui)
  
- **Monthly Costs Analysis**
+ #### Dark Mode Support
+
+ [![](https://servicestack.net/img/posts/llms-py-ui/dark-attach-image.webp)](https://servicestack.net/posts/llms-py-ui)
+
+ #### Monthly Costs Analysis
  
  [![](https://servicestack.net/img/posts/llms-py-ui/analytics-costs.webp)](https://servicestack.net/posts/llms-py-ui)
  
- **Monthly Token Usage**
+ #### Monthly Token Usage (Dark Mode)
  
- [![](https://servicestack.net/img/posts/llms-py-ui/analytics-tokens.webp)](https://servicestack.net/posts/llms-py-ui)
+ [![](https://servicestack.net/img/posts/llms-py-ui/dark-analytics-tokens.webp)](https://servicestack.net/posts/llms-py-ui)
  
- **Monthly Activity Log**
+ #### Monthly Activity Log
  
  [![](https://servicestack.net/img/posts/llms-py-ui/analytics-activity.webp)](https://servicestack.net/posts/llms-py-ui)
  
  [More Features and Screenshots](https://servicestack.net/posts/llms-py-ui).
  
- **Check Provider Reliability and Response Times**
+ #### Check Provider Reliability and Response Times
  
  Check the status of configured providers to test if they're configured correctly, reachable and what their response times is for the simplest `1+1=` request:
  
@@ -190,6 +195,22 @@ See [DOCKER.md](DOCKER.md) for detailed instructions on customizing configuratio
  
  llms.py supports optional GitHub OAuth authentication to secure your web UI and API endpoints. When enabled, users must sign in with their GitHub account before accessing the application.
  
+ ```json
+ {
+   "auth": {
+     "enabled": true,
+     "github": {
+       "client_id": "$GITHUB_CLIENT_ID",
+       "client_secret": "$GITHUB_CLIENT_SECRET",
+       "redirect_uri": "http://localhost:8000/auth/github/callback",
+       "restrict_to": "$GITHUB_USERS"
+     }
+   }
+ }
+ ```
+
+ `GITHUB_USERS` is optional but if set will only allow access to the specified users.
+
  See [GITHUB_OAUTH_SETUP.md](GITHUB_OAUTH_SETUP.md) for detailed setup instructions.
  
  ## Configuration
@@ -203,6 +224,8 @@ The configuration file [llms.json](llms/llms.json) is saved to `~/.llms/llms.jso
  - `audio`: Default chat completion request template for audio prompts
  - `file`: Default chat completion request template for file prompts
  - `check`: Check request template for testing provider connectivity
+ - `limits`: Override Request size limits
+ - `convert`: Max image size and length limits and auto conversion settings
  
  ### Providers
  
@@ -1171,7 +1194,7 @@ This shows:
  - `llms/main.py` - Main script with CLI and server functionality
  - `llms/llms.json` - Default configuration file
  - `llms/ui.json` - UI configuration file
- - `requirements.txt` - Python dependencies (aiohttp)
+ - `requirements.txt` - Python dependencies, required: `aiohttp`, optional: `Pillow`
  
  ### Provider Classes
  
llms/index.html

@@ -1,8 +1,8 @@
  <html>
  <head>
  <title>llms.py</title>
- <link rel="stylesheet" href="/ui/typography.css">
  <link rel="stylesheet" href="/ui/app.css">
+ <link rel="stylesheet" href="/ui/typography.css">
  <link rel="icon" type="image/svg" href="/ui/fav.svg">
  <style>
  [type='button'],button[type='submit']{cursor:pointer}
@@ -38,6 +38,22 @@
  <body>
  <div id="app"></div>
  </body>
+ <script>
+ let colorScheme = location.search === "?dark"
+     ? "dark"
+     : location.search === "?light"
+         ? "light"
+         : localStorage.getItem('color-scheme')
+ let darkMode = colorScheme != null
+     ? colorScheme === 'dark'
+     : window.matchMedia('(prefers-color-scheme: dark)').matches
+ let html = document.documentElement
+ html.classList.toggle('dark', darkMode)
+ html.style.setProperty('color-scheme', darkMode ? 'dark' : null)
+ if (localStorage.getItem('color-scheme') === null) {
+     localStorage.setItem('color-scheme', darkMode ? 'dark' : 'light')
+ }
+ </script>
  <script type="module">
  import { createApp, defineAsyncComponent } from 'vue'
  import { createWebHistory, createRouter } from "vue-router"
llms/llms.json

@@ -1,10 +1,11 @@
  {
    "auth": {
-     "enabled": true,
+     "enabled": false,
      "github": {
        "client_id": "$GITHUB_CLIENT_ID",
        "client_secret": "$GITHUB_CLIENT_SECRET",
-       "redirect_uri": "http://localhost:8000/auth/github/callback"
+       "redirect_uri": "http://localhost:8000/auth/github/callback",
+       "restrict_to": "$GITHUB_USERS"
      }
    },
    "defaults": {
@@ -104,6 +105,15 @@
        "stream": false
      }
    },
+   "limits": {
+     "client_max_size": 20971520
+   },
+   "convert": {
+     "image": {
+       "max_size": "1536x1024",
+       "max_length": 1572864
+     }
+   },
    "providers": {
      "openrouter_free": {
        "enabled": true,
llms/main.py

@@ -15,6 +15,8 @@ import traceback
  import sys
  import site
  import secrets
+ import re
+ from io import BytesIO
  from urllib.parse import parse_qs, urlencode
  
  import aiohttp
@@ -23,7 +25,13 @@ from aiohttp import web
  from pathlib import Path
  from importlib import resources # Py≥3.9 (pip install importlib_resources for 3.7/3.8)
  
- VERSION = "2.0.26"
+ try:
+     from PIL import Image
+     HAS_PIL = True
+ except ImportError:
+     HAS_PIL = False
+
+ VERSION = "2.0.28"
  _ROOT = None
  g_config_path = None
  g_ui_path = None
@@ -200,6 +208,77 @@ def price_to_string(price: float | int | str | None) -> str | None:
      except (ValueError, TypeError):
          return None
  
+ def convert_image_if_needed(image_bytes, mimetype='image/png'):
+     """
+     Convert and resize image to WebP if it exceeds configured limits.
+
+     Args:
+         image_bytes: Raw image bytes
+         mimetype: Original image MIME type
+
+     Returns:
+         tuple: (converted_bytes, new_mimetype) or (original_bytes, original_mimetype) if no conversion needed
+     """
+     if not HAS_PIL:
+         return image_bytes, mimetype
+
+     # Get conversion config
+     convert_config = g_config.get('convert', {}).get('image', {}) if g_config else {}
+     if not convert_config:
+         return image_bytes, mimetype
+
+     max_size_str = convert_config.get('max_size', '1536x1024')
+     max_length = convert_config.get('max_length', 1.5*1024*1024) # 1.5MB
+
+     try:
+         # Parse max_size (e.g., "1536x1024")
+         max_width, max_height = map(int, max_size_str.split('x'))
+
+         # Open image
+         with Image.open(BytesIO(image_bytes)) as img:
+             original_width, original_height = img.size
+
+             # Check if image exceeds limits
+             needs_resize = original_width > max_width or original_height > max_height
+
+             # Check if base64 length would exceed max_length (in KB)
+             # Base64 encoding increases size by ~33%, so check raw bytes * 1.33 / 1024
+             estimated_kb = (len(image_bytes) * 1.33) / 1024
+             needs_conversion = estimated_kb > max_length
+
+             if not needs_resize and not needs_conversion:
+                 return image_bytes, mimetype
+
+             # Convert RGBA to RGB if necessary (WebP doesn't support transparency in RGB mode)
+             if img.mode in ('RGBA', 'LA', 'P'):
+                 # Create a white background
+                 background = Image.new('RGB', img.size, (255, 255, 255))
+                 if img.mode == 'P':
+                     img = img.convert('RGBA')
+                 background.paste(img, mask=img.split()[-1] if img.mode in ('RGBA', 'LA') else None)
+                 img = background
+             elif img.mode != 'RGB':
+                 img = img.convert('RGB')
+
+             # Resize if needed (preserve aspect ratio)
+             if needs_resize:
+                 img.thumbnail((max_width, max_height), Image.Resampling.LANCZOS)
+                 _log(f"Resized image from {original_width}x{original_height} to {img.size[0]}x{img.size[1]}")
+
+             # Convert to WebP
+             output = BytesIO()
+             img.save(output, format='WEBP', quality=85, method=6)
+             converted_bytes = output.getvalue()
+
+             _log(f"Converted image to WebP: {len(image_bytes)} bytes -> {len(converted_bytes)} bytes ({len(converted_bytes)*100//len(image_bytes)}%)")
+
+             return converted_bytes, 'image/webp'
+
+     except Exception as e:
+         _log(f"Error converting image: {e}")
+         # Return original if conversion fails
+         return image_bytes, mimetype
+
  async def process_chat(chat):
      if not chat:
          raise Exception("No chat provided")
@@ -230,19 +309,31 @@ async def process_chat(chat):
                  mimetype = get_file_mime_type(get_filename(url))
                  if 'Content-Type' in response.headers:
                      mimetype = response.headers['Content-Type']
+                 # convert/resize image if needed
+                 content, mimetype = convert_image_if_needed(content, mimetype)
                  # convert to data uri
                  image_url['url'] = f"data:{mimetype};base64,{base64.b64encode(content).decode('utf-8')}"
              elif is_file_path(url):
                  _log(f"Reading image: {url}")
                  with open(url, "rb") as f:
                      content = f.read()
-                 ext = os.path.splitext(url)[1].lower().lstrip('.') if '.' in url else 'png'
                  # get mimetype from file extension
                  mimetype = get_file_mime_type(get_filename(url))
+                 # convert/resize image if needed
+                 content, mimetype = convert_image_if_needed(content, mimetype)
                  # convert to data uri
                  image_url['url'] = f"data:{mimetype};base64,{base64.b64encode(content).decode('utf-8')}"
              elif url.startswith('data:'):
-                 pass
+                 # Extract existing data URI and process it
+                 if ';base64,' in url:
+                     prefix = url.split(';base64,')[0]
+                     mimetype = prefix.split(':')[1] if ':' in prefix else 'image/png'
+                     base64_data = url.split(';base64,')[1]
+                     content = base64.b64decode(base64_data)
+                     # convert/resize image if needed
+                     content, mimetype = convert_image_if_needed(content, mimetype)
+                     # update data uri with potentially converted image
+                     image_url['url'] = f"data:{mimetype};base64,{base64.b64encode(content).decode('utf-8')}"
              else:
                  raise Exception(f"Invalid image: {url}")
          elif item['type'] == 'input_audio' and 'input_audio' in item:
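For reference, a standalone illustration (not from the package) of how the new `data:` branch separates the mimetype prefix from the base64 payload; the payload here is a made-up placeholder:

```python
import base64

# Build a fake data URI, then pick it apart the same way the new branch does
url = "data:image/png;base64," + base64.b64encode(b"fake image bytes").decode()

if ';base64,' in url:
    prefix = url.split(';base64,')[0]                                  # "data:image/png"
    mimetype = prefix.split(':')[1] if ':' in prefix else 'image/png'  # "image/png"
    content = base64.b64decode(url.split(';base64,')[1])               # raw bytes
    print(mimetype, len(content))   # -> 'image/png' and the decoded byte count
```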
@@ -1480,7 +1571,9 @@ def main():
  
          _log("Authentication enabled - GitHub OAuth configured")
  
-     app = web.Application()
+     client_max_size = g_config.get('limits', {}).get('client_max_size', 20*1024*1024) # 20MB max request size (to handle base64 encoding overhead)
+     _log(f"client_max_size set to {client_max_size} bytes ({client_max_size/1024/1024:.1f}MB)")
+     app = web.Application(client_max_size=client_max_size)
  
      # Authentication middleware helper
      def check_auth(request):
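The comment on the default explains the headroom: base64 inflates payloads by roughly 4/3, so a 20 MB `client_max_size` leaves room for about 15 MB of raw attachment bytes. A quick back-of-the-envelope check (illustration only):

```python
# base64 expands data by 4/3, so work backwards from the request limit
client_max_size = 20 * 1024 * 1024
raw_budget = client_max_size * 3 / 4
print(f"{raw_budget / 1024 / 1024:.1f} MB of raw payload fits in a "
      f"{client_max_size // (1024 * 1024)} MB request")
# -> 15.0 MB of raw payload fits in a 20 MB request
```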
@@ -1601,6 +1694,29 @@ def main():
          auth_url = f"https://github.com/login/oauth/authorize?{urlencode(params)}"
  
          return web.HTTPFound(auth_url)
+
+     def validate_user(github_username):
+         auth_config = g_config['auth']['github']
+         # Check if user is restricted
+         restrict_to = auth_config.get('restrict_to', '')
+
+         # Expand environment variables
+         if restrict_to.startswith('$'):
+             restrict_to = os.environ.get(restrict_to[1:], '')
+
+         # If restrict_to is configured, validate the user
+         if restrict_to:
+             # Parse allowed users (comma or space delimited)
+             allowed_users = [u.strip() for u in re.split(r'[,\s]+', restrict_to) if u.strip()]
+
+             # Check if user is in the allowed list
+             if not github_username or github_username not in allowed_users:
+                 _log(f"Access denied for user: {github_username}. Not in allowed list: {allowed_users}")
+                 return web.Response(
+                     text=f"Access denied. User '{github_username}' is not authorized to access this application.",
+                     status=403
+                 )
+         return None
  
      async def github_callback_handler(request):
          """Handle GitHub OAuth callback"""
@@ -1664,6 +1780,11 @@ def main():
          async with session.get(user_url, headers=headers) as resp:
              user_data = await resp.json()
  
+         # Validate user
+         error_response = validate_user(user_data.get('login', ''))
+         if error_response:
+             return error_response
+
          # Create session
          session_token = secrets.token_urlsafe(32)
          g_sessions[session_token] = {