llms-py 3.0.22__py3-none-any.whl → 3.0.23__py3-none-any.whl
This diff compares the contents of two publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- llms/llms.json +16 -4
- llms/main.py +17 -5
- llms/providers.json +1 -1
- llms/ui/ai.mjs +1 -1
- llms/ui/app.css +15 -3
- {llms_py-3.0.22.dist-info → llms_py-3.0.23.dist-info}/METADATA +1 -1
- {llms_py-3.0.22.dist-info → llms_py-3.0.23.dist-info}/RECORD +11 -11
- {llms_py-3.0.22.dist-info → llms_py-3.0.23.dist-info}/WHEEL +0 -0
- {llms_py-3.0.22.dist-info → llms_py-3.0.23.dist-info}/entry_points.txt +0 -0
- {llms_py-3.0.22.dist-info → llms_py-3.0.23.dist-info}/licenses/LICENSE +0 -0
- {llms_py-3.0.22.dist-info → llms_py-3.0.23.dist-info}/top_level.txt +0 -0
llms/llms.json
CHANGED
```diff
@@ -235,17 +235,29 @@
       "enabled": true,
       "temperature": 1.0
     },
+    "lmstudio": {
+      "enabled": false,
+      "npm": "lmstudio",
+      "api": "http://127.0.0.1:1234/v1",
+      "models": {}
+    },
     "ollama": {
       "enabled": false,
       "id": "ollama",
       "npm": "ollama",
       "api": "http://localhost:11434"
     },
-    "
+    "ollama-cloud": {
+      "enabled": true,
+      "env": [
+        "OLLAMA_API_KEY"
+      ]
+    },
+    "openai-local": {
       "enabled": false,
-      "npm": "
-      "api": "http://
-      "
+      "npm": "openai-local",
+      "api": "http://localhost:8000/v1",
+      "api_key": "$OPENAI_LOCAL_API_KEY"
     },
     "google": {
       "enabled": true,
```
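Of note in this hunk: the new `openai-local` entry reads its key through the `"$OPENAI_LOCAL_API_KEY"` placeholder, while `ollama-cloud` declares its required variable in an `env` list. Below is a minimal sketch of how such placeholders can be resolved; it illustrates the convention only, is not llms-py's actual loader, and `resolve_api_key` is a hypothetical helper.

```python
import os

# Hypothetical helper illustrating the "$ENV_VAR" convention used by
# entries like "api_key": "$OPENAI_LOCAL_API_KEY" (not llms-py's code).
def resolve_api_key(provider: dict) -> str | None:
    key = provider.get("api_key", "")
    if key.startswith("$"):
        return os.environ.get(key[1:])  # unset variable -> None
    return key or None

print(resolve_api_key({"api_key": "$OPENAI_LOCAL_API_KEY"}))
```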
llms/main.py
CHANGED
```diff
@@ -57,7 +57,7 @@ try:
 except ImportError:
     HAS_PIL = False
 
-VERSION = "3.0.22"
+VERSION = "3.0.23"
 _ROOT = None
 DEBUG = os.getenv("DEBUG") == "1"
 MOCK = os.getenv("MOCK") == "1"
```
```diff
@@ -871,8 +871,7 @@ def save_image_to_cache(base64_data, filename, image_info, ignore_info=False):
     return url, info
 
 
-async def response_json(response):
-    text = await response.text()
+def http_error_to_message(response, text):
     if response.status >= 400:
         message = "HTTP " + str(response.status) + " " + response.reason
         _dbg(f"HTTP {response.status} {response.reason}\n{dict(response.headers)}\n{text}")
```
```diff
@@ -885,6 +884,13 @@ async def response_json(response):
     except Exception:
         if text:
             message += ": " + text[:100]
+    return message
+
+
+async def response_json(response):
+    text = await response.text()
+    if response.status >= 400:
+        message = http_error_to_message(response, text)
         raise Exception(message)
     response.raise_for_status()
     body = json.loads(text)
```
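These two hunks are a single refactor: the inline message building in `response_json` moves into a reusable `http_error_to_message(response, text)`, so other call sites (see `get_text` below) can raise the same `HTTP <status> <reason>` message. The sketch below shows the call pattern this enables; it assumes an aiohttp-style response object with `.status` and `.text()`, and `fetch_json` is illustrative rather than part of the package.

```python
import json

# Illustrative only: the call pattern the refactor enables, assuming
# an aiohttp-style response object (.status, .text()).
async def fetch_json(session, url):
    async with session.get(url) as response:
        text = await response.text()
        if response.status >= 400:
            # One shared formatter -> consistent error strings everywhere
            raise Exception(http_error_to_message(response, text))
        return json.loads(text)
```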
```diff
@@ -1394,6 +1400,7 @@ class OllamaProvider(OpenAiCompatible):
                 "id": k,
                 "name": v.replace(":", " "),
                 "modalities": {"input": ["text"], "output": ["text"]},
+                "tool_call": True,
                 "cost": {
                     "input": 0,
                     "output": 0,
```
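The added `"tool_call": True` advertises locally listed Ollama models as tool-capable in their model descriptors. A hypothetical filter over descriptors of that shape:

```python
# Hypothetical: select tool-capable models from a descriptor list
# shaped like the one OllamaProvider builds in this hunk.
models = [
    {"id": "llama3.2:3b", "tool_call": True},
    {"id": "embedding-model", "tool_call": False},
]
tool_models = [m["id"] for m in models if m.get("tool_call")]
print(tool_models)  # ['llama3.2:3b']
```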
```diff
@@ -1431,6 +1438,10 @@ class LMStudioProvider(OllamaProvider):
         return ret
 
 
+class OpenAiLocalProvider(LMStudioProvider):
+    sdk = "openai-local"
+
+
 def get_provider_model(model_name):
     for provider in g_handlers.values():
         provider_model = provider.provider_model(model_name)
```
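`OpenAiLocalProvider` is a pure subclass: it inherits all of `LMStudioProvider`'s behavior (itself an `OllamaProvider` specialization) and changes only the `sdk` tag that pairs the class with the new `openai-local` entry in llms.json. A hypothetical sketch of tag-based lookup follows; the real wiring in llms-py may differ.

```python
# Hypothetical sketch: resolve a provider class by its `sdk` tag.
class OllamaProvider: sdk = "ollama"
class LMStudioProvider(OllamaProvider): sdk = "lmstudio"
class OpenAiLocalProvider(LMStudioProvider): sdk = "openai-local"

def handler_for(tag):
    for cls in (OllamaProvider, LMStudioProvider, OpenAiLocalProvider):
        if cls.sdk == tag:
            return cls
    return None

assert handler_for("openai-local") is OpenAiLocalProvider
```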
```diff
@@ -2229,7 +2240,7 @@ async def get_text(url):
     async with session.get(url) as resp:
         text = await resp.text()
         if resp.status >= 400:
-            raise
+            raise Exception(http_error_to_message(resp, text))
         return text
```
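The replaced line was a genuine bug: a bare `raise` outside an `except` block has no active exception to re-raise, so Python fails with its own `RuntimeError` and the HTTP failure is hidden. The fix raises the shared, descriptive message instead. A small demonstration of the old behavior:

```python
# Why the bare `raise` was a bug: with no active exception,
# Python raises its own RuntimeError instead of the HTTP error.
try:
    raise
except RuntimeError as e:
    print(e)  # No active exception to re-raise
```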
```diff
@@ -2838,6 +2849,7 @@ class AppExtensions:
             CodestralProvider,
             OllamaProvider,
             LMStudioProvider,
+            OpenAiLocalProvider,
         ]
         self.aspect_ratios = {
             "1:1": "1024×1024",
```
```diff
@@ -2953,7 +2965,7 @@ class AppExtensions:
         for filter_func in self.chat_error_filters:
             try:
                 task = filter_func(e, context)
-                if
+                if inspect.iscoroutine(task):
                     await task
             except Exception as e:
                 _err("chat error filter failed", e)
```