@elizaos/plugin-local-ai 1.0.0-beta.4 → 1.0.0-beta.41
This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- package/LICENSE +1 -1
- package/README.md +0 -39
- package/dist/index.js +419 -767
- package/dist/index.js.map +1 -1
- package/package.json +3 -4
package/LICENSE
CHANGED
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2025 Shaw Walters
+Copyright (c) 2025 Shaw Walters and elizaOS Contributors
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
package/README.md
CHANGED
@@ -18,15 +18,6 @@ The plugin requires these environment variables (can be set in .env file or char
 "settings": {
 "USE_LOCAL_AI": true,
 "USE_STUDIOLM_TEXT_MODELS": false,
-"USE_OLLAMA_TEXT_MODELS": false,
-
-"OLLAMA_SERVER_URL": "http://localhost:11434",
-"OLLAMA_MODEL": "deepseek-r1-distill-qwen-7b",
-"USE_OLLAMA_EMBEDDING": false,
-"OLLAMA_EMBEDDING_MODEL": "",
-"SMALL_OLLAMA_MODEL": "deepseek-r1:1.5b",
-"MEDIUM_OLLAMA_MODEL": "deepseek-r1:7b",
-"LARGE_OLLAMA_MODEL": "deepseek-r1:7b",
 
 "STUDIOLM_SERVER_URL": "http://localhost:1234",
 "STUDIOLM_SMALL_MODEL": "lmstudio-community/deepseek-r1-distill-qwen-1.5b",

@@ -41,16 +32,6 @@ Or in `.env` file:
 # Local AI Configuration
 USE_LOCAL_AI=true
 USE_STUDIOLM_TEXT_MODELS=false
-USE_OLLAMA_TEXT_MODELS=false
-
-# Ollama Configuration
-OLLAMA_SERVER_URL=http://localhost:11434
-OLLAMA_MODEL=deepseek-r1-distill-qwen-7b
-USE_OLLAMA_EMBEDDING=false
-OLLAMA_EMBEDDING_MODEL=
-SMALL_OLLAMA_MODEL=deepseek-r1:1.5b
-MEDIUM_OLLAMA_MODEL=deepseek-r1:7b
-LARGE_OLLAMA_MODEL=deepseek-r1:7b
 
 # StudioLM Configuration
 STUDIOLM_SERVER_URL=http://localhost:1234

@@ -64,18 +45,6 @@ STUDIOLM_EMBEDDING_MODEL=false
 #### Text Model Source (Choose One)
 
 - `USE_STUDIOLM_TEXT_MODELS`: Enable StudioLM text models
-- `USE_OLLAMA_TEXT_MODELS`: Enable Ollama text models
-Note: Only one text model source can be enabled at a time
-
-#### Ollama Settings
-
-- `OLLAMA_SERVER_URL`: Ollama API endpoint (default: http://localhost:11434)
-- `OLLAMA_MODEL`: Default model for general use
-- `USE_OLLAMA_EMBEDDING`: Enable Ollama for embeddings
-- `OLLAMA_EMBEDDING_MODEL`: Model for embeddings when enabled
-- `SMALL_OLLAMA_MODEL`: Model for lighter tasks
-- `MEDIUM_OLLAMA_MODEL`: Model for standard tasks
-- `LARGE_OLLAMA_MODEL`: Model for complex tasks
 
 #### StudioLM Settings
 
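Taken together, the three hunks above remove the Ollama configuration path from the README; StudioLM remains the only documented alternate text-model source. As a hedged sketch, the surviving configuration reduces to the following (every key and value is taken from the context lines above; the TypeScript wrapper around them is an assumption, not part of the package):

```ts
// Sketch: character settings that remain after this diff. Each key/value
// below appears on the new side of the hunks above; the const wrapper and
// its placement are illustrative assumptions.
const localAiSettings = {
  USE_LOCAL_AI: true,
  USE_STUDIOLM_TEXT_MODELS: false,
  STUDIOLM_SERVER_URL: "http://localhost:1234",
  STUDIOLM_SMALL_MODEL: "lmstudio-community/deepseek-r1-distill-qwen-1.5b",
} as const;
```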
@@ -141,12 +110,4 @@ const largeResponse = await runtime.useModel(ModelType.TEXT_LARGE, {
 - Supports both small and medium-sized models
 - Optional embedding model support
 
-### 2. Ollama
-
-- Local model server with optimized inference
-- Supports various open models in GGUF format
-- Configure with `USE_OLLAMA_TEXT_MODELS=true`
-- Supports small, medium, and large models
-- Optional embedding model support
-
 Note: The plugin validates that only one text model source is enabled at a time to prevent conflicts.
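The final hunk's header shows the README's usage example, `runtime.useModel(ModelType.TEXT_LARGE, ...)`, which this diff leaves unchanged. A minimal sketch of that call in context, assuming the `IAgentRuntime` and `ModelType` exports from `@elizaos/core` and an illustrative prompt:

```ts
import { ModelType, type IAgentRuntime } from "@elizaos/core";

// Sketch: with plugin-local-ai registered, TEXT_LARGE resolves to the local
// model. The useModel call mirrors the README's example; the wrapper
// function and prompt text are illustrative assumptions.
async function generateLocally(runtime: IAgentRuntime): Promise<string> {
  const largeResponse = await runtime.useModel(ModelType.TEXT_LARGE, {
    prompt: "Describe the StudioLM configuration options.",
  });
  return largeResponse;
}
```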