aimodelsfree 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +280 -0
- package/README.md.bak +280 -0
- package/dist/index.cjs +132 -0
- package/dist/index.js +150 -0
- package/package.json +28 -0
package/LICENSE
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2025 Ado
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
package/README.md
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
1
|
+
# <h1><img alt="aimodelsfree logo" src="https://cdn.skyultraplus.com/uploads/u44/9f2341d40b68a3e9.png" height="75"/></h1>
|
|
2
|
+
|
|
3
|
+
# 🐢 aimodelsfree — Modelos de IA gratis
|
|
4
|
+
|
|
5
|
+
[](https://www.npmjs.com/package/aimodelsfree)
|
|
6
|
+
[](https://www.npmjs.com/package/aimodelsfree)
|
|
7
|
+
[](https://opensource.org/licenses/MIT)
|
|
8
|
+
[](https://nodejs.org)
|
|
9
|
+
|
|
10
|
+
> 🌱 Cliente Node.js ligero y directo para listar modelos y enviar prompts a endpoints tipo OpenAI.
|
|
11
|
+
> Ideal para bots, prototipos y scripts.
|
|
12
|
+
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
> [!CAUTION]
|
|
16
|
+
> **AVISO IMPORTANTE**
|
|
17
|
+
>
|
|
18
|
+
> aimodelsfree consume endpoints compatibles con OpenAI (por ejemplo: `/openapi/v1/chat/completions`).
|
|
19
|
+
> Verifica siempre el `baseURL`, la licencia y los términos del proveedor. Úsalo responsablemente.
|
|
20
|
+
|
|
21
|
+
---
|
|
22
|
+
|
|
23
|
+
## 🔥 Lo que hace (en 1 línea)
|
|
24
|
+
Lista modelos y envía preguntas a modelos IA de forma sencilla:
|
|
25
|
+
- `GET /openapi/v1/models`
|
|
26
|
+
- `POST /openapi/v1/chat/completions`
|
|
27
|
+
|
|
28
|
+
---
|
|
29
|
+
|
|
30
|
+
## 🧭 Por qué elegir aimodelsfree?
|
|
31
|
+
- 🌿 **Ligero:** pensado para integrarse rápido en bots y proyectos Node.js.
|
|
32
|
+
- 🦅 **Compatible:** payload OpenAI-like (`model`, `messages`, `max_completion_tokens`, etc.).
|
|
33
|
+
- 🌱 **Cache de modelos:** reduce llamadas repetidas.
|
|
34
|
+
- 🦋 **Configurable:** `apiKey`, `headers`, `timeoutMs`, `userAgent`, `referer`, etc.
|
|
35
|
+
- 🐢 **Simple:** dos llamadas principales (`listModels()` y `ask()`).
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## 📦 Instalación
|
|
40
|
+
|
|
41
|
+
```bash
|
|
42
|
+
npm install aimodelsfree
|
|
43
|
+
# o
|
|
44
|
+
yarn add aimodelsfree
|
|
45
|
+
# o
|
|
46
|
+
pnpm add aimodelsfree
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
> Requiere **Node.js >= 18**.
|
|
50
|
+
|
|
51
|
+
---
|
|
52
|
+
|
|
53
|
+
## ⚡ Quick Start — Rápido y bonito
|
|
54
|
+
|
|
55
|
+
### ESModules (import)
|
|
56
|
+
```js
|
|
57
|
+
import AIModelsFree from 'aimodelsfree'
|
|
58
|
+
|
|
59
|
+
const ai = new AIModelsFree({
|
|
60
|
+
baseURL: 'https://mj.gpt7.icu' // sin /openapi/v1
|
|
61
|
+
})
|
|
62
|
+
|
|
63
|
+
const models = await ai.listModels()
|
|
64
|
+
console.log('🌿 Modelos (top 5):', models.slice(0, 5).map(m => m.id))
|
|
65
|
+
|
|
66
|
+
if (!models.length) {
|
|
67
|
+
console.log('No hay modelos disponibles con ese baseURL.')
|
|
68
|
+
} else {
|
|
69
|
+
const res = await ai.ask({
|
|
70
|
+
model: models[0].id,
|
|
71
|
+
question: '¡Hola! ¿Qué puedes hacer por mí hoy?',
|
|
72
|
+
maxCompletionTokens: 512
|
|
73
|
+
})
|
|
74
|
+
|
|
75
|
+
console.log('📝 Respuesta:', res.text)
|
|
76
|
+
}
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
### CommonJS (require)
|
|
80
|
+
```js
|
|
81
|
+
const AIModelsFree = require('aimodelsfree')
|
|
82
|
+
|
|
83
|
+
const ai = new AIModelsFree({ baseURL: 'https://mj.gpt7.icu' })
|
|
84
|
+
|
|
85
|
+
;(async () => {
|
|
86
|
+
const models = await ai.listModels()
|
|
87
|
+
if (!models.length) return console.log('No hay modelos disponibles.')
|
|
88
|
+
|
|
89
|
+
const out = await ai.ask({
|
|
90
|
+
model: models[0].id,
|
|
91
|
+
question: '¡Hola!',
|
|
92
|
+
maxCompletionTokens: 256
|
|
93
|
+
})
|
|
94
|
+
|
|
95
|
+
console.log('🌱', out.text)
|
|
96
|
+
})()
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## 🧩 API — Referencia rápida
|
|
102
|
+
|
|
103
|
+
### `new AIModelsFree(options)`
|
|
104
|
+
Crea la instancia del cliente.
|
|
105
|
+
|
|
106
|
+
**Opciones**
|
|
107
|
+
- `baseURL` (string) — URL base del proveedor **sin** `/openapi/v1`.
|
|
108
|
+
Default: `https://mj.gpt7.icu`
|
|
109
|
+
- `apiKey` (string) — se envía como `Authorization: Bearer <apiKey>` si se provee
|
|
110
|
+
- `timeoutMs` (number) — timeout de axios en ms. Default: `60000`
|
|
111
|
+
- `userAgent` (string) — default: `aimodelsfree/1.0 (+https://www.npmjs.com/package/aimodelsfree)`
|
|
112
|
+
- `referer` (string) — opcional
|
|
113
|
+
- `headers` (object) — headers adicionales (se mezclan con los internos)
|
|
114
|
+
|
|
115
|
+
---
|
|
116
|
+
|
|
117
|
+
### `await ai.listModels({ refresh, cacheTtlMs } = {})`
|
|
118
|
+
Devuelve `Array<Model>`.
|
|
119
|
+
|
|
120
|
+
**Parámetros**
|
|
121
|
+
- `refresh` (boolean) — fuerza refrescar la lista (ignora cache). Default: `false`
|
|
122
|
+
- `cacheTtlMs` (number) — TTL del cache en ms. Default: `5 * 60 * 1000` (5 min)
|
|
123
|
+
|
|
124
|
+
**Retorno**
|
|
125
|
+
- Array de objetos modelo (por ejemplo `{ id, object, ... }`)
|
|
126
|
+
|
|
127
|
+
> Nota: el cliente normaliza payloads tipo OpenAI (`{ data: [...] }`) y también acepta arrays directos.
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
### `await ai.ask(params)`
|
|
132
|
+
Envía una pregunta y devuelve `{ text, raw }`.
|
|
133
|
+
|
|
134
|
+
**Parámetros principales**
|
|
135
|
+
- `model` (string) — **obligatorio**
|
|
136
|
+
- `question` (string) — **obligatorio**
|
|
137
|
+
- `system` (string) — opcional (se añade como primer mensaje `role: "system"`)
|
|
138
|
+
- `maxCompletionTokens` (number) — se envía como `max_completion_tokens` (default interno: `3072`)
|
|
139
|
+
- `temperature` (number) — opcional (solo se envía si es number)
|
|
140
|
+
- `topP` (number) — se envía como `top_p` (default: `1`)
|
|
141
|
+
- `presencePenalty` (number) — `presence_penalty` (default: `0`)
|
|
142
|
+
- `frequencyPenalty` (number) — `frequency_penalty` (default: `0`)
|
|
143
|
+
- `stream` (boolean) — se envía al endpoint (default: `false`)
|
|
144
|
+
|
|
145
|
+
**Retorno**
|
|
146
|
+
- `{ text, raw }`
|
|
147
|
+
- `text` → texto extraído de `choices[0].message.content` (o `choices[0].text` si el proxy lo usa)
|
|
148
|
+
- `raw` → respuesta completa del endpoint
|
|
149
|
+
|
|
150
|
+
> Nota: aunque puedes enviar `stream: true`, este cliente **no implementa streaming**; siempre devuelve la respuesta final parseada.
|
|
151
|
+
|
|
152
|
+
---
|
|
153
|
+
|
|
154
|
+
## 🎯 Ejemplos útiles
|
|
155
|
+
|
|
156
|
+
### Comparar respuestas de 3 modelos
|
|
157
|
+
```js
|
|
158
|
+
const models = await ai.listModels()
|
|
159
|
+
const pregunta = '¿Qué es la inteligencia artificial?'
|
|
160
|
+
|
|
161
|
+
for (let i = 0; i < Math.min(3, models.length); i++) {
|
|
162
|
+
const r = await ai.ask({
|
|
163
|
+
model: models[i].id,
|
|
164
|
+
question: pregunta,
|
|
165
|
+
maxCompletionTokens: 200
|
|
166
|
+
})
|
|
167
|
+
console.log(`🌿 ${models[i].id} →`, r.text)
|
|
168
|
+
}
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
### Integración rápida en un bot (handler WhatsApp)
|
|
172
|
+
```js
|
|
173
|
+
import AIModelsFree from 'aimodelsfree'
|
|
174
|
+
const ai = new AIModelsFree({ baseURL: 'https://mj.gpt7.icu' })
|
|
175
|
+
|
|
176
|
+
let handler = async (m, { conn, text, usedPrefix, command }) => {
|
|
177
|
+
const chatId = m?.chat || m?.key?.remoteJid
|
|
178
|
+
if (!chatId) return
|
|
179
|
+
|
|
180
|
+
if (command === 'aimodels') {
|
|
181
|
+
await conn.sendMessage(chatId, { react: { text: '🕒', key: m.key } })
|
|
182
|
+
|
|
183
|
+
try {
|
|
184
|
+
const models = await ai.listModels()
|
|
185
|
+
const top = models.slice(0, 30)
|
|
186
|
+
|
|
187
|
+
const msg = [
|
|
188
|
+
'「✦」Modelos disponibles (top 30):',
|
|
189
|
+
...top.map((x, i) => `> ${i + 1}. *${x.id}*`),
|
|
190
|
+
'',
|
|
191
|
+
`> ✐ Uso » *${usedPrefix}ai <modelo>|<pregunta>*`,
|
|
192
|
+
`> ✐ Ejemplo » *${usedPrefix}ai gpt-4o-mini|Hola*`
|
|
193
|
+
].join('\n')
|
|
194
|
+
|
|
195
|
+
await conn.sendMessage(chatId, { text: msg }, { quoted: m })
|
|
196
|
+
await conn.sendMessage(chatId, { react: { text: '✔️', key: m.key } })
|
|
197
|
+
} catch (e) {
|
|
198
|
+
await conn.sendMessage(chatId, { text: `「✦」Error listando modelos.\n> ${String(e?.message || e)}` }, { quoted: m })
|
|
199
|
+
}
|
|
200
|
+
return
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
if (!text || !text.includes('|')) {
|
|
204
|
+
return conn.sendMessage(
|
|
205
|
+
chatId,
|
|
206
|
+
{ text: `「✦」Formato: *${usedPrefix + command} <modelo>|<pregunta>*\n> ✐ Ejemplo » *${usedPrefix + command} gpt-4o-mini|Hola*` },
|
|
207
|
+
{ quoted: m }
|
|
208
|
+
)
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
const [model, ...rest] = text.split('|')
|
|
212
|
+
const question = rest.join('|').trim()
|
|
213
|
+
|
|
214
|
+
await conn.sendMessage(chatId, { react: { text: '🕒', key: m.key } })
|
|
215
|
+
|
|
216
|
+
try {
|
|
217
|
+
const out = await ai.ask({
|
|
218
|
+
model: model.trim(),
|
|
219
|
+
question,
|
|
220
|
+
maxCompletionTokens: 1024
|
|
221
|
+
})
|
|
222
|
+
|
|
223
|
+
await conn.sendMessage(
|
|
224
|
+
chatId,
|
|
225
|
+
{ text: `「✦」*Modelo:* ${model.trim()}\n\n${out.text || 'Sin respuesta.'}` },
|
|
226
|
+
{ quoted: m }
|
|
227
|
+
)
|
|
228
|
+
await conn.sendMessage(chatId, { react: { text: '✔️', key: m.key } })
|
|
229
|
+
} catch (e) {
|
|
230
|
+
await conn.sendMessage(chatId, { text: `「✦」Error consultando IA.\n> ${String(e?.message || e)}` }, { quoted: m })
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
handler.help = ['aimodels', 'ai <modelo>|<pregunta>']
|
|
235
|
+
handler.tags = ['ai']
|
|
236
|
+
handler.command = ['aimodels', 'ai']
|
|
237
|
+
|
|
238
|
+
export default handler
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
---
|
|
242
|
+
|
|
243
|
+
## 🛠️ Troubleshooting — Soluciones rápidas
|
|
244
|
+
|
|
245
|
+
- **ETIMEDOUT** — aumenta `timeoutMs`:
|
|
246
|
+
```js
|
|
247
|
+
new AIModelsFree({ baseURL: 'https://mj.gpt7.icu', timeoutMs: 180000 })
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
- **401 Unauthorized** — revisa `apiKey`.
|
|
251
|
+
|
|
252
|
+
- **No models available** — revisa `baseURL` y prueba refrescar:
|
|
253
|
+
```js
|
|
254
|
+
await ai.listModels({ refresh: true })
|
|
255
|
+
```
|
|
256
|
+
|
|
257
|
+
---
|
|
258
|
+
|
|
259
|
+
## ⚖️ Disclaimer
|
|
260
|
+
> Este proyecto no está afiliado ni es oficial de ningún proveedor de IA. Usa los endpoints respetando los términos del servicio del proveedor. El autor no se hace responsable por el uso indebido.
|
|
261
|
+
|
|
262
|
+
---
|
|
263
|
+
|
|
264
|
+
## 🧾 Licencia
|
|
265
|
+
MIT © 2025 Ado — consulta el archivo [LICENSE](LICENSE).
|
|
266
|
+
|
|
267
|
+
---
|
|
268
|
+
|
|
269
|
+
## 👨‍💻 Autor
|
|
270
|
+
|
|
271
|
+
<p>
|
|
272
|
+
<img src="https://github.com/Ado21.png" alt="Ado21" width="80" height="80" style="border-radius:50%;vertical-align:middle;margin-right:10px" />
|
|
273
|
+
<strong></strong><br/>
|
|
274
|
+
🐣 <a href="https://github.com/Ado21">Ado21</a> · 📦 <a href="https://www.npmjs.com/package/aimodelsfree">NPM</a>
|
|
275
|
+
</p>
|
|
276
|
+
|
|
277
|
+
---
|
|
278
|
+
|
|
279
|
+
## 🌾 Changelog (rápido)
|
|
280
|
+
- **1.1.0** — Versión inicial (2025-12-30)
|
package/README.md.bak
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
1
|
+
# <h1><img alt="aimodelsfree logo" src="https://cdn.skyultraplus.com/uploads/u44/9f2341d40b68a3e9.png" height="75"/></h1>
|
|
2
|
+
|
|
3
|
+
# 🐢 aimodelsfree — Modelos de IA gratis
|
|
4
|
+
|
|
5
|
+
[](https://www.npmjs.com/package/aimodelsfree)
|
|
6
|
+
[](https://www.npmjs.com/package/aimodelsfree)
|
|
7
|
+
[](https://opensource.org/licenses/MIT)
|
|
8
|
+
[](https://nodejs.org)
|
|
9
|
+
|
|
10
|
+
> 🌱 Cliente Node.js ligero y directo para listar modelos y enviar prompts a endpoints tipo OpenAI.
|
|
11
|
+
> Ideal para bots, prototipos y scripts.
|
|
12
|
+
|
|
13
|
+
---
|
|
14
|
+
|
|
15
|
+
> [!CAUTION]
|
|
16
|
+
> **AVISO IMPORTANTE**
|
|
17
|
+
>
|
|
18
|
+
> aimodelsfree consume endpoints compatibles con OpenAI (por ejemplo: `/openapi/v1/chat/completions`).
|
|
19
|
+
> Verifica siempre el `baseURL`, la licencia y los términos del proveedor. Úsalo responsablemente.
|
|
20
|
+
|
|
21
|
+
---
|
|
22
|
+
|
|
23
|
+
## 🔥 Lo que hace (en 1 línea)
|
|
24
|
+
Lista modelos y envía preguntas a modelos IA de forma sencilla:
|
|
25
|
+
- `GET /openapi/v1/models`
|
|
26
|
+
- `POST /openapi/v1/chat/completions`
|
|
27
|
+
|
|
28
|
+
---
|
|
29
|
+
|
|
30
|
+
## 🧭 ¿Por qué elegir aimodelsfree?
|
|
31
|
+
- 🌿 **Ligero:** pensado para integrarse rápido en bots y proyectos Node.js.
|
|
32
|
+
- 🦅 **Compatible:** payload OpenAI-like (`model`, `messages`, `max_completion_tokens`, etc.).
|
|
33
|
+
- 🌱 **Cache de modelos:** reduce llamadas repetidas.
|
|
34
|
+
- 🦋 **Configurable:** `apiKey`, `headers`, `timeoutMs`, `userAgent`, `referer`, etc.
|
|
35
|
+
- 🐢 **Simple:** dos llamadas principales (`listModels()` y `ask()`).
|
|
36
|
+
|
|
37
|
+
---
|
|
38
|
+
|
|
39
|
+
## 📦 Instalación
|
|
40
|
+
|
|
41
|
+
```bash
|
|
42
|
+
npm install aimodelsfree
|
|
43
|
+
# o
|
|
44
|
+
yarn add aimodelsfree
|
|
45
|
+
# o
|
|
46
|
+
pnpm add aimodelsfree
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
> Requiere **Node.js >= 18**.
|
|
50
|
+
|
|
51
|
+
---
|
|
52
|
+
|
|
53
|
+
## ⚡ Quick Start — Rápido y bonito
|
|
54
|
+
|
|
55
|
+
### ESModules (import)
|
|
56
|
+
```js
|
|
57
|
+
import AIModelsFree from 'aimodelsfree'
|
|
58
|
+
|
|
59
|
+
const ai = new AIModelsFree({
|
|
60
|
+
baseURL: 'https://mj.gpt7.icu' // sin /openapi/v1
|
|
61
|
+
})
|
|
62
|
+
|
|
63
|
+
const models = await ai.listModels()
|
|
64
|
+
console.log('🌿 Modelos (top 5):', models.slice(0, 5).map(m => m.id))
|
|
65
|
+
|
|
66
|
+
if (!models.length) {
|
|
67
|
+
console.log('No hay modelos disponibles con ese baseURL.')
|
|
68
|
+
} else {
|
|
69
|
+
const res = await ai.ask({
|
|
70
|
+
model: models[0].id,
|
|
71
|
+
question: '¡Hola! ¿Qué puedes hacer por mí hoy?',
|
|
72
|
+
maxCompletionTokens: 512
|
|
73
|
+
})
|
|
74
|
+
|
|
75
|
+
console.log('📝 Respuesta:', res.text)
|
|
76
|
+
}
|
|
77
|
+
```
|
|
78
|
+
|
|
79
|
+
### CommonJS (require)
|
|
80
|
+
```js
|
|
81
|
+
const AIModelsFree = require('aimodelsfree')
|
|
82
|
+
|
|
83
|
+
const ai = new AIModelsFree({ baseURL: 'https://mj.gpt7.icu' })
|
|
84
|
+
|
|
85
|
+
;(async () => {
|
|
86
|
+
const models = await ai.listModels()
|
|
87
|
+
if (!models.length) return console.log('No hay modelos disponibles.')
|
|
88
|
+
|
|
89
|
+
const out = await ai.ask({
|
|
90
|
+
model: models[0].id,
|
|
91
|
+
question: '¡Hola!',
|
|
92
|
+
maxCompletionTokens: 256
|
|
93
|
+
})
|
|
94
|
+
|
|
95
|
+
console.log('🌱', out.text)
|
|
96
|
+
})()
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
---
|
|
100
|
+
|
|
101
|
+
## 🧩 API — Referencia rápida
|
|
102
|
+
|
|
103
|
+
### `new AIModelsFree(options)`
|
|
104
|
+
Crea la instancia del cliente.
|
|
105
|
+
|
|
106
|
+
**Opciones**
|
|
107
|
+
- `baseURL` (string) — URL base del proveedor **sin** `/openapi/v1`.
|
|
108
|
+
Default: `https://mj.gpt7.icu`
|
|
109
|
+
- `apiKey` (string) — se envía como `Authorization: Bearer <apiKey>` si se provee
|
|
110
|
+
- `timeoutMs` (number) — timeout de axios en ms. Default: `60000`
|
|
111
|
+
- `userAgent` (string) — default: `aimodelsfree/1.0 (+https://www.npmjs.com/package/aimodelsfree)`
|
|
112
|
+
- `referer` (string) — opcional
|
|
113
|
+
- `headers` (object) — headers adicionales (se mezclan con los internos)
|
|
114
|
+
|
|
115
|
+
---
|
|
116
|
+
|
|
117
|
+
### `await ai.listModels({ refresh, cacheTtlMs } = {})`
|
|
118
|
+
Devuelve `Array<Model>`.
|
|
119
|
+
|
|
120
|
+
**Parámetros**
|
|
121
|
+
- `refresh` (boolean) — fuerza refrescar la lista (ignora cache). Default: `false`
|
|
122
|
+
- `cacheTtlMs` (number) — TTL del cache en ms. Default: `5 * 60 * 1000` (5 min)
|
|
123
|
+
|
|
124
|
+
**Retorno**
|
|
125
|
+
- Array de objetos modelo (por ejemplo `{ id, object, ... }`)
|
|
126
|
+
|
|
127
|
+
> Nota: el cliente normaliza payloads tipo OpenAI (`{ data: [...] }`) y también acepta arrays directos.
|
|
128
|
+
|
|
129
|
+
---
|
|
130
|
+
|
|
131
|
+
### `await ai.ask(params)`
|
|
132
|
+
Envía una pregunta y devuelve `{ text, raw }`.
|
|
133
|
+
|
|
134
|
+
**Parámetros principales**
|
|
135
|
+
- `model` (string) — **obligatorio**
|
|
136
|
+
- `question` (string) — **obligatorio**
|
|
137
|
+
- `system` (string) — opcional (se añade como primer mensaje `role: "system"`)
|
|
138
|
+
- `maxCompletionTokens` (number) — se envía como `max_completion_tokens` (default interno: `3072`)
|
|
139
|
+
- `temperature` (number) — opcional (solo se envía si es number)
|
|
140
|
+
- `topP` (number) — se envía como `top_p` (default: `1`)
|
|
141
|
+
- `presencePenalty` (number) — `presence_penalty` (default: `0`)
|
|
142
|
+
- `frequencyPenalty` (number) — `frequency_penalty` (default: `0`)
|
|
143
|
+
- `stream` (boolean) — se envía al endpoint (default: `false`)
|
|
144
|
+
|
|
145
|
+
**Retorno**
|
|
146
|
+
- `{ text, raw }`
|
|
147
|
+
- `text` → texto extraído de `choices[0].message.content` (o `choices[0].text` si el proxy lo usa)
|
|
148
|
+
- `raw` → respuesta completa del endpoint
|
|
149
|
+
|
|
150
|
+
> Nota: aunque puedes enviar `stream: true`, este cliente **no implementa streaming**; siempre devuelve la respuesta final parseada.
|
|
151
|
+
|
|
152
|
+
---
|
|
153
|
+
|
|
154
|
+
## 🎯 Ejemplos útiles
|
|
155
|
+
|
|
156
|
+
### Comparar respuestas de 3 modelos
|
|
157
|
+
```js
|
|
158
|
+
const models = await ai.listModels()
|
|
159
|
+
const pregunta = '¿Qué es la inteligencia artificial?'
|
|
160
|
+
|
|
161
|
+
for (let i = 0; i < Math.min(3, models.length); i++) {
|
|
162
|
+
const r = await ai.ask({
|
|
163
|
+
model: models[i].id,
|
|
164
|
+
question: pregunta,
|
|
165
|
+
maxCompletionTokens: 200
|
|
166
|
+
})
|
|
167
|
+
console.log(`🌿 ${models[i].id} →`, r.text)
|
|
168
|
+
}
|
|
169
|
+
```
|
|
170
|
+
|
|
171
|
+
### Integración rápida en un bot (handler WhatsApp)
|
|
172
|
+
```js
|
|
173
|
+
import AIModelsFree from 'aimodelsfree'
|
|
174
|
+
const ai = new AIModelsFree({ baseURL: 'https://mj.gpt7.icu' })
|
|
175
|
+
|
|
176
|
+
let handler = async (m, { conn, text, usedPrefix, command }) => {
|
|
177
|
+
const chatId = m?.chat || m?.key?.remoteJid
|
|
178
|
+
if (!chatId) return
|
|
179
|
+
|
|
180
|
+
if (command === 'aimodels') {
|
|
181
|
+
await conn.sendMessage(chatId, { react: { text: '🕒', key: m.key } })
|
|
182
|
+
|
|
183
|
+
try {
|
|
184
|
+
const models = await ai.listModels()
|
|
185
|
+
const top = models.slice(0, 30)
|
|
186
|
+
|
|
187
|
+
const msg = [
|
|
188
|
+
'「✦」Modelos disponibles (top 30):',
|
|
189
|
+
...top.map((x, i) => `> ${i + 1}. *${x.id}*`),
|
|
190
|
+
'',
|
|
191
|
+
`> ✐ Uso » *${usedPrefix}ai <modelo>|<pregunta>*`,
|
|
192
|
+
`> ✐ Ejemplo » *${usedPrefix}ai gpt-4o-mini|Hola*`
|
|
193
|
+
].join('\n')
|
|
194
|
+
|
|
195
|
+
await conn.sendMessage(chatId, { text: msg }, { quoted: m })
|
|
196
|
+
await conn.sendMessage(chatId, { react: { text: '✔️', key: m.key } })
|
|
197
|
+
} catch (e) {
|
|
198
|
+
await conn.sendMessage(chatId, { text: `「✦」Error listando modelos.\n> ${String(e?.message || e)}` }, { quoted: m })
|
|
199
|
+
}
|
|
200
|
+
return
|
|
201
|
+
}
|
|
202
|
+
|
|
203
|
+
if (!text || !text.includes('|')) {
|
|
204
|
+
return conn.sendMessage(
|
|
205
|
+
chatId,
|
|
206
|
+
{ text: `「✦」Formato: *${usedPrefix + command} <modelo>|<pregunta>*\n> ✐ Ejemplo » *${usedPrefix + command} gpt-4o-mini|Hola*` },
|
|
207
|
+
{ quoted: m }
|
|
208
|
+
)
|
|
209
|
+
}
|
|
210
|
+
|
|
211
|
+
const [model, ...rest] = text.split('|')
|
|
212
|
+
const question = rest.join('|').trim()
|
|
213
|
+
|
|
214
|
+
await conn.sendMessage(chatId, { react: { text: '🕒', key: m.key } })
|
|
215
|
+
|
|
216
|
+
try {
|
|
217
|
+
const out = await ai.ask({
|
|
218
|
+
model: model.trim(),
|
|
219
|
+
question,
|
|
220
|
+
maxCompletionTokens: 1024
|
|
221
|
+
})
|
|
222
|
+
|
|
223
|
+
await conn.sendMessage(
|
|
224
|
+
chatId,
|
|
225
|
+
{ text: `「✦」*Modelo:* ${model.trim()}\n\n${out.text || 'Sin respuesta.'}` },
|
|
226
|
+
{ quoted: m }
|
|
227
|
+
)
|
|
228
|
+
await conn.sendMessage(chatId, { react: { text: '✔️', key: m.key } })
|
|
229
|
+
} catch (e) {
|
|
230
|
+
await conn.sendMessage(chatId, { text: `「✦」Error consultando IA.\n> ${String(e?.message || e)}` }, { quoted: m })
|
|
231
|
+
}
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
handler.help = ['aimodels', 'ai <modelo>|<pregunta>']
|
|
235
|
+
handler.tags = ['ai']
|
|
236
|
+
handler.command = ['aimodels', 'ai']
|
|
237
|
+
|
|
238
|
+
export default handler
|
|
239
|
+
```
|
|
240
|
+
|
|
241
|
+
---
|
|
242
|
+
|
|
243
|
+
## 🛠️ Troubleshooting — Soluciones rápidas
|
|
244
|
+
|
|
245
|
+
- **ETIMEDOUT** — aumenta `timeoutMs`:
|
|
246
|
+
```js
|
|
247
|
+
new AIModelsFree({ baseURL: 'https://mj.gpt7.icu', timeoutMs: 180000 })
|
|
248
|
+
```
|
|
249
|
+
|
|
250
|
+
- **401 Unauthorized** — revisa `apiKey`.
|
|
251
|
+
|
|
252
|
+
- **No models available** — revisa `baseURL` y prueba refrescar:
|
|
253
|
+
```js
|
|
254
|
+
await ai.listModels({ refresh: true })
|
|
255
|
+
```
|
|
256
|
+
|
|
257
|
+
---
|
|
258
|
+
|
|
259
|
+
## ⚖️ Disclaimer
|
|
260
|
+
> Este proyecto no está afiliado ni es oficial de ningún proveedor de IA. Usa los endpoints respetando los términos del servicio del proveedor. El autor no se hace responsable por el uso indebido.
|
|
261
|
+
|
|
262
|
+
---
|
|
263
|
+
|
|
264
|
+
## 🧾 Licencia
|
|
265
|
+
MIT © 2025 Ado — consulta el archivo [LICENSE](LICENSE).
|
|
266
|
+
|
|
267
|
+
---
|
|
268
|
+
|
|
269
|
+
## 👨‍💻 Autor
|
|
270
|
+
|
|
271
|
+
<p>
|
|
272
|
+
<img src="https://github.com/Ado21.png" alt="Ado21" width="80" height="80" style="border-radius:50%;vertical-align:middle;margin-right:10px" />
|
|
273
|
+
<strong></strong><br/>
|
|
274
|
+
🐣 <a href="https://github.com/Ado21">Ado21</a> · 📦 <a href="https://www.npmjs.com/package/aimodelsfree">NPM</a>
|
|
275
|
+
</p>
|
|
276
|
+
|
|
277
|
+
---
|
|
278
|
+
|
|
279
|
+
## 🧾 Changelog (rápido)
|
|
280
|
+
- **1.0.0** — Versión inicial (2025-12-30)
|
package/dist/index.cjs
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
const axios = require('axios')
|
|
2
|
+
|
|
3
|
+
// Build the header map attached to every outgoing request.
// Optional fields (User-Agent, Referer, Authorization) are added only when
// supplied; entries from extraHeaders are layered on top of the built-ins,
// skipping null/undefined values, so callers can override anything.
function defaultHeaders(opts) {
  const { userAgent, referer, apiKey, extraHeaders } = opts || {}

  const headers = { 'Content-Type': 'application/json' }
  if (userAgent) headers['User-Agent'] = userAgent
  if (referer) headers['Referer'] = referer

  if (apiKey) {
    const key = String(apiKey)
    // Accept either a bare token or a value already prefixed with "Bearer ".
    headers['Authorization'] = key.startsWith('Bearer ') ? key : 'Bearer ' + key
  }

  if (extraHeaders && typeof extraHeaders === 'object') {
    for (const [name, value] of Object.entries(extraHeaders)) {
      if (value != null) headers[name] = String(value)
    }
  }

  return headers
}
|
|
19
|
+
|
|
20
|
+
// Coerce the shapes different providers return into a plain array of models.
// Recognized forms, in priority order: OpenAI-style { data: [...] }, a bare
// array, and { models: [...] }. Anything else yields an empty array.
function normalizeModelsPayload(payload) {
  if (payload && Array.isArray(payload.data)) return payload.data
  if (Array.isArray(payload)) return payload
  return payload && Array.isArray(payload.models) ? payload.models : []
}
|
|
26
|
+
|
|
27
|
+
// Pull the assistant's reply out of an OpenAI-style completion response.
// Prefers choices[0].message.content; falls back to choices[0].text (used by
// some proxies); returns '' when neither field is a string.
function extractChatText(data) {
  const first = data && data.choices && data.choices[0]
  if (first) {
    const content = first.message && first.message.content
    if (typeof content === 'string') return content
    if (typeof first.text === 'string') return first.text
  }
  return ''
}
|
|
34
|
+
|
|
35
|
+
// JSON-encode v for inclusion in error messages; falls back to String(v)
// when JSON.stringify throws (circular structures, BigInt, ...).
function safeJson(v) {
  try {
    return JSON.stringify(v)
  } catch (err) {
    return String(v)
  }
}
|
|
38
|
+
|
|
39
|
+
/**
 * Minimal client for an OpenAI-compatible provider exposing
 * GET /openapi/v1/models and POST /openapi/v1/chat/completions.
 * CommonJS build; mirrors dist/index.js.
 */
class AIModelsFree {
  /**
   * @param {object} [opts]
   * @param {string} [opts.baseURL='https://mj.gpt7.icu'] - provider root URL, WITHOUT the /openapi/v1 suffix
   * @param {string} [opts.apiKey] - sent as "Authorization: Bearer <apiKey>" when provided
   * @param {number} [opts.timeoutMs=60000] - axios request timeout in milliseconds
   * @param {string} [opts.userAgent] - User-Agent header value
   * @param {string} [opts.referer] - optional Referer header value
   * @param {object} [opts.headers] - extra headers merged over the built-in ones
   */
  constructor(opts) {
    opts = opts || {}
    // Strip trailing slashes so the relative paths used below never produce "//".
    const baseURL = (opts.baseURL || 'https://mj.gpt7.icu').toString().replace(/\/+$/, '')
    this.baseURL = baseURL
    this.apiKey = opts.apiKey
    // `== null` rather than || so an explicit 0 is honored.
    this.timeoutMs = opts.timeoutMs == null ? 60000 : opts.timeoutMs
    this.userAgent = opts.userAgent || 'aimodelsfree/1.0 (+https://www.npmjs.com/package/aimodelsfree)'
    this.referer = opts.referer
    this.headers = opts.headers

    // Statuses below 500 resolve normally; the explicit `status >= 400`
    // checks in listModels()/ask() turn 4xx responses into thrown Errors.
    this.http = axios.create({
      baseURL: this.baseURL,
      timeout: this.timeoutMs,
      validateStatus: (s) => s >= 200 && s < 500
    })

    // In-memory model-list cache plus the timestamp gating its TTL.
    this._modelsCache = null
    this._modelsCacheAt = 0
  }

  /**
   * Fetch the provider's model list (GET /openapi/v1/models).
   * Results are cached in memory; pass { refresh: true } to bypass the cache
   * or cacheTtlMs to change the 5-minute default TTL.
   * @param {{refresh?: boolean, cacheTtlMs?: number}} [opts]
   * @returns {Promise<Array<object>>} normalized model objects
   * @throws {Error} when the endpoint answers with HTTP status >= 400
   */
  async listModels(opts) {
    opts = opts || {}
    const refresh = !!opts.refresh
    const cacheTtlMs = opts.cacheTtlMs == null ? (5 * 60 * 1000) : opts.cacheTtlMs
    const now = Date.now()

    if (!refresh && this._modelsCache && (now - this._modelsCacheAt) < cacheTtlMs) {
      return this._modelsCache
    }

    const res = await this.http.get('/openapi/v1/models', {
      headers: defaultHeaders({
        userAgent: this.userAgent,
        referer: this.referer,
        apiKey: this.apiKey,
        extraHeaders: this.headers
      })
    })

    if (res.status >= 400) {
      throw new Error('aimodelsfree: /models HTTP ' + res.status + ': ' + safeJson(res.data))
    }

    // Note: an empty list is cached too; use { refresh: true } to retry.
    const models = normalizeModelsPayload(res.data)
    this._modelsCache = models
    this._modelsCacheAt = now
    return models
  }

  /**
   * Send a single-question chat completion (POST /openapi/v1/chat/completions).
   * @param {object} params
   * @param {string} params.model - required model id
   * @param {string} params.question - required user prompt
   * @param {string} [params.system] - optional system message prepended to the conversation
   * @param {number} [params.maxCompletionTokens=3072] - sent as max_completion_tokens
   * @param {number} [params.temperature] - only included in the payload when it is a number
   * @param {number} [params.topP=1] - sent as top_p
   * @param {number} [params.presencePenalty=0] - sent as presence_penalty
   * @param {number} [params.frequencyPenalty=0] - sent as frequency_penalty
   * @param {boolean} [params.stream=false] - forwarded to the endpoint; this client still parses one final response
   * @returns {Promise<{text: string, raw: object}>} extracted reply text plus the raw response body
   * @throws {Error} when model/question are missing or the endpoint answers HTTP >= 400
   */
  async ask(params) {
    params = params || {}
    const model = params.model
    const question = params.question

    if (!model) throw new Error('aimodelsfree: falta params.model')
    if (!question) throw new Error('aimodelsfree: falta params.question')

    // Conversation is always [system?] + single user turn.
    const messages = []
    if (params.system) messages.push({ role: 'system', content: String(params.system) })
    messages.push({ role: 'user', content: String(question) })

    const payload = {
      model: String(model),
      messages,
      max_completion_tokens: params.maxCompletionTokens == null ? 3072 : params.maxCompletionTokens,
      top_p: params.topP == null ? 1 : params.topP,
      presence_penalty: params.presencePenalty == null ? 0 : params.presencePenalty,
      frequency_penalty: params.frequencyPenalty == null ? 0 : params.frequencyPenalty,
      stream: !!params.stream
    }

    // temperature is omitted entirely unless it is a number, letting the
    // provider apply its own default.
    if (typeof params.temperature === 'number') payload.temperature = params.temperature

    const res = await this.http.post('/openapi/v1/chat/completions', payload, {
      headers: defaultHeaders({
        userAgent: this.userAgent,
        referer: this.referer,
        apiKey: this.apiKey,
        extraHeaders: this.headers
      })
    })

    if (res.status >= 400) {
      throw new Error('aimodelsfree: /chat/completions HTTP ' + res.status + ': ' + safeJson(res.data))
    }

    const text = extractChatText(res.data)
    return { text, raw: res.data }
  }
}
|
|
130
|
+
|
|
131
|
+
module.exports = AIModelsFree
|
|
132
|
+
module.exports.AIModelsFree = AIModelsFree
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,150 @@
|
|
|
1
|
+
import axios from 'axios'
|
|
2
|
+
|
|
3
|
+
/**
 * Assemble the header map used for every request.
 * Optional fields are added only when supplied; extraHeaders entries are
 * layered over the built-ins (null/undefined values skipped), so callers
 * may override anything, including Content-Type.
 */
function defaultHeaders({ userAgent, referer, apiKey, extraHeaders } = {}) {
  const headers = { 'Content-Type': 'application/json' }

  if (userAgent) headers['User-Agent'] = userAgent
  if (referer) headers['Referer'] = referer
  if (apiKey) {
    // Accept a bare token or a value already carrying the "Bearer " prefix.
    headers['Authorization'] = apiKey.startsWith('Bearer ') ? apiKey : `Bearer ${apiKey}`
  }

  if (extraHeaders && typeof extraHeaders === 'object') {
    for (const name of Object.keys(extraHeaders)) {
      const value = extraHeaders[name]
      if (value != null) headers[name] = String(value)
    }
  }

  return headers
}
|
|
17
|
+
|
|
18
|
+
/**
 * Coerce the shapes different providers return into a plain array of models.
 * Recognized forms, in priority order: OpenAI-style { data: [...] }, a bare
 * array, and { models: [...] }. Anything else yields [].
 */
function normalizeModelsPayload(payload) {
  if (Array.isArray(payload?.data)) return payload.data
  if (Array.isArray(payload)) return payload
  if (Array.isArray(payload?.models)) return payload.models
  return []
}
|
|
25
|
+
|
|
26
|
+
/**
 * Pull the assistant's reply out of an OpenAI-style completion response.
 * Prefers choices[0].message.content; falls back to choices[0].text (used
 * by some proxies); returns '' when neither field is a string.
 */
function extractChatText(data) {
  const first = data?.choices?.[0]

  if (typeof first?.message?.content === 'string') return first.message.content
  if (typeof first?.text === 'string') return first.text

  return ''
}
|
|
35
|
+
|
|
36
|
+
/**
 * Minimal client for an OpenAI-compatible provider exposing
 * GET /openapi/v1/models and POST /openapi/v1/chat/completions.
 * ESM build; mirrors dist/index.cjs.
 */
export class AIModelsFree {

  /**
   * @param {object} [opts]
   * @param {string} [opts.baseURL='https://mj.gpt7.icu'] - provider root URL, WITHOUT the /openapi/v1 suffix
   * @param {string} [opts.apiKey] - sent as "Authorization: Bearer <apiKey>" when provided
   * @param {number} [opts.timeoutMs=60000] - axios request timeout in milliseconds
   * @param {string} [opts.userAgent] - User-Agent header value
   * @param {string} [opts.referer] - optional Referer header value
   * @param {object} [opts.headers] - extra headers merged over the built-in ones
   */
  constructor(opts = {}) {
    const {
      baseURL = 'https://mj.gpt7.icu',
      apiKey,
      timeoutMs = 60_000,
      userAgent = 'aimodelsfree/1.0 (+https://www.npmjs.com/package/aimodelsfree)',
      referer,
      headers
    } = opts

    // Strip trailing slashes so the relative paths used below never produce "//".
    this.baseURL = String(baseURL || '').replace(/\/+$/, '')
    this.apiKey = apiKey
    this.timeoutMs = timeoutMs
    this.userAgent = userAgent
    this.referer = referer
    this.headers = headers

    // Statuses below 500 resolve normally; the explicit `status >= 400`
    // checks in listModels()/ask() turn 4xx responses into thrown Errors.
    this.http = axios.create({
      baseURL: this.baseURL,
      timeout: this.timeoutMs,
      validateStatus: (s) => s >= 200 && s < 500
    })

    // In-memory model-list cache plus the timestamp gating its TTL.
    this._modelsCache = null
    this._modelsCacheAt = 0
  }

  /**
   * Fetch the provider's model list (GET /openapi/v1/models).
   * Results are cached in memory; pass { refresh: true } to bypass the cache
   * or cacheTtlMs to change the 5-minute default TTL.
   * @param {{refresh?: boolean, cacheTtlMs?: number}} [opts]
   * @returns {Promise<Array<object>>} normalized model objects
   * @throws {Error} when the endpoint answers with HTTP status >= 400
   */
  async listModels(opts = {}) {
    const { refresh = false, cacheTtlMs = 5 * 60 * 1000 } = opts
    const now = Date.now()

    if (!refresh && this._modelsCache && now - this._modelsCacheAt < cacheTtlMs) {
      return this._modelsCache
    }

    const res = await this.http.get('/openapi/v1/models', {
      headers: defaultHeaders({
        userAgent: this.userAgent,
        referer: this.referer,
        apiKey: this.apiKey,
        extraHeaders: this.headers
      })
    })

    if (res.status >= 400) {
      throw new Error(`aimodelsfree: /models HTTP ${res.status}: ${safeJson(res.data)}`)
    }

    // Note: an empty list is cached too; use { refresh: true } to retry.
    const models = normalizeModelsPayload(res.data)
    this._modelsCache = models
    this._modelsCacheAt = now
    return models
  }

  /**
   * Send a single-question chat completion (POST /openapi/v1/chat/completions).
   * @param {object} params
   * @param {string} params.model - required model id
   * @param {string} params.question - required user prompt
   * @param {string} [params.system] - optional system message prepended to the conversation
   * @param {number} [params.maxCompletionTokens=3072] - sent as max_completion_tokens
   * @param {number} [params.temperature] - only included in the payload when it is a number
   * @param {number} [params.topP=1] - sent as top_p
   * @param {number} [params.presencePenalty=0] - sent as presence_penalty
   * @param {number} [params.frequencyPenalty=0] - sent as frequency_penalty
   * @param {boolean} [params.stream=false] - forwarded to the endpoint; this client still parses one final response
   * @returns {Promise<{text: string, raw: object}>} extracted reply text plus the raw response body
   * @throws {Error} when model/question are missing or the endpoint answers HTTP >= 400
   */
  async ask(params = {}) {
    const {
      model,
      question,
      system,
      maxCompletionTokens = 3072,
      temperature,
      topP = 1,
      presencePenalty = 0,
      frequencyPenalty = 0,
      stream = false
    } = params

    if (!model) throw new Error('aimodelsfree: falta params.model')
    if (!question) throw new Error('aimodelsfree: falta params.question')

    // Conversation is always [system?] + single user turn.
    const messages = []
    if (system) messages.push({ role: 'system', content: String(system) })
    messages.push({ role: 'user', content: String(question) })

    const payload = {
      model: String(model),
      messages,
      max_completion_tokens: maxCompletionTokens,
      top_p: topP,
      presence_penalty: presencePenalty,
      frequency_penalty: frequencyPenalty,
      stream
    }

    // temperature is omitted entirely unless it is a number, letting the
    // provider apply its own default.
    if (typeof temperature === 'number') payload.temperature = temperature

    const res = await this.http.post('/openapi/v1/chat/completions', payload, {
      headers: defaultHeaders({
        userAgent: this.userAgent,
        referer: this.referer,
        apiKey: this.apiKey,
        extraHeaders: this.headers
      })
    })

    if (res.status >= 400) {
      throw new Error(`aimodelsfree: /chat/completions HTTP ${res.status}: ${safeJson(res.data)}`)
    }

    const text = extractChatText(res.data)
    return { text, raw: res.data }
  }
}
|
|
141
|
+
|
|
142
|
+
export default AIModelsFree
|
|
143
|
+
|
|
144
|
+
/**
 * Serialize v as JSON for error messages, falling back to String(v) when
 * JSON.stringify throws (circular references, BigInt, ...).
 * Kept as a function declaration so the hoisted name is visible to the
 * class methods defined above it.
 */
function safeJson(v) {
  let encoded
  try {
    encoded = JSON.stringify(v)
  } catch {
    encoded = String(v)
  }
  return encoded
}
|
package/package.json
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "aimodelsfree",
|
|
3
|
+
"version": "1.1.0",
|
|
4
|
+
"description": "Cliente ligero para listar modelos y chatear usando un endpoint OpenAI-compatible (/openapi/v1).",
|
|
5
|
+
"keywords": ["ai", "models", "openaifree", "chat", "client"],
|
|
6
|
+
"license": "MIT",
|
|
7
|
+
"author": "Ado",
|
|
8
|
+
"type": "module",
|
|
9
|
+
"main": "./dist/index.cjs",
|
|
10
|
+
"module": "./dist/index.js",
|
|
11
|
+
"exports": {
|
|
12
|
+
".": {
|
|
13
|
+
"import": "./dist/index.js",
|
|
14
|
+
"require": "./dist/index.cjs"
|
|
15
|
+
},
|
|
16
|
+
"./package.json": "./package.json"
|
|
17
|
+
},
|
|
18
|
+
"files": ["dist", "README.md", "LICENSE"],
|
|
19
|
+
"engines": {
|
|
20
|
+
"node": ">=18"
|
|
21
|
+
},
|
|
22
|
+
"scripts": {
|
|
23
|
+
"test": "node ./examples/esm-example.js"
|
|
24
|
+
},
|
|
25
|
+
"dependencies": {
|
|
26
|
+
"axios": "^1.6.8"
|
|
27
|
+
}
|
|
28
|
+
}
|