@jeromeetienne/openai-cache 1.0.0 → 1.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +32 -12
- package/package.json +6 -5
package/README.md
CHANGED
|
@@ -1,24 +1,35 @@
|
|
|
1
1
|
# Cache OpenAI
|
|
2
|
-
|
|
3
|
-
|
|
2
|
+
A simple caching layer for [OpenAI API](https://www.npmjs.com/package/openai), designed to reduce redundant API calls and save time and costs. It works by intercepting API requests and storing their responses in a cache. When the same request is made again, the cached response is returned instead of making a new API call.
|
|
3
|
+
|
|
4
|
+
It is based on the [cacheable](https://cacheable.org/docs/) library, which provides a simple interface for caching data with support for various storage backends (like in-memory, Redis, SQLite, etc). This allows you to easily integrate caching into your OpenAI API usage without having to manage the caching logic yourself.
|
|
5
|
+
|
|
6
|
+
You can use any Keyv storage backend (like Redis, filesystem, etc) to store the cached responses.
|
|
7
|
+
See the [Keyv documentation](https://keyv.org/docs/) for more details on available storage options and how to set them up.
|
|
8
|
+
In the example below, we use a SQLite database to persist the cache.
|
|
4
9
|
|
|
5
10
|
## Usage
|
|
6
11
|
|
|
7
12
|
```ts
|
|
8
13
|
import OpenAI from "openai";
|
|
9
|
-
import { OpenAICache } from "
|
|
10
|
-
|
|
14
|
+
import { OpenAICache } from "@jeromeetienne/openai-cache";
|
|
15
|
+
import KeyvSqlite from '@keyv/sqlite';
|
|
16
|
+
import { Cacheable } from "cacheable";
|
|
11
17
|
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
});
|
|
18
|
+
// init a cacheable instance
|
|
19
|
+
// - here it is backed by a sqlite database, but you can use any Keyv storage backend (redis, filesystem, etc)
|
|
20
|
+
const sqlitePath = `sqlite://${__dirname}/.openai_cache.sqlite`;
|
|
21
|
+
const sqliteCache = new Cacheable({ secondary: new KeyvSqlite(sqlitePath) });
|
|
16
22
|
|
|
23
|
+
// init the OpenAICache with the cacheable instance
|
|
24
|
+
const openaiCache = new OpenAICache(sqliteCache);
|
|
25
|
+
|
|
26
|
+
// init the OpenAI client with the cache's fetch function
|
|
17
27
|
const client = new OpenAI({
|
|
18
28
|
apiKey: process.env.OPENAI_API_KEY,
|
|
19
29
|
fetch: openaiCache.getFetchFn(),
|
|
20
30
|
});
|
|
21
31
|
|
|
32
|
+
// now use it normally - responses will be cached in the sqlite database
|
|
22
33
|
const response = await client.responses.create({
|
|
23
34
|
model: "gpt-4.1-mini",
|
|
24
35
|
input: "Say hello in one short sentence.",
|
|
@@ -35,9 +46,18 @@ data.
|
|
|
35
46
|
|
|
36
47
|
## Possible improvements
|
|
37
48
|
- don't cache if temperature > 0 or top_p < 1 — you'll freeze randomness if cached
|
|
38
|
-
-
|
|
39
|
-
- various storage backends (filesystem, redis, etc)
|
|
40
|
-
- built in TTL support
|
|
49
|
+
- NOTE: do that on options
|
|
41
50
|
- check errors, and don't cache on error
|
|
42
51
|
- 429/500 errors should not be cached, but other errors (like invalid request) could be cached to prevent repeated bad requests
|
|
43
|
-
- tool request errors should not be cached
|
|
52
|
+
- tool request errors should not be cached
|
|
53
|
+
|
|
54
|
+
## Developer Notes
|
|
55
|
+
|
|
56
|
+
### Q. How to publish the package to npm?
|
|
57
|
+
A. Do the following steps:
|
|
58
|
+
|
|
59
|
+
```bash
|
|
60
|
+
npm run version:patch && npm run publish:all
|
|
61
|
+
```
|
|
62
|
+
|
|
63
|
+
Note: the npm 2FA system can cause trouble during publishing.
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@jeromeetienne/openai-cache",
|
|
3
|
-
"version": "1.0.
|
|
3
|
+
"version": "1.0.2",
|
|
4
4
|
"main": "dist/openai_cache.js",
|
|
5
5
|
"types": "dist/openai_cache.d.ts",
|
|
6
6
|
"exports": {
|
|
@@ -25,6 +25,7 @@
|
|
|
25
25
|
"prepublishOnly": "npm run test && npm run build",
|
|
26
26
|
"test": "tsx --test ./test/*_test.ts",
|
|
27
27
|
"test:watch": "tsx --test --watch ./test/*_test.ts",
|
|
28
|
+
"publish:all": "npm run build && npm publish --access public",
|
|
28
29
|
"version:patch": "npm version patch",
|
|
29
30
|
"version:minor": "npm version minor",
|
|
30
31
|
"version:major": "npm version major",
|
|
@@ -46,12 +47,12 @@
|
|
|
46
47
|
},
|
|
47
48
|
"repository": {
|
|
48
49
|
"type": "git",
|
|
49
|
-
"url": "git+https://github.com/jeromeetienne/
|
|
50
|
+
"url": "git+https://github.com/jeromeetienne/openai-cache.git",
|
|
50
51
|
"directory": "packages/openai_cache"
|
|
51
52
|
},
|
|
52
|
-
"homepage": "https://github.com/jeromeetienne/
|
|
53
|
+
"homepage": "https://github.com/jeromeetienne/openai-cache/tree/main/#cache-openai",
|
|
53
54
|
"bugs": {
|
|
54
|
-
"url": "https://github.com/jeromeetienne/
|
|
55
|
+
"url": "https://github.com/jeromeetienne/openai-cache/issues"
|
|
55
56
|
},
|
|
56
57
|
"devDependencies": {
|
|
57
58
|
"@keyv/sqlite": "^4.0.8",
|
|
@@ -66,4 +67,4 @@
|
|
|
66
67
|
"dependencies": {
|
|
67
68
|
"cacheable": "^2.3.3"
|
|
68
69
|
}
|
|
69
|
-
}
|
|
70
|
+
}
|