openai-cache 1.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +112 -0
- package/dist/openai_cache.d.ts +54 -0
- package/dist/openai_cache.js +157 -0
- package/package.json +70 -0
package/LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Jerome Etienne

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
package/README.md
ADDED
@@ -0,0 +1,112 @@
# Cache OpenAI

A simple caching layer for the [OpenAI API](https://www.npmjs.com/package/openai), designed to reduce redundant API calls and save time and costs. It works by intercepting API requests and storing their responses in a cache. When the same request is made again, the cached response is returned instead of making a new API call.

It is based on the [cacheable](https://cacheable.org/docs/) library, which provides a simple interface for caching data with support for various storage backends (in-memory, Redis, SQLite, etc.). This allows you to integrate caching into your OpenAI API usage without having to manage the caching logic yourself.

You can use any Keyv storage backend (Redis, filesystem, etc.) to store the cached responses.
See the [Keyv documentation](https://keyv.org/docs/) for more details on the available storage options and how to set them up.
In the example below, we use a SQLite database to persist the cache.
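
Under the hood, the bundled fetch wrapper derives a cache key from the request (see `dist/openai_cache.js` below): a SHA-256 hash over the method, URL, and serialized body. A minimal sketch of that derivation:

```ts
import crypto from "node:crypto";

// Conceptual sketch of the key derivation used by the caching fetch wrapper.
function cacheKeyFor(method: string, url: string, body: string): string {
  return crypto
    .createHash("sha256")
    .update(`${method.toUpperCase()}:${url}:${body}`)
    .digest("hex");
}
```
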
# Installation

```bash
npm install @jeromeetienne/openai-cache
```

If you want to use the SQLite storage backend, you also need to install the `@keyv/sqlite` package:

```bash
npm install @keyv/sqlite
```

## Usage

```ts
import OpenAI from "openai";
import OpenAICache from "@jeromeetienne/openai-cache";
import KeyvSqlite from "@keyv/sqlite";
import { Cacheable } from "cacheable";

// init a cacheable instance
// - here it is backed by a SQLite database, but you can use any Keyv storage backend (Redis, filesystem, etc.)
const sqlitePath = `sqlite://${__dirname}/.openai_cache.sqlite`;
const sqliteCache = new Cacheable({ secondary: new KeyvSqlite(sqlitePath) });

// init the OpenAICache with the cacheable instance
const openaiCache = new OpenAICache(sqliteCache);

// init the OpenAI client with the cache's fetch function
const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  fetch: openaiCache.getFetchFn(),
});

// now use it normally - responses will be cached in the sqlite database
const response = await client.responses.create({
  model: "gpt-4.1-mini",
  input: "Say hello in one short sentence.",
});

console.log(response.output_text);
```
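
The cache is not tied to SQLite. A minimal sketch of the same setup backed by Redis instead, assuming the `@keyv/redis` package and a local Redis server (this variant is not part of the package's examples):

```ts
import OpenAI from "openai";
import OpenAICache from "@jeromeetienne/openai-cache";
import KeyvRedis from "@keyv/redis";
import { Cacheable } from "cacheable";

// back the cache with Redis instead of SQLite (hypothetical connection URL)
const redisCache = new Cacheable({ secondary: new KeyvRedis("redis://localhost:6379") });
const openaiCache = new OpenAICache(redisCache);

const client = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
  fetch: openaiCache.getFetchFn(),
});
```
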
## PRO/CON

- **PRO**: Reduces redundant API calls, saving time and costs.
- **CON**: Cached responses can become stale, so repeated calls may serve outdated data.
- **NOTE**: When `temperature === 0`, caching works optimally as responses are deterministic. However, with `temperature > 0`, caching may reduce variety across multiple calls, since identical prompts will return cached results instead of generating new, varied responses.
- **NOTE**: Only successful responses (`2xx`) are cached. Error responses (`4xx`/`5xx`) are returned normally but are not persisted.

## Possible improvements

- don't cache when `temperature > 0` or `top_p < 1`: returning a cached response would freeze the randomness (see the sketch after this list)
- NOTE: expose this as a constructor option
- add a configurable cache policy for errors (for example, cache selected deterministic `4xx` while never caching `429`/`5xx`)
- errors from tool requests should not be cached
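
A minimal sketch of the first idea, wrapping the cache's fetch function so non-deterministic requests bypass the cache entirely (`makeSelectiveFetch` is a hypothetical helper, not part of the package):

```ts
import OpenAICache from "@jeromeetienne/openai-cache";

// Hypothetical wrapper: go straight to the network when the request is non-deterministic.
function makeSelectiveFetch(openaiCache: OpenAICache): typeof fetch {
  const cachingFetch = openaiCache.getFetchFn();
  return async (input, init) => {
    try {
      // OpenAI SDK requests carry a JSON string body
      const rawBody = init?.body;
      const body = typeof rawBody === "string" ? JSON.parse(rawBody) : undefined;
      const nonDeterministic =
        body !== undefined && ((body.temperature ?? 0) > 0 || (body.top_p ?? 1) < 1);
      if (nonDeterministic) return fetch(input, init); // skip the cache entirely
    } catch {
      // body was not JSON: fall through to the caching fetch
    }
    return cachingFetch(input, init);
  };
}
```
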
## Developer Notes

### Q. How to disable the cache?
A. Set the `OPENAI_CACHE` environment variable to `disabled`:

```bash
OPENAI_CACHE=disabled node your_app.js
```

It will still write to the cache but will ignore the cached responses and always call the OpenAI API. This is useful for testing or debugging, when you want to bypass the cache without changing your code.
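
Since the variable is checked on every request (see `dist/openai_cache.js` below), you can also flip it from code before making calls. A minimal sketch:

```ts
// bypass cache reads for all subsequent calls in this process
process.env.OPENAI_CACHE = "disabled";
```
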
### Q. How to know if a given call was a cache hit or miss?
A. You can enable the `markResponseEnabled` option when initializing the `OpenAICache`. When this option is enabled, the cache adds a custom property to the response object indicating whether it was a cache hit or miss.

```ts
const openaiCache = new OpenAICache(sqliteCache, {
  markResponseEnabled: true, // default is false
});

// later, when you make a call, you can check the custom property to see if it was a cache hit or miss
const response = await client.responses.create({
  model: "gpt-4.1-mini",
  input: "Say hello in one short sentence.",
});

if (response.X_FROM_OPENAI_CACHE) {
  console.log("Cache hit!");
} else {
  console.log("Cache miss!");
}
```
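
The marker's property name is also exposed as the static constant `OpenAICache.MarkResponseName` (see `dist/openai_cache.d.ts` below), so you can avoid hard-coding the string. A minimal sketch, reusing `response` from the example above (the cast is needed because the marker is not part of the OpenAI response type):

```ts
import OpenAICache from "@jeromeetienne/openai-cache";

const fromCache = (response as any)[OpenAICache.MarkResponseName] === true;
console.log(fromCache ? "Cache hit!" : "Cache miss!");
```
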
### Q. How to publish the package to npm?
A. Run:

```bash
npm run publish:all
```

(The `publish:all` script already builds and bumps the patch version, so there is no need to run `npm run version:patch` first.)

There was a lot of trouble with the 2FA system. Relevant documentation:
- https://docs.npmjs.com/requiring-2fa-for-package-publishing-and-settings-modification
- https://docs.npmjs.com/trusted-publishers
- https://docs.npmjs.com/creating-and-viewing-access-tokens
package/dist/openai_cache.d.ts
ADDED
@@ -0,0 +1,54 @@
import { Cacheable } from "cacheable";
type FetchFn = typeof globalThis.fetch;
type FetchInput = Parameters<FetchFn>[0];
type FetchInit = Parameters<FetchFn>[1];
type FetchResponse = Awaited<ReturnType<FetchFn>>;
/**
 * OpenAICache is a wrapper around the Fetch API that adds caching capabilities for OpenAI requests.
 * It uses a Cacheable instance to store and retrieve cached responses based on a hash of the request details.
 */
export default class OpenAICache {
    private readonly _cache;
    private readonly _markResponseEnabled;
    static readonly MarkResponseName = "X_FROM_OPENAI_CACHE";
    /**
     * Creates a new instance of OpenAICache.
     *
     * @param cache cacheable instance
     * @param options.markResponseEnabled whether to mark cached responses with an additional property in the JSON body (default: false).
     * This can be useful for downstream logic that needs to differentiate between live and cached responses, but it is optional
     * because it modifies the original response body: the marked response is { X_FROM_OPENAI_CACHE: true, ...originalResponseBody }.
     */
    constructor(cache?: Cacheable, { markResponseEnabled }?: {
        markResponseEnabled?: boolean;
    });
    /**
     * Cleans the OpenAI cache by deleting all cached values.
     */
    cleanCache(): Promise<void>;
    /**
     * Returns a fetch function that can be passed to the OpenAI client for caching support.
     *
     * ```js
     * const openai = new OpenAI({
     *   fetch: openaiCache.getFetchFn()
     * });
     * ```
     */
    getFetchFn(): (input: FetchInput, init?: FetchInit) => Promise<FetchResponse>;
    /**
     * This is the fetch() implementation that adds caching for OpenAI requests.
     *
     * @param input The resource that you wish to fetch.
     * @param init An options object containing any custom settings that you want to apply to the request.
     * @returns A Promise that resolves to the Response to that request.
     */
    private _fetch;
    /**
     * Remove transfer/content encodings that no longer apply once the body is materialized
     * and optionally set a correct content-length for the cached payload.
     */
    private static _normalizeHeaders;
    private static _serializeBodyForHash;
}
export {};
package/dist/openai_cache.js
ADDED
@@ -0,0 +1,157 @@
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
// node imports
const node_crypto_1 = __importDefault(require("node:crypto"));
const node_buffer_1 = require("node:buffer");
const cacheable_1 = require("cacheable");
///////////////////////////////////////////////////////////////////////////////
// OpenAICache
///////////////////////////////////////////////////////////////////////////////
/**
 * OpenAICache is a wrapper around the Fetch API that adds caching capabilities for OpenAI requests.
 * It uses a Cacheable instance to store and retrieve cached responses based on a hash of the request details.
 */
class OpenAICache {
    /**
     * Creates a new instance of OpenAICache.
     *
     * @param cache cacheable instance
     * @param options.markResponseEnabled whether to mark cached responses with an additional property in the JSON body (default: false).
     * This can be useful for downstream logic that needs to differentiate between live and cached responses, but it is optional
     * because it modifies the original response body: the marked response is { X_FROM_OPENAI_CACHE: true, ...originalResponseBody }.
     */
    constructor(cache, { markResponseEnabled = false } = {}) {
        this._cache = cache !== null && cache !== void 0 ? cache : new cacheable_1.Cacheable();
        this._markResponseEnabled = markResponseEnabled;
    }
    /**
     * Cleans the OpenAI cache by deleting all cached values.
     */
    async cleanCache() {
        await this._cache.clear();
    }
    /**
     * Returns a fetch function that can be passed to the OpenAI client for caching support.
     *
     * ```js
     * const openai = new OpenAI({
     *   fetch: openaiCache.getFetchFn()
     * });
     * ```
     */
    getFetchFn() {
        return this._fetch.bind(this);
    }
    /**
     * This is the fetch() implementation that adds caching for OpenAI requests.
     *
     * @param input The resource that you wish to fetch.
     * @param init An options object containing any custom settings that you want to apply to the request.
     * @returns A Promise that resolves to the Response to that request.
     */
    async _fetch(input, init) {
        var _a, _b;
        // Extract the URL from the input (string or Request)
        const url = typeof input === "string" ? input : input instanceof Request ? input.url : input.toString();
        // Normalize HTTP method
        const method = ((init === null || init === void 0 ? void 0 : init.method) || "GET").toUpperCase();
        // Generate body hash payload
        const bodyForHash = OpenAICache._serializeBodyForHash(init === null || init === void 0 ? void 0 : init.body);
        // If body type unsupported, skip caching
        if (bodyForHash === null)
            return fetch(input, init);
        // Build the cache key
        const cacheKey = node_crypto_1.default.createHash("sha256")
            .update(`${method}:${url}:${bodyForHash}`)
            .digest("hex");
        const cached = (await this._cache.get(cacheKey));
        if (cached !== undefined && process.env.OPENAI_CACHE !== "disabled") {
            const bodyEncoding = (_a = cached.bodyEncoding) !== null && _a !== void 0 ? _a : "utf8";
            const cachedBodyBuffer = node_buffer_1.Buffer.from(cached.body, bodyEncoding);
            // Return cached response
            let newResponse = new Response(cachedBodyBuffer, {
                status: cached.status,
                headers: cached.headers,
            });
            // honor this._markResponseEnabled option to indicate cache hit
            const contentTypeIsJson = ((_b = newResponse.headers.get("content-type")) === null || _b === void 0 ? void 0 : _b.includes("application/json")) ? true : false;
            if (this._markResponseEnabled && contentTypeIsJson) {
                try {
                    // decode JSON from cachedBodyBuffer
                    const bodyJson = JSON.parse(cachedBodyBuffer.toString());
                    // Set the magic property to indicate this response is from cache
                    bodyJson.X_FROM_OPENAI_CACHE = true;
                    // Rebuild response with modified body
                    const modifiedBodyBuffer = node_buffer_1.Buffer.from(JSON.stringify(bodyJson));
                    newResponse = new Response(modifiedBodyBuffer, { status: cached.status, headers: cached.headers });
                }
                catch (error) {
                    // If parsing fails, return the original cached response without modification
                    console.warn("Failed to parse cached response body as JSON for cache-hit marking:", error);
                }
            }
            // Return cached response (body already buffered)
            return newResponse;
        }
        // Perform network fetch
        const response = await fetch(input, init);
        const clonedResponse = response.clone();
        // Materialize response body for caching
        const responseBuffer = node_buffer_1.Buffer.from(await clonedResponse.arrayBuffer());
        // Collect headers and normalize them
        const headers = Array.from(clonedResponse.headers.entries());
        const normalizedHeaders = OpenAICache._normalizeHeaders(headers, responseBuffer.length);
        if (response.ok) {
            await this._cache.set(cacheKey, {
                status: clonedResponse.status,
                headers: normalizedHeaders,
                body: responseBuffer.toString("base64"),
                bodyEncoding: "base64",
            });
        }
        // Return live response (body already buffered)
        return new Response(responseBuffer, { status: response.status, headers: normalizedHeaders });
    }
    ///////////////////////////////////////////////////////////////////////////////
    // Private functions
    ///////////////////////////////////////////////////////////////////////////////
    /**
     * Remove transfer/content encodings that no longer apply once the body is materialized
     * and optionally set a correct content-length for the cached payload.
     */
    static _normalizeHeaders(headers, bodyLength) {
        const drop = new Set([
            "content-encoding", // body is already decoded by fetch()
            "transfer-encoding",
            "content-length", // will be recalculated
        ]);
        const filtered = headers.filter(([name]) => drop.has(name.toLowerCase()) === false);
        if (bodyLength !== undefined) {
            filtered.push(["content-length", String(bodyLength)]);
        }
        return filtered;
    }
    // Serialize body into a deterministic string for hashing
    static _serializeBodyForHash(body) {
        if (body === undefined || body === null)
            return "";
        if (typeof body === "string")
            return body;
        if (node_buffer_1.Buffer.isBuffer(body))
            return body.toString("base64");
        if (body instanceof ArrayBuffer)
            return node_buffer_1.Buffer.from(body).toString("base64");
        if (ArrayBuffer.isView(body))
            return node_buffer_1.Buffer.from(body.buffer, body.byteOffset, body.byteLength).toString("base64");
        return null; // unsupported body type
    }
}
OpenAICache.MarkResponseName = "X_FROM_OPENAI_CACHE";
exports.default = OpenAICache;
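
The compiled module above also exposes `cleanCache()`, which clears every cached entry via the underlying `Cacheable`. A minimal sketch of using it:

```ts
import OpenAICache from "@jeromeetienne/openai-cache";

// with no Cacheable argument, the constructor falls back to an in-memory cache
const openaiCache = new OpenAICache();
await openaiCache.cleanCache(); // deletes all cached values
```
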
package/package.json
ADDED
@@ -0,0 +1,70 @@
{
  "name": "openai-cache",
  "version": "1.0.14",
  "main": "dist/openai_cache.js",
  "types": "dist/openai_cache.d.ts",
  "exports": {
    ".": {
      "types": "./dist/openai_cache.d.ts",
      "default": "./dist/openai_cache.js"
    }
  },
  "files": [
    "dist",
    "README.md",
    "LICENSE"
  ],
  "scripts": {
    "example:openai_cache": "tsx examples/openai_cache_example.ts",
    "example:tts": "tsx examples/tts_example.ts",
    "example:image_generation": "tsx examples/image_generation_example.ts",
    "example:image_understanding": "tsx examples/image_understanding_example.ts",
    "clean_cache": "rm -f ./examples/.openai_cache.sqlite",
    "build": "tsc -p tsconfig.json",
    "clean": "rm -rf dist",
    "prepublishOnly": "npm run test && npm run build",
    "test": "tsx --test ./test/*_test.ts",
    "test:watch": "tsx --test --watch ./test/*_test.ts",
    "publish:all": "npm run build && npm run version:patch && npm publish --access public",
    "version:patch": "npm version patch",
    "version:minor": "npm version minor",
    "version:major": "npm version major",
    "version:prerelease": "npm version prerelease --preid=rc"
  },
  "keywords": [
    "openai",
    "cache",
    "caching",
    "keyv",
    "sqlite",
    "typescript"
  ],
  "author": "jerome etienne <jerome.etienne@example.com>",
  "license": "MIT",
  "description": "A caching layer for OpenAI API calls, designed to improve performance and reduce costs.",
  "publishConfig": {
    "access": "public"
  },
  "repository": {
    "type": "git",
    "url": "git+https://github.com/jeromeetienne/openai-cache.git",
    "directory": "packages/openai_cache"
  },
  "homepage": "https://github.com/jeromeetienne/openai-cache/tree/main/#cache-openai",
  "bugs": {
    "url": "https://github.com/jeromeetienne/openai-cache/issues"
  },
  "devDependencies": {
    "@keyv/sqlite": "^4.0.8",
    "@types/node": "^25.1.0",
    "keyv": "^5.6.0",
    "openai": "^6.17.0",
    "ts-node": "^10.9.2",
    "tsx": "^4.21.0",
    "typescript": "^5.9.3",
    "zod": "^4.3.6"
  },
  "dependencies": {
    "cacheable": "^2.3.3"
  }
}