@markwharton/api-core 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/cache.d.ts +43 -0
- package/dist/cache.js +78 -0
- package/dist/errors.d.ts +7 -0
- package/dist/errors.js +14 -0
- package/dist/index.d.ts +10 -0
- package/dist/index.js +13 -0
- package/dist/retry.d.ts +41 -0
- package/dist/retry.js +48 -0
- package/dist/utils.d.ts +16 -0
- package/dist/utils.js +27 -0
- package/package.json +38 -0
package/dist/cache.d.ts
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Simple in-memory TTL cache with request coalescing
|
|
3
|
+
*
|
|
4
|
+
* Provides per-instance memoization for API client responses.
|
|
5
|
+
* In serverless environments (Azure Functions, Static Web Apps),
|
|
6
|
+
* module-level state persists across warm invocations within the
|
|
7
|
+
* same instance — this cache leverages that behavior.
|
|
8
|
+
*
|
|
9
|
+
* Request coalescing: when multiple concurrent callers request the
|
|
10
|
+
* same expired key, only one factory call is made. All callers
|
|
11
|
+
* receive the same resolved value (or the same rejection).
|
|
12
|
+
*
|
|
13
|
+
* Not a distributed cache: each instance has its own cache.
|
|
14
|
+
* Cold starts and instance recycling naturally clear stale data.
|
|
15
|
+
*/
|
|
16
|
+
export declare class TTLCache {
|
|
17
|
+
private store;
|
|
18
|
+
private inflight;
|
|
19
|
+
/**
|
|
20
|
+
* Get a cached value, or call the factory to populate it.
|
|
21
|
+
*
|
|
22
|
+
* If a factory call is already in progress for this key,
|
|
23
|
+
* returns the existing promise instead of starting a duplicate.
|
|
24
|
+
*
|
|
25
|
+
* @param key - Cache key
|
|
26
|
+
* @param ttlMs - Time-to-live in milliseconds
|
|
27
|
+
* @param factory - Async function to produce the value on cache miss
|
|
28
|
+
*/
|
|
29
|
+
get<T>(key: string, ttlMs: number, factory: () => Promise<T>): Promise<T>;
|
|
30
|
+
/**
|
|
31
|
+
* Invalidate cache entries matching a key prefix.
|
|
32
|
+
*
|
|
33
|
+
* Also cancels any in-flight requests for matching keys,
|
|
34
|
+
* so subsequent calls will start fresh factory invocations.
|
|
35
|
+
*
|
|
36
|
+
* Example: invalidate('timesheet:') clears all timesheet entries.
|
|
37
|
+
*/
|
|
38
|
+
invalidate(prefix: string): void;
|
|
39
|
+
/**
|
|
40
|
+
* Clear all cached data and in-flight requests.
|
|
41
|
+
*/
|
|
42
|
+
clear(): void;
|
|
43
|
+
}
|
package/dist/cache.js
ADDED
|
@@ -0,0 +1,78 @@
|
|
|
1
|
+
/**
 * Simple in-memory TTL cache with request coalescing
 *
 * Provides per-instance memoization for API client responses.
 * In serverless environments (Azure Functions, Static Web Apps),
 * module-level state persists across warm invocations within the
 * same instance — this cache leverages that behavior.
 *
 * Request coalescing: when multiple concurrent callers request the
 * same expired key, only one factory call is made. All callers
 * receive the same resolved value (or the same rejection).
 *
 * Not a distributed cache: each instance has its own cache.
 * Cold starts and instance recycling naturally clear stale data.
 */
export class TTLCache {
    // key -> { data, expiresAt } (expiresAt is an epoch-ms deadline)
    store = new Map();
    // key -> pending factory promise, used for request coalescing
    inflight = new Map();
    /**
     * Get a cached value, or call the factory to populate it.
     *
     * If a factory call is already in progress for this key,
     * returns the existing promise instead of starting a duplicate.
     *
     * @param key - Cache key
     * @param ttlMs - Time-to-live in milliseconds
     * @param factory - Async function to produce the value on cache miss
     */
    async get(key, ttlMs, factory) {
        const existing = this.store.get(key);
        if (existing && existing.expiresAt > Date.now()) {
            return existing.data;
        }
        const pending = this.inflight.get(key);
        if (pending) {
            return pending;
        }
        const promise = factory().then((data) => {
            // Only record the result if this promise is still the active
            // in-flight request for the key. invalidate()/clear() may have
            // cancelled it while the factory was running — caching the
            // result then would resurrect stale data the caller just
            // asked to be dropped.
            if (this.inflight.get(key) === promise) {
                this.store.set(key, { data, expiresAt: Date.now() + ttlMs });
                this.inflight.delete(key);
            }
            return data;
        }, (err) => {
            // Same guard on the failure path: after invalidation, a newer
            // in-flight promise may already occupy this key — deleting it
            // here would break coalescing for the replacement request.
            if (this.inflight.get(key) === promise) {
                this.inflight.delete(key);
            }
            throw err;
        });
        this.inflight.set(key, promise);
        return promise;
    }
    /**
     * Invalidate cache entries matching a key prefix.
     *
     * Also cancels any in-flight requests for matching keys,
     * so subsequent calls will start fresh factory invocations
     * (and the cancelled requests' results are discarded).
     *
     * Example: invalidate('timesheet:') clears all timesheet entries.
     */
    invalidate(prefix) {
        for (const key of this.store.keys()) {
            if (key.startsWith(prefix)) {
                this.store.delete(key);
            }
        }
        for (const key of this.inflight.keys()) {
            if (key.startsWith(prefix)) {
                this.inflight.delete(key);
            }
        }
    }
    /**
     * Clear all cached data and in-flight requests.
     */
    clear() {
        this.store.clear();
        this.inflight.clear();
    }
}
|
package/dist/errors.d.ts
ADDED
package/dist/errors.js
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
/**
 * Shared Error Utilities
 */
/** Default error message when none is available */
const DEFAULT_ERROR_MESSAGE = 'Unknown error';
/**
 * Get a safe error message from any error type.
 *
 * Handles Error instances and plain thrown strings; anything else
 * (null, numbers, arbitrary objects) falls back to the generic
 * default rather than losing information to '[object Object]'.
 *
 * @param error - Value caught in a catch block (unknown at runtime)
 * @returns The error's message, the thrown string itself, or the default
 */
export function getErrorMessage(error) {
    if (error instanceof Error) {
        return error.message;
    }
    // Thrown strings are common in third-party code; previously this
    // information was silently discarded.
    if (typeof error === 'string' && error.length > 0) {
        return error;
    }
    return DEFAULT_ERROR_MESSAGE;
}
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @markwharton/api-core
|
|
3
|
+
*
|
|
4
|
+
* Shared utilities for API client packages.
|
|
5
|
+
*/
|
|
6
|
+
export { TTLCache } from './cache.js';
|
|
7
|
+
export { batchMap } from './utils.js';
|
|
8
|
+
export { getErrorMessage } from './errors.js';
|
|
9
|
+
export { fetchWithRetry } from './retry.js';
|
|
10
|
+
export type { RetryConfig, FetchWithRetryOptions } from './retry.js';
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
/**
 * @markwharton/api-core
 *
 * Shared utilities for API client packages:
 * caching, bounded-concurrency mapping, error helpers, and retrying fetch.
 */
export { TTLCache } from './cache.js'; // in-memory TTL cache with coalescing
export { batchMap } from './utils.js'; // map with bounded concurrency
export { getErrorMessage } from './errors.js'; // safe message extraction
export { fetchWithRetry } from './retry.js'; // fetch with 429/503 backoff
|
package/dist/retry.d.ts
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Fetch with Retry
|
|
3
|
+
*
|
|
4
|
+
* Automatic retry on HTTP 429 (Too Many Requests) and 503 (Service Unavailable)
|
|
5
|
+
* with exponential backoff. Respects the Retry-After header when present.
|
|
6
|
+
*/
|
|
7
|
+
/**
|
|
8
|
+
* Retry configuration
|
|
9
|
+
*/
|
|
10
|
+
export interface RetryConfig {
|
|
11
|
+
/** Maximum number of retry attempts (default: 3) */
|
|
12
|
+
maxRetries?: number;
|
|
13
|
+
/** Initial delay in milliseconds before first retry (default: 1000) */
|
|
14
|
+
initialDelayMs?: number;
|
|
15
|
+
/** Maximum delay cap in milliseconds (default: 10000) */
|
|
16
|
+
maxDelayMs?: number;
|
|
17
|
+
}
|
|
18
|
+
/**
|
|
19
|
+
* Options for fetchWithRetry
|
|
20
|
+
*/
|
|
21
|
+
export interface FetchWithRetryOptions {
|
|
22
|
+
/** Retry configuration (required to enable retry) */
|
|
23
|
+
retry?: Required<RetryConfig>;
|
|
24
|
+
/** Callback invoked before each retry attempt */
|
|
25
|
+
onRetry?: (info: {
|
|
26
|
+
attempt: number;
|
|
27
|
+
maxRetries: number;
|
|
28
|
+
delayMs: number;
|
|
29
|
+
status: number;
|
|
30
|
+
}) => void;
|
|
31
|
+
}
|
|
32
|
+
/**
|
|
33
|
+
* Fetch with automatic retry on 429/503 using exponential backoff.
|
|
34
|
+
* Respects Retry-After header.
|
|
35
|
+
*
|
|
36
|
+
* @param url - URL to fetch
|
|
37
|
+
* @param init - Standard fetch RequestInit
|
|
38
|
+
* @param options - Retry options
|
|
39
|
+
* @returns Response from the final attempt
|
|
40
|
+
*/
|
|
41
|
+
export declare function fetchWithRetry(url: string, init: RequestInit, options?: FetchWithRetryOptions): Promise<Response>;
|
package/dist/retry.js
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Fetch with Retry
|
|
3
|
+
*
|
|
4
|
+
* Automatic retry on HTTP 429 (Too Many Requests) and 503 (Service Unavailable)
|
|
5
|
+
* with exponential backoff. Respects the Retry-After header when present.
|
|
6
|
+
*/
|
|
7
|
+
/**
|
|
8
|
+
* Fetch with automatic retry on 429/503 using exponential backoff.
|
|
9
|
+
* Respects Retry-After header.
|
|
10
|
+
*
|
|
11
|
+
* @param url - URL to fetch
|
|
12
|
+
* @param init - Standard fetch RequestInit
|
|
13
|
+
* @param options - Retry options
|
|
14
|
+
* @returns Response from the final attempt
|
|
15
|
+
*/
|
|
16
|
+
export async function fetchWithRetry(url, init, options = {}) {
|
|
17
|
+
const { retry, onRetry } = options;
|
|
18
|
+
const maxAttempts = retry ? 1 + retry.maxRetries : 1;
|
|
19
|
+
let lastResponse;
|
|
20
|
+
for (let attempt = 0; attempt < maxAttempts; attempt++) {
|
|
21
|
+
lastResponse = await fetch(url, init);
|
|
22
|
+
// Check if this is a retryable status
|
|
23
|
+
if (retry && (lastResponse.status === 429 || lastResponse.status === 503)) {
|
|
24
|
+
if (attempt >= retry.maxRetries) {
|
|
25
|
+
return lastResponse; // Exhausted retries
|
|
26
|
+
}
|
|
27
|
+
// Calculate delay: respect Retry-After header, or use exponential backoff
|
|
28
|
+
let delayMs;
|
|
29
|
+
const retryAfterHeader = lastResponse.headers.get('Retry-After');
|
|
30
|
+
if (retryAfterHeader) {
|
|
31
|
+
const retryAfterSeconds = parseInt(retryAfterHeader, 10);
|
|
32
|
+
delayMs = Number.isFinite(retryAfterSeconds)
|
|
33
|
+
? retryAfterSeconds * 1000
|
|
34
|
+
: retry.initialDelayMs * Math.pow(2, attempt);
|
|
35
|
+
}
|
|
36
|
+
else {
|
|
37
|
+
delayMs = retry.initialDelayMs * Math.pow(2, attempt);
|
|
38
|
+
}
|
|
39
|
+
delayMs = Math.min(delayMs, retry.maxDelayMs);
|
|
40
|
+
// Notify caller of retry
|
|
41
|
+
onRetry?.({ attempt: attempt + 1, maxRetries: retry.maxRetries, delayMs, status: lastResponse.status });
|
|
42
|
+
await new Promise(resolve => setTimeout(resolve, delayMs));
|
|
43
|
+
continue;
|
|
44
|
+
}
|
|
45
|
+
return lastResponse;
|
|
46
|
+
}
|
|
47
|
+
return lastResponse;
|
|
48
|
+
}
|
package/dist/utils.d.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared API Client Utilities
|
|
3
|
+
*/
|
|
4
|
+
/**
|
|
5
|
+
* Map over items with bounded concurrency
|
|
6
|
+
*
|
|
7
|
+
* Processes items in batches of `concurrency`, waiting for each batch
|
|
8
|
+
* to complete before starting the next. This prevents overwhelming
|
|
9
|
+
* APIs with too many simultaneous requests.
|
|
10
|
+
*
|
|
11
|
+
* @param items - Array of items to process
|
|
12
|
+
* @param concurrency - Maximum number of concurrent operations
|
|
13
|
+
* @param fn - Async function to apply to each item
|
|
14
|
+
* @returns Array of results in the same order as input items
|
|
15
|
+
*/
|
|
16
|
+
export declare function batchMap<T, R>(items: T[], concurrency: number, fn: (item: T) => Promise<R>): Promise<R[]>;
|
package/dist/utils.js
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Shared API Client Utilities
|
|
3
|
+
*/
|
|
4
|
+
// ============================================================================
|
|
5
|
+
// Concurrency Helper
|
|
6
|
+
// ============================================================================
|
|
7
|
+
/**
|
|
8
|
+
* Map over items with bounded concurrency
|
|
9
|
+
*
|
|
10
|
+
* Processes items in batches of `concurrency`, waiting for each batch
|
|
11
|
+
* to complete before starting the next. This prevents overwhelming
|
|
12
|
+
* APIs with too many simultaneous requests.
|
|
13
|
+
*
|
|
14
|
+
* @param items - Array of items to process
|
|
15
|
+
* @param concurrency - Maximum number of concurrent operations
|
|
16
|
+
* @param fn - Async function to apply to each item
|
|
17
|
+
* @returns Array of results in the same order as input items
|
|
18
|
+
*/
|
|
19
|
+
export async function batchMap(items, concurrency, fn) {
|
|
20
|
+
const results = [];
|
|
21
|
+
for (let i = 0; i < items.length; i += concurrency) {
|
|
22
|
+
const batch = items.slice(i, i + concurrency);
|
|
23
|
+
const batchResults = await Promise.all(batch.map(fn));
|
|
24
|
+
results.push(...batchResults);
|
|
25
|
+
}
|
|
26
|
+
return results;
|
|
27
|
+
}
|
package/package.json
ADDED
|
@@ -0,0 +1,38 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "@markwharton/api-core",
|
|
3
|
+
"version": "1.0.0",
|
|
4
|
+
"description": "Shared utilities for API client packages",
|
|
5
|
+
"type": "module",
|
|
6
|
+
"main": "dist/index.js",
|
|
7
|
+
"types": "dist/index.d.ts",
|
|
8
|
+
"exports": {
|
|
9
|
+
".": {
|
|
10
|
+
"types": "./dist/index.d.ts",
|
|
11
|
+
"default": "./dist/index.js"
|
|
12
|
+
}
|
|
13
|
+
},
|
|
14
|
+
"scripts": {
|
|
15
|
+
"build": "tsc",
|
|
16
|
+
"clean": "rm -rf dist"
|
|
17
|
+
},
|
|
18
|
+
"devDependencies": {
|
|
19
|
+
"@types/node": "^20.10.0",
|
|
20
|
+
"typescript": "^5.3.0"
|
|
21
|
+
},
|
|
22
|
+
"files": [
|
|
23
|
+
"dist"
|
|
24
|
+
],
|
|
25
|
+
"repository": {
|
|
26
|
+
"type": "git",
|
|
27
|
+
"url": "git+https://github.com/MarkWharton/api-packages.git",
|
|
28
|
+
"directory": "packages/api-core"
|
|
29
|
+
},
|
|
30
|
+
"publishConfig": {
|
|
31
|
+
"access": "public"
|
|
32
|
+
},
|
|
33
|
+
"author": "Mark Wharton",
|
|
34
|
+
"license": "MIT",
|
|
35
|
+
"engines": {
|
|
36
|
+
"node": ">=20"
|
|
37
|
+
}
|
|
38
|
+
}
|