anyapi-mcp-server 1.6.1 → 1.8.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +35 -27
- package/build/api-client.js +42 -21
- package/build/api-index.js +79 -27
- package/build/body-file.js +42 -0
- package/build/body-validation.js +102 -0
- package/build/data-cache.js +96 -0
- package/build/graphql-schema.js +342 -40
- package/build/index.js +252 -131
- package/build/json-filter.js +62 -0
- package/build/pagination.js +123 -0
- package/build/pre-write-backup.js +20 -0
- package/build/query-suggestions.js +34 -9
- package/build/rate-limit-tracker.js +59 -0
- package/build/token-budget.js +135 -0
- package/package.json +1 -1
package/build/graphql-schema.js
CHANGED
|
@@ -1,8 +1,144 @@
|
|
|
1
|
-
import { GraphQLSchema, GraphQLObjectType, GraphQLInputObjectType, GraphQLScalarType, GraphQLString, GraphQLInt, GraphQLFloat, GraphQLBoolean, GraphQLList, GraphQLNonNull, graphql as executeGraphQL, printSchema, } from "graphql";
|
|
1
|
+
import { GraphQLSchema, GraphQLObjectType, GraphQLInputObjectType, GraphQLScalarType, GraphQLString, GraphQLInt, GraphQLFloat, GraphQLBoolean, GraphQLList, GraphQLNonNull, isObjectType, isListType, isScalarType, graphql as executeGraphQL, printSchema, } from "graphql";
|
|
2
2
|
import { createHash } from "node:crypto";
|
|
3
|
-
const
|
|
4
|
-
const MAX_SAMPLE_SIZE =
|
|
5
|
-
const
|
|
3
|
+
// Hard cap on how many array elements a generated `limit` argument may page in
// (used as the fallback in truncateIfArray and the ceiling in dynamicArrayLimit).
const MAX_ARRAY_LIMIT = 50;
// Maximum number of array elements sampled during inference — TODO confirm:
// its use site is outside this view; the cost-estimation code samples 10 inline.
const MAX_SAMPLE_SIZE = 50;
// A base type must account for >= 60% of observations of a field to win
// majority-type conflict resolution in mergeSamples().
const MAJORITY_THRESHOLD = 0.6;
|
|
6
|
+
/**
 * Estimate the LLM token cost of a JSON value by walking its structure.
 * Scalars are costed by serialized length (long strings cost more tokens),
 * objects sum their children, and arrays average up to 10 sampled elements.
 * Recursion is bounded by MAX_INFER_DEPTH to mirror schema-inference behavior.
 *
 * @param {*} value - arbitrary JSON value
 * @param {number} [depth=0] - current recursion depth
 * @returns {number} estimated token count (always >= 1)
 */
function estimateTokenCost(value, depth = 0) {
    // Past the depth cap everything counts as a single token.
    if (depth >= MAX_INFER_DEPTH) return 1;
    if (value == null) return 1; // null and undefined alike
    if (Array.isArray(value)) {
        if (value.length === 0) return 1;
        // Average the first few elements rather than trusting element 0 alone.
        const samples = value.slice(0, 10);
        const summed = samples.reduce((acc, el) => acc + estimateTokenCost(el, depth + 1), 0);
        return Math.max(1, Math.round(summed / samples.length));
    }
    if (typeof value === "object") {
        const summed = Object.values(value).reduce((acc, child) => acc + estimateTokenCost(child, depth + 1), 0);
        return Math.max(1, summed);
    }
    if (typeof value === "string") {
        // Each string in a JSON response adds ~10 chars of overhead (quotes,
        // comma, indentation, newline); 1 LLM token ≈ 4 chars.
        return Math.max(1, Math.ceil((value.length + 10) / 4));
    }
    return 1;
}
|
|
45
|
+
/**
 * Compute a per-field token cost tree from response data.
 * Keys are sanitized field names (matching the generated GraphQL schema);
 * `_total`, `_perItem` and `_avgLength` are metadata entries.
 *
 * @param {*} data - JSON response value
 * @param {number} [depth=0] - current recursion depth
 * @returns {object} cost tree with a `_total` on every node
 */
export function computeFieldCosts(data, depth = 0) {
    // Past the inference depth cap (or for null/undefined) cost is a flat unit.
    if (depth >= MAX_INFER_DEPTH || data == null) {
        return { _total: 1 };
    }
    if (Array.isArray(data)) {
        if (data.length === 0) {
            return { _total: 1, _perItem: 0, _avgLength: 0 };
        }
        const objectSample = data.find((el) => typeof el === "object" && el !== null && !Array.isArray(el));
        if (objectSample) {
            // Merge several samples so the representative object carries every
            // field seen across items, not just the first item's fields.
            const mergeResult = mergeArraySamples(data);
            const representative = mergeResult ? mergeResult.merged : objectSample;
            const childCosts = computeFieldCosts(representative, depth + 1);
            const itemCost = childCosts._total;
            const fieldEntries = Object.entries(childCosts).filter(([name]) => !name.startsWith("_"));
            return {
                _total: itemCost * data.length,
                _perItem: itemCost,
                _avgLength: data.length,
                ...Object.fromEntries(fieldEntries),
            };
        }
        // Scalar array: average the cost of up to 10 sampled elements.
        const samples = data.slice(0, 10);
        const summed = samples.reduce((acc, el) => acc + estimateTokenCost(el), 0);
        const itemCost = Math.max(1, Math.round(summed / samples.length));
        return { _total: itemCost * data.length, _perItem: itemCost, _avgLength: data.length };
    }
    if (typeof data === "object") {
        const result = { _total: 0 };
        let runningTotal = 0;
        for (const [rawKey, child] of Object.entries(data)) {
            const fieldName = sanitizeFieldName(rawKey);
            if (typeof child === "object" && child !== null) {
                // Nested object or array → recurse for a sub-tree.
                const subtree = computeFieldCosts(child, depth + 1);
                result[fieldName] = subtree;
                runningTotal += subtree._total;
            } else {
                const scalarCost = estimateTokenCost(child);
                result[fieldName] = scalarCost;
                runningTotal += scalarCost;
            }
        }
        result._total = Math.max(1, runningTotal);
        return result;
    }
    return { _total: estimateTokenCost(data) };
}
|
|
110
|
+
/**
 * Compute a default array limit that scales inversely with item token cost.
 * Cheap items (numbers, small objects) get a high limit (up to MAX_ARRAY_LIMIT);
 * expensive items (deep objects, long strings) get a low one (at least 3).
 * Aims to keep the array's total token cost around TOKEN_BUDGET.
 * Scalar arrays are sampled (up to 10 elements) so the first element alone
 * doesn't skew the per-item cost.
 *
 * @param {Array<*>} arr - response array being paginated
 * @returns {number} default `limit` argument for this array field
 */
function dynamicArrayLimit(arr) {
    if (arr.length === 0)
        return MAX_ARRAY_LIMIT;
    const TOKEN_BUDGET = 200;
    const MIN_ITEMS = 3;
    // Object arrays: cost the merged "super-object" so optional fields count.
    const mergeResult = mergeArraySamples(arr);
    let costPerItem;
    if (mergeResult) {
        costPerItem = estimateTokenCost(mergeResult.merged);
    } else {
        // Scalar array: average up to 10 sampled elements.
        const samples = arr.slice(0, 10);
        const summed = samples.reduce((acc, el) => acc + estimateTokenCost(el), 0);
        costPerItem = summed / samples.length;
    }
    return Math.max(MIN_ITEMS, Math.min(MAX_ARRAY_LIMIT, Math.floor(TOKEN_BUDGET / costPerItem)));
}
|
|
141
|
+
// Recursion depth cap for structural walks: used by estimateTokenCost and
// computeFieldCosts here (and, per their doc comments, matches type inference).
const MAX_INFER_DEPTH = 8;
|
|
6
142
|
/**
|
|
7
143
|
* Custom scalar that passes arbitrary JSON values through as-is.
|
|
8
144
|
* Used for mixed-type arrays, type-conflicting fields, and deeply nested structures.
|
|
@@ -25,7 +161,7 @@ export function truncateIfArray(data, limit, offset) {
|
|
|
25
161
|
}
|
|
26
162
|
const total = data.length;
|
|
27
163
|
const off = offset ?? 0;
|
|
28
|
-
const lim = limit ??
|
|
164
|
+
const lim = limit ?? MAX_ARRAY_LIMIT;
|
|
29
165
|
const sliced = data.slice(off, off + lim);
|
|
30
166
|
return { data: sliced, truncated: sliced.length < total, total };
|
|
31
167
|
}
|
|
@@ -99,48 +235,77 @@ function inferScalarType(value) {
|
|
|
99
235
|
}
|
|
100
236
|
/**
 * Merge multiple sample objects into a single "super-object" that contains
 * every key seen across all samples.
 * Uses majority-type conflict resolution: if one base type accounts for >=60%
 * of observations for a field, that type wins. Otherwise the field is marked
 * as a conflict (JSON scalar fallback).
 * Nested objects are merged recursively.
 *
 * @param {object[]} items - sample objects observed for one array/field
 * @returns {{merged: object, conflicts: Set<string>}} merged super-object plus
 *   dot-separated paths of fields whose base type could not be resolved
 */
function mergeSamples(items) {
    const merged = {};
    const conflicts = new Set();
    // key → (baseType → count) for majority-type resolution
    const typeCounts = new Map();
    // key → (baseType → first value of that type) for setting merged to winning type's value
    const firstValueByType = new Map();
    for (const item of items) {
        for (const [key, value] of Object.entries(item)) {
            // null/undefined carry no type information — skip them entirely
            if (value === null || value === undefined)
                continue;
            const valueType = baseTypeOf(value);
            // Track type counts
            if (!typeCounts.has(key))
                typeCounts.set(key, new Map());
            const counts = typeCounts.get(key);
            counts.set(valueType, (counts.get(valueType) ?? 0) + 1);
            // Track first value per type
            if (!firstValueByType.has(key))
                firstValueByType.set(key, new Map());
            const typeValues = firstValueByType.get(key);
            if (!typeValues.has(valueType))
                typeValues.set(valueType, value);
            if (!(key in merged) || merged[key] === null || merged[key] === undefined) {
                // First non-null observation of this key wins the initial slot.
                merged[key] = value;
            }
            else if (typeof value === "object" && value !== null && !Array.isArray(value) &&
                typeof merged[key] === "object" && merged[key] !== null && !Array.isArray(merged[key])) {
                // Both plain objects → merge recursively; nested conflicts are
                // re-exported with this key as a dotted path prefix.
                const sub = mergeSamples([
                    merged[key],
                    value,
                ]);
                merged[key] = sub.merged;
                for (const c of sub.conflicts)
                    conflicts.add(`${key}.${c}`);
            }
            else if (Array.isArray(value) && Array.isArray(merged[key])) {
                // Prefer a non-empty array sample over an empty one.
                if (merged[key].length === 0 && value.length > 0) {
                    merged[key] = value;
                }
            }
        }
    }
    // Apply majority-type resolution
    for (const [key, counts] of typeCounts) {
        if (counts.size <= 1)
            continue; // single type → no conflict
        const total = Array.from(counts.values()).reduce((a, b) => a + b, 0);
        let majorityType = null;
        for (const [type, count] of counts) {
            if (count / total >= MAJORITY_THRESHOLD) {
                majorityType = type;
                break;
            }
        }
        if (majorityType) {
            // Majority type wins — set merged value to a representative of the winning type
            const winningValue = firstValueByType.get(key).get(majorityType);
            merged[key] = winningValue;
        }
        else {
            // No majority → mark as conflict
            conflicts.add(key);
        }
    }
    return { merged, conflicts };
}
|
|
146
311
|
/**
|
|
@@ -221,7 +386,7 @@ function inferType(value, typeName, typeRegistry, conflicts, depth = 0) {
|
|
|
221
386
|
}
|
|
222
387
|
if (Array.isArray(value)) {
|
|
223
388
|
if (value.length === 0) {
|
|
224
|
-
return new GraphQLList(
|
|
389
|
+
return new GraphQLList(GraphQLJSON);
|
|
225
390
|
}
|
|
226
391
|
// Mixed-type arrays → JSON scalar (e.g. ["field", 4296, { "temporal-unit": "day" }])
|
|
227
392
|
if (hasMixedTypes(value)) {
|
|
@@ -260,28 +425,53 @@ function inferType(value, typeName, typeRegistry, conflicts, depth = 0) {
|
|
|
260
425
|
const fieldConfigs = {};
|
|
261
426
|
for (const [originalKey, fieldValue] of entries) {
|
|
262
427
|
let sanitized = sanitizeFieldName(originalKey);
|
|
428
|
+
let wasCollision = false;
|
|
263
429
|
if (usedNames.has(sanitized)) {
|
|
264
430
|
let counter = 2;
|
|
265
431
|
while (usedNames.has(`${sanitized}_${counter}`))
|
|
266
432
|
counter++;
|
|
267
433
|
sanitized = `${sanitized}_${counter}`;
|
|
434
|
+
wasCollision = true;
|
|
268
435
|
}
|
|
269
436
|
usedNames.add(sanitized);
|
|
270
437
|
const key = originalKey;
|
|
438
|
+
const needsDescription = wasCollision || sanitized !== originalKey;
|
|
439
|
+
const description = needsDescription ? `Original API field: "${originalKey}"` : undefined;
|
|
271
440
|
// Use JSON scalar for fields with type conflicts across samples
|
|
272
441
|
if (conflicts?.has(originalKey)) {
|
|
273
442
|
fieldConfigs[sanitized] = {
|
|
274
443
|
type: GraphQLJSON,
|
|
444
|
+
...(description ? { description } : {}),
|
|
275
445
|
resolve: (source) => source[key],
|
|
276
446
|
};
|
|
277
447
|
continue;
|
|
278
448
|
}
|
|
279
449
|
const childTypeName = `${typeName}_${sanitized}`;
|
|
280
450
|
const fieldType = inferType(fieldValue, childTypeName, typeRegistry, conflicts, depth + 1);
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
|
|
284
|
-
|
|
451
|
+
if (fieldType instanceof GraphQLList) {
|
|
452
|
+
const arrDefault = Array.isArray(fieldValue) ? dynamicArrayLimit(fieldValue) : MAX_ARRAY_LIMIT;
|
|
453
|
+
fieldConfigs[sanitized] = {
|
|
454
|
+
type: fieldType,
|
|
455
|
+
...(description ? { description } : {}),
|
|
456
|
+
args: {
|
|
457
|
+
limit: { type: GraphQLInt, defaultValue: arrDefault },
|
|
458
|
+
offset: { type: GraphQLInt, defaultValue: 0 },
|
|
459
|
+
},
|
|
460
|
+
resolve: (source, args) => {
|
|
461
|
+
const val = source[key];
|
|
462
|
+
if (!Array.isArray(val))
|
|
463
|
+
return val;
|
|
464
|
+
return val.slice(args.offset, args.offset + args.limit);
|
|
465
|
+
},
|
|
466
|
+
};
|
|
467
|
+
}
|
|
468
|
+
else {
|
|
469
|
+
fieldConfigs[sanitized] = {
|
|
470
|
+
type: fieldType,
|
|
471
|
+
...(description ? { description } : {}),
|
|
472
|
+
resolve: (source) => source[key],
|
|
473
|
+
};
|
|
474
|
+
}
|
|
285
475
|
}
|
|
286
476
|
const realType = new GraphQLObjectType({
|
|
287
477
|
name: typeName,
|
|
@@ -309,6 +499,62 @@ function mapOpenApiTypeToGraphQLInput(type) {
|
|
|
309
499
|
return GraphQLString;
|
|
310
500
|
}
|
|
311
501
|
}
|
|
502
|
+
const MAX_INPUT_DEPTH = 6;
/**
 * Build the field map for a GraphQL input object from an OpenAPI-style
 * property map. Shared by the object branch and the array-item branch of
 * buildInputType (previously duplicated verbatim in both).
 *
 * @param {object} properties - propName → property definition
 * @param {string[]|undefined} requiredNames - names that must be NonNull
 * @param {string} ownTypeName - name of the input type being constructed
 *   (passed down as the parent prefix for nested child types)
 * @param {number} depth - current recursion depth
 * @returns {object} GraphQL input field config map (may be empty)
 */
function buildInputFieldMap(properties, requiredNames, ownTypeName, depth) {
    const required = new Set(requiredNames ?? []);
    const fields = {};
    for (const [name, nested] of Object.entries(properties)) {
        let type = buildInputType(nested, ownTypeName, name, depth + 1);
        // A field is NonNull if flagged on the property itself or listed in
        // the enclosing schema's required-field list.
        if (nested.required || required.has(name)) {
            type = new GraphQLNonNull(type);
        }
        fields[sanitizeFieldName(name)] = {
            type,
            ...(nested.description ? { description: nested.description } : {}),
        };
    }
    return fields;
}
/**
 * Build a GraphQL input type from a RequestBodyProperty definition.
 * Recursively creates GraphQLInputObjectType for nested objects and
 * GraphQLList(GraphQLInputObjectType) for arrays with object items.
 * Falls back to GraphQLString past MAX_INPUT_DEPTH or for objects that
 * yield no usable fields.
 *
 * @param {object} propDef - OpenAPI-style property definition
 * @param {string} parentTypeName - name prefix of the enclosing input type
 * @param {string} propName - property name within the parent
 * @param {number} depth - current recursion depth
 * @returns {*} a GraphQL input type
 */
function buildInputType(propDef, parentTypeName, propName, depth) {
    if (depth >= MAX_INPUT_DEPTH)
        return GraphQLString;
    if (propDef.type === "object" && propDef.properties) {
        const ownTypeName = `${parentTypeName}_${sanitizeFieldName(propName)}`;
        const nestedFields = buildInputFieldMap(propDef.properties, propDef.required_fields, ownTypeName, depth);
        // An object with no usable fields cannot be a GraphQL input object.
        if (Object.keys(nestedFields).length === 0)
            return GraphQLString;
        return new GraphQLInputObjectType({
            name: ownTypeName,
            fields: nestedFields,
        });
    }
    if (propDef.type === "array" && propDef.items) {
        if (propDef.items.properties) {
            const itemTypeName = `${parentTypeName}_${sanitizeFieldName(propName)}_Item`;
            // NOTE: item-level required names come from items.required here,
            // while the object branch reads required_fields — preserved as-is.
            const itemFields = buildInputFieldMap(propDef.items.properties, propDef.items.required, itemTypeName, depth);
            if (Object.keys(itemFields).length > 0) {
                return new GraphQLList(new GraphQLInputObjectType({
                    name: itemTypeName,
                    fields: itemFields,
                }));
            }
        }
        // Scalar-item array (or object items with no usable fields).
        return new GraphQLList(mapOpenApiTypeToGraphQLInput(propDef.items.type));
    }
    return mapOpenApiTypeToGraphQLInput(propDef.type);
}
|
|
312
558
|
const WRITE_METHODS = new Set(["POST", "PUT", "DELETE", "PATCH"]);
|
|
313
559
|
/**
|
|
314
560
|
* Build a GraphQL schema from an arbitrary JSON response.
|
|
@@ -324,7 +570,7 @@ export function buildSchemaFromData(data, method, pathTemplate, requestBodySchem
|
|
|
324
570
|
let queryType;
|
|
325
571
|
// Array response
|
|
326
572
|
if (Array.isArray(data)) {
|
|
327
|
-
let itemType =
|
|
573
|
+
let itemType = GraphQLJSON;
|
|
328
574
|
if (data.length > 0) {
|
|
329
575
|
// Mixed-type top-level array → items are JSON scalars
|
|
330
576
|
if (hasMixedTypes(data)) {
|
|
@@ -340,12 +586,22 @@ export function buildSchemaFromData(data, method, pathTemplate, requestBodySchem
|
|
|
340
586
|
}
|
|
341
587
|
}
|
|
342
588
|
}
|
|
589
|
+
const topLimit = dynamicArrayLimit(data);
|
|
343
590
|
queryType = new GraphQLObjectType({
|
|
344
591
|
name: "Query",
|
|
345
592
|
fields: {
|
|
346
593
|
items: {
|
|
347
594
|
type: new GraphQLList(itemType),
|
|
348
|
-
|
|
595
|
+
args: {
|
|
596
|
+
limit: { type: GraphQLInt, defaultValue: topLimit },
|
|
597
|
+
offset: { type: GraphQLInt, defaultValue: 0 },
|
|
598
|
+
},
|
|
599
|
+
resolve: (source, args) => {
|
|
600
|
+
const arr = source;
|
|
601
|
+
if (!Array.isArray(arr))
|
|
602
|
+
return arr;
|
|
603
|
+
return arr.slice(args.offset, args.offset + args.limit);
|
|
604
|
+
},
|
|
349
605
|
},
|
|
350
606
|
_count: {
|
|
351
607
|
type: GraphQLInt,
|
|
@@ -365,6 +621,10 @@ export function buildSchemaFromData(data, method, pathTemplate, requestBodySchem
|
|
|
365
621
|
for (const [fieldName, fieldDef] of Object.entries(originalFields)) {
|
|
366
622
|
queryFields[fieldName] = {
|
|
367
623
|
type: fieldDef.type,
|
|
624
|
+
...(fieldDef.description ? { description: fieldDef.description } : {}),
|
|
625
|
+
args: fieldDef.args.length > 0
|
|
626
|
+
? Object.fromEntries(fieldDef.args.map(a => [a.name, { type: a.type, defaultValue: a.defaultValue }]))
|
|
627
|
+
: undefined,
|
|
368
628
|
resolve: fieldDef.resolve,
|
|
369
629
|
};
|
|
370
630
|
}
|
|
@@ -390,7 +650,7 @@ export function buildSchemaFromData(data, method, pathTemplate, requestBodySchem
|
|
|
390
650
|
const inputFields = {};
|
|
391
651
|
for (const [propName, propDef] of Object.entries(requestBodySchema.properties)) {
|
|
392
652
|
const sanitized = sanitizeFieldName(propName);
|
|
393
|
-
let type =
|
|
653
|
+
let type = buildInputType(propDef, `${baseName}_Input`, propName, 0);
|
|
394
654
|
if (propDef.required) {
|
|
395
655
|
type = new GraphQLNonNull(type);
|
|
396
656
|
}
|
|
@@ -434,10 +694,10 @@ export function getOrBuildSchema(data, method, pathTemplate, requestBodySchema,
|
|
|
434
694
|
const key = cacheKey(method, pathTemplate, effectiveHash);
|
|
435
695
|
const cached = schemaCache.get(key);
|
|
436
696
|
if (cached)
|
|
437
|
-
return { schema: cached, shapeHash };
|
|
697
|
+
return { schema: cached, shapeHash, fromCache: true };
|
|
438
698
|
const schema = buildSchemaFromData(data, method, pathTemplate, requestBodySchema);
|
|
439
699
|
schemaCache.set(key, schema);
|
|
440
|
-
return { schema, shapeHash };
|
|
700
|
+
return { schema, shapeHash, fromCache: false };
|
|
441
701
|
}
|
|
442
702
|
/**
|
|
443
703
|
* Convert a GraphQL schema to SDL string for display.
|
|
@@ -445,6 +705,48 @@ export function getOrBuildSchema(data, method, pathTemplate, requestBodySchema,
|
|
|
445
705
|
/**
 * Render a GraphQL schema as an SDL string for display.
 */
export function schemaToSDL(schema) {
    const sdl = printSchema(schema);
    return sdl;
}
|
|
708
|
+
/**
 * Walk the schema type tree and return dotted field paths typed as the JSON
 * scalar. Helps callers understand which fields are opaque and can't be
 * queried with GraphQL field selection.
 * Each named object type is visited once (cycle guard), so a shared type's
 * JSON fields are reported under the first path that reaches it.
 *
 * @param {GraphQLSchema} schema - schema produced by buildSchemaFromData
 * @returns {string[]} dotted paths of JSON-scalar fields
 */
export function collectJsonFields(schema) {
    const found = [];
    const root = schema.getQueryType();
    if (!root) {
        return found;
    }
    const seenTypes = new Set();
    const stripNonNull = (t) => (t instanceof GraphQLNonNull ? t.ofType : t);
    const isJsonScalar = (t) => isScalarType(t) && t.name === "JSON";
    const walk = (objType, prefix) => {
        if (seenTypes.has(objType.name)) {
            return;
        }
        seenTypes.add(objType.name);
        for (const [fieldName, field] of Object.entries(objType.getFields())) {
            const path = prefix ? `${prefix}.${fieldName}` : fieldName;
            const outer = stripNonNull(field.type);
            if (isJsonScalar(outer)) {
                found.push(path);
            } else if (isObjectType(outer)) {
                walk(outer, path);
            } else if (isListType(outer)) {
                // Look one level inside the list (lists of lists are not walked).
                const element = stripNonNull(outer.ofType);
                if (isJsonScalar(element)) {
                    found.push(path);
                } else if (isObjectType(element)) {
                    walk(element, path);
                }
            }
        }
    };
    walk(root, "");
    return found;
}
|
|
448
750
|
/**
|
|
449
751
|
* Execute a GraphQL selection query against JSON data using a schema.
|
|
450
752
|
* The query should be a selection set like `{ id name collection { id } }`.
|