@salesforce/lds-runtime-mobile 1.156.0 → 1.157.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between those versions exactly as they appear in their respective public registries.
package/dist/main.js CHANGED
@@ -31,6 +31,7 @@ import formattingOptions from 'lightning/i18nCldrOptions';
31
31
  import eagerEvalValidAt from '@salesforce/gate/lds.eagerEvalValidAt';
32
32
  import eagerEvalStaleWhileRevalidate from '@salesforce/gate/lds.eagerEvalStaleWhileRevalidate';
33
33
  import eagerEvalDefaultCachePolicy from '@salesforce/gate/lds.eagerEvalDefaultCachePolicy';
34
+ import ldsPrimingGraphqlBatch from '@salesforce/gate/lds.primingGraphqlBatch';
34
35
 
35
36
  /**
36
37
  * Copyright (c) 2022, Salesforce, Inc.,
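Note: the only functional change in this hunk is the new ldsPrimingGraphqlBatch gate import; later in the file, PrimingSession reads it once at construction to decide whether priming uses the batched GraphQL path. A minimal sketch of that consumption, assuming the gate module has the same shape as the other lds gates (an object exposing isOpen); the stub below is hypothetical and stands in for '@salesforce/gate/lds.primingGraphqlBatch':

// Hypothetical stand-in for the gate module; real gates are resolved by the platform.
const ldsPrimingGraphqlBatch = { isOpen: ({ fallback }) => fallback };

// Mirrors the call made in the PrimingSession constructor below.
const useBatchGQL = ldsPrimingGraphqlBatch.isOpen({ fallback: false });
console.log(useBatchGQL); // false until the gate is opened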
@@ -4053,38 +4054,32 @@ function recordQuery(selection, apiName, alias, predicates, input) {
4053
4054
  const draftsField = { type: FieldType.Scalar, extract, path: 'node._drafts' };
4054
4055
  const idExtract = { type: ValueType.Extract, jsonAlias: alias, field: 'Id' };
4055
4056
  const idField = { type: FieldType.Scalar, extract: idExtract, path: 'node.Id' };
4056
- // When the exclude stale records gate is open, inject an additional predicate
4057
- // to limit the search to records that either have drafts associated to them or
4058
- // were ingested at least as recently as the query.
4059
- if (excludeStaleRecordsGate.isOpen({ fallback: false })) {
4060
- const key = input.connectionKeyBuilder(selection, input.config.variables);
4061
- const queryMetadata = input.metadata[key];
4062
- // If there is no metadata for this query or it somehow lacks a timestamp
4063
- // skip adding the additional predicates
4064
- if (queryMetadata !== undefined && queryMetadata.ingestionTimestamp !== undefined) {
4065
- const timestamp = queryMetadata.ingestionTimestamp;
4066
- const timestampCheck = {
4067
- type: PredicateType$1.comparison,
4068
- left: {
4069
- type: ValueType.Extract,
4070
- jsonAlias: alias,
4071
- field: 'ingestionTimestamp',
4072
- metadata: true,
4073
- },
4074
- operator: ComparisonOperator.gte,
4075
- right: { type: ValueType.IntLiteral, value: timestamp },
4076
- };
4077
- const isDraft = {
4078
- type: PredicateType$1.nullComparison,
4079
- left: { type: ValueType.Extract, jsonAlias: alias, field: 'drafts' },
4080
- operator: NullComparisonOperator.isNot,
4081
- };
4082
- predicates.push({
4083
- type: PredicateType$1.compound,
4084
- operator: CompoundOperator.or,
4085
- children: [timestampCheck, isDraft],
4086
- });
4087
- }
4057
+ // When the exclude stale records gate is open and there is a root timestamp
4058
+ // in the parser input, inject an additional predicate to limit the search
4059
+ // to records that either have drafts associated to them or were ingested at
4060
+ // least as recently as the query.
4061
+ if (excludeStaleRecordsGate.isOpen({ fallback: false }) && input.rootTimestamp !== undefined) {
4062
+ const timestampCheck = {
4063
+ type: PredicateType$1.comparison,
4064
+ left: {
4065
+ type: ValueType.Extract,
4066
+ jsonAlias: alias,
4067
+ field: 'ingestionTimestamp',
4068
+ metadata: true,
4069
+ },
4070
+ operator: ComparisonOperator.gte,
4071
+ right: { type: ValueType.IntLiteral, value: input.rootTimestamp },
4072
+ };
4073
+ const isDraft = {
4074
+ type: PredicateType$1.nullComparison,
4075
+ left: { type: ValueType.Extract, jsonAlias: alias, field: 'drafts' },
4076
+ operator: NullComparisonOperator.isNot,
4077
+ };
4078
+ predicates.push({
4079
+ type: PredicateType$1.compound,
4080
+ operator: CompoundOperator.or,
4081
+ children: [timestampCheck, isDraft],
4082
+ });
4088
4083
  }
4089
4084
  return queryContainer(internalFields, alias, apiName, predicates).map((result) => {
4090
4085
  const { fields, predicates } = result;
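In the rewritten recordQuery, the stale-record predicate is built from input.rootTimestamp instead of re-deriving a timestamp from cache metadata inside this function; when the gate is open and a root timestamp is present, the query gains an OR predicate requiring either ingestionTimestamp >= rootTimestamp or a non-null drafts entry. A sketch of the object that gets pushed, with the enum members (PredicateType$1, ValueType, ComparisonOperator, NullComparisonOperator, CompoundOperator) replaced by hypothetical string stand-ins purely for illustration:

// Sketch of the stale-record predicate for a given alias and root timestamp.
// The quoted type/operator strings are illustrative stand-ins for enum members.
function staleRecordPredicate(alias, rootTimestamp) {
    const timestampCheck = {
        type: 'comparison',
        left: { type: 'extract', jsonAlias: alias, field: 'ingestionTimestamp', metadata: true },
        operator: 'gte',
        right: { type: 'intLiteral', value: rootTimestamp },
    };
    const isDraft = {
        type: 'nullComparison',
        left: { type: 'extract', jsonAlias: alias, field: 'drafts' },
        operator: 'isNot',
    };
    return { type: 'compound', operator: 'or', children: [timestampCheck, isDraft] };
}

console.log(JSON.stringify(staleRecordPredicate('Account', 1699999999990)));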
@@ -4134,6 +4129,20 @@ function rootRecordQuery(selection, input) {
4134
4129
  if (input.objectInfoMap[alias] === undefined) {
4135
4130
  return failure([missingObjectInfo(apiName)]);
4136
4131
  }
4132
+ // When the exclude stale records gate is open and the query has an
4133
+ // ingestion timestamp in its cache metadata, associate that with the input
4134
+ // so it can later be used to limit the search to records that were ingested at
4135
+ // least as recently as the query.
4136
+ if (excludeStaleRecordsGate.isOpen({ fallback: false })) {
4137
+ const key = input.connectionKeyBuilder(selection, input.config.variables);
4138
+ const queryMetadata = input.metadata[key];
4139
+ // If there is no metadata for this query or it somehow lacks a timestamp
4140
+ // skip setting the root timestamp
4141
+ if (queryMetadata !== undefined && queryMetadata.ingestionTimestamp !== undefined) {
4142
+ // subtract 10ms from timestamp to account for ingestion processing time
4143
+ input.rootTimestamp = queryMetadata.ingestionTimestamp - 10;
4144
+ }
4145
+ }
4137
4146
  return recordQuery(selection, alias, apiName, [], input);
4138
4147
  }
4139
4148
  function rootQuery(recordNodes, input) {
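The metadata lookup that used to live in recordQuery now runs once in rootRecordQuery: when the gate is open and the query's cache metadata carries an ingestion timestamp, the parser input gets a rootTimestamp set to that value minus a 10 ms allowance for ingestion processing. A small sketch of the derivation, with a hypothetical metadata map and connection key standing in for input.metadata and input.connectionKeyBuilder:

// Hypothetical cache metadata keyed by connection key.
const metadata = { 'gql::Account': { ingestionTimestamp: 1700000000000 } };
const key = 'gql::Account'; // stands in for input.connectionKeyBuilder(selection, variables)

const queryMetadata = metadata[key];
let rootTimestamp;
if (queryMetadata !== undefined && queryMetadata.ingestionTimestamp !== undefined) {
    // subtract 10ms from the timestamp to account for ingestion processing time
    rootTimestamp = queryMetadata.ingestionTimestamp - 10;
}
console.log(rootTimestamp); // 1699999999990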
@@ -9044,7 +9053,7 @@ function dataTypeToType(objectInfoDataType, apiName) {
9044
9053
  case 'Url':
9045
9054
  return 'UrlValue';
9046
9055
  case 'Picklist':
9047
- return 'UrlValue';
9056
+ return 'PicklistValue';
9048
9057
  case 'MultiPicklist':
9049
9058
  return 'MultiPicklistValue';
9050
9059
  case 'Percent':
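This one-line hunk fixes a mapping bug in dataTypeToType: the 'Picklist' case previously returned 'UrlValue' (a copy of the case above it) and now returns 'PicklistValue', in line with the neighboring cases. An isolated sketch of the corrected mapping, covering only the cases visible here:

// Covers only the cases shown in this hunk; the real switch handles many more types.
function dataTypeToValueType(objectInfoDataType) {
    switch (objectInfoDataType) {
        case 'Url':
            return 'UrlValue';
        case 'Picklist':
            return 'PicklistValue'; // previously returned 'UrlValue' by mistake
        case 'MultiPicklist':
            return 'MultiPicklistValue';
        default:
            return undefined;
    }
}
console.log(dataTypeToValueType('Picklist')); // 'PicklistValue'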
@@ -15357,16 +15366,19 @@ function generateTypedBatches(work, batchSize) {
15357
15366
 
15358
15367
  const DEFAULT_BATCH_SIZE = 500;
15359
15368
  const DEFAULT_CONCURRENCY = 6;
15369
+ const DEFAULT_GQL_QUERY_BATCH_SIZE = 5;
15360
15370
  class PrimingSession extends EventEmitter {
15361
15371
  constructor(config) {
15362
15372
  var _a, _b;
15363
15373
  super();
15374
+ this.useBatchGQL = false;
15364
15375
  this.batchSize = (_a = config.batchSize) !== null && _a !== void 0 ? _a : DEFAULT_BATCH_SIZE;
15365
15376
  this.concurrency = (_b = config.concurrency) !== null && _b !== void 0 ? _b : DEFAULT_CONCURRENCY;
15366
15377
  this.recordLoader = config.recordLoader;
15367
15378
  this.recordIngestor = config.recordIngestor;
15368
15379
  this.objectInfoLoader = config.objectInfoLoader;
15369
15380
  this.networkWorkerPool = new AsyncWorkerPool(this.concurrency);
15381
+ this.useBatchGQL = ldsPrimingGraphqlBatch.isOpen({ fallback: false });
15370
15382
  }
15371
15383
  // function that enqueues priming work
15372
15384
  async enqueue(work) {
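PrimingSession now snapshots the gate into this.useBatchGQL at construction, and the new DEFAULT_GQL_QUERY_BATCH_SIZE of 5 caps how many record batches are folded into a single batch GraphQL request. A sketch of the resulting fan-out, using a hypothetical chunk helper with the same signature as the one enqueueBatches calls below:

// Hypothetical chunk helper matching how chunk(batches, size) is used in enqueueBatches.
const chunk = (arr, size) =>
    Array.from({ length: Math.ceil(arr.length / size) }, (_, i) => arr.slice(i * size, (i + 1) * size));

const DEFAULT_BATCH_SIZE = 500;
const DEFAULT_GQL_QUERY_BATCH_SIZE = 5;

// 2400 record ids -> 5 typed batches of up to 500 ids -> 1 batch GraphQL request carrying 5 queries.
const ids = Array.from({ length: 2400 }, (_, i) => 'id' + i);
const batches = chunk(ids, DEFAULT_BATCH_SIZE);
const requests = chunk(batches, DEFAULT_GQL_QUERY_BATCH_SIZE);
console.log(batches.length, requests.length); // 5 1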
@@ -15401,91 +15413,129 @@ class PrimingSession extends EventEmitter {
15401
15413
  }
15402
15414
  // parallelizes batches of priming work
15403
15415
  enqueueBatches(batches) {
15404
- for (const batch of batches) {
15405
- const queuedTime = Date.now();
15406
- this.networkWorkerPool.push({
15407
- workFn: (abortController) => {
15408
- const workTime = Date.now();
15409
- this.emit('batch-starting', { queuedTime: workTime - queuedTime });
15410
- return this.recordLoader
15411
- .fetchRecordData(batch, abortController)
15412
- .then(async (result) => {
15413
- if (abortController.aborted) {
15414
- return;
15415
- }
15416
- this.emit('batch-fetched', {
15416
+ if (this.useBatchGQL === false) {
15417
+ for (const batch of batches) {
15418
+ const queuedTime = Date.now();
15419
+ this.networkWorkerPool.push({
15420
+ workFn: (abortController) => {
15421
+ const workTime = Date.now();
15422
+ this.emit('batch-starting', { queuedTime: workTime - queuedTime });
15423
+ return this.recordLoader
15424
+ .fetchRecordData(batch, abortController)
15425
+ .then(async (result) => {
15426
+ this.emit('batch-fetched', {
15427
+ ids: batch.ids,
15428
+ duration: Date.now() - workTime,
15429
+ });
15430
+ this.processFetchedRecords(result, abortController);
15431
+ });
15432
+ },
15433
+ cancelFn: () => {
15434
+ this.emit('error', {
15417
15435
  ids: batch.ids,
15418
- duration: Date.now() - workTime,
15436
+ code: 'canceled',
15437
+ message: `batch canceled`,
15419
15438
  });
15420
- if (result.ok === false) {
15421
- const { error } = result;
15422
- const primingError = error === 'network-error' ? 'service-unavailable' : 'unknown';
15423
- this.emit('error', {
15424
- ids: result.missingIds,
15425
- code: primingError,
15426
- message: `${result.messages.join(',')}`,
15427
- });
15428
- return;
15429
- }
15430
- const { missingIds } = result;
15431
- if (missingIds.length > 0) {
15432
- this.emit('error', {
15433
- ids: missingIds,
15434
- code: 'not-found',
15435
- message: `could not find records: ${missingIds.join(', ')}`,
15436
- });
15437
- }
15438
- const { records } = result;
15439
- const beforeWrite = Date.now();
15440
- // dispatch the write but DO NOT wait on it to unblock the network pool
15441
- this.recordIngestor
15442
- .insertRecords(records)
15443
- .then(({ written, conflicted, errors }) => {
15444
- this.emit('batch-written', {
15445
- written,
15446
- conflicted,
15447
- errors: errors
15448
- .map((e) => e.ids)
15449
- .reduce((a, b) => a.concat(b), []),
15450
- duration: Date.now() - beforeWrite,
15451
- });
15439
+ },
15440
+ });
15441
+ }
15442
+ }
15443
+ else {
15444
+ const chucks = chunk(batches, DEFAULT_GQL_QUERY_BATCH_SIZE);
15445
+ for (const batchChuck of chucks) {
15446
+ const queuedTime = Date.now();
15447
+ this.networkWorkerPool.push({
15448
+ workFn: (abortController) => {
15449
+ const workTime = Date.now();
15450
+ this.emit('batch-starting', { queuedTime: workTime - queuedTime });
15451
+ return this.recordLoader
15452
+ .batchFetchRecordData(batchChuck, abortController)
15453
+ .then(async (results) => {
15452
15454
  if (abortController.aborted) {
15453
15455
  return;
15454
15456
  }
15455
- if (errors.length > 0) {
15456
- errors.forEach(({ ids, message }) => {
15457
- this.emit('error', {
15458
- ids,
15459
- code: 'unknown',
15460
- message: message,
15461
- });
15457
+ const duration = Date.now() - workTime;
15458
+ // Every query within the batch GraphQL request resolves at the same time, so they share one duration
15459
+ for (let i = 0; i < results.length; i++) {
15460
+ this.emit('batch-fetched', {
15461
+ ids: batchChuck[i].ids,
15462
+ duration,
15462
15463
  });
15463
15464
  }
15464
- // now that the records are persisted, emit the primed event
15465
- if (written.length > 0) {
15466
- this.emit('primed', Array.from(written));
15467
- }
15468
- // TODO [W-12436213]: implement conflict resolution
15469
- if (conflicted.length > 0) {
15470
- // for now emit conlicts as errors
15471
- this.emit('error', {
15472
- ids: Array.from(conflicted),
15473
- code: 'unknown',
15474
- message: 'conflict when persisting record',
15475
- });
15465
+ for (let i = 0; i < results.length; i++) {
15466
+ this.processFetchedRecords(results[i], abortController);
15476
15467
  }
15477
15468
  });
15478
- });
15479
- },
15480
- cancelFn: () => {
15481
- this.emit('error', {
15482
- ids: batch.ids,
15483
- code: 'canceled',
15484
- message: `batch canceled`,
15485
- });
15486
- },
15469
+ },
15470
+ cancelFn: () => {
15471
+ const chuckIds = batchChuck
15472
+ .map((batch) => batch.ids)
15473
+ .reduce((prev, curr) => prev.concat(curr), []);
15474
+ this.emit('error', {
15475
+ ids: chuckIds,
15476
+ code: 'canceled',
15477
+ message: `batch canceled`,
15478
+ });
15479
+ },
15480
+ });
15481
+ }
15482
+ }
15483
+ }
15484
+ processFetchedRecords(result, abortController) {
15485
+ if (result.ok === false) {
15486
+ const { error } = result;
15487
+ const primingError = error === 'network-error' ? 'service-unavailable' : 'unknown';
15488
+ this.emit('error', {
15489
+ ids: result.missingIds,
15490
+ code: primingError,
15491
+ message: `${result.messages.join(',')}`,
15492
+ });
15493
+ return;
15494
+ }
15495
+ const { missingIds } = result;
15496
+ if (missingIds.length > 0) {
15497
+ this.emit('error', {
15498
+ ids: missingIds,
15499
+ code: 'not-found',
15500
+ message: `could not find records: ${missingIds.join(', ')}`,
15487
15501
  });
15488
15502
  }
15503
+ const { records } = result;
15504
+ const beforeWrite = Date.now();
15505
+ // dispatch the write but DO NOT wait on it to unblock the network pool
15506
+ this.recordIngestor.insertRecords(records).then(({ written, conflicted, errors }) => {
15507
+ this.emit('batch-written', {
15508
+ written,
15509
+ conflicted,
15510
+ errors: errors.map((e) => e.ids).reduce((a, b) => a.concat(b), []),
15511
+ duration: Date.now() - beforeWrite,
15512
+ });
15513
+ if (abortController.aborted) {
15514
+ return;
15515
+ }
15516
+ if (errors.length > 0) {
15517
+ errors.forEach(({ ids, message }) => {
15518
+ this.emit('error', {
15519
+ ids,
15520
+ code: 'unknown',
15521
+ message: message,
15522
+ });
15523
+ });
15524
+ }
15525
+ // now that the records are persisted, emit the primed event
15526
+ if (written.length > 0) {
15527
+ this.emit('primed', Array.from(written));
15528
+ }
15529
+ // TODO [W-12436213]: implement conflict resolution
15530
+ if (conflicted.length > 0) {
15531
+ // for now emit conlicts as errors
15532
+ this.emit('error', {
15533
+ ids: Array.from(conflicted),
15534
+ code: 'unknown',
15535
+ message: 'conflict when persisting record',
15536
+ });
15537
+ }
15538
+ });
15489
15539
  }
15490
15540
  async fetchMetadata(batches) {
15491
15541
  const apiNames = Array.from(batches.reduce((acc, x) => {
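enqueueBatches keeps the original one-request-per-batch path when the gate is closed; when it is open, each worker handles a chunk of up to five batches via recordLoader.batchFetchRecordData, and the shared post-fetch handling (error classification, missing ids, the deferred insertRecords write, and the primed/error events) moves into the new processFetchedRecords method. For orientation, a sketch of observing a session from the outside, assuming the EventEmitter base exposes a Node-style on(event, handler); only the event names and payload fields come from the code above:

// session is assumed to be a constructed PrimingSession.
function observePrimingSession(session) {
    session.on('batch-starting', ({ queuedTime }) => console.log('queued', queuedTime, 'ms'));
    session.on('batch-fetched', ({ ids, duration }) => console.log('fetched', ids.length, 'ids in', duration, 'ms'));
    session.on('batch-written', ({ written, conflicted }) => console.log('wrote', written.length, 'conflicted', conflicted.length));
    session.on('primed', (ids) => console.log('primed', ids.length, 'records'));
    session.on('error', ({ code, ids, message }) => console.warn(code, ids.length, message));
}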
@@ -15538,12 +15588,39 @@ class RecordLoaderGraphQL {
15538
15588
  missingIds: batch.ids,
15539
15589
  };
15540
15590
  }
15541
- const { data, errors } = rep;
15591
+ return this.generateFetchResult(rep, batch);
15592
+ }
15593
+ async batchFetchRecordData(batchs, abortController) {
15594
+ let reps;
15595
+ try {
15596
+ reps = await this.callBatchGraphQL(batchs, abortController);
15597
+ }
15598
+ catch (e) {
15599
+ const missingIds = batchs
15600
+ .map((batch) => batch.ids)
15601
+ .reduce((prev, curr) => prev.concat(curr), []);
15602
+ return [
15603
+ {
15604
+ ok: false,
15605
+ error: 'network-error',
15606
+ messages: ['Network Error'],
15607
+ missingIds,
15608
+ },
15609
+ ];
15610
+ }
15611
+ const recordFetchResults = [];
15612
+ for (let i = 0; i < reps.length; i++) {
15613
+ recordFetchResults.push(this.generateFetchResult(reps[i], batchs[i]));
15614
+ }
15615
+ return recordFetchResults;
15616
+ }
15617
+ generateFetchResult(repResult, batchInput) {
15618
+ const { data, errors } = repResult;
15542
15619
  if (errors !== undefined && errors.length > 0) {
15543
15620
  // right now if there are any errors in the batch we throw out the entire batch
15544
15621
  // for now this is ok all errors will originate on the same node so there shouldn't be a mix of errors and data
15545
15622
  return {
15546
- missingIds: batch.ids,
15623
+ missingIds: batchInput.ids,
15547
15624
  ok: false,
15548
15625
  error: 'request-error',
15549
15626
  messages: errors.map((x) => x.message),
@@ -15555,11 +15632,11 @@ class RecordLoaderGraphQL {
15555
15632
  ok: false,
15556
15633
  error: 'unknown',
15557
15634
  messages: ['unexpected response retrieved from graphql endpoint'],
15558
- missingIds: batch.ids,
15635
+ missingIds: batchInput.ids,
15559
15636
  };
15560
15637
  }
15561
- const seenRecords = new Set(batch.ids);
15562
- const records = data.uiapi.query[batch.type].edges.map((edge) => {
15638
+ const seenRecords = new Set(batchInput.ids);
15639
+ const records = data.uiapi.query[batchInput.type].edges.map((edge) => {
15563
15640
  const record = this.generateDurableRecordRepresentation(edge.node);
15564
15641
  seenRecords.delete(record.id);
15565
15642
  return record;
@@ -15574,6 +15651,15 @@ class RecordLoaderGraphQL {
15574
15651
  const query = this.generateGraphQLQuery(batch.type, batch.fields);
15575
15652
  return this.networkAdapter.postGraphQL(query, { ids: batch.ids, first: batch.ids.length }, abortController);
15576
15653
  }
15654
+ callBatchGraphQL(batches, abortController) {
15655
+ const gqlInput = batches.map((batch) => {
15656
+ return {
15657
+ query: this.generateGraphQLQuery(batch.type, batch.fields),
15658
+ variables: { ids: batch.ids, first: batch.ids.length },
15659
+ };
15660
+ });
15661
+ return this.networkAdapter.postBatchGraphQL(gqlInput, abortController);
15662
+ }
15577
15663
  generateGraphQLQuery(type, fields) {
15578
15664
  const fieldList = Object.keys(requiredFieldMap)
15579
15665
  .map((field) => {
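fetchRecordData and the new batchFetchRecordData now share generateFetchResult for response handling, and callBatchGraphQL builds one { query, variables } entry per batch before handing the array to the adapter's postBatchGraphQL. A sketch of the payload that mapping produces for two hypothetical batches (the query text is a placeholder for generateGraphQLQuery output, and the ids and fields are illustrative):

// Hypothetical batches of priming work.
const batches = [
    { type: 'Account', ids: ['001xx000003DGb1AAG'], fields: ['Name'] },
    { type: 'Contact', ids: ['003xx000004TmiQAAS', '003xx000004TmiRAAS'], fields: ['LastName'] },
];

const gqlInput = batches.map((batch) => ({
    query: '/* generateGraphQLQuery(' + batch.type + ', fields) */',
    variables: { ids: batch.ids, first: batch.ids.length },
}));
console.log(JSON.stringify(gqlInput, null, 2));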
@@ -15697,7 +15783,56 @@ function instrumentPrimingSession(session) {
15697
15783
  /* global __nimbus */
15698
15784
  // note this is automatically incremented by scripts/release/bump-api-version.js at each release
15699
15785
  const apiVersion = `v59.0`;
15786
+ const batchEndPointPath = `/services/data/${apiVersion}/graphql/batch`;
15787
+ const endPointPath = `/services/data/${apiVersion}/graphql`;
15700
15788
  class NimbusPrimingNetworkAdapter {
15789
+ postBatchGraphQL(configs, abortController) {
15790
+ return new Promise((resolve, reject) => {
15791
+ let listener;
15792
+ const unregisterListener = () => {
15793
+ if (listener) {
15794
+ abortController.removeEventListener(listener);
15795
+ }
15796
+ };
15797
+ __nimbus.plugins.LdsNetworkAdapter
15798
+ .sendRequest({
15799
+ method: 'POST',
15800
+ path: batchEndPointPath,
15801
+ body: JSON.stringify({
15802
+ batchQuery: configs,
15803
+ }),
15804
+ headers: {},
15805
+ queryParams: {},
15806
+ priority: 'background',
15807
+ observabilityContext: {},
15808
+ }, (response) => {
15809
+ unregisterListener();
15810
+ const { body } = response;
15811
+ if (body) {
15812
+ const { results } = JSON.parse(body);
15813
+ if (results) {
15814
+ const gqlResults = results.map((compositeGqlResult) => compositeGqlResult.result);
15815
+ resolve(gqlResults);
15816
+ }
15817
+ else {
15818
+ reject(new Error(`No body returned from ${batchEndPointPath} endpoint`));
15819
+ }
15820
+ }
15821
+ else {
15822
+ reject(new Error(`No body returned from ${batchEndPointPath} endpoint`));
15823
+ }
15824
+ }, (error) => {
15825
+ unregisterListener();
15826
+ reject(error);
15827
+ })
15828
+ .then((cancellationToken) => {
15829
+ listener = () => {
15830
+ __nimbus.plugins.LdsNetworkAdapter.cancelRequest(cancellationToken);
15831
+ };
15832
+ abortController.addEventListener(listener);
15833
+ });
15834
+ });
15835
+ }
15701
15836
  postGraphQL(query, variables, abortController) {
15702
15837
  return new Promise((resolve, reject) => {
15703
15838
  let listener;
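postBatchGraphQL posts the collected queries to the new /graphql/batch endpoint as { batchQuery: configs } and resolves with each composite entry's result field. A sketch of the response shape that unwrapping assumes (the statusCode field is illustrative; only results and result come from the code above):

// Simulated response body from /services/data/v59.0/graphql/batch.
const body = JSON.stringify({
    results: [
        { statusCode: 200, result: { data: { uiapi: { query: {} } }, errors: [] } },
        { statusCode: 200, result: { data: { uiapi: { query: {} } }, errors: [] } },
    ],
});

const { results } = JSON.parse(body);
const gqlResults = results.map((compositeGqlResult) => compositeGqlResult.result);
console.log(gqlResults.length); // 2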
@@ -15709,7 +15844,7 @@ class NimbusPrimingNetworkAdapter {
15709
15844
  __nimbus.plugins.LdsNetworkAdapter
15710
15845
  .sendRequest({
15711
15846
  method: 'POST',
15712
- path: `/services/data/${apiVersion}/graphql`,
15847
+ path: endPointPath,
15713
15848
  body: JSON.stringify({
15714
15849
  query,
15715
15850
  variables,
@@ -15725,7 +15860,7 @@ class NimbusPrimingNetworkAdapter {
15725
15860
  resolve(JSON.parse(body));
15726
15861
  }
15727
15862
  else {
15728
- reject(new Error('No body returned from graphql endpoint'));
15863
+ reject(new Error(`No body returned from ${endPointPath} endpoint`));
15729
15864
  }
15730
15865
  }, (error) => {
15731
15866
  unregisterListener();
@@ -15976,4 +16111,4 @@ register({
15976
16111
  });
15977
16112
 
15978
16113
  export { getRuntime, registerReportObserver, reportGraphqlQueryParseError };
15979
- // version: 1.156.0-edce97283
16114
+ // version: 1.157.0-4246d2656
@@ -1,6 +1,7 @@
1
1
  import type { PrimingNetworkAdapter } from '@salesforce/lds-priming';
2
- import type { GraphQLRepresentation } from '@salesforce/lds-adapters-uiapi';
2
+ import type { GraphQLRepresentation, GraphQLInputRepresentation } from '@salesforce/lds-adapters-uiapi';
3
3
  import type { LdsAbortController } from '@salesforce/lds-utils-adapters';
4
4
  export declare class NimbusPrimingNetworkAdapter implements PrimingNetworkAdapter {
5
+ postBatchGraphQL(configs: GraphQLInputRepresentation[], abortController: LdsAbortController): Promise<GraphQLRepresentation[]>;
5
6
  postGraphQL(query: string, variables: Record<string, any>, abortController: LdsAbortController): Promise<GraphQLRepresentation>;
6
7
  }
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@salesforce/lds-runtime-mobile",
3
- "version": "1.156.0",
3
+ "version": "1.157.0",
4
4
  "license": "SEE LICENSE IN LICENSE.txt",
5
5
  "description": "LDS runtime for mobile/hybrid environments.",
6
6
  "main": "dist/main.js",
@@ -58,7 +58,7 @@
58
58
  "path": "./dist/main.js",
59
59
  "maxSize": {
60
60
  "none": "700 kB",
61
- "min": "280 kB",
61
+ "min": "282 kB",
62
62
  "compressed": "110 kB"
63
63
  }
64
64
  },
@@ -66,7 +66,7 @@
66
66
  "path": "./sfdc/main.js",
67
67
  "maxSize": {
68
68
  "none": "700 kB",
69
- "min": "280 kB",
69
+ "min": "282 kB",
70
70
  "compressed": "110 kB"
71
71
  }
72
72
  }
package/sfdc/main.js CHANGED
@@ -31,6 +31,7 @@ import formattingOptions from 'lightning/i18nCldrOptions';
31
31
  import eagerEvalValidAt from '@salesforce/gate/lds.eagerEvalValidAt';
32
32
  import eagerEvalStaleWhileRevalidate from '@salesforce/gate/lds.eagerEvalStaleWhileRevalidate';
33
33
  import eagerEvalDefaultCachePolicy from '@salesforce/gate/lds.eagerEvalDefaultCachePolicy';
34
+ import ldsPrimingGraphqlBatch from '@salesforce/gate/lds.primingGraphqlBatch';
34
35
 
35
36
  /**
36
37
  * Copyright (c) 2022, Salesforce, Inc.,
@@ -4053,38 +4054,32 @@ function recordQuery(selection, apiName, alias, predicates, input) {
4053
4054
  const draftsField = { type: FieldType.Scalar, extract, path: 'node._drafts' };
4054
4055
  const idExtract = { type: ValueType.Extract, jsonAlias: alias, field: 'Id' };
4055
4056
  const idField = { type: FieldType.Scalar, extract: idExtract, path: 'node.Id' };
4056
- // When the exclude stale records gate is open, inject an additional predicate
4057
- // to limit the search to records that either have drafts associated to them or
4058
- // were ingested at least as recently as the query.
4059
- if (excludeStaleRecordsGate.isOpen({ fallback: false })) {
4060
- const key = input.connectionKeyBuilder(selection, input.config.variables);
4061
- const queryMetadata = input.metadata[key];
4062
- // If there is no metadata for this query or it somehow lacks a timestamp
4063
- // skip adding the additional predicates
4064
- if (queryMetadata !== undefined && queryMetadata.ingestionTimestamp !== undefined) {
4065
- const timestamp = queryMetadata.ingestionTimestamp;
4066
- const timestampCheck = {
4067
- type: PredicateType$1.comparison,
4068
- left: {
4069
- type: ValueType.Extract,
4070
- jsonAlias: alias,
4071
- field: 'ingestionTimestamp',
4072
- metadata: true,
4073
- },
4074
- operator: ComparisonOperator.gte,
4075
- right: { type: ValueType.IntLiteral, value: timestamp },
4076
- };
4077
- const isDraft = {
4078
- type: PredicateType$1.nullComparison,
4079
- left: { type: ValueType.Extract, jsonAlias: alias, field: 'drafts' },
4080
- operator: NullComparisonOperator.isNot,
4081
- };
4082
- predicates.push({
4083
- type: PredicateType$1.compound,
4084
- operator: CompoundOperator.or,
4085
- children: [timestampCheck, isDraft],
4086
- });
4087
- }
4057
+ // When the exclude stale records gate is open and there is a root timestamp
4058
+ // in the parser input, inject an additional predicate to limit the search
4059
+ // to records that either have drafts associated to them or were ingested at
4060
+ // least as recently as the query.
4061
+ if (excludeStaleRecordsGate.isOpen({ fallback: false }) && input.rootTimestamp !== undefined) {
4062
+ const timestampCheck = {
4063
+ type: PredicateType$1.comparison,
4064
+ left: {
4065
+ type: ValueType.Extract,
4066
+ jsonAlias: alias,
4067
+ field: 'ingestionTimestamp',
4068
+ metadata: true,
4069
+ },
4070
+ operator: ComparisonOperator.gte,
4071
+ right: { type: ValueType.IntLiteral, value: input.rootTimestamp },
4072
+ };
4073
+ const isDraft = {
4074
+ type: PredicateType$1.nullComparison,
4075
+ left: { type: ValueType.Extract, jsonAlias: alias, field: 'drafts' },
4076
+ operator: NullComparisonOperator.isNot,
4077
+ };
4078
+ predicates.push({
4079
+ type: PredicateType$1.compound,
4080
+ operator: CompoundOperator.or,
4081
+ children: [timestampCheck, isDraft],
4082
+ });
4088
4083
  }
4089
4084
  return queryContainer(internalFields, alias, apiName, predicates).map((result) => {
4090
4085
  const { fields, predicates } = result;
@@ -4134,6 +4129,20 @@ function rootRecordQuery(selection, input) {
4134
4129
  if (input.objectInfoMap[alias] === undefined) {
4135
4130
  return failure([missingObjectInfo(apiName)]);
4136
4131
  }
4132
+ // When the exclude stale records gate is open and the query has an
4133
+ // ingestion timestamp in its cache metadata, associate that with the input
4134
+ // so it can later be used to limit the search to records that were ingested at
4135
+ // least as recently as the query.
4136
+ if (excludeStaleRecordsGate.isOpen({ fallback: false })) {
4137
+ const key = input.connectionKeyBuilder(selection, input.config.variables);
4138
+ const queryMetadata = input.metadata[key];
4139
+ // If there is no metadata for this query or it somehow lacks a timestamp
4140
+ // skip setting the root timestamp
4141
+ if (queryMetadata !== undefined && queryMetadata.ingestionTimestamp !== undefined) {
4142
+ // subtract 10ms from timestamp to account for ingestion processing time
4143
+ input.rootTimestamp = queryMetadata.ingestionTimestamp - 10;
4144
+ }
4145
+ }
4137
4146
  return recordQuery(selection, alias, apiName, [], input);
4138
4147
  }
4139
4148
  function rootQuery(recordNodes, input) {
@@ -9044,7 +9053,7 @@ function dataTypeToType(objectInfoDataType, apiName) {
9044
9053
  case 'Url':
9045
9054
  return 'UrlValue';
9046
9055
  case 'Picklist':
9047
- return 'UrlValue';
9056
+ return 'PicklistValue';
9048
9057
  case 'MultiPicklist':
9049
9058
  return 'MultiPicklistValue';
9050
9059
  case 'Percent':
@@ -15357,16 +15366,19 @@ function generateTypedBatches(work, batchSize) {
15357
15366
 
15358
15367
  const DEFAULT_BATCH_SIZE = 500;
15359
15368
  const DEFAULT_CONCURRENCY = 6;
15369
+ const DEFAULT_GQL_QUERY_BATCH_SIZE = 5;
15360
15370
  class PrimingSession extends EventEmitter {
15361
15371
  constructor(config) {
15362
15372
  var _a, _b;
15363
15373
  super();
15374
+ this.useBatchGQL = false;
15364
15375
  this.batchSize = (_a = config.batchSize) !== null && _a !== void 0 ? _a : DEFAULT_BATCH_SIZE;
15365
15376
  this.concurrency = (_b = config.concurrency) !== null && _b !== void 0 ? _b : DEFAULT_CONCURRENCY;
15366
15377
  this.recordLoader = config.recordLoader;
15367
15378
  this.recordIngestor = config.recordIngestor;
15368
15379
  this.objectInfoLoader = config.objectInfoLoader;
15369
15380
  this.networkWorkerPool = new AsyncWorkerPool(this.concurrency);
15381
+ this.useBatchGQL = ldsPrimingGraphqlBatch.isOpen({ fallback: false });
15370
15382
  }
15371
15383
  // function that enqueues priming work
15372
15384
  async enqueue(work) {
@@ -15401,91 +15413,129 @@ class PrimingSession extends EventEmitter {
15401
15413
  }
15402
15414
  // parallelizes batches of priming work
15403
15415
  enqueueBatches(batches) {
15404
- for (const batch of batches) {
15405
- const queuedTime = Date.now();
15406
- this.networkWorkerPool.push({
15407
- workFn: (abortController) => {
15408
- const workTime = Date.now();
15409
- this.emit('batch-starting', { queuedTime: workTime - queuedTime });
15410
- return this.recordLoader
15411
- .fetchRecordData(batch, abortController)
15412
- .then(async (result) => {
15413
- if (abortController.aborted) {
15414
- return;
15415
- }
15416
- this.emit('batch-fetched', {
15416
+ if (this.useBatchGQL === false) {
15417
+ for (const batch of batches) {
15418
+ const queuedTime = Date.now();
15419
+ this.networkWorkerPool.push({
15420
+ workFn: (abortController) => {
15421
+ const workTime = Date.now();
15422
+ this.emit('batch-starting', { queuedTime: workTime - queuedTime });
15423
+ return this.recordLoader
15424
+ .fetchRecordData(batch, abortController)
15425
+ .then(async (result) => {
15426
+ this.emit('batch-fetched', {
15427
+ ids: batch.ids,
15428
+ duration: Date.now() - workTime,
15429
+ });
15430
+ this.processFetchedRecords(result, abortController);
15431
+ });
15432
+ },
15433
+ cancelFn: () => {
15434
+ this.emit('error', {
15417
15435
  ids: batch.ids,
15418
- duration: Date.now() - workTime,
15436
+ code: 'canceled',
15437
+ message: `batch canceled`,
15419
15438
  });
15420
- if (result.ok === false) {
15421
- const { error } = result;
15422
- const primingError = error === 'network-error' ? 'service-unavailable' : 'unknown';
15423
- this.emit('error', {
15424
- ids: result.missingIds,
15425
- code: primingError,
15426
- message: `${result.messages.join(',')}`,
15427
- });
15428
- return;
15429
- }
15430
- const { missingIds } = result;
15431
- if (missingIds.length > 0) {
15432
- this.emit('error', {
15433
- ids: missingIds,
15434
- code: 'not-found',
15435
- message: `could not find records: ${missingIds.join(', ')}`,
15436
- });
15437
- }
15438
- const { records } = result;
15439
- const beforeWrite = Date.now();
15440
- // dispatch the write but DO NOT wait on it to unblock the network pool
15441
- this.recordIngestor
15442
- .insertRecords(records)
15443
- .then(({ written, conflicted, errors }) => {
15444
- this.emit('batch-written', {
15445
- written,
15446
- conflicted,
15447
- errors: errors
15448
- .map((e) => e.ids)
15449
- .reduce((a, b) => a.concat(b), []),
15450
- duration: Date.now() - beforeWrite,
15451
- });
15439
+ },
15440
+ });
15441
+ }
15442
+ }
15443
+ else {
15444
+ const chucks = chunk(batches, DEFAULT_GQL_QUERY_BATCH_SIZE);
15445
+ for (const batchChuck of chucks) {
15446
+ const queuedTime = Date.now();
15447
+ this.networkWorkerPool.push({
15448
+ workFn: (abortController) => {
15449
+ const workTime = Date.now();
15450
+ this.emit('batch-starting', { queuedTime: workTime - queuedTime });
15451
+ return this.recordLoader
15452
+ .batchFetchRecordData(batchChuck, abortController)
15453
+ .then(async (results) => {
15452
15454
  if (abortController.aborted) {
15453
15455
  return;
15454
15456
  }
15455
- if (errors.length > 0) {
15456
- errors.forEach(({ ids, message }) => {
15457
- this.emit('error', {
15458
- ids,
15459
- code: 'unknown',
15460
- message: message,
15461
- });
15457
+ const duration = Date.now() - workTime;
15458
+ // Every query within the batch GraphQL request resolves at the same time, so they share one duration
15459
+ for (let i = 0; i < results.length; i++) {
15460
+ this.emit('batch-fetched', {
15461
+ ids: batchChuck[i].ids,
15462
+ duration,
15462
15463
  });
15463
15464
  }
15464
- // now that the records are persisted, emit the primed event
15465
- if (written.length > 0) {
15466
- this.emit('primed', Array.from(written));
15467
- }
15468
- // TODO [W-12436213]: implement conflict resolution
15469
- if (conflicted.length > 0) {
15470
- // for now emit conlicts as errors
15471
- this.emit('error', {
15472
- ids: Array.from(conflicted),
15473
- code: 'unknown',
15474
- message: 'conflict when persisting record',
15475
- });
15465
+ for (let i = 0; i < results.length; i++) {
15466
+ this.processFetchedRecords(results[i], abortController);
15476
15467
  }
15477
15468
  });
15478
- });
15479
- },
15480
- cancelFn: () => {
15481
- this.emit('error', {
15482
- ids: batch.ids,
15483
- code: 'canceled',
15484
- message: `batch canceled`,
15485
- });
15486
- },
15469
+ },
15470
+ cancelFn: () => {
15471
+ const chuckIds = batchChuck
15472
+ .map((batch) => batch.ids)
15473
+ .reduce((prev, curr) => prev.concat(curr), []);
15474
+ this.emit('error', {
15475
+ ids: chuckIds,
15476
+ code: 'canceled',
15477
+ message: `batch canceled`,
15478
+ });
15479
+ },
15480
+ });
15481
+ }
15482
+ }
15483
+ }
15484
+ processFetchedRecords(result, abortController) {
15485
+ if (result.ok === false) {
15486
+ const { error } = result;
15487
+ const primingError = error === 'network-error' ? 'service-unavailable' : 'unknown';
15488
+ this.emit('error', {
15489
+ ids: result.missingIds,
15490
+ code: primingError,
15491
+ message: `${result.messages.join(',')}`,
15492
+ });
15493
+ return;
15494
+ }
15495
+ const { missingIds } = result;
15496
+ if (missingIds.length > 0) {
15497
+ this.emit('error', {
15498
+ ids: missingIds,
15499
+ code: 'not-found',
15500
+ message: `could not find records: ${missingIds.join(', ')}`,
15487
15501
  });
15488
15502
  }
15503
+ const { records } = result;
15504
+ const beforeWrite = Date.now();
15505
+ // dispatch the write but DO NOT wait on it to unblock the network pool
15506
+ this.recordIngestor.insertRecords(records).then(({ written, conflicted, errors }) => {
15507
+ this.emit('batch-written', {
15508
+ written,
15509
+ conflicted,
15510
+ errors: errors.map((e) => e.ids).reduce((a, b) => a.concat(b), []),
15511
+ duration: Date.now() - beforeWrite,
15512
+ });
15513
+ if (abortController.aborted) {
15514
+ return;
15515
+ }
15516
+ if (errors.length > 0) {
15517
+ errors.forEach(({ ids, message }) => {
15518
+ this.emit('error', {
15519
+ ids,
15520
+ code: 'unknown',
15521
+ message: message,
15522
+ });
15523
+ });
15524
+ }
15525
+ // now that the records are persisted, emit the primed event
15526
+ if (written.length > 0) {
15527
+ this.emit('primed', Array.from(written));
15528
+ }
15529
+ // TODO [W-12436213]: implement conflict resolution
15530
+ if (conflicted.length > 0) {
15531
+ // for now emit conlicts as errors
15532
+ this.emit('error', {
15533
+ ids: Array.from(conflicted),
15534
+ code: 'unknown',
15535
+ message: 'conflict when persisting record',
15536
+ });
15537
+ }
15538
+ });
15489
15539
  }
15490
15540
  async fetchMetadata(batches) {
15491
15541
  const apiNames = Array.from(batches.reduce((acc, x) => {
@@ -15538,12 +15588,39 @@ class RecordLoaderGraphQL {
15538
15588
  missingIds: batch.ids,
15539
15589
  };
15540
15590
  }
15541
- const { data, errors } = rep;
15591
+ return this.generateFetchResult(rep, batch);
15592
+ }
15593
+ async batchFetchRecordData(batchs, abortController) {
15594
+ let reps;
15595
+ try {
15596
+ reps = await this.callBatchGraphQL(batchs, abortController);
15597
+ }
15598
+ catch (e) {
15599
+ const missingIds = batchs
15600
+ .map((batch) => batch.ids)
15601
+ .reduce((prev, curr) => prev.concat(curr), []);
15602
+ return [
15603
+ {
15604
+ ok: false,
15605
+ error: 'network-error',
15606
+ messages: ['Network Error'],
15607
+ missingIds,
15608
+ },
15609
+ ];
15610
+ }
15611
+ const recordFetchResults = [];
15612
+ for (let i = 0; i < reps.length; i++) {
15613
+ recordFetchResults.push(this.generateFetchResult(reps[i], batchs[i]));
15614
+ }
15615
+ return recordFetchResults;
15616
+ }
15617
+ generateFetchResult(repResult, batchInput) {
15618
+ const { data, errors } = repResult;
15542
15619
  if (errors !== undefined && errors.length > 0) {
15543
15620
  // right now if there are any errors in the batch we throw out the entire batch
15544
15621
  // for now this is ok all errors will originate on the same node so there shouldn't be a mix of errors and data
15545
15622
  return {
15546
- missingIds: batch.ids,
15623
+ missingIds: batchInput.ids,
15547
15624
  ok: false,
15548
15625
  error: 'request-error',
15549
15626
  messages: errors.map((x) => x.message),
@@ -15555,11 +15632,11 @@ class RecordLoaderGraphQL {
15555
15632
  ok: false,
15556
15633
  error: 'unknown',
15557
15634
  messages: ['unexpected response retrieved from graphql endpoint'],
15558
- missingIds: batch.ids,
15635
+ missingIds: batchInput.ids,
15559
15636
  };
15560
15637
  }
15561
- const seenRecords = new Set(batch.ids);
15562
- const records = data.uiapi.query[batch.type].edges.map((edge) => {
15638
+ const seenRecords = new Set(batchInput.ids);
15639
+ const records = data.uiapi.query[batchInput.type].edges.map((edge) => {
15563
15640
  const record = this.generateDurableRecordRepresentation(edge.node);
15564
15641
  seenRecords.delete(record.id);
15565
15642
  return record;
@@ -15574,6 +15651,15 @@ class RecordLoaderGraphQL {
15574
15651
  const query = this.generateGraphQLQuery(batch.type, batch.fields);
15575
15652
  return this.networkAdapter.postGraphQL(query, { ids: batch.ids, first: batch.ids.length }, abortController);
15576
15653
  }
15654
+ callBatchGraphQL(batches, abortController) {
15655
+ const gqlInput = batches.map((batch) => {
15656
+ return {
15657
+ query: this.generateGraphQLQuery(batch.type, batch.fields),
15658
+ variables: { ids: batch.ids, first: batch.ids.length },
15659
+ };
15660
+ });
15661
+ return this.networkAdapter.postBatchGraphQL(gqlInput, abortController);
15662
+ }
15577
15663
  generateGraphQLQuery(type, fields) {
15578
15664
  const fieldList = Object.keys(requiredFieldMap)
15579
15665
  .map((field) => {
@@ -15697,7 +15783,56 @@ function instrumentPrimingSession(session) {
15697
15783
  /* global __nimbus */
15698
15784
  // note this is automatically incremented by scripts/release/bump-api-version.js at each release
15699
15785
  const apiVersion = `v59.0`;
15786
+ const batchEndPointPath = `/services/data/${apiVersion}/graphql/batch`;
15787
+ const endPointPath = `/services/data/${apiVersion}/graphql`;
15700
15788
  class NimbusPrimingNetworkAdapter {
15789
+ postBatchGraphQL(configs, abortController) {
15790
+ return new Promise((resolve, reject) => {
15791
+ let listener;
15792
+ const unregisterListener = () => {
15793
+ if (listener) {
15794
+ abortController.removeEventListener(listener);
15795
+ }
15796
+ };
15797
+ __nimbus.plugins.LdsNetworkAdapter
15798
+ .sendRequest({
15799
+ method: 'POST',
15800
+ path: batchEndPointPath,
15801
+ body: JSON.stringify({
15802
+ batchQuery: configs,
15803
+ }),
15804
+ headers: {},
15805
+ queryParams: {},
15806
+ priority: 'background',
15807
+ observabilityContext: {},
15808
+ }, (response) => {
15809
+ unregisterListener();
15810
+ const { body } = response;
15811
+ if (body) {
15812
+ const { results } = JSON.parse(body);
15813
+ if (results) {
15814
+ const gqlResults = results.map((compositeGqlResult) => compositeGqlResult.result);
15815
+ resolve(gqlResults);
15816
+ }
15817
+ else {
15818
+ reject(new Error(`No body returned from ${batchEndPointPath} endpoint`));
15819
+ }
15820
+ }
15821
+ else {
15822
+ reject(new Error(`No body returned from ${batchEndPointPath} endpoint`));
15823
+ }
15824
+ }, (error) => {
15825
+ unregisterListener();
15826
+ reject(error);
15827
+ })
15828
+ .then((cancellationToken) => {
15829
+ listener = () => {
15830
+ __nimbus.plugins.LdsNetworkAdapter.cancelRequest(cancellationToken);
15831
+ };
15832
+ abortController.addEventListener(listener);
15833
+ });
15834
+ });
15835
+ }
15701
15836
  postGraphQL(query, variables, abortController) {
15702
15837
  return new Promise((resolve, reject) => {
15703
15838
  let listener;
@@ -15709,7 +15844,7 @@ class NimbusPrimingNetworkAdapter {
15709
15844
  __nimbus.plugins.LdsNetworkAdapter
15710
15845
  .sendRequest({
15711
15846
  method: 'POST',
15712
- path: `/services/data/${apiVersion}/graphql`,
15847
+ path: endPointPath,
15713
15848
  body: JSON.stringify({
15714
15849
  query,
15715
15850
  variables,
@@ -15725,7 +15860,7 @@ class NimbusPrimingNetworkAdapter {
15725
15860
  resolve(JSON.parse(body));
15726
15861
  }
15727
15862
  else {
15728
- reject(new Error('No body returned from graphql endpoint'));
15863
+ reject(new Error(`No body returned from ${endPointPath} endpoint`));
15729
15864
  }
15730
15865
  }, (error) => {
15731
15866
  unregisterListener();
@@ -15976,4 +16111,4 @@ register({
15976
16111
  });
15977
16112
 
15978
16113
  export { getRuntime, registerReportObserver, reportGraphqlQueryParseError };
15979
- // version: 1.156.0-edce97283
16114
+ // version: 1.157.0-4246d2656
@@ -1,6 +1,7 @@
1
1
  import type { PrimingNetworkAdapter } from '@salesforce/lds-priming';
2
- import type { GraphQLRepresentation } from '@salesforce/lds-adapters-uiapi';
2
+ import type { GraphQLRepresentation, GraphQLInputRepresentation } from '@salesforce/lds-adapters-uiapi';
3
3
  import type { LdsAbortController } from '@salesforce/lds-utils-adapters';
4
4
  export declare class NimbusPrimingNetworkAdapter implements PrimingNetworkAdapter {
5
+ postBatchGraphQL(configs: GraphQLInputRepresentation[], abortController: LdsAbortController): Promise<GraphQLRepresentation[]>;
5
6
  postGraphQL(query: string, variables: Record<string, any>, abortController: LdsAbortController): Promise<GraphQLRepresentation>;
6
7
  }