@jsforce/jsforce-node 3.0.0-next.1 → 3.0.0-next.2

This diff shows the content changes between two publicly released versions of this package, as published to a supported registry. It is provided for informational purposes only.
package/lib/api/bulk.d.ts CHANGED
@@ -9,45 +9,21 @@ import { EventEmitter } from 'events';
  import { Duplex, Readable, Writable } from 'stream';
  import Connection from '../connection';
  import { Serializable, Parsable } from '../record-stream';
- import { StreamPromise } from '../util/promise';
  import { Logger } from '../util/logger';
- import { HttpMethods, Record, Schema, Optional } from '../types';
+ import { HttpMethods, Record, Schema } from '../types';
  export type BulkOperation = 'insert' | 'update' | 'upsert' | 'delete' | 'hardDelete' | 'query' | 'queryAll';
- export type IngestOperation = Exclude<BulkOperation, 'query' | 'queryAll'>;
- export type QueryOperation = Extract<BulkOperation, 'query' | 'queryAll'>;
  export type BulkOptions = {
  extIdField?: string;
  concurrencyMode?: 'Serial' | 'Parallel';
  assignmentRuleId?: string;
  };
  export type JobState = 'Open' | 'Closed' | 'Aborted' | 'Failed' | 'Unknown';
- export type JobStateV2 = Exclude<JobState, 'Closed' | 'Unknown'> | 'UploadComplete' | 'InProgress' | 'JobComplete';
  export type JobInfo = {
  id: string;
  object: string;
  operation: BulkOperation;
  state: JobState;
  };
- export type JobInfoV2 = {
- apiVersion: string;
- assignmentRuleId?: string;
- columnDelimiter: 'BACKQUOTE' | 'CARET' | 'COMMA' | 'PIPE' | 'SEMICOLON' | 'TAB';
- concurrencyMode: 'Parallel';
- contentType: 'CSV';
- contentUrl: string;
- createdById: string;
- createdDate: string;
- externalIdFieldName?: string;
- id: string;
- jobType: 'BigObjectIngest' | 'Classic' | 'V2Ingest';
- lineEnding: 'LF' | 'CRLF';
- object: string;
- operation: BulkOperation;
- state: JobStateV2;
- systemModstamp: string;
- numberRecordsProcessed?: number;
- numberRecordsFailed?: number;
- };
  export type BatchState = 'Queued' | 'InProgress' | 'Completed' | 'Failed' | 'NotProcessed';
  export type BatchInfo = {
  id: string;
@@ -69,46 +45,15 @@ export type BulkIngestBatchResult = Array<{
  errors: string[];
  }>;
  export type BatchResult<Opr extends BulkOperation> = Opr extends 'query' | 'queryAll' ? BulkQueryBatchResult : BulkIngestBatchResult;
- type BulkRequest = {
+ export type BulkRequest = {
  method: HttpMethods;
- path: string;
+ path?: string;
  body?: string;
  headers?: {
  [name: string]: string;
  };
  responseType?: string;
  };
- export type IngestJobV2SuccessfulResults<S extends Schema> = Array<{
- sf__Created: 'true' | 'false';
- sf__Id: string;
- } & S>;
- export type IngestJobV2FailedResults<S extends Schema> = Array<{
- sf__Error: string;
- sf__Id: string;
- } & S>;
- export type IngestJobV2UnprocessedRecords<S extends Schema> = Array<S>;
- export type IngestJobV2Results<S extends Schema> = {
- successfulResults: IngestJobV2SuccessfulResults<S>;
- failedResults: IngestJobV2FailedResults<S>;
- unprocessedRecords: IngestJobV2UnprocessedRecords<S>;
- };
- type NewIngestJobOptions = Required<Pick<JobInfoV2, 'object' | 'operation'>> & Partial<Pick<JobInfoV2, 'assignmentRuleId' | 'externalIdFieldName' | 'lineEnding'>>;
- type ExistingIngestJobOptions = Pick<JobInfoV2, 'id'>;
- type CreateIngestJobV2Options<S extends Schema> = {
- connection: Connection<S>;
- jobInfo: NewIngestJobOptions | ExistingIngestJobOptions;
- pollingOptions: BulkV2PollingOptions;
- };
- type CreateQueryJobV2Options<S extends Schema> = {
- connection: Connection<S>;
- operation: QueryOperation;
- query: string;
- pollingOptions: BulkV2PollingOptions;
- };
- type BulkV2PollingOptions = {
- pollInterval: number;
- pollTimeout: number;
- };
  /**
  * Class for Bulk API Job
  */
@@ -223,7 +168,8 @@ export declare class Batch<S extends Schema, Opr extends BulkOperation> extends
  */
  retrieve(): Promise<BulkQueryBatchResult | BulkIngestBatchResult>;
  /**
- * Fetch query result as a record stream
+ * Fetch query batch result as a record stream
+ *
  * @param {String} resultId - Result id
  * @returns {RecordStream} - Record stream, convertible to CSV data stream
  */
@@ -235,15 +181,18 @@ export declare class Batch<S extends Schema, Opr extends BulkOperation> extends
  * @class
  */
  export declare class Bulk<S extends Schema> {
- _conn: Connection<S>;
- _logger: Logger;
+ private readonly _conn;
+ readonly _logger: Logger;
  /**
  * Polling interval in milliseconds
+ *
+ * Default: 1000 (1 second)
  */
  pollInterval: number;
  /**
  * Polling timeout in milliseconds
- * @type {Number}
+ *
+ * Default: 30000 (30 seconds)
  */
  pollTimeout: number;
  /**
@@ -253,192 +202,52 @@ export declare class Bulk<S extends Schema> {
  /**
  *
  */
- _request<T>(request_: BulkRequest): StreamPromise<T>;
+ _request<T>(request_: BulkRequest): import("../util/promise").StreamPromise<T>;
  /**
  * Create and start bulkload job and batch
- */
- load<Opr extends BulkOperation>(type: string, operation: Opr, input?: Record[] | Readable | string): Batch<S, Opr>;
- load<Opr extends BulkOperation>(type: string, operation: Opr, optionsOrInput?: BulkOptions | Record[] | Readable | string, input?: Record[] | Readable | string): Batch<S, Opr>;
- /**
- * Execute bulk query and get record stream
- */
- query(soql: string): Parsable<Record>;
- /**
- * Create a new job instance
- */
- createJob<Opr extends BulkOperation>(type: string, operation: Opr, options?: BulkOptions): Job<S, Opr>;
- /**
- * Get a job instance specified by given job ID
- *
- * @param {String} jobId - Job ID
- * @returns {Bulk~Job}
- */
- job<Opr extends BulkOperation>(jobId: string): Job<S, Opr>;
- }
- export declare class BulkV2<S extends Schema> {
- #private;
- /**
- * Polling interval in milliseconds
- */
- pollInterval: number;
- /**
- * Polling timeout in milliseconds
- * @type {Number}
- */
- pollTimeout: number;
- constructor(connection: Connection<S>);
- /**
- * Create an instance of an ingest job object.
- *
- * @params {NewIngestJobOptions} options object
- * @returns {IngestJobV2} An ingest job instance
- * @example
- * // Upsert records to the Account object.
- *
- * const job = connection.bulk2.createJob({
- * operation: 'insert'
- * object: 'Account',
- * });
- *
- * // create the job in the org
- * await job.open()
- *
- * // upload data
- * await job.uploadData(csvFile)
- *
- * // finished uploading data, mark it as ready for processing
- * await job.close()
- */
- createJob<Opr extends IngestOperation>(options: NewIngestJobOptions): IngestJobV2<S, Opr>;
- /**
- * Get a ingest job instance specified by a given job ID
  *
- * @param options Options object with a job ID
- * @returns IngestJobV2 An ingest job
- */
- job<Opr extends IngestOperation>(options: ExistingIngestJobOptions): IngestJobV2<S, Opr>;
- /**
- * Create, upload, and start bulkload job
- */
- loadAndWaitForResults(options: NewIngestJobOptions & Partial<BulkV2PollingOptions> & {
- input: Record[] | Readable | string;
- }): Promise<IngestJobV2Results<S>>;
- /**
- * Execute bulk query and get records
+ * This method will return a Batch instance (writable stream)
+ * which you can write records into as a CSV string.
  *
- * Default timeout: 10000ms
+ * Batch also implements the a promise interface so you can `await` this method to get the job results.
  *
- * @param soql SOQL query
- * @param BulkV2PollingOptions options object
+ * @example
+ * // Insert an array of records and get the job results
  *
- * @returns Record[]
- */
- query(soql: string, options?: Partial<BulkV2PollingOptions> & {
- scanAll?: boolean;
- }): Promise<Record[]>;
- }
- export declare class QueryJobV2<S extends Schema> extends EventEmitter {
- #private;
- jobInfo: Partial<JobInfoV2> | undefined;
- locator: Optional<string>;
- finished: boolean;
- constructor(options: CreateQueryJobV2Options<S>);
- /**
- * Creates a query job
- */
- open(): Promise<void>;
- /**
- * Set the status to abort
- */
- abort(): Promise<void>;
- /**
- * Poll for the state of the processing for the job.
+ * const res = await connection.bulk.load('Account', 'insert', accounts)
  *
- * This method will only throw after a timeout. To capture a
- * job failure while polling you must set a listener for the
- * `failed` event before calling it:
+ * @example
+ * // Insert records from a csv file using the returned batch stream
  *
- * job.on('failed', (err) => console.error(err))
- * await job.poll()
+ * const csvFile = fs.createReadStream('accounts.csv')
  *
- * @param interval Polling interval in milliseconds
- * @param timeout Polling timeout in milliseconds
- * @returns {Promise<Record[]>} A promise that resolves to an array of records
- */
- poll(interval?: number, timeout?: number): Promise<void>;
- /**
- * Check the latest batch status in server
- */
- check(): Promise<JobInfoV2>;
- private request;
- private getResultsUrl;
- /**
- * Get the results for a query job.
+ * const batch = conn.bulk.load('Account', 'insert')
  *
- * @returns {Promise<Record[]>} A promise that resolves to an array of records
- */
- getResults(): Promise<Record[]>;
- /**
- * Deletes a query job.
- */
- delete(): Promise<void>;
- private createQueryRequest;
- }
- /**
- * Class for Bulk API V2 Ingest Job
- */
- export declare class IngestJobV2<S extends Schema, Opr extends IngestOperation> extends EventEmitter {
- #private;
- jobInfo: Partial<JobInfoV2>;
- /**
+ * // The `response` event is emitted when the job results are retrieved
+ * batch.on('response', res => {
+ * console.log(res)
+ * })
+
+ * csvFile.pipe(batch.stream())
  *
- */
- constructor(options: CreateIngestJobV2Options<S>);
- get id(): string | undefined;
- /**
- * Create a job representing a bulk operation in the org
- */
- open(): Promise<void>;
- /** Upload data for a job in CSV format
  *
- * @param input CSV as a string, or array of records or readable stream
  */
- uploadData(input: string | Record[] | Readable): Promise<void>;
- getAllResults(): Promise<IngestJobV2Results<S>>;
+ load<Opr extends BulkOperation>(type: string, operation: Opr, input?: Record[] | Readable | string): Batch<S, Opr>;
+ load<Opr extends BulkOperation>(type: string, operation: Opr, optionsOrInput?: BulkOptions | Record[] | Readable | string, input?: Record[] | Readable | string): Batch<S, Opr>;
  /**
- * Close opened job
+ * Execute bulk query and get record stream
  */
- close(): Promise<void>;
+ query(soql: string): Promise<Parsable<Record>>;
  /**
- * Set the status to abort
+ * Create a new job instance
  */
- abort(): Promise<void>;
+ createJob<Opr extends BulkOperation>(type: string, operation: Opr, options?: BulkOptions): Job<S, Opr>;
  /**
- * Poll for the state of the processing for the job.
- *
- * This method will only throw after a timeout. To capture a
- * job failure while polling you must set a listener for the
- * `failed` event before calling it:
- *
- * job.on('failed', (err) => console.error(err))
- * await job.poll()
+ * Get a job instance specified by given job ID
  *
- * @param interval Polling interval in milliseconds
- * @param timeout Polling timeout in milliseconds
- * @returns {Promise<void>} A promise that resolves when the job finishes successfully
- */
- poll(interval?: number, timeout?: number): Promise<void>;
- /**
- * Check the latest batch status in server
- */
- check(): Promise<JobInfoV2>;
- getSuccessfulResults(): Promise<IngestJobV2SuccessfulResults<S>>;
- getFailedResults(): Promise<IngestJobV2FailedResults<S>>;
- getUnprocessedRecords(): Promise<IngestJobV2UnprocessedRecords<S>>;
- /**
- * Deletes an ingest job.
+ * @param {String} jobId - Job ID
+ * @returns {Bulk~Job}
  */
- delete(): Promise<void>;
- private createIngestRequest;
+ job<Opr extends BulkOperation>(jobId: string): Job<S, Opr>;
  }
  export default Bulk;
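
The JSDoc added to Bulk#load in the last hunk documents two usage styles: awaiting the returned Batch for the job results, or piping a CSV stream into batch.stream(). The following TypeScript sketch illustrates the awaitable style; it assumes @jsforce/jsforce-node exposes a named Connection export, that the connection is already authenticated, and that ingest results carry id/success/errors fields (only errors appears in this diff), so treat it as illustrative rather than definitive.

import { Connection } from '@jsforce/jsforce-node';

// Sketch of the awaitable Bulk (v1) ingest flow described by the new JSDoc.
// `conn` is assumed to be an already-authenticated Connection; the Account
// records and the result field names are illustrative.
export async function insertAccounts(conn: Connection): Promise<void> {
  const accounts = [{ Name: 'Acme' }, { Name: 'Globex' }];

  // Per the new JSDoc, load() returns a Batch that also implements the promise
  // interface, so awaiting it resolves to the per-record ingest results.
  const results = await conn.bulk.load('Account', 'insert', accounts);

  for (const rec of results) {
    console.log(rec.id, rec.success, rec.errors);
  }
}

For CSV input, the second @example in the diff shows the equivalent stream style: create the batch with conn.bulk.load('Account', 'insert'), listen for its 'response' event, and pipe fs.createReadStream('accounts.csv') into batch.stream().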