select-csv 1.1.9 → 1.1.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/README.md +34 -34
  2. package/package.json +1 -1
package/README.md CHANGED
@@ -65,7 +65,7 @@ parse = parseText(
65
65
  const result = parse.get(); //Return all rows
66
66
  /*
67
67
  {
68
- time:1 ms,
68
+ time:'1 ms',
69
69
  header:["Index","User Id","First Name","Last Name","Sex"],
70
70
  rows:[
71
71
  ["1","5f10e9D33fC5f2b","Sara","Mcguire","Female"],
@@ -96,9 +96,9 @@ result = parse.chunk(c)
96
96
  result = parse.chunk(2) //Return row 0 and 1
97
97
  /*
98
98
  {
99
- "Time": 0,
100
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
101
- "Rows": [
99
+ time: '0 ms',
100
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
101
+ rows: [
102
102
  [ "1", "5f10e9D33fC5f2b", "Sara", "Mcguire", "Female" ],
103
103
  [ "2", "751cD1cbF77e005", "Alisha", "Hebert", "Male" ]
104
104
  ],
@@ -109,9 +109,9 @@ result = parse.chunk(2) //Return row 0 and 1
109
109
  result = parse.chunk(3) //Return row 2,3 and 4 (Get rows from last offset saved)
110
110
  /*
111
111
  {
112
- "Time": 0,
113
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
114
- "Rows": [
112
+ time: '0 ms',
113
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
114
+ rows: [
115
115
  [ "3", "DcEFDB2D2e62bF9", "Gwendolyn", "Sheppard", "Male" ],
116
116
  [ "4", "C88661E02EEDA9e", "Kristine", "Mccann", "Female" ],
117
117
  [ "5", "fafF1aBDebaB2a6", "Bobby", "Pittman", "Female" ]
@@ -123,9 +123,9 @@ result = parse.chunk(3) //Return row 2,3 and 4 (Get rows from last offset saved)
123
123
  result = parse.chunk(1) //Return row 5 (Get rows from last offset saved)
124
124
  /*
125
125
  {
126
- "Time": 0,
127
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
128
- "Rows": [
126
+ time: '0 ms',
127
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
128
+ rows: [
129
129
  [ "6", "BdDb6C8Af309202", "Calvin", "Ramsey", "Female" ]
130
130
  ],
131
131
  "row_count:": 1
@@ -150,9 +150,9 @@ result = parse.rowOffset(from,to)
150
150
  result = parse.rowOffset(6) //Returns all rows from the sixth row to the last row
151
151
  /*
152
152
  {
153
- "Time": 0,
154
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
155
- "Rows": [
153
+ time: '0 ms',
154
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
155
+ rows: [
156
156
  [ "7", "FCdfFf08196f633", "Collin", "Allison", "Male" ],
157
157
  [ "8", "356279dAa0F7CbD", "Nicholas", "Branch", "Male" ],
158
158
  [ "9", "F563CcbFBfEcf5a", "Emma", "Robinson", "Female" ],
@@ -165,9 +165,9 @@ result = parse.rowOffset(6) //Returns all rows from the sixth row to the last ro
165
165
  result = parse.rowOffset(5,8) //Returns all rows from 5th to 8th row
166
166
  /*
167
167
  {
168
- "Time": 1,
169
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
170
- "Rows": [
168
+ time: '1 ms',
169
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
170
+ rows: [
171
171
  [ "6", "BdDb6C8Af309202", "Calvin", "Ramsey", "Female" ],
172
172
  [ "7", "FCdfFf08196f633", "Collin", "Allison", "Male" ],
173
173
  [ "8", "356279dAa0F7CbD", "Nicholas", "Branch", "Male" ]
@@ -195,9 +195,9 @@ result = parse.setRowOffset(5)
195
195
  result = parse.chunk(1) // Get rows from last offset saved
196
196
  /*
197
197
  {
198
- "Time": 0,
199
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
200
- "Rows": [
198
+ time: '0 ms',
199
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
200
+ rows: [
201
201
  [ "6", "BdDb6C8Af309202", "Calvin", "Ramsey", "Female" ]
202
202
  ],
203
203
  "row_count:": 1
@@ -212,9 +212,9 @@ false
212
212
  result = parse.chunk(1) // Get rows from last offset saved
213
213
  /*
214
214
  {
215
- "Time": 0,
216
- "Header": [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
217
- "Rows": [
215
+ time: '0 ms',
216
+ header: [ "Index", "User Id", "First Name", "Last Name", "Sex" ],
217
+ rows: [
218
218
  [ "7", "FCdfFf08196f633", "Collin", "Allison", "Male" ]
219
219
  ],
220
220
  "row_count:": 1
@@ -266,9 +266,9 @@ parse = parseText(
266
266
  result = parse.rowOffset(2)
267
267
  /*
268
268
  {
269
- "Time": 0,
270
- "Header": false,
271
- "Rows": [
269
+ time: '0 ms',
270
+ header: false,
271
+ rows: [
272
272
  [ "2", "751cD1cbF77e005", "Alisha", "Hebert", "Male" ]
273
273
  ],
274
274
  "row_count:": 1
@@ -282,9 +282,9 @@ option = {
282
282
  // delimiter: (String: get rows containing columns, false: get lines without columns)
283
283
  /*
284
284
  {
285
- "Time": 0,
286
- "Header": false,
287
- "Rows": [
285
+ time: '0 ms',
286
+ header: false,
287
+ rows: [
288
288
  [ "2,751cD1cbF77e005,Alisha,Hebert,Male" ] // No columns, just string (all line)
289
289
  ],
290
290
  "row_count:": 1
@@ -318,7 +318,7 @@ const option = {
318
318
  const result = parse.chunk(3)
319
319
  /*
320
320
  {
321
- time: 0 ms,
321
+ time: '0 ms',
322
322
  header: [ 'Index', 'User Id', 'First Name', 'Last Name', 'Sex' ],
323
323
  rows: [
324
324
  {
@@ -380,7 +380,7 @@ var result;
380
380
  result = parse.chunk(100000)
381
381
  /*
382
382
  {
383
- time: 222 ms,
383
+ time: '222 ms',
384
384
  header: false,
385
385
  rows: [
386
386
  [ '198801', '1', '103', '100', '000000190', '0', '35843', '34353' ],
@@ -410,7 +410,7 @@ result = parse.chunk(100000)
410
410
  result = parse.chunk(3) // Return row 100001,100002 and 100003 (Get rows from last offset saved)
411
411
  /*
412
412
  {
413
- time: 1 ms,
413
+ time: '1 ms',
414
414
  header: false,
415
415
  rows: [
416
416
  [ '198801', '1', '326', '500', '841330000', '90', '81', '246' ],
@@ -426,7 +426,7 @@ const to = from + 5;
426
426
  result = parse.rowOffset(from,to)
427
427
  /*
428
428
  {
429
- time: 3743 ms,
429
+ time: '3743 ms',
430
430
  header: false,
431
431
  rows: [
432
432
  [
@@ -449,7 +449,7 @@ const to = from + 4;
449
449
  result = parse.rowOffset(from,to)
450
450
  /*
451
451
  {
452
- time: 44126 ms,
452
+ time: '44126 ms',
453
453
  header: false,
454
454
  rows: [
455
455
  [ '201412', '1', '125', '400', '283525000', '0', '160000', '6492' ],
@@ -464,7 +464,7 @@ result = parse.rowOffset(from,to)
464
464
  result = parse.chunk(3) // Get rows from last offset saved ( row to,to+1 and to+2 )
465
465
  /*
466
466
  {
467
- time: 29 ms,
467
+ time: '29 ms',
468
468
  header: false,
469
469
  rows: [
470
470
  [ '201412', '1', '125', '400', '400932000', '0', '18', '526' ],
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "select-csv",
3
- "version": "1.1.9",
3
+ "version": "1.1.11",
4
4
  "description": "Fastest, simplest and most powerful package of all existing libraries in npmjs. It converts .csv files into an array and even into lines. It contains two important functions parseCsv that handles a csv file, you only need a link to the file. And parseText deals with text, and they both have the same roles and methods",
5
5
  "keywords": [
6
6
  "csv",