ruby-hdfs-cdh4 0.0.1 → 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/ext/hdfs/hdfs.c +200 -91
- metadata +4 -3
data/ext/hdfs/hdfs.c
CHANGED
@@ -14,6 +14,7 @@ static VALUE c_file_system;
 static VALUE c_file_info_file;
 static VALUE c_file_info_directory;
 static VALUE e_dfs_exception;
+static VALUE e_connect_error;
 static VALUE e_file_error;
 static VALUE e_could_not_open;
 static VALUE e_does_not_exist;
@@ -21,7 +22,7 @@ static VALUE e_does_not_exist;
 static const int32_t HDFS_DEFAULT_BLOCK_SIZE = 134217728;
 static const int16_t HDFS_DEFAULT_REPLICATION = 3;
 static const short HDFS_DEFAULT_MODE = 0644;
-static const char* HDFS_DEFAULT_HOST = "
+static const char* HDFS_DEFAULT_HOST = "0.0.0.0";
 static const int HDFS_DEFAULT_RECURSIVE_DELETE = 0;
 static const int HDFS_DEFAULT_PATH_STRING_LENGTH = 1024;
 static const int HDFS_DEFAULT_PORT = 8020;
@@ -154,16 +155,32 @@ VALUE HDFS_File_System_alloc(VALUE klass) {
 
 /**
  * call-seq:
- * hdfs.new -> hdfs
+ * hdfs.new(host='localhost', port=8020) -> hdfs
  *
- * Creates a new HDFS client connection
+ * Creates a new HDFS client connection, returning a new
+ * Hadoop::DFS::FileSystem object if successful. If this fails, raises a
+ * ConnectError.
  */
-VALUE HDFS_File_System_initialize(
+VALUE HDFS_File_System_initialize(int argc, VALUE* argv, VALUE self) {
+  VALUE host, port;
+  rb_scan_args(argc, argv, "02", &host, &port);
+  // Sets default values for host and port if not supplied by user.
+  char* hdfs_host = (char*) HDFS_DEFAULT_HOST;
+  int hdfs_port = HDFS_DEFAULT_PORT;
+  if (!NIL_P(host)) {
+    hdfs_host = RSTRING_PTR(host);
+  }
+  if (!NIL_P(port)) {
+    hdfs_port = NUM2INT(port);
+  }
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-  data->fs = hdfsConnect(
-
-
+  data->fs = hdfsConnect(hdfs_host, hdfs_port);
+  if (data->fs == NULL) {
+    rb_raise(e_connect_error, "Failed to connect to HDFS at: %s:%d",
+      hdfs_host, hdfs_port);
+    return Qnil;
+  }
   return self;
 }
 
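The connect path is now variadic. A minimal usage sketch against the new signature (assuming the extension is required as 'hdfs'; the require name and host are illustrative, not taken from this diff):

    require 'hdfs'

    hdfs = Hadoop::DFS::FileSystem.new    # falls back to the built-in defaults
    hdfs = Hadoop::DFS::FileSystem.new('namenode.example.com', 8020)
    # An unreachable namenode now raises Hadoop::DFS::ConnectError
    # instead of leaving a half-initialized object behind.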
@@ -188,14 +205,23 @@ VALUE HDFS_File_System_disconnect(VALUE self) {
  * hdfs.delete(path, recursive=false) -> success
  *
  * Deletes the file at the supplied path, recursively if specified. Returns
- * True if successful,
+ * True if successful, raises a DFSException if this fails.
  */
-VALUE HDFS_File_System_delete(
+VALUE HDFS_File_System_delete(int argc, VALUE* argv, VALUE self) {
+  VALUE path, recursive;
+  rb_scan_args(argc, argv, "11", &path, &recursive);
+  int hdfs_recursive = HDFS_DEFAULT_RECURSIVE_DELETE;
+  if (!NIL_P(recursive)) {
+    hdfs_recursive = (recursive == Qtrue) ? 1 : 0;
+  }
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
-
+  if (hdfsDelete(data->fs, RSTRING_PTR(path), hdfs_recursive) < 0) {
+    rb_raise(e_dfs_exception, "Could not delete file at path: %s",
+      RSTRING_PTR(path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
@@ -203,13 +229,17 @@ VALUE HDFS_File_System_delete(VALUE self, VALUE path, VALUE recursive) {
  * hdfs.rename(from_path, to_path) -> success
  *
  * Renames the file at the supplied path to the file at the destination path.
- * Returns True if successful,
+ * Returns True if successful, raises a DFSException if this fails.
  */
 VALUE HDFS_File_System_rename(VALUE self, VALUE from_path, VALUE to_path) {
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
+  if (hdfsRename(data->fs, RSTRING_PTR(from_path), RSTRING_PTR(to_path)) < 0) {
+    rb_raise(e_dfs_exception, "Could not rename path: %s to path: %s",
+      RSTRING_PTR(from_path), RSTRING_PTR(to_path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
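Both delete and rename now raise on failure rather than returning False. A hedged sketch (paths illustrative):

    hdfs.delete('/tmp/scratch')              # recursive defaults to false
    hdfs.delete('/tmp/scratch_dir', true)    # recursive delete
    begin
      hdfs.rename('/tmp/a', '/tmp/b')
    rescue Hadoop::DFS::DFSException => e
      warn e.message
    end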
@@ -230,14 +260,18 @@ VALUE HDFS_File_System_exist(VALUE self, VALUE path) {
  * call-seq:
  * hdfs.create_directory(path) -> success
  *
- *
- * if
+ * Creates a directory at the supplied path. If successful, returns True;
+ * raises a DFSException if this fails.
  */
 VALUE HDFS_File_System_create_directory(VALUE self, VALUE path) {
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
+  if (hdfsCreateDirectory(data->fs, RSTRING_PTR(path)) < 0) {
+    rb_raise(e_dfs_exception, "Could not create directory at path: %s",
+      RSTRING_PTR(path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
@@ -292,17 +326,30 @@ VALUE HDFS_File_System_stat(VALUE self, VALUE path) {
 
 /**
  * call-seq:
- * hdfs.set_replication(path, replication) -> success
+ * hdfs.set_replication(path, replication=3) -> success
  *
  * Sets the replication of the following path to the supplied number of nodes
- * it will be replicated against. Returns True if successful;
+ * it will be replicated against. Returns True if successful; raises a
+ * DFSException if this fails.
  */
-VALUE HDFS_File_System_set_replication(
+VALUE HDFS_File_System_set_replication(int argc, VALUE* argv, VALUE self) {
+  VALUE path, replication;
+  rb_scan_args(argc, argv, "11", &path, &replication);
+  int hdfs_replication;
+  // If no replication value is supplied, uses default replication value.
+  if (NIL_P(replication)) {
+    hdfs_replication = HDFS_DEFAULT_REPLICATION;
+  } else {
+    hdfs_replication = NUM2INT(replication);
+  }
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
-
+  if (hdfsSetReplication(data->fs, RSTRING_PTR(path), hdfs_replication) < 0) {
+    rb_raise(e_dfs_exception, "Failed to set replication to: %d at path: %s",
+      hdfs_replication, RSTRING_PTR(path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
@@ -310,77 +357,101 @@ VALUE HDFS_File_System_set_replication(VALUE self, VALUE path, VALUE replication
  * hdfs.cd(path) -> success
  *
  * Changes the current working directory to the supplied path. Returns True if
- * successful;
+ * successful; raises a DFSException if this fails.
  */
 VALUE HDFS_File_System_cd(VALUE self, VALUE path) {
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
+  if (hdfsSetWorkingDirectory(data->fs, RSTRING_PTR(path)) < 0) {
+    rb_raise(e_dfs_exception,
+      "Failed to change current working directory to path: %s",
+      RSTRING_PTR(path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
  * call-seq:
  * hdfs.cwd -> success
  *
- *
- * successful; False if not.
+ * Displays the current working directory; raises a DFSException if this fails.
  */
 VALUE HDFS_File_System_cwd(VALUE self) {
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
   char* cur_dir = (char *) malloc(
     sizeof(char) * HDFS_DEFAULT_PATH_STRING_LENGTH);
-
-  HDFS_DEFAULT_PATH_STRING_LENGTH)
-
-
-
+  if (hdfsGetWorkingDirectory(data->fs, cur_dir,
+      HDFS_DEFAULT_PATH_STRING_LENGTH) < 0) {
+    free(cur_dir);
+    rb_raise(e_dfs_exception, "Failed to get current working directory");
+    return Qnil;
+  }
+  return rb_str_new2(cur_dir);
 }
 
 /**
  * call-seq:
  * hdfs.chgrp(path, group) -> success
  *
- * Changes the group of the supplied path. Returns True if successful;
- * if
+ * Changes the group of the supplied path. Returns True if successful; raises
+ * a DFSException if this fails.
  */
 VALUE HDFS_File_System_chgrp(VALUE self, VALUE path, VALUE group) {
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
-
+  if (hdfsChgrp(data->fs, RSTRING_PTR(path), NULL, RSTRING_PTR(group)) < 0) {
+    rb_raise(e_dfs_exception, "Failed to chgrp path: %s to group: %s",
+      RSTRING_PTR(path), RSTRING_PTR(group));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
  * call-seq:
- * hdfs.chgrp(path, mode) -> retval
+ * hdfs.chgrp(path, mode=644) -> retval
  *
- * Changes the mode of the supplied path. Returns True if successful;
- * if
+ * Changes the mode of the supplied path. Returns True if successful; raises
+ * a DFSException if this fails.
  */
-VALUE HDFS_File_System_chmod(
+VALUE HDFS_File_System_chmod(int argc, VALUE* argv, VALUE self) {
+  VALUE path, mode;
+  rb_scan_args(argc, argv, "11", &path, &mode);
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
-
+  short hdfs_mode;
+  // Sets default mode if none is supplied.
+  if (NIL_P(mode)) {
+    hdfs_mode = HDFS_DEFAULT_MODE;
+  } else {
+    hdfs_mode = octal_decimal(NUM2INT(mode));
+  }
+  if (hdfsChmod(data->fs, RSTRING_PTR(path), hdfs_mode) < 0){
+    rb_raise(e_dfs_exception, "Failed to chmod user path: %s to mode: %d",
+      RSTRING_PTR(path), hdfs_mode);
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
  * call-seq:
  * hdfs.chown(path, owner) -> retval
  *
- * Changes the owner of the supplied path. Returns True if successful;
- * if
+ * Changes the owner of the supplied path. Returns True if successful; raises
+ * a DFSException if this fails.
  */
 VALUE HDFS_File_System_chown(VALUE self, VALUE path, VALUE owner) {
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
-
+  if (hdfsChown(data->fs, RSTRING_PTR(path), RSTRING_PTR(owner), NULL) < 0) {
+    rb_raise(e_dfs_exception, "Failed to chown user path: %s to user: %s",
+      RSTRING_PTR(path), RSTRING_PTR(owner));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
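The directory and permission helpers share the same raise-on-failure contract; a brief sketch (path, owner, and group names illustrative):

    hdfs.cd('/user/jobs')
    puts hdfs.cwd                        # returns the working directory as a String
    hdfs.chmod('/user/jobs/out', 755)    # decimal 755 is converted via octal_decimal
    hdfs.chown('/user/jobs/out', 'etl')
    hdfs.chgrp('/user/jobs/out', 'hadoop')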
@@ -389,13 +460,16 @@ VALUE HDFS_File_System_chown(VALUE self, VALUE path, VALUE owner) {
  *
  * Copies the file at HDFS location from_path to HDFS location to_path. If
  * to_fs is specified, copies to this HDFS over the current HDFS. If
- * successful, returns
+ * successful, returns True; otherwise, raises a DFSException.
  */
-VALUE HDFS_File_System_copy(
+VALUE HDFS_File_System_copy(int argc, VALUE* argv, VALUE self) {
+  VALUE from_path, to_path, to_fs;
+  rb_scan_args(argc, argv, "21", &from_path, &to_path, &to_fs);
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
   hdfsFS destFS = data->fs;
-
+  // If no to_fs is supplied, copies to the current file system.
+  if (!NIL_P(to_fs)) {
     if (CLASS_OF(to_fs) == c_file_system) {
       FSData* destFSData = NULL;
       Data_Get_Struct(to_fs, FSData, destFSData);
@@ -405,9 +479,13 @@ VALUE HDFS_File_System_copy(VALUE self, VALUE from_path, VALUE to_path, VALUE to
       return Qnil;
     }
   }
-
-  RSTRING_PTR(
-
+  if (hdfsCopy(data->fs, RSTRING_PTR(from_path), destFS,
+      RSTRING_PTR(to_path)) < 0) {
+    rb_raise(e_dfs_exception, "Failed to copy path: %s to path: %s",
+      RSTRING_PTR(from_path), RSTRING_PTR(to_path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
@@ -418,11 +496,14 @@ VALUE HDFS_File_System_copy(VALUE self, VALUE from_path, VALUE to_path, VALUE to
  * to_fs is specified, moves to this HDFS over the current HDFS. If
  * successful, returns true; otherwise, returns false.
  */
-VALUE HDFS_File_System_move(
+VALUE HDFS_File_System_move(int argc, VALUE* argv, VALUE self) {
+  VALUE from_path, to_path, to_fs;
+  rb_scan_args(argc, argv, "21", &from_path, &to_path, &to_fs);
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
   hdfsFS destFS = data->fs;
-
+  // If no to_fs is supplied, moves to the current file system.
+  if (!NIL_P(to_fs)) {
     if (CLASS_OF(to_fs) == c_file_system) {
       FSData* destFSData = NULL;
       Data_Get_Struct(to_fs, FSData, destFSData);
@@ -432,9 +513,12 @@ VALUE HDFS_File_System_move(VALUE self, VALUE from_path, VALUE to_path, VALUE to
       return Qnil;
     }
   }
-
-  RSTRING_PTR(
-
+  if (hdfsMove(data->fs, RSTRING_PTR(from_path), destFS,
+      RSTRING_PTR(to_path)) < 0) {
+    rb_raise(e_dfs_exception, "Error while retrieving capacity");
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
@@ -517,38 +601,63 @@ VALUE HDFS_File_System_used(VALUE self) {
  * Changes the last modified and/or last access time in seconds since the Unix
  * epoch for the supplied file. Returns true if successful; false if not.
  */
-VALUE HDFS_File_System_utime(
+VALUE HDFS_File_System_utime(int argc, VALUE* argv, VALUE self) {
+  VALUE path, modified_time, access_time;
+  tTime hdfs_modified_time, hdfs_access_time;
+  rb_scan_args(argc, argv, "12", &path, &modified_time, &access_time);
+  // Sets default values for last modified and/or last access time.
+  if (NIL_P(modified_time)) {
+    hdfs_modified_time = -1;
+  } else {
+    hdfs_modified_time = NUM2LONG(modified_time);
+  }
+  if (NIL_P(access_time)) {
+    hdfs_access_time = -1;
+  } else {
+    hdfs_access_time = NUM2LONG(access_time);
+  }
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-
-
-
-
+  if (hdfsUtime(data->fs, RSTRING_PTR(path), hdfs_modified_time,
+      hdfs_access_time) < 0) {
+    rb_raise(e_dfs_exception,
+      "Error while setting modified time: %lu, access time: %lu at path: %s",
+      (long) hdfs_modified_time, (long) hdfs_access_time, RSTRING_PTR(path));
+    return Qnil;
+  }
+  return Qtrue;
 }
 
 /**
  * call-seq:
- * hdfs.open(path, mode, options
+ * hdfs.open(path, mode='r', options={}) -> file
  *
  * Opens a file. If the file cannot be opened, raises a CouldNotOpenError;
  * otherwise, returns a Hadoop::DFS::File object corresponding to the file.
  */
-VALUE HDFS_File_System_open(
-
-
-
-  if (
-
-
-
-
-
+VALUE HDFS_File_System_open(int argc, VALUE* argv, VALUE self) {
+  VALUE path, mode, options;
+  int flags = O_RDONLY;
+  rb_scan_args(argc, argv, "12", &path, &mode, &options);
+  // Sets file open mode if one is provided by the user.
+  if (!NIL_P(mode)) {
+    if (strcmp("r", StringValuePtr(mode)) == 0) {
+      flags = O_RDONLY;
+    } else if (strcmp("w", StringValuePtr(mode)) == 0) {
+      flags = O_WRONLY;
+    } else {
+      rb_raise(rb_eArgError, "Mode must be 'r' or 'w'");
+      return Qnil;
+    }
+  }
+  if (NIL_P(options)) {
+    options = rb_hash_new();
   }
   VALUE r_buffer_size = rb_hash_aref(options, rb_eval_string(":buffer_size"));
   VALUE r_replication = rb_hash_aref(options, rb_eval_string(":replication"));
   VALUE r_block_size = rb_hash_aref(options, rb_eval_string(":block_size"));
+  FSData* data = NULL;
+  Data_Get_Struct(self, FSData, data);
   hdfsFile file = hdfsOpenFile(data->fs, RSTRING_PTR(path), flags,
     RTEST(r_buffer_size) ? NUM2INT(r_buffer_size) : 0,
     RTEST(r_replication) ? NUM2INT(r_replication) : 0,
@@ -557,7 +666,6 @@ VALUE HDFS_File_System_open(VALUE self, VALUE path, VALUE mode, VALUE options) {
     rb_raise(e_could_not_open, "Could not open file %s", RSTRING_PTR(path));
     return Qnil;
   }
-
   FileData* file_data = ALLOC_N(FileData, 1);
   file_data->fs = data->fs;
   file_data->file = file;
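open now takes its mode and tuning options as optional arguments, reading symbol-keyed settings from the options hash; a hedged sketch (values illustrative):

    file = hdfs.open('/logs/app.log')            # read-only by default
    file = hdfs.open('/logs/app.log', 'w',
                     :buffer_size => 4096, :replication => 2, :block_size => 134217728)
    # Any mode other than 'r' or 'w' raises ArgumentError.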
@@ -873,30 +981,30 @@ void Init_hdfs() {
 
   c_file_system = rb_define_class_under(m_dfs, "FileSystem", rb_cObject);
   rb_define_alloc_func(c_file_system, HDFS_File_System_alloc);
-  rb_define_method(c_file_system, "initialize", HDFS_File_System_initialize,
+  rb_define_method(c_file_system, "initialize", HDFS_File_System_initialize, -1);
   rb_define_method(c_file_system, "disconnect", HDFS_File_System_disconnect, 0);
-  rb_define_method(c_file_system, "open", HDFS_File_System_open,
-  rb_define_method(c_file_system, "delete", HDFS_File_System_delete,
+  rb_define_method(c_file_system, "open", HDFS_File_System_open, -1);
+  rb_define_method(c_file_system, "delete", HDFS_File_System_delete, -1);
   rb_define_method(c_file_system, "rename", HDFS_File_System_rename, 2);
   rb_define_method(c_file_system, "exist?", HDFS_File_System_exist, 1);
   rb_define_method(c_file_system, "create_directory", HDFS_File_System_create_directory, 1);
   rb_define_method(c_file_system, "list_directory", HDFS_File_System_list_directory, 1);
   rb_define_method(c_file_system, "stat", HDFS_File_System_stat, 1);
-  rb_define_method(c_file_system, "set_replication", HDFS_File_System_set_replication,
+  rb_define_method(c_file_system, "set_replication", HDFS_File_System_set_replication, -1);
   rb_define_method(c_file_system, "cd", HDFS_File_System_cd, 1);
   rb_define_method(c_file_system, "cwd", HDFS_File_System_cwd, 0);
   rb_define_method(c_file_system, "chgrp", HDFS_File_System_chgrp, 2);
-  rb_define_method(c_file_system, "chmod", HDFS_File_System_chmod,
+  rb_define_method(c_file_system, "chmod", HDFS_File_System_chmod, -1);
   rb_define_method(c_file_system, "chown", HDFS_File_System_chown, 2);
-  rb_define_method(c_file_system, "copy", HDFS_File_System_copy,
+  rb_define_method(c_file_system, "copy", HDFS_File_System_copy, -1);
   rb_define_method(c_file_system, "capacity", HDFS_File_System_capacity, 0);
   rb_define_method(c_file_system, "default_block_size",
     HDFS_File_System_default_block_size, 0);
   rb_define_method(c_file_system, "default_block_size_at_path",
     HDFS_File_System_default_block_size_at_path, 1);
-  rb_define_method(c_file_system, "move", HDFS_File_System_move,
+  rb_define_method(c_file_system, "move", HDFS_File_System_move, -1);
   rb_define_method(c_file_system, "used", HDFS_File_System_used, 0);
-  rb_define_method(c_file_system, "utime", HDFS_File_System_utime,
+  rb_define_method(c_file_system, "utime", HDFS_File_System_utime, -1);
 
   c_file = rb_define_class_under(m_dfs, "File", rb_cObject);
   rb_define_method(c_file, "read", HDFS_File_read, 1);
@@ -929,7 +1037,8 @@ void Init_hdfs() {
   rb_define_method(c_file_info_directory, "is_directory?", HDFS_File_Info_Directory_is_directory, 0);
 
   e_dfs_exception = rb_define_class_under(m_dfs, "DFSException", rb_eStandardError);
-
+  e_connect_error = rb_define_class_under(m_dfs, "ConnectError", e_dfs_exception);
+  e_file_error = rb_define_class_under(m_dfs, "FileError", e_dfs_exception);
   e_could_not_open = rb_define_class_under(m_dfs, "CouldNotOpenFileError", e_file_error);
   e_does_not_exist = rb_define_class_under(m_dfs, "DoesNotExistError", e_file_error);
 }
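With ConnectError and FileError both nested under DFSException, callers can rescue narrowly or broadly; a brief sketch:

    begin
      hdfs = Hadoop::DFS::FileSystem.new('badhost', 8020)
    rescue Hadoop::DFS::ConnectError => e     # connection failures only
      warn "connect failed: #{e.message}"
    rescue Hadoop::DFS::DFSException => e     # any other DFS error
      warn e.message
    end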
metadata
CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: ruby-hdfs-cdh4
 version: !ruby/object:Gem::Version
-  version: 0.0.
+  version: 0.0.2
 prerelease:
 platform: ruby
 authors:
@@ -11,7 +11,7 @@ authors:
 autorequire:
 bindir: bin
 cert_chain: []
-date: 2013-05-
+date: 2013-05-15 00:00:00.000000000 Z
 dependencies: []
 description: ruby hadoop libhdfs client with support for cdh4
 email:
@@ -48,8 +48,9 @@ required_rubygems_version: !ruby/object:Gem::Requirement
       version: 1.9.2
 requirements: []
 rubyforge_project:
-rubygems_version: 1.8.
+rubygems_version: 1.8.24
 signing_key:
 specification_version: 3
 summary: ruby hadoop libhdfs client with support for cdh4
 test_files: []
+has_rdoc: