ruby-hdfs-cdh4 0.0.8 → 0.0.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/ext/hdfs/hdfs.c +7 -13
- metadata +1 -1
data/ext/hdfs/hdfs.c
CHANGED
@@ -19,7 +19,7 @@ static VALUE e_file_error;
 static VALUE e_could_not_open;
 static VALUE e_does_not_exist;
 
-static const
+static const tSize HDFS_DEFAULT_BUFFER_SIZE = 131072;
 static const char* HDFS_DEFAULT_HOST = "0.0.0.0";
 static const short HDFS_DEFAULT_MODE = 0644;
 static const int HDFS_DEFAULT_PATH_STRING_LENGTH = 1024;
@@ -728,17 +728,15 @@ VALUE HDFS_File_System_open(int argc, VALUE* argv, VALUE self) {
       return Qnil;
     }
   }
-
-  options = rb_hash_new();
-  }
+  options = NIL_P(options) ? rb_hash_new() : options;
   VALUE r_buffer_size = rb_hash_aref(options, rb_eval_string(":buffer_size"));
   VALUE r_replication = rb_hash_aref(options, rb_eval_string(":replication"));
   VALUE r_block_size = rb_hash_aref(options, rb_eval_string(":block_size"));
   FSData* data = NULL;
   Data_Get_Struct(self, FSData, data);
-  hdfsFile file = hdfsOpenFile(data->fs, RSTRING_PTR(path), flags,
-      RTEST(r_buffer_size) ? NUM2INT(r_buffer_size) : 0,
-      RTEST(r_replication) ? NUM2INT(r_replication) : 0,
+  hdfsFile file = hdfsOpenFile(data->fs, RSTRING_PTR(path), flags,
+      RTEST(r_buffer_size) ? NUM2INT(r_buffer_size) : 0,
+      RTEST(r_replication) ? NUM2INT(r_replication) : 0,
       RTEST(r_block_size) ? NUM2INT(r_block_size) : 0);
   if (file == NULL) {
     rb_raise(e_could_not_open, "Could not open file %s", RSTRING_PTR(path));
@@ -766,10 +764,7 @@ VALUE HDFS_File_System_open(int argc, VALUE* argv, VALUE self) {
 VALUE HDFS_File_read(int argc, VALUE* argv, VALUE self) {
   VALUE length;
   rb_scan_args(argc, argv, "01", &length);
-
-  if (!NIL_P(length)) {
-    hdfsLength = NUM2UINT(length);
-  }
+  tSize hdfsLength = NIL_P(length) ? HDFS_DEFAULT_BUFFER_SIZE : NUM2INT(length);
   // Checks whether we're reading more data than HDFS client can support.
   if (hdfsLength > HDFS_DEFAULT_BUFFER_SIZE) {
     rb_raise(e_file_error, "Can only read a max of %u bytes from HDFS",
@@ -779,8 +774,7 @@ VALUE HDFS_File_read(int argc, VALUE* argv, VALUE self) {
   FileData* data = NULL;
   Data_Get_Struct(self, FileData, data);
   ensure_file_open(data);
-  char* buffer = ALLOC_N(char,
-  MEMZERO(buffer, char, length);
+  char* buffer = ALLOC_N(char, hdfsLength);
   tSize bytes_read = hdfsRead(data->fs, data->file, buffer, hdfsLength);
   if (bytes_read == -1) {
     rb_raise(e_file_error, "Failed to read data");