sequel_pg 1.5.1 → 1.6.0

data/CHANGELOG CHANGED
@@ -1,3 +1,7 @@
+ === 1.6.0 (2012-09-04)
+
+ * Replace PQsetRowProcessor streaming with PQsetSingleRowMode streaming introduced in PostgreSQL 9.2beta3 (jeremyevans)
+
  === 1.5.1 (2012-08-02)

  * Sprinkle some RB_GC_GUARD to work around segfaults in the PostgreSQL array parser (jeremyevans)
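
For background on the change above: single-row mode replaces the PQsetRowProcessor callback API that sequel_pg 1.5.x used. The client sends the query, switches the connection into single-row mode before fetching anything, and then receives one PGresult per row. A minimal sketch of that loop via the pg gem, assuming a pg version that exposes set_single_row_mode (the C extension in this release defines its own equivalent for the Adapter):

    require 'pg'

    conn = PG.connect(dbname: 'test')          # hypothetical database
    conn.send_query('SELECT * FROM big_table')
    conn.set_single_row_mode                   # must precede the first get_result

    # Each result now holds a single row (PGRES_SINGLE_TUPLE),
    # followed by a final zero-row result when the stream ends.
    while res = conn.get_result
      res.check                                # raise if the server errored mid-stream
      res.each { |row| p row }
      res.clear
    end
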
ext/sequel_pg/extconf.rb CHANGED
@@ -14,7 +14,7 @@ if enable_config("static-build")
  end

  if (have_library('pq') || have_library('libpq') || have_library('ms/libpq')) && have_header('libpq-fe.h')
- have_func 'PQsetRowProcessor'
+ have_func 'PQsetSingleRowMode'
  create_makefile("sequel_pg")
  else
  puts 'Could not find PostgreSQL build environment (libraries & headers): Makefile not created'
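
This extconf.rb change drives the conditional compilation in the rest of the diff: mkmf's have_func compiles a small test program against libpq and, if it links, defines a macro named after the upcased function, which the C source then tests with #if. A reduced sketch of the detection step:

    require 'mkmf'

    # Defines HAVE_PQSETSINGLEROWMODE when libpq is new enough
    # (PostgreSQL 9.2+); otherwise the streaming code is compiled out.
    have_func 'PQsetSingleRowMode'
    create_makefile('sequel_pg')
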
ext/sequel_pg/sequel_pg.c CHANGED
@@ -103,6 +103,12 @@ static ID spg_id_columns;
  static ID spg_id_encoding;
  static ID spg_id_values;

+ #if HAVE_PQSETSINGLEROWMODE
+ static ID spg_id_get_result;
+ static ID spg_id_clear;
+ static ID spg_id_check;
+ #endif
+
  #if SPG_ENCODING
  static int enc_get_index(VALUE val)
  {
@@ -403,7 +409,7 @@ static VALUE spg_timestamp(const char *s, VALUE self) {
  }

  static VALUE spg_fetch_rows_set_cols(VALUE self, VALUE ignore) {
- return self;
+ return Qnil;
  }

  static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* colconvert
@@ -813,194 +819,115 @@ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {

  static VALUE spg_supports_streaming_p(VALUE self) {
  return
- #if HAVE_PQSETROWPROCESSOR
+ #if HAVE_PQSETSINGLEROWMODE
  Qtrue;
  #else
  Qfalse;
  #endif
  }

- #if HAVE_PQSETROWPROCESSOR
- static VALUE spg__rp_value(VALUE self, PGresult* res, const PGdataValue* dvs, int j, VALUE* colconvert
- #ifdef SPG_ENCODING
- , int enc_index
- #endif
- ) {
- const char *v;
- PGdataValue dv = dvs[j];
- VALUE rv;
- size_t l;
- int len = dv.len;
-
- if(len < 0) {
- rv = Qnil;
- } else {
- v = dv.value;
-
- switch(PQftype(res, j)) {
- case 16: /* boolean */
- rv = *v == 't' ? Qtrue : Qfalse;
- break;
- case 17: /* bytea */
- v = PQunescapeBytea((unsigned char*)v, &l);
- rv = rb_funcall(spg_Blob, spg_id_new, 1, rb_str_new(v, l));
- PQfreemem((char *)v);
- break;
- case 20: /* integer */
- case 21:
- case 22:
- case 23:
- case 26:
- rv = rb_str2inum(rb_str_new(v, len), 10);
- break;
- case 700: /* float */
- case 701:
- if (strncmp("NaN", v, 3) == 0) {
- rv = spg_nan;
- } else if (strncmp("Infinity", v, 8) == 0) {
- rv = spg_pos_inf;
- } else if (strncmp("-Infinity", v, 9) == 0) {
- rv = spg_neg_inf;
- } else {
- rv = rb_float_new(rb_str_to_dbl(rb_str_new(v, len), Qfalse));
- }
- break;
- case 790: /* numeric */
- case 1700:
- rv = rb_funcall(spg_BigDecimal, spg_id_new, 1, rb_str_new(v, len));
- break;
- case 1082: /* date */
- rv = rb_str_new(v, len);
- rv = spg_date(StringValuePtr(rv));
- break;
- case 1083: /* time */
- case 1266:
- rv = rb_str_new(v, len);
- rv = spg_time(StringValuePtr(rv));
- break;
- case 1114: /* timestamp */
- case 1184:
- rv = rb_str_new(v, len);
- rv = spg_timestamp(StringValuePtr(rv), self);
- break;
- case 18: /* char */
- case 25: /* text */
- case 1043: /* varchar*/
- rv = rb_tainted_str_new(v, len);
- #ifdef SPG_ENCODING
- rb_enc_associate_index(rv, enc_index);
- #endif
- break;
- default:
- rv = rb_tainted_str_new(v, len);
- #ifdef SPG_ENCODING
- rb_enc_associate_index(rv, enc_index);
- #endif
- if (colconvert[j] != Qnil) {
- rv = rb_funcall(colconvert[j], spg_id_call, 1, rv);
- }
- }
+ #if HAVE_PQSETSINGLEROWMODE
+ static VALUE spg_set_single_row_mode(VALUE self) {
+ PGconn *conn;
+ Data_Get_Struct(self, PGconn, conn);
+ if (PQsetSingleRowMode(conn) != 1) {
+ rb_raise(spg_PGError, "cannot set single row mode");
  }
- return rv;
+ return Qnil;
  }

- static int spg_row_processor(PGresult *res, const PGdataValue *columns, const char **errmsgp, void *param) {
+ static VALUE spg__yield_each_row(VALUE self) {
+ PGconn *conn;
+ PGresult *res;
+ VALUE rres;
+ VALUE rconn;
+ VALUE colsyms[SPG_MAX_FIELDS];
+ VALUE colconvert[SPG_MAX_FIELDS];
  long nfields;
- struct spg_row_proc_info *info;
- info = (struct spg_row_proc_info *)param;
- VALUE *colsyms = info->colsyms;
- VALUE *colconvert = info->colconvert;
- VALUE self = info->dataset;
+ long j;
+ VALUE h;
+ VALUE opts;
+ VALUE pg_type;
+ VALUE pg_value = Qnil;
+ char type = SPG_YIELD_NORMAL;

- switch (PQresultStatus(res))
- {
- case PGRES_TUPLES_OK:
- case PGRES_COPY_OUT:
- case PGRES_COPY_IN:
- #ifdef HAVE_CONST_PGRES_COPY_BOTH
- case PGRES_COPY_BOTH:
+ rconn = rb_ary_entry(self, 1);
+ self = rb_ary_entry(self, 0);
+ Data_Get_Struct(rconn, PGconn, conn);
+
+ rres = rb_funcall(rconn, spg_id_get_result, 0);
+ rb_funcall(rres, spg_id_check, 0);
+ Data_Get_Struct(rres, PGresult, res);
+
+ #ifdef SPG_ENCODING
+ int enc_index;
+ enc_index = enc_get_index(rres);
  #endif
- case PGRES_EMPTY_QUERY:
- case PGRES_COMMAND_OK:
- break;
- case PGRES_BAD_RESPONSE:
- case PGRES_FATAL_ERROR:
- case PGRES_NONFATAL_ERROR:
- rb_raise(spg_PGError, "error while streaming results");
- default:
- rb_raise(spg_PGError, "unexpected result status while streaming results");
+
+ /* Only handle regular and model types. All other types require compiling all
+ * of the results at once, which is not a use case for streaming. The streaming
+ * code does not call this function for the other types. */
+ opts = rb_funcall(self, spg_id_opts, 0);
+ if (rb_type(opts) == T_HASH) {
+ pg_type = rb_hash_aref(opts, spg_sym__sequel_pg_type);
+ pg_value = rb_hash_aref(opts, spg_sym__sequel_pg_value);
+ if (SYMBOL_P(pg_type) && pg_type == spg_sym_model && rb_type(pg_value) == T_CLASS) {
+ type = SPG_YIELD_MODEL;
+ }
  }

  nfields = PQnfields(res);
- if(columns == NULL) {
- spg_set_column_info(self, res, colsyms, colconvert);
- rb_ivar_set(self, spg_id_columns, rb_ary_new4(nfields, colsyms));
- } else {
- long j;
- VALUE h, m;
- h = rb_hash_new();
+ if (nfields > SPG_MAX_FIELDS) {
+ rb_funcall(rres, spg_id_clear, 0);
+ rb_raise(rb_eRangeError, "more than %d columns in query", SPG_MAX_FIELDS);
+ }
+
+ spg_set_column_info(self, res, colsyms, colconvert);
+
+ rb_ivar_set(self, spg_id_columns, rb_ary_new4(nfields, colsyms));

+ while (PQntuples(res) != 0) {
+ h = rb_hash_new();
  for(j=0; j<nfields; j++) {
- rb_hash_aset(h, colsyms[j], spg__rp_value(self, res, columns, j, colconvert
- #ifdef SPG_ENCODING
- , info->enc_index
- #endif
- ));
+ rb_hash_aset(h, colsyms[j], spg__col_value(self, res, 0, j, colconvert ENC_INDEX));
  }

- /* optimize_model_load used, return model instance */
- if ((m = info->model)) {
- m = rb_obj_alloc(m);
- rb_ivar_set(m, spg_id_values, h);
- h = m;
+ rb_funcall(rres, spg_id_clear, 0);
+
+ if(type == SPG_YIELD_MODEL) {
+ /* Abuse local variable */
+ pg_type = rb_obj_alloc(pg_value);
+ rb_ivar_set(pg_type, spg_id_values, h);
+ rb_yield(pg_type);
+ } else {
+ rb_yield(h);
  }

- rb_funcall(info->block, spg_id_call, 1, h);
+ rres = rb_funcall(rconn, spg_id_get_result, 0);
+ rb_funcall(rres, spg_id_check, 0);
+ Data_Get_Struct(rres, PGresult, res);
  }
- return 1;
- }
+ rb_funcall(rres, spg_id_clear, 0);

- static VALUE spg_unset_row_processor(VALUE rconn) {
- PGconn *conn;
- Data_Get_Struct(rconn, PGconn, conn);
- if ((PQskipResult(conn)) != NULL) {
- /* Results remaining when row processor finished,
- * either because an exception was raised or the iterator
- * exited early, so skip all remaining rows. */
- while(PQgetResult(conn) != NULL) {
- /* Use a separate while loop as PQgetResult is faster than
- * PQskipResult. */
- }
- }
- PQsetRowProcessor(conn, NULL, NULL);
- return Qnil;
+ return self;
  }

- static VALUE spg_with_row_processor(VALUE self, VALUE rconn, VALUE dataset, VALUE block) {
- struct spg_row_proc_info info;
+ static VALUE spg__flush_results(VALUE rconn) {
  PGconn *conn;
+ PGresult *res;
  Data_Get_Struct(rconn, PGconn, conn);
- bzero(&info, sizeof(info));
-
- info.dataset = dataset;
- info.block = block;
- info.model = 0;
- #if SPG_ENCODING
- info.enc_index = enc_get_index(rconn);
- #endif

- /* Abuse local variable, detect if optimize_model_load used */
- block = rb_funcall(dataset, spg_id_opts, 0);
- if (rb_type(block) == T_HASH && rb_hash_aref(block, spg_sym__sequel_pg_type) == spg_sym_model) {
- block = rb_hash_aref(block, spg_sym__sequel_pg_value);
- if (rb_type(block) == T_CLASS) {
- info.model = block;
- }
+ while ((res = PQgetResult(conn)) != NULL) {
+ PQclear(res);
  }

- PQsetRowProcessor(conn, spg_row_processor, (void*)&info);
- rb_ensure(rb_yield, Qnil, spg_unset_row_processor, rconn);
- return Qnil;
+ return rconn;
+ }
+
+ static VALUE spg_yield_each_row(VALUE self, VALUE rconn) {
+ VALUE v;
+ v = rb_ary_new3(2, self, rconn);
+ return rb_ensure(spg__yield_each_row, v, spg__flush_results, rconn);
  }
  #endif

@@ -1081,9 +1008,14 @@ void Init_sequel_pg(void) {

  rb_define_singleton_method(spg_Postgres, "supports_streaming?", spg_supports_streaming_p, 0);

- #if HAVE_PQSETROWPROCESSOR
- c = rb_funcall(spg_Postgres, cg, 1, rb_str_new2("Database"));
- rb_define_private_method(c, "with_row_processor", spg_with_row_processor, 3);
+ #if HAVE_PQSETSINGLEROWMODE
+ spg_id_get_result = rb_intern("get_result");
+ spg_id_clear = rb_intern("clear");
+ spg_id_check = rb_intern("check");
+
+ rb_define_private_method(c, "yield_each_row", spg_yield_each_row, 1);
+ c = rb_funcall(spg_Postgres, cg, 1, rb_str_new2("Adapter"));
+ rb_define_private_method(c, "set_single_row_mode", spg_set_single_row_mode, 0);
  #endif

  rb_define_singleton_method(spg_Postgres, "parse_pg_array", parse_pg_array, 2);
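
In Ruby terms, the rb_ensure pairing in spg_yield_each_row above guarantees the connection is drained even if the block raises or breaks out of the loop early. A rough sketch of the same control flow (helper names hypothetical, mirroring the C functions):

    def yield_each_row(conn)
      res = conn.get_result
      res.check
      while res.ntuples != 0        # one-row results until a zero-row sentinel
        row = convert_row(res)      # hypothetical: spg__col_value over row 0
        res.clear
        yield row
        res = conn.get_result
        res.check
      end
      res.clear
    ensure
      # spg__flush_results: discard any results left by an early exit
      while res = conn.get_result
        res.clear
      end
    end
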
lib/sequel/extensions/pg_streaming.rb ADDED
@@ -0,0 +1,130 @@
+ unless Sequel::Postgres.respond_to?(:supports_streaming?)
+ raise LoadError, "either sequel_pg not loaded, or an old version of sequel_pg loaded"
+ end
+ unless Sequel::Postgres.supports_streaming?
+ raise LoadError, "streaming is not supported by the version of libpq in use"
+ end
+
+ # Database methods necessary to support streaming. You should load this extension
+ # into your database object:
+ #
+ # DB.extension(:pg_streaming)
+ #
+ # Then you can call #stream on your datasets to use the streaming support:
+ #
+ # DB[:table].stream.each{|row| ...}
+ #
+ # Or change a setting so that all dataset calls use streaming:
+ #
+ # DB.stream_all_queries = true
+ module Sequel::Postgres::Streaming
+ attr_accessor :stream_all_queries
+
+ # Also extend the database's datasets to support streaming.
+ # This extension requires modifying connections, so disconnect
+ # so that new connections will get the methods.
+ def self.extended(db)
+ db.extend_datasets(DatasetMethods)
+ db.stream_all_queries = false
+ db.disconnect
+ end
+
+ # Make sure all new connections have the appropriate methods added.
+ def connect(server)
+ conn = super
+ conn.extend(AdapterMethods)
+ conn
+ end
+
+ private
+
+ # If streaming is requested, and a prepared statement is not
+ # used, tell the connection to use single row mode for the query.
+ def _execute(conn, sql, opts={}, &block)
+ if opts[:stream] && !sql.is_a?(Symbol)
+ conn.single_row_mode = true
+ end
+ super
+ end
+
+ # If streaming is requested, send the prepared statement instead
+ # of executing it and blocking.
+ def _execute_prepared_statement(conn, ps_name, args, opts)
+ if opts[:stream]
+ conn.send_prepared_statement(ps_name, args)
+ else
+ super
+ end
+ end
+
+ module AdapterMethods
+ # Whether the next query on this connection should use
+ # single_row_mode.
+ attr_accessor :single_row_mode
+
+ # Send the prepared statement on this connection using
+ # single row mode.
+ def send_prepared_statement(ps_name, args)
+ send_query_prepared(ps_name, args)
+ set_single_row_mode
+ block
+ self
+ end
+
+ private
+
+ # If using single row mode, send the query instead of executing it.
+ def execute_query(sql, args)
+ if @single_row_mode
+ @single_row_mode = false
+ @db.log_yield(sql, args){args ? send_query(sql, args) : send_query(sql)}
+ set_single_row_mode
+ block
+ self
+ else
+ super
+ end
+ end
+ end
+
+ # Dataset methods used to implement streaming.
+ module DatasetMethods
+ # If streaming has been requested and the current dataset
+ # can be streamed, request the database use streaming when
+ # executing this query, and use yield_each_row to process
+ # the separate PGresult for each row in the connection.
+ def fetch_rows(sql)
+ if stream_results?
+ execute(sql, :stream=>true) do |conn|
+ yield_each_row(conn){|h| yield h}
+ end
+ else
+ super
+ end
+ end
+
+ # Return a clone of the dataset that will use streaming to load
+ # rows.
+ def stream
+ clone(:stream=>true)
+ end
+
+ private
+
+ # Only stream results if streaming has been specifically requested
+ # and the query is streamable.
+ def stream_results?
+ (@opts[:stream] || db.stream_all_queries) && streamable?
+ end
+
+ # Queries using cursors are not streamable, and queries that use
+ # the map/select_map/to_hash/to_hash_groups optimizations are not
+ # streamable, but other queries are streamable.
+ def streamable?
+ spgt = (o = @opts)[:_sequel_pg_type]
+ (spgt.nil? || spgt == :model) && !o[:cursor]
+ end
+ end
+ end
+
+ Sequel::Database.register_extension(:pg_streaming, Sequel::Postgres::Streaming)
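
Taken together, the new extension is used as its header comment describes; a minimal end-to-end sketch (connection URL hypothetical):

    require 'sequel'

    DB = Sequel.connect('postgres:///mydb')
    DB.extension(:pg_streaming)

    # Rows are yielded as libpq returns them, instead of
    # materializing the whole result set first.
    DB[:huge_table].stream.each { |row| p row }

    # Or stream every streamable query by default:
    DB.stream_all_queries = true
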
metadata CHANGED
@@ -1,7 +1,7 @@
  --- !ruby/object:Gem::Specification
  name: sequel_pg
  version: !ruby/object:Gem::Version
- version: 1.5.1
+ version: 1.6.0
  prerelease:
  platform: ruby
  authors:
@@ -9,7 +9,7 @@ authors:
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2012-08-02 00:00:00.000000000 Z
+ date: 2012-09-04 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: pg
@@ -34,7 +34,7 @@ dependencies:
  requirements:
  - - ! '>='
  - !ruby/object:Gem::Version
- version: 3.36.0
+ version: 3.39.0
  type: :runtime
  prerelease: false
  version_requirements: !ruby/object:Gem::Requirement
@@ -42,7 +42,7 @@ dependencies:
  requirements:
  - - ! '>='
  - !ruby/object:Gem::Version
- version: 3.36.0
+ version: 3.39.0
  description: ! 'sequel_pg overwrites the inner loop of the Sequel postgres

  adapter row fetching code with a C version. The C version
@@ -68,7 +68,7 @@ files:
  - ext/sequel_pg/extconf.rb
  - ext/sequel_pg/sequel_pg.c
  - lib/sequel_pg/sequel_pg.rb
- - lib/sequel_pg/streaming.rb
+ - lib/sequel/extensions/pg_streaming.rb
  homepage: http://github.com/jeremyevans/sequel_pg
  licenses: []
  post_install_message:
lib/sequel_pg/streaming.rb DELETED
@@ -1,82 +0,0 @@
- unless Sequel::Postgres.respond_to?(:supports_streaming?)
- raise LoadError, "either sequel_pg not loaded, or an old version of sequel_pg loaded"
- end
- unless Sequel::Postgres.supports_streaming?
- raise LoadError, "streaming is not supported by the version of libpq in use"
- end
-
- # Database methods necessary to support streaming. You should extend your
- # Database object with this:
- #
- # DB.extend Sequel::Postgres::Streaming
- #
- # Then you can call #stream on your datasets to use the streaming support:
- #
- # DB[:table].stream.each{|row| ...}
- module Sequel::Postgres::Streaming
- # Also extend the database's datasets to support streaming
- def self.extended(db)
- db.extend_datasets(DatasetMethods)
- end
-
- private
-
- # If streaming is requested, set a row processor while executing
- # the query.
- def _execute(conn, sql, opts={})
- if stream = opts[:stream]
- with_row_processor(conn, *stream){super}
- else
- super
- end
- end
-
- # Dataset methods used to implement streaming.
- module DatasetMethods
- # If streaming has been requested and the current dataset
- # can be streamed, request the database use streaming when
- # executing this query.
- def fetch_rows(sql, &block)
- if stream_results?
- execute(sql, :stream=>[self, block])
- else
- super
- end
- end
-
- # Return a clone of the dataset that will use streaming to load
- # rows.
- def stream
- clone(:stream=>true)
- end
-
- private
-
- # Only stream results if streaming has been specifically requested
- # and the query is streamable.
- def stream_results?
- @opts[:stream] && streamable?
- end
-
- # Queries using cursors are not streamable, and queries that use
- # the map/select_map/to_hash/to_hash_groups optimizations are not
- # streamable, but other queries are streamable.
- def streamable?
- spgt = (o = @opts)[:_sequel_pg_type]
- (spgt.nil? || spgt == :model) && !o[:cursor]
- end
- end
-
- # Extend a database's datasets with this module to enable streaming
- # on all streamable queries:
- #
- # DB.extend_datasets(Sequel::Postgres::Streaming::AllQueries)
- module AllQueries
- private
-
- # Always stream results if the query is streamable.
- def stream_results?
- streamable?
- end
- end
- end