sequel_pg 1.5.1-x86-mswin32-60 → 1.6.1-x86-mswin32-60

data/CHANGELOG CHANGED
@@ -1,3 +1,11 @@
+ === 1.6.1 (2012-10-25)
+
+ * Make PostgreSQL array parser handle string encodings correctly on ruby 1.9 (jeremyevans)
+
+ === 1.6.0 (2012-09-04)
+
+ * Replace PQsetRowProcessor streaming with PQsetSingleRowMode streaming introduced in PostgreSQL 9.2beta3 (jeremyevans)
+
  === 1.5.1 (2012-08-02)
 
  * Sprinkle some RB_GC_GUARD to work around segfaults in the PostgreSQL array parser (jeremyevans)
data/README.rdoc CHANGED
@@ -65,7 +65,7 @@ enable the model optimization via:
 
  == Streaming
 
- If you are using PostgreSQL 9.2 or higher on the client, then sequel_pg
+ If you are using PostgreSQL 9.2beta3 or higher on the client, then sequel_pg
  should enable streaming support. This allows you to stream returned
  rows one at a time, instead of collecting the entire result set in
  memory (which is how PostgreSQL works by default). You can check
@@ -76,8 +76,7 @@ if streaming is supported by:
  If streaming is supported, you can load the streaming support into the
  database:
 
-   require 'sequel_pg/streaming'
-   DB.extend Sequel::Postgres::Streaming
+   DB.extension(:pg_streaming)
 
  Then you can call the Dataset#stream method to have the dataset use
  the streaming support:
@@ -87,7 +86,11 @@ the streaming support:
  If you want to enable streaming for all of a database's datasets, you
  can do the following:
 
-   DB.extend_datasets Sequel::Postgres::Streaming::AllQueries
+   DB.stream_all_queries = true
+
+ Note that pg 0.14.1+ is required for streaming to work. This is not
+ required by the gem, as it is only a requirement for streaming, not
+ for general use.
 
  == Installing the gem
 
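
Putting the updated README instructions together, a minimal end-to-end
sketch (the connection URL and table name are placeholders; assumes pg
0.14.1+ and a libpq from PostgreSQL 9.2beta3 or later):

  require 'sequel'

  DB = Sequel.connect('postgres://localhost/testdb') # placeholder URL
  DB.extension(:pg_streaming)

  if Sequel::Postgres.supports_streaming?
    # Stream one dataset's rows without buffering the whole result set.
    DB[:items].stream.each{|row| p row}

    # Or opt every streamable query into streaming.
    DB.stream_all_queries = true
  end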
data/ext/sequel_pg/extconf.rb CHANGED
@@ -14,7 +14,7 @@ if enable_config("static-build")
  end
 
  if (have_library('pq') || have_library('libpq') || have_library('ms/libpq')) && have_header('libpq-fe.h')
-   have_func 'PQsetRowProcessor'
+   have_func 'PQsetSingleRowMode'
    create_makefile("sequel_pg")
  else
    puts 'Could not find PostgreSQL build environment (libraries & headers): Makefile not created'
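
For context, have_func comes from Ruby's mkmf: when the linked libpq
exports the named symbol, it defines a HAVE_PQSETSINGLEROWMODE macro
that the C source tests to compile in the streaming support. A minimal
standalone sketch (library and header names as in the hunk above):

  require 'mkmf'

  # Defines -DHAVE_PQSETSINGLEROWMODE when libpq exports the function.
  if have_library('pq') && have_header('libpq-fe.h')
    have_func 'PQsetSingleRowMode'
    create_makefile('sequel_pg')
  end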
data/ext/sequel_pg/sequel_pg.c CHANGED
@@ -103,6 +103,12 @@ static ID spg_id_columns;
  static ID spg_id_encoding;
  static ID spg_id_values;
 
+ #if HAVE_PQSETSINGLEROWMODE
+ static ID spg_id_get_result;
+ static ID spg_id_clear;
+ static ID spg_id_check;
+ #endif
+
  #if SPG_ENCODING
  static int enc_get_index(VALUE val)
  {
@@ -114,7 +120,11 @@ static int enc_get_index(VALUE val)
  }
  #endif
 
- static VALUE read_array(int *index, char *c_pg_array_string, int array_string_length, char *word, VALUE converter)
+ static VALUE read_array(int *index, char *c_pg_array_string, int array_string_length, char *word, VALUE converter
+ #ifdef SPG_ENCODING
+   , int enc_index
+ #endif
+   )
  {
    int word_index = 0;
 
@@ -155,15 +165,20 @@ static VALUE read_array(int *index, char *c_pg_array_string, int array_string_le
        {
          rb_ary_push(array, Qnil);
        }
-       else if (RTEST(converter))
+       else
        {
-         VALUE rword = rb_str_new(word, word_index);
+         VALUE rword = rb_tainted_str_new(word, word_index);
          RB_GC_GUARD(rword);
-         rb_ary_push(array, rb_funcall(converter, spg_id_call, 1, rword));
-       }
-       else
-       {
-         rb_ary_push(array, rb_str_new(word, word_index));
+
+ #ifdef SPG_ENCODING
+         rb_enc_associate_index(rword, enc_index);
+ #endif
+
+         if (RTEST(converter)) {
+           rword = rb_funcall(converter, spg_id_call, 1, rword);
+         }
+
+         rb_ary_push(array, rword);
        }
      }
      if(c == '}')
@@ -181,7 +196,11 @@ static VALUE read_array(int *index, char *c_pg_array_string, int array_string_le
      else if(c == '{')
      {
        (*index)++;
-       rb_ary_push(array, read_array(index, c_pg_array_string, array_string_length, word, converter));
+       rb_ary_push(array, read_array(index, c_pg_array_string, array_string_length, word, converter
+ #ifdef SPG_ENCODING
+         , enc_index
+ #endif
+         ));
        escapeNext = 1;
      }
      else
@@ -224,7 +243,11 @@ static VALUE parse_pg_array(VALUE self, VALUE pg_array_string, VALUE converter)
    char *word = RSTRING_PTR(buf);
    int index = 1;
 
-   return read_array(&index, c_pg_array_string, array_string_length, word, converter);
+   return read_array(&index, c_pg_array_string, array_string_length, word, converter
+ #ifdef SPG_ENCODING
+     , enc_get_index(pg_array_string)
+ #endif
+     );
  }
 
  static VALUE spg_time(const char *s) {
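
The effect of threading enc_index through read_array is visible from
Ruby; a sketch (the input literal is illustrative):

  # On ruby 1.9, elements parsed from a PostgreSQL array string now
  # inherit the encoding of the input string instead of ASCII-8BIT.
  s = "{résumé,naïve}".force_encoding('UTF-8')
  Sequel::Postgres.parse_pg_array(s, nil).map(&:encoding)
  # => [#<Encoding:UTF-8>, #<Encoding:UTF-8>]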
@@ -403,7 +426,7 @@ static VALUE spg_timestamp(const char *s, VALUE self) {
  }
 
  static VALUE spg_fetch_rows_set_cols(VALUE self, VALUE ignore) {
-   return self;
+   return Qnil;
  }
 
  static VALUE spg__col_value(VALUE self, PGresult *res, long i, long j, VALUE* colconvert
@@ -813,194 +836,115 @@ static VALUE spg_yield_hash_rows(VALUE self, VALUE rres, VALUE ignore) {
 
  static VALUE spg_supports_streaming_p(VALUE self) {
    return
- #if HAVE_PQSETROWPROCESSOR
+ #if HAVE_PQSETSINGLEROWMODE
      Qtrue;
  #else
      Qfalse;
  #endif
  }
 
- #if HAVE_PQSETROWPROCESSOR
- static VALUE spg__rp_value(VALUE self, PGresult* res, const PGdataValue* dvs, int j, VALUE* colconvert
- #ifdef SPG_ENCODING
-   , int enc_index
- #endif
-   ) {
-   const char *v;
-   PGdataValue dv = dvs[j];
-   VALUE rv;
-   size_t l;
-   int len = dv.len;
-
-   if(len < 0) {
-     rv = Qnil;
-   } else {
-     v = dv.value;
-
-     switch(PQftype(res, j)) {
-       case 16: /* boolean */
-         rv = *v == 't' ? Qtrue : Qfalse;
-         break;
-       case 17: /* bytea */
-         v = PQunescapeBytea((unsigned char*)v, &l);
-         rv = rb_funcall(spg_Blob, spg_id_new, 1, rb_str_new(v, l));
-         PQfreemem((char *)v);
-         break;
-       case 20: /* integer */
-       case 21:
-       case 22:
-       case 23:
-       case 26:
-         rv = rb_str2inum(rb_str_new(v, len), 10);
-         break;
-       case 700: /* float */
-       case 701:
-         if (strncmp("NaN", v, 3) == 0) {
-           rv = spg_nan;
-         } else if (strncmp("Infinity", v, 8) == 0) {
-           rv = spg_pos_inf;
-         } else if (strncmp("-Infinity", v, 9) == 0) {
-           rv = spg_neg_inf;
-         } else {
-           rv = rb_float_new(rb_str_to_dbl(rb_str_new(v, len), Qfalse));
-         }
-         break;
-       case 790: /* numeric */
-       case 1700:
-         rv = rb_funcall(spg_BigDecimal, spg_id_new, 1, rb_str_new(v, len));
-         break;
-       case 1082: /* date */
-         rv = rb_str_new(v, len);
-         rv = spg_date(StringValuePtr(rv));
-         break;
-       case 1083: /* time */
-       case 1266:
-         rv = rb_str_new(v, len);
-         rv = spg_time(StringValuePtr(rv));
-         break;
-       case 1114: /* timestamp */
-       case 1184:
-         rv = rb_str_new(v, len);
-         rv = spg_timestamp(StringValuePtr(rv), self);
-         break;
-       case 18: /* char */
-       case 25: /* text */
-       case 1043: /* varchar*/
-         rv = rb_tainted_str_new(v, len);
- #ifdef SPG_ENCODING
-         rb_enc_associate_index(rv, enc_index);
- #endif
-         break;
-       default:
-         rv = rb_tainted_str_new(v, len);
- #ifdef SPG_ENCODING
-         rb_enc_associate_index(rv, enc_index);
- #endif
-         if (colconvert[j] != Qnil) {
-           rv = rb_funcall(colconvert[j], spg_id_call, 1, rv);
-         }
-     }
+ #if HAVE_PQSETSINGLEROWMODE
+ static VALUE spg_set_single_row_mode(VALUE self) {
+   PGconn *conn;
+   Data_Get_Struct(self, PGconn, conn);
+   if (PQsetSingleRowMode(conn) != 1) {
+     rb_raise(spg_PGError, "cannot set single row mode");
    }
-   return rv;
+   return Qnil;
  }
 
- static int spg_row_processor(PGresult *res, const PGdataValue *columns, const char **errmsgp, void *param) {
+ static VALUE spg__yield_each_row(VALUE self) {
+   PGconn *conn;
+   PGresult *res;
+   VALUE rres;
+   VALUE rconn;
+   VALUE colsyms[SPG_MAX_FIELDS];
+   VALUE colconvert[SPG_MAX_FIELDS];
    long nfields;
-   struct spg_row_proc_info *info;
-   info = (struct spg_row_proc_info *)param;
-   VALUE *colsyms = info->colsyms;
-   VALUE *colconvert = info->colconvert;
-   VALUE self = info->dataset;
+   long j;
+   VALUE h;
+   VALUE opts;
+   VALUE pg_type;
+   VALUE pg_value = Qnil;
+   char type = SPG_YIELD_NORMAL;
 
-   switch (PQresultStatus(res))
-   {
-     case PGRES_TUPLES_OK:
-     case PGRES_COPY_OUT:
-     case PGRES_COPY_IN:
- #ifdef HAVE_CONST_PGRES_COPY_BOTH
-     case PGRES_COPY_BOTH:
+   rconn = rb_ary_entry(self, 1);
+   self = rb_ary_entry(self, 0);
+   Data_Get_Struct(rconn, PGconn, conn);
+
+   rres = rb_funcall(rconn, spg_id_get_result, 0);
+   rb_funcall(rres, spg_id_check, 0);
+   Data_Get_Struct(rres, PGresult, res);
+
+ #ifdef SPG_ENCODING
+   int enc_index;
+   enc_index = enc_get_index(rres);
  #endif
-     case PGRES_EMPTY_QUERY:
-     case PGRES_COMMAND_OK:
-       break;
-     case PGRES_BAD_RESPONSE:
-     case PGRES_FATAL_ERROR:
-     case PGRES_NONFATAL_ERROR:
-       rb_raise(spg_PGError, "error while streaming results");
-     default:
-       rb_raise(spg_PGError, "unexpected result status while streaming results");
+
+   /* Only handle regular and model types. All other types require compiling all
+    * of the results at once, which is not a use case for streaming. The streaming
+    * code does not call this function for the other types. */
+   opts = rb_funcall(self, spg_id_opts, 0);
+   if (rb_type(opts) == T_HASH) {
+     pg_type = rb_hash_aref(opts, spg_sym__sequel_pg_type);
+     pg_value = rb_hash_aref(opts, spg_sym__sequel_pg_value);
+     if (SYMBOL_P(pg_type) && pg_type == spg_sym_model && rb_type(pg_value) == T_CLASS) {
+       type = SPG_YIELD_MODEL;
+     }
    }
 
    nfields = PQnfields(res);
-   if(columns == NULL) {
-     spg_set_column_info(self, res, colsyms, colconvert);
-     rb_ivar_set(self, spg_id_columns, rb_ary_new4(nfields, colsyms));
-   } else {
-     long j;
-     VALUE h, m;
-     h = rb_hash_new();
+   if (nfields > SPG_MAX_FIELDS) {
+     rb_funcall(rres, spg_id_clear, 0);
+     rb_raise(rb_eRangeError, "more than %d columns in query", SPG_MAX_FIELDS);
+   }
 
+   spg_set_column_info(self, res, colsyms, colconvert);
+
+   rb_ivar_set(self, spg_id_columns, rb_ary_new4(nfields, colsyms));
+
+   while (PQntuples(res) != 0) {
+     h = rb_hash_new();
      for(j=0; j<nfields; j++) {
-       rb_hash_aset(h, colsyms[j], spg__rp_value(self, res, columns, j, colconvert
- #ifdef SPG_ENCODING
-         , info->enc_index
- #endif
-         ));
+       rb_hash_aset(h, colsyms[j], spg__col_value(self, res, 0, j, colconvert ENC_INDEX));
      }
 
-     /* optimize_model_load used, return model instance */
-     if ((m = info->model)) {
-       m = rb_obj_alloc(m);
-       rb_ivar_set(m, spg_id_values, h);
-       h = m;
+     rb_funcall(rres, spg_id_clear, 0);
+
+     if(type == SPG_YIELD_MODEL) {
+       /* Abuse local variable */
+       pg_type = rb_obj_alloc(pg_value);
+       rb_ivar_set(pg_type, spg_id_values, h);
+       rb_yield(pg_type);
+     } else {
+       rb_yield(h);
      }
 
-     rb_funcall(info->block, spg_id_call, 1, h);
+     rres = rb_funcall(rconn, spg_id_get_result, 0);
+     rb_funcall(rres, spg_id_check, 0);
+     Data_Get_Struct(rres, PGresult, res);
    }
-   return 1;
- }
+   rb_funcall(rres, spg_id_clear, 0);
 
- static VALUE spg_unset_row_processor(VALUE rconn) {
-   PGconn *conn;
-   Data_Get_Struct(rconn, PGconn, conn);
-   if ((PQskipResult(conn)) != NULL) {
-     /* Results remaining when row processor finished,
-      * either because an exception was raised or the iterator
-      * exited early, so skip all remaining rows. */
-     while(PQgetResult(conn) != NULL) {
-       /* Use a separate while loop as PQgetResult is faster than
-        * PQskipResult. */
-     }
-   }
-   PQsetRowProcessor(conn, NULL, NULL);
-   return Qnil;
+   return self;
  }
 
- static VALUE spg_with_row_processor(VALUE self, VALUE rconn, VALUE dataset, VALUE block) {
-   struct spg_row_proc_info info;
+ static VALUE spg__flush_results(VALUE rconn) {
    PGconn *conn;
+   PGresult *res;
    Data_Get_Struct(rconn, PGconn, conn);
-   bzero(&info, sizeof(info));
 
-   info.dataset = dataset;
-   info.block = block;
-   info.model = 0;
- #if SPG_ENCODING
-   info.enc_index = enc_get_index(rconn);
- #endif
-
-   /* Abuse local variable, detect if optimize_model_load used */
-   block = rb_funcall(dataset, spg_id_opts, 0);
-   if (rb_type(block) == T_HASH && rb_hash_aref(block, spg_sym__sequel_pg_type) == spg_sym_model) {
-     block = rb_hash_aref(block, spg_sym__sequel_pg_value);
-     if (rb_type(block) == T_CLASS) {
-       info.model = block;
-     }
+   while ((res = PQgetResult(conn)) != NULL) {
+     PQclear(res);
    }
 
-   PQsetRowProcessor(conn, spg_row_processor, (void*)&info);
-   rb_ensure(rb_yield, Qnil, spg_unset_row_processor, rconn);
-   return Qnil;
+   return rconn;
+ }
+
+ static VALUE spg_yield_each_row(VALUE self, VALUE rconn) {
+   VALUE v;
+   v = rb_ary_new3(2, self, rconn);
+   return rb_ensure(spg__yield_each_row, v, spg__flush_results, rconn);
  }
  #endif
 
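
For readers more comfortable in Ruby, a rough rendering of what
spg__yield_each_row and spg__flush_results do, written against the pg
gem's result API (a conceptual sketch, not the gem's actual code path):

  # In single row mode, each PG::Result carries at most one tuple;
  # a nil result marks the end of the stream.
  def yield_each_row(conn)
    while res = conn.get_result
      res.check              # raise if the server reported an error
      res.each{|h| yield h}  # zero or one row per result
      res.clear
    end
  ensure
    # Drain any leftover results if the block exited early.
    while res = conn.get_result
      res.clear
    end
  end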
@@ -1081,9 +1025,14 @@ void Init_sequel_pg(void) {
 
  rb_define_singleton_method(spg_Postgres, "supports_streaming?", spg_supports_streaming_p, 0);
 
- #if HAVE_PQSETROWPROCESSOR
-   c = rb_funcall(spg_Postgres, cg, 1, rb_str_new2("Database"));
-   rb_define_private_method(c, "with_row_processor", spg_with_row_processor, 3);
+ #if HAVE_PQSETSINGLEROWMODE
+   spg_id_get_result = rb_intern("get_result");
+   spg_id_clear = rb_intern("clear");
+   spg_id_check = rb_intern("check");
+
+   rb_define_private_method(c, "yield_each_row", spg_yield_each_row, 1);
+   c = rb_funcall(spg_Postgres, cg, 1, rb_str_new2("Adapter"));
+   rb_define_private_method(c, "set_single_row_mode", spg_set_single_row_mode, 0);
  #endif
 
  rb_define_singleton_method(spg_Postgres, "parse_pg_array", parse_pg_array, 2);
data/lib/1.8/sequel_pg.so CHANGED
Binary file
data/lib/1.9/sequel_pg.so CHANGED
Binary file
data/lib/sequel/extensions/pg_streaming.rb ADDED
@@ -0,0 +1,130 @@
+ unless Sequel::Postgres.respond_to?(:supports_streaming?)
+   raise LoadError, "either sequel_pg not loaded, or an old version of sequel_pg loaded"
+ end
+ unless Sequel::Postgres.supports_streaming?
+   raise LoadError, "streaming is not supported by the version of libpq in use"
+ end
+
+ # Database methods necessary to support streaming. You should load this extension
+ # into your database object:
+ #
+ #   DB.extension(:pg_streaming)
+ #
+ # Then you can call #stream on your datasets to use the streaming support:
+ #
+ #   DB[:table].stream.each{|row| ...}
+ #
+ # Or change the database so that all dataset calls use streaming:
+ #
+ #   DB.stream_all_queries = true
+ module Sequel::Postgres::Streaming
+   attr_accessor :stream_all_queries
+
+   # Also extend the database's datasets to support streaming.
+   # This extension requires modifying connections, so disconnect
+   # so that new connections will get the methods.
+   def self.extended(db)
+     db.extend_datasets(DatasetMethods)
+     db.stream_all_queries = false
+     db.disconnect
+   end
+
+   # Make sure all new connections have the appropriate methods added.
+   def connect(server)
+     conn = super
+     conn.extend(AdapterMethods)
+     conn
+   end
+
+   private
+
+   # If streaming is requested, and a prepared statement is not
+   # used, tell the connection to use single row mode for the query.
+   def _execute(conn, sql, opts={}, &block)
+     if opts[:stream] && !sql.is_a?(Symbol)
+       conn.single_row_mode = true
+     end
+     super
+   end
+
+   # If streaming is requested, send the prepared statement instead
+   # of executing it and blocking.
+   def _execute_prepared_statement(conn, ps_name, args, opts)
+     if opts[:stream]
+       conn.send_prepared_statement(ps_name, args)
+     else
+       super
+     end
+   end
+
+   module AdapterMethods
+     # Whether the next query on this connection should use
+     # single_row_mode.
+     attr_accessor :single_row_mode
+
+     # Send the prepared statement on this connection using
+     # single row mode.
+     def send_prepared_statement(ps_name, args)
+       send_query_prepared(ps_name, args)
+       set_single_row_mode
+       block
+       self
+     end
+
+     private
+
+     # If using single row mode, send the query instead of executing it.
+     def execute_query(sql, args)
+       if @single_row_mode
+         @single_row_mode = false
+         @db.log_yield(sql, args){args ? send_query(sql, args) : send_query(sql)}
+         set_single_row_mode
+         block
+         self
+       else
+         super
+       end
+     end
+   end
+
+   # Dataset methods used to implement streaming.
+   module DatasetMethods
+     # If streaming has been requested and the current dataset
+     # can be streamed, request the database use streaming when
+     # executing this query, and use yield_each_row to process
+     # the separate PGresult for each row in the connection.
+     def fetch_rows(sql)
+       if stream_results?
+         execute(sql, :stream=>true) do |conn|
+           yield_each_row(conn){|h| yield h}
+         end
+       else
+         super
+       end
+     end
+
+     # Return a clone of the dataset that will use streaming to load
+     # rows.
+     def stream
+       clone(:stream=>true)
+     end
+
+     private
+
+     # Only stream results if streaming has been specifically requested
+     # and the query is streamable.
+     def stream_results?
+       (@opts[:stream] || db.stream_all_queries) && streamable?
+     end
+
+     # Queries using cursors are not streamable, and queries that use
+     # the map/select_map/to_hash/to_hash_groups optimizations are not
+     # streamable, but other queries are streamable.
+     def streamable?
+       spgt = (o = @opts)[:_sequel_pg_type]
+       (spgt.nil? || spgt == :model) && !o[:cursor]
+     end
+   end
+ end
+
+ Sequel::Database.register_extension(:pg_streaming, Sequel::Postgres::Streaming)
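
The stream_results? and streamable? guards above determine when
streaming actually happens; a sketch of the observable behavior (table
name illustrative):

  DB.extension(:pg_streaming)

  DB[:logs].stream.each{|row| p row}  # streamed: :stream option set
  DB[:logs].use_cursor.stream.each{|row| p row} # not streamed: cursor wins
  DB[:logs].select_map(:id)           # not streamed: map optimization

  DB.stream_all_queries = true
  DB[:logs].each{|row| p row}         # now streamed without #stream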
metadata CHANGED
@@ -1,13 +1,13 @@
  --- !ruby/object:Gem::Specification
  name: sequel_pg
  version: !ruby/object:Gem::Version
-   hash: 1
+   hash: 13
    prerelease:
    segments:
    - 1
-   - 5
+   - 6
    - 1
-   version: 1.5.1
+   version: 1.6.1
  platform: x86-mswin32-60
  authors:
  - Jeremy Evans
@@ -15,7 +15,7 @@ autorequire:
  bindir: bin
  cert_chain: []
 
- date: 2012-04-07 00:00:00 Z
+ date: 2012-09-28 00:00:00 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: pg
@@ -41,12 +41,12 @@ dependencies:
    requirements:
    - - ">="
      - !ruby/object:Gem::Version
-       hash: 151
+       hash: 155
        segments:
        - 3
-       - 36
+       - 39
        - 0
-       version: 3.36.0
+       version: 3.39.0
    type: :runtime
    version_requirements: *id002
  description: |
@@ -72,7 +72,7 @@ files:
  - ext/sequel_pg/extconf.rb
  - ext/sequel_pg/sequel_pg.c
  - lib/sequel_pg/sequel_pg.rb
- - lib/sequel_pg/streaming.rb
+ - lib/sequel/extensions/pg_streaming.rb
  - lib/1.8/sequel_pg.so
  - lib/1.9/sequel_pg.so
  homepage: http://github.com/jeremyevans/sequel_pg
data/lib/sequel_pg/streaming.rb DELETED
@@ -1,82 +0,0 @@
- unless Sequel::Postgres.respond_to?(:supports_streaming?)
-   raise LoadError, "either sequel_pg not loaded, or an old version of sequel_pg loaded"
- end
- unless Sequel::Postgres.supports_streaming?
-   raise LoadError, "streaming is not supported by the version of libpq in use"
- end
-
- # Database methods necessary to support streaming. You should extend your
- # Database object with this:
- #
- #   DB.extend Sequel::Postgres::Streaming
- #
- # Then you can call #stream on your datasets to use the streaming support:
- #
- #   DB[:table].stream.each{|row| ...}
- module Sequel::Postgres::Streaming
-   # Also extend the database's datasets to support streaming
-   def self.extended(db)
-     db.extend_datasets(DatasetMethods)
-   end
-
-   private
-
-   # If streaming is requested, set a row processor while executing
-   # the query.
-   def _execute(conn, sql, opts={})
-     if stream = opts[:stream]
-       with_row_processor(conn, *stream){super}
-     else
-       super
-     end
-   end
-
-   # Dataset methods used to implement streaming.
-   module DatasetMethods
-     # If streaming has been requested and the current dataset
-     # can be streamed, request the database use streaming when
-     # executing this query.
-     def fetch_rows(sql, &block)
-       if stream_results?
-         execute(sql, :stream=>[self, block])
-       else
-         super
-       end
-     end
-
-     # Return a clone of the dataset that will use streaming to load
-     # rows.
-     def stream
-       clone(:stream=>true)
-     end
-
-     private
-
-     # Only stream results if streaming has been specifically requested
-     # and the query is streamable.
-     def stream_results?
-       @opts[:stream] && streamable?
-     end
-
-     # Queries using cursors are not streamable, and queries that use
-     # the map/select_map/to_hash/to_hash_groups optimizations are not
-     # streamable, but other queries are streamable.
-     def streamable?
-       spgt = (o = @opts)[:_sequel_pg_type]
-       (spgt.nil? || spgt == :model) && !o[:cursor]
-     end
-   end
-
-   # Extend a database's datasets with this module to enable streaming
-   # on all streamable queries:
-   #
-   #   DB.extend_datasets(Sequel::Postgres::Streaming::AllQueries)
-   module AllQueries
-     private
-
-     # Always stream results if the query is streamable.
-     def stream_results?
-       streamable?
-     end
-   end
- end