datastax_rails 1.0.17.7 → 1.0.18.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
data/README.rdoc CHANGED
@@ -72,9 +72,6 @@ attributes on any model. DSR will only upload schema files if they have changed
72
72
 
73
73
  === Known issues
74
74
 
75
- Calling +find+ on a model with a bogus ID returns an empty model instead of RecordNotFound. This is due to a bug
76
- in DSE that is supposedly fixed in the upcoming 2.2 release.
77
-
78
75
  Setting an integer field to something other than an integer results in nothing being set and no validation error
79
76
  (if you were using one).
80
77
 
@@ -110,101 +110,11 @@
110
110
  persistent, and doesn't work with replication.
111
111
  -->
112
112
  <directoryFactory name="DirectoryFactory"
113
- class="${solr.directoryFactory:solr.StandardDirectoryFactory}"/>
114
-
115
-
116
- <!-- Index Defaults
117
-
118
- Values here affect all index writers and act as a default
119
- unless overridden.
120
-
121
- WARNING: See also the <mainIndex> section below for parameters
122
- that overfor Solr's main Lucene index.
123
- -->
124
- <indexDefaults>
113
+ class="com.datastax.bdp.cassandra.index.solr.DSENRTCachingDirectoryFactory"/>
114
+ <indexConfig>
125
115
 
126
116
  <useCompoundFile>false</useCompoundFile>
127
-
128
- <mergeFactor>10</mergeFactor>
129
- <!-- Sets the amount of RAM that may be used by Lucene indexing
130
- for buffering added documents and deletions before they are
131
- flushed to the Directory. -->
132
- <ramBufferSizeMB>32</ramBufferSizeMB>
133
- <!-- If both ramBufferSizeMB and maxBufferedDocs is set, then
134
- Lucene will flush based on whichever limit is hit first.
135
- -->
136
- <!-- <maxBufferedDocs>1000</maxBufferedDocs> -->
137
-
138
- <maxFieldLength>10000</maxFieldLength>
139
- <writeLockTimeout>1000</writeLockTimeout>
140
- <commitLockTimeout>10000</commitLockTimeout>
141
-
142
- <!-- Expert: Merge Policy
143
-
144
- The Merge Policy in Lucene controls how merging is handled by
145
- Lucene. The default in Solr 3.3 is TieredMergePolicy.
146
-
147
- The default in 2.3 was the LogByteSizeMergePolicy,
148
- previous versions used LogDocMergePolicy.
149
-
150
- LogByteSizeMergePolicy chooses segments to merge based on
151
- their size. The Lucene 2.2 default, LogDocMergePolicy chose
152
- when to merge based on number of documents
153
-
154
- Other implementations of MergePolicy must have a no-argument
155
- constructor
156
- -->
157
- <!--
158
- <mergePolicy class="org.apache.lucene.index.TieredMergePolicy"/>
159
- -->
160
-
161
- <!-- Expert: Merge Scheduler
162
-
163
- The Merge Scheduler in Lucene controls how merges are
164
- performed. The ConcurrentMergeScheduler (Lucene 2.3 default)
165
- can perform merges in the background using separate threads.
166
- The SerialMergeScheduler (Lucene 2.2 default) does not.
167
- -->
168
- <!--
169
- <mergeScheduler class="org.apache.lucene.index.ConcurrentMergeScheduler"/>
170
- -->
171
-
172
- <!-- LockFactory
173
-
174
- This option specifies which Lucene LockFactory implementation
175
- to use.
176
-
177
- single = SingleInstanceLockFactory - suggested for a
178
- read-only index or when there is no possibility of
179
- another process trying to modify the index.
180
- native = NativeFSLockFactory - uses OS native file locking.
181
- Do not use when multiple solr webapps in the same
182
- JVM are attempting to share a single index.
183
- simple = SimpleFSLockFactory - uses a plain file for locking
184
-
185
- (For backwards compatibility with Solr 1.2, 'simple' is the
186
- default if not specified.)
187
-
188
- More details on the nuances of each LockFactory...
189
- http://wiki.apache.org/lucene-java/AvailableLockFactories
190
- -->
191
- <lockType>native</lockType>
192
-
193
- <!-- Expert: Controls how often Lucene loads terms into memory
194
- Default is 128 and is likely good for most everyone.
195
- -->
196
- <!-- <termIndexInterval>256</termIndexInterval> -->
197
- </indexDefaults>
198
-
199
- <!-- Main Index
200
-
201
- Values here override the values in the <indexDefaults> section
202
- for the main on disk index.
203
- -->
204
- <mainIndex>
205
-
206
- <useCompoundFile>false</useCompoundFile>
207
- <ramBufferSizeMB>32</ramBufferSizeMB>
117
+ <ramBufferSizeMB>100</ramBufferSizeMB>
208
118
  <mergeFactor>10</mergeFactor>
209
119
 
210
120
  <!-- Unlock On Startup
@@ -216,8 +126,12 @@
216
126
 
217
127
  This is not needed if lock type is 'none' or 'single'
218
128
  -->
219
- <unlockOnStartup>false</unlockOnStartup>
129
+ <unlockOnStartup>true</unlockOnStartup>
220
130
 
131
+ <maxFieldLength>10000</maxFieldLength>
132
+ <writeLockTimeout>1000</writeLockTimeout>
133
+ <commitLockTimeout>10000</commitLockTimeout>
134
+
221
135
  <!-- If true, IndexReaders will be reopened (often more efficient)
222
136
  instead of closed and then opened.
223
137
  -->
@@ -262,7 +176,7 @@
262
176
  -->
263
177
  <infoStream file="INFOSTREAM.txt">false</infoStream>
264
178
 
265
- </mainIndex>
179
+ </indexConfig>
266
180
 
267
181
  <!-- JMX
268
182
 
@@ -283,6 +197,7 @@
283
197
  -->
284
198
 
285
199
  <!-- The default high-performance update handler -->
200
+ <!-- IN DSE THIS CANNOT BE CHANGED -->
286
201
  <updateHandler class="solr.DirectUpdateHandler2">
287
202
 
288
203
  <!-- AutoCommit
@@ -300,9 +215,9 @@
300
215
  since a document was added before automatically
301
216
  triggering a new commit.
302
217
  -->
303
- <autoSoftCommit>
304
- <maxDocs>1</maxDocs>
218
+ <autoSoftCommit>
305
219
  <maxTime>1000</maxTime>
220
+ <maxDocs>1</maxDocs>
306
221
  </autoSoftCommit>
307
222
 
308
223
  <!-- Update Related Event Listeners
@@ -579,7 +494,7 @@
579
494
  Recommend values of 1-2 for read-only slaves, higher for
580
495
  masters w/o cache warming.
581
496
  -->
582
- <maxWarmingSearchers>2</maxWarmingSearchers>
497
+ <maxWarmingSearchers>16</maxWarmingSearchers>
583
498
 
584
499
  </query>
585
500
 
@@ -1014,31 +929,6 @@
1014
929
  <str name="echoHandler">true</str>
1015
930
  </lst>
1016
931
  </requestHandler>
1017
-
1018
- <!-- Solr Replication
1019
-
1020
- The SolrReplicationHandler supports replicating indexes from a
1021
- "master" used for indexing and "salves" used for queries.
1022
-
1023
- http://wiki.apache.org/solr/SolrReplication
1024
-
1025
- In the example below, remove the <lst name="master"> section if
1026
- this is just a slave and remove the <lst name="slave"> section
1027
- if this is just a master.
1028
- -->
1029
- <!--
1030
- <requestHandler name="/replication" class="solr.ReplicationHandler" >
1031
- <lst name="master">
1032
- <str name="replicateAfter">commit</str>
1033
- <str name="replicateAfter">startup</str>
1034
- <str name="confFiles">schema.xml,stopwords.txt</str>
1035
- </lst>
1036
- <lst name="slave">
1037
- <str name="masterUrl">http://localhost:8983/solr/replication</str>
1038
- <str name="pollInterval">00:00:60</str>
1039
- </lst>
1040
- </requestHandler>
1041
- -->
1042
932
 
1043
933
  <!-- Search Components
1044
934
 
@@ -1450,38 +1340,6 @@
1450
1340
  </highlighting>
1451
1341
  </searchComponent>
1452
1342
 
1453
- <!-- Update Processors
1454
-
1455
- Chains of Update Processor Factories for dealing with Update
1456
- Requests can be declared, and then used by name in Update
1457
- Request Processors
1458
-
1459
- http://wiki.apache.org/solr/UpdateRequestProcessor
1460
-
1461
- -->
1462
- <!-- Deduplication
1463
-
1464
- An example dedup update processor that creates the "id" field
1465
- on the fly based on the hash code of some other fields. This
1466
- example has overwriteDupes set to false since we are using the
1467
- id field as the signatureField and Solr will maintain
1468
- uniqueness based on that anyway.
1469
-
1470
- -->
1471
- <!--
1472
- <updateRequestProcessorChain name="dedupe">
1473
- <processor class="solr.processor.SignatureUpdateProcessorFactory">
1474
- <bool name="enabled">true</bool>
1475
- <str name="signatureField">id</str>
1476
- <bool name="overwriteDupes">false</bool>
1477
- <str name="fields">name,features,cat</str>
1478
- <str name="signatureClass">solr.processor.Lookup3Signature</str>
1479
- </processor>
1480
- <processor class="solr.LogUpdateProcessorFactory" />
1481
- <processor class="solr.RunUpdateProcessorFactory" />
1482
- </updateRequestProcessorChain>
1483
- -->
1484
-
1485
1343
  <!-- Response Writers
1486
1344
 
1487
1345
  http://wiki.apache.org/solr/QueryResponseWriter
@@ -311,6 +311,7 @@ module DatastaxRails #:nodoc:
311
311
  include Timestamps
312
312
  include Serialization
313
313
  include Migrations
314
+ include SolrRepair
314
315
 
315
316
  # Stores the default scope for the class
316
317
  class_attribute :default_scopes, :instance_writer => false
@@ -346,7 +347,7 @@ module DatastaxRails #:nodoc:
346
347
  @changed_attributes = {}
347
348
  @schema_version = self.class.current_schema_version
348
349
 
349
- set_defaults
350
+ __set_defaults
350
351
 
351
352
  populate_with_current_scope_attributes
352
353
 
@@ -363,7 +364,7 @@ module DatastaxRails #:nodoc:
363
364
  end
364
365
 
365
366
  # Set any default attributes specified by the schema
366
- def set_defaults
367
+ def __set_defaults
367
368
  self.class.attribute_definitions.each do |a,d|
368
369
  unless(d.coder.default.nil?)
369
370
  self.attributes[a]=d.coder.default
@@ -472,7 +473,8 @@ module DatastaxRails #:nodoc:
472
473
  delegate :destroy, :destroy_all, :delete, :update, :update_all, :to => :scoped
473
474
  delegate :order, :limit, :where, :where_not, :page, :paginate, :select, :to => :scoped
474
475
  delegate :per_page, :each, :group, :total_pages, :search, :fulltext, :to => :scoped
475
- delegate :count, :first, :first!, :last, :last!, :to => :scoped
476
+ delegate :count, :first, :first!, :last, :last!, :compute_stats, :to => :scoped
477
+ delegate :sum, :average, :minimum, :maximum, :stddev, :to => :scoped
476
478
  delegate :cql, :with_cassandra, :with_solr, :commit_solr, :to => :scoped
477
479
 
478
480
  # Sets the column family name
@@ -562,7 +564,7 @@ module DatastaxRails #:nodoc:
562
564
  private
563
565
 
564
566
  def construct_finder_relation(options = {}, scope = nil)
565
- relation = options.is_a(Hash) ? unscoped.apply_finder_options(options) : options
567
+ relation = options.is_a?(Hash) ? unscoped.apply_finder_options(options) : options
566
568
  relation = scope.merge(relation) if scope
567
569
  relation
568
570
  end
@@ -16,7 +16,7 @@ module DatastaxRails
16
16
  # already been set up (Rails does this for you).
17
17
  def execute
18
18
  cql = self.to_cql
19
- # puts cql
19
+ puts cql if ENV['DEBUG_CQL'] == 'true'
20
20
  DatastaxRails::Base.connection.execute_cql_query(cql)
21
21
  end
22
22
  end
@@ -55,21 +55,14 @@ module DatastaxRails
55
55
  # @param [Hash] options a hash containing various options
56
56
  # @option options [Symbol] :consistency the consistency to set for the Cassandra operation (e.g., ALL)
57
57
  def write(key, attributes, options = {})
58
- key.tap do |key|
59
- attributes = encode_attributes(attributes)
60
- ActiveSupport::Notifications.instrument("insert.datastax_rails", :column_family => column_family, :key => key, :attributes => attributes) do
61
- c = cql.update(key.to_s).columns(attributes)
62
- if(options[:consistency])
63
- level = options[:consistency].to_s.upcase
64
- if(valid_consistency?(level))
65
- c.using(options[:consistency])
66
- else
67
- raise ArgumentError, "'#{level}' is not a valid Cassandra consistency level"
68
- end
69
- end
70
- c.execute
71
- end
58
+ attributes = encode_attributes(attributes)
59
+ level = (options[:consistency] || self.default_consistency).to_s.upcase
60
+ if(valid_consistency?(level))
61
+ options[:consistency] = level
62
+ else
63
+ raise ArgumentError, "'#{level}' is not a valid Cassandra consistency level"
72
64
  end
65
+ write_with_cql(key, attributes, options)
73
66
  end
74
67
 
75
68
  # Instantiates a new object without calling +initialize+.
@@ -115,6 +108,19 @@ module DatastaxRails
115
108
  end
116
109
  casted
117
110
  end
111
+
112
+ private
113
+ def write_with_cql(key, attributes, options)
114
+ key.tap do |key|
115
+ ActiveSupport::Notifications.instrument("insert.datastax_rails", :column_family => column_family, :key => key, :attributes => attributes) do
116
+ cql.update(key.to_s).columns(attributes).using(options[:consistency]).execute
117
+ end
118
+ end
119
+ end
120
+
121
+ def write_with_solr(key, attributes, options)
122
+
123
+ end
118
124
  end
119
125
 
120
126
  def new_record?
@@ -243,6 +243,30 @@ module DatastaxRails
243
243
  end
244
244
  end
245
245
 
246
+ # Have SOLR compute stats for a given numeric field. Stats computed include:
247
+ # * min
248
+ # * max
249
+ # * sum
250
+ # * sum of squares
251
+ # * mean
252
+ # * standard deviation
253
+ #
254
+ # Model.compute_stats(:price)
255
+ # Model.compute_stats(:price, :quantity)
256
+ #
257
+ # NOTE: This is only compatible with solr queries. It will be ignored when
258
+ # a CQL query is made.
259
+ #
260
+ # @param fields [Symbol] the fields to compute stats on
261
+ # @return [DatastaxRails::Relation] a new Relation object
262
+ def compute_stats(*fields)
263
+ return self if fields.empty?
264
+
265
+ clone.tap do |r|
266
+ r.stats_values += Array.wrap(fields)
267
+ end
268
+ end
269
+
246
270
  # By default, DatastaxRails will try to pick the right method of performing
247
271
  # a search. You can use this method to force it to make the query via SOLR.
248
272
  #
@@ -0,0 +1,69 @@
1
+ module DatastaxRails
2
+ module StatsMethods
3
+ STATS_FIELDS={'sum' => 'sum', 'maximum' => 'max', 'minimum' => 'min', 'average' => 'mean', 'stddev' => 'stddev'}
4
+
5
+ # @!method sum(field)
6
+ # Calculates the sum of the field listed. Field must be indexed as a number.
7
+ # @param [Symbol] field the field to calculate
8
+ # @return [Fixnum,Float] the sum of the column value rows that match the query
9
+ # @!method grouped_sum(field)
10
+ # Calculates the sum of the field listed for a grouped query.
11
+ # @param [Symbol] field the field to calculate
12
+ # @return [Hash] the sum of the columns that match the query by group. Group name is the key.
13
+ # @!method maximum(field)
14
+ # Calculates the maximum value of the field listed. Field must be indexed as a number.
15
+ # @param [Symbol] field the field to calculate
16
+ # @return [Fixnum,Float] the maximum value of the rows that match the query
17
+ # @!method grouped_maximum(field)
18
+ # Calculates the maximum of the field listed for a grouped query.
19
+ # @param [Symbol] field the field to calculate
20
+ # @return [Hash] the maximum of the columns that match the query by group. Group name is the key.
21
+ # @!method minimum(field)
22
+ # Calculates the minimum of the field listed. Field must be indexed as a number.
23
+ # @param [Symbol] field the field to calculate
24
+ # @return [Fixnum,Float] the minimum of the columns that match the query
25
+ # @!method grouped_minimum(field)
26
+ # Calculates the minimum of the field listed for a grouped query.
27
+ # @param [Symbol] field the field to calculate
28
+ # @return [Hash] the minimum of the columns that match the query by group. Group name is the key.
29
+ # @!method average(field)
30
+ # Calculates the average of the field listed. Field must be indexed as a number.
31
+ # @param [Symbol] field the field to calculate
32
+ # @return [Fixnum,Float] the average of the columns that match the query
33
+ # @!method grouped_average(field)
34
+ # Calculates the average of the field listed for a grouped query.
35
+ # @param [Symbol] field the field to calculate
36
+ # @return [Hash] the average of the columns that match the query by group. Group name is the key.
37
+ # @!method stddev(field)
38
+ # Calculates the standard deviation of the field listed. Field must be indexed as a number.
39
+ # @param [Symbol] field the field to calculate
40
+ # @return [Fixnum,Float] the standard deviation of the columns that match the query
41
+ # @!method grouped_stddev(field)
42
+ # Calculates the standard deviation of the field listed for a grouped query.
43
+ # @param [Symbol] field the field to calculate
44
+ # @return [Hash] the standard deviation of the columns that match the query by group. Group name is the key.
45
+ %w[sum maximum minimum average stddev].each do |op|
46
+ define_method(op) do |field|
47
+ calculate_stats(field)
48
+ @stats[field] ? @stats[field][STATS_FIELDS[op]] : 0
49
+ end
50
+
51
+ define_method("grouped_#{op}") do |field|
52
+ self.op unless @group_value
53
+ calculate_stats(field)
54
+ values = {}
55
+ @stats[field]["facets"][@group_value].each do |k,v|
56
+ values[k] = v[STATS_FIELDS[op]]
57
+ end
58
+ values
59
+ end
60
+ end
61
+
62
+ private
63
+ def calculate_stats(field)
64
+ unless @stats[field]
65
+ @stats[field] = limit(0).compute_stats(field).stats[field]
66
+ end
67
+ end
68
+ end
69
+ end
@@ -2,10 +2,10 @@ require 'rsolr'
2
2
 
3
3
  module DatastaxRails
4
4
  class Relation
5
- MULTI_VALUE_METHODS = [:order, :where, :where_not, :fulltext, :greater_than, :less_than, :select]
5
+ MULTI_VALUE_METHODS = [:order, :where, :where_not, :fulltext, :greater_than, :less_than, :select, :stats]
6
6
  SINGLE_VALUE_METHODS = [:page, :per_page, :reverse_order, :query_parser, :consistency, :ttl, :use_solr, :escape, :group]
7
7
 
8
- SOLR_CHAR_RX = /([\+\!\(\)\[\]\^\"\~\:\'\=]+)/
8
+ SOLR_CHAR_RX = /([\+\!\(\)\[\]\^\"\~\:\'\=\/]+)/
9
9
 
10
10
  Relation::MULTI_VALUE_METHODS.each do |m|
11
11
  attr_accessor :"#{m}_values"
@@ -19,6 +19,7 @@ module DatastaxRails
19
19
  include ModificationMethods
20
20
  include FinderMethods
21
21
  include SpawnMethods
22
+ include StatsMethods
22
23
 
23
24
  attr_reader :klass, :column_family, :loaded, :cql
24
25
  alias :loaded? :loaded
@@ -45,6 +46,7 @@ module DatastaxRails
45
46
  @extensions = []
46
47
  @create_with_value = {}
47
48
  @escape_value = true
49
+ @stats = {}
48
50
  apply_default_scope
49
51
  end
50
52
 
@@ -91,6 +93,13 @@ module DatastaxRails
91
93
  @count ||= self.use_solr_value ? count_via_solr : count_via_cql
92
94
  end
93
95
 
96
+ def stats
97
+ unless(loaded?)
98
+ to_a
99
+ end
100
+ @stats
101
+ end
102
+
94
103
  # Returns the current page for will_paginate compatibility
95
104
  def current_page
96
105
  self.page_value.try(:to_i)
@@ -149,6 +158,7 @@ module DatastaxRails
149
158
  # will re-run the query.
150
159
  def reset
151
160
  @loaded = @first = @last = @scope_for_create = @count = nil
161
+ @stats = {}
152
162
  @results = []
153
163
  end
154
164
 
@@ -341,6 +351,16 @@ module DatastaxRails
341
351
 
342
352
  select_columns = select_values.empty? ? (@klass.attribute_definitions.keys - @klass.lazy_attributes) : select_values.flatten
343
353
 
354
+ unless(@stats_values.empty?)
355
+ params[:stats] = 'true'
356
+ @stats_values.flatten.each do |sv|
357
+ params['stats.field'] = sv
358
+ end
359
+ if(@group_value)
360
+ params['stats.facet'] = @group_value
361
+ end
362
+ end
363
+ solr_response = nil
344
364
  if(@group_value)
345
365
  results = DatastaxRails::GroupedCollection.new
346
366
  params[:group] = 'true'
@@ -349,7 +369,8 @@ module DatastaxRails
349
369
  params['group.limit'] = @per_page_value
350
370
  params['group.offset'] = (@page_value - 1) * @per_page_value
351
371
  params['group.ngroups'] = 'true'
352
- response = rsolr.post('select', :data => params)["grouped"][@group_value.to_s]
372
+ solr_response = rsolr.post('select', :data => params)
373
+ response = solr_response["grouped"][@group_value.to_s]
353
374
  results.total_groups = response['ngroups'].to_i
354
375
  results.total_for_all = response['matches'].to_i
355
376
  results.total_entries = 0
@@ -358,9 +379,13 @@ module DatastaxRails
358
379
  results.total_entries = results[group['groupValue']].total_entries if results[group['groupValue']].total_entries > results.total_entries
359
380
  end
360
381
  else
361
- response = rsolr.paginate(@page_value, @per_page_value, 'select', :data => params, :method => :post)["response"]
382
+ solr_response = rsolr.paginate(@page_value, @per_page_value, 'select', :data => params, :method => :post)
383
+ response = solr_response["response"]
362
384
  results = parse_docs(response, select_columns)
363
385
  end
386
+ if solr_response["stats"]
387
+ @stats = solr_response["stats"]["stats_fields"].with_indifferent_access
388
+ end
364
389
  results
365
390
  end
366
391
 
@@ -71,6 +71,25 @@ module DatastaxRails
71
71
  return ERB.new(File.read(File.join(File.dirname(__FILE__),"..","..","..","config","schema.xml.erb"))).result(binding)
72
72
  end
73
73
 
74
+ def reindex_solr(model)
75
+ if model == ':all'
76
+ models_to_index = DatastaxRails::Base.models
77
+ else
78
+ models_to_index = [model.constantize]
79
+ end
80
+
81
+ models_to_index.each do |m|
82
+ next if m.payload_model?
83
+
84
+ url = "#{DatastaxRails::Base.solr_base_url}/admin/cores?action=RELOAD&name=#{DatastaxRails::Base.config[:keyspace]}.#{m.column_family}&reindex=true&deleteAll=false"
85
+ puts "Posting reindex command to '#{url}'"
86
+ `curl -s -X POST '#{url}'`
87
+ if Rails.env.production?
88
+ sleep(5)
89
+ end
90
+ end
91
+ end
92
+
74
93
  def upload_solr_schemas(column_family)
75
94
  force = !column_family.nil?
76
95
  column_family ||= :all
@@ -110,7 +129,10 @@ module DatastaxRails
110
129
  connection.execute_cql_query(cql)
111
130
  end
112
131
  else
132
+ newcf = false
133
+ newschema = false
113
134
  unless connection.schema.column_families[model.column_family.to_s]
135
+ newcf = true
114
136
  puts "Creating normal model #{model.column_family}"
115
137
  cql = DatastaxRails::Cql::CreateColumnFamily.new(model.column_family).key_type(:text).columns(:updated_at => :text, :created_at => :text).to_cql
116
138
  puts cql
@@ -172,6 +194,17 @@ module DatastaxRails
172
194
  break
173
195
  end
174
196
  DatastaxRails::Cql::Update.new(SchemaMigration, model.column_family).columns(:digest => schema_digest).execute
197
+ newschema = true
198
+ end
199
+
200
+ if newcf
201
+ # Create the SOLR Core
202
+ url = "#{DatastaxRails::Base.solr_base_url}/admin/cores?action=CREATE&name=#{DatastaxRails::Base.config[:keyspace]}.#{model.column_family}"
203
+ puts "Posting create command to '#{url}'"
204
+ `curl -s -X POST '#{url}'`
205
+ if Rails.env.production?
206
+ sleep(5)
207
+ end
175
208
  end
176
209
 
177
210
  # Check for unindexed columns
@@ -42,12 +42,24 @@ namespace :ds do
42
42
  end
43
43
  end
44
44
 
45
- desc 'Upload SOLR schemas -- pass in CF name to force an upload (all uploads everything).'
45
+ desc 'Upload SOLR schemas -- pass in model name to force an upload (:all uploads everything).'
46
46
  task :schema, [:force_cf] => :configure do |t, args|
47
47
  cf = DatastaxRails::Tasks::ColumnFamily.new(@config['keyspace'])
48
48
  cf.upload_solr_schemas(args[:force_cf])
49
49
  end
50
-
50
+
51
+ desc 'Rebuild SOLR Index -- pass in a model name (:all rebuilds everything)'
52
+ task :reindex, [:model] => :configure do |t, args|
53
+ if args[:model].blank?
54
+ puts "\nUSAGE: rake ds:reindex[Model]"
55
+ else
56
+ cf = DatastaxRails::Tasks::ColumnFamily.new(@config['keyspace'])
57
+ puts "Reindexing #{args[:model]}"
58
+ cf.reindex_solr(args[:model])
59
+ puts "Reindexing will run in the background"
60
+ end
61
+ end
62
+
51
63
  desc 'Load the seed data from ds/seeds.rb'
52
64
  task :seed => :environment do
53
65
  seed_file = Rails.root.join("ks","seeds.rb")
@@ -3,7 +3,7 @@ module DatastaxRails
3
3
  class BooleanType < BaseType
4
4
  DEFAULTS = {:solr_type => 'boolean', :indexed => true, :stored => true, :multi_valued => false, :sortable => true, :tokenized => false, :fulltext => false}
5
5
  TRUE_VALS = [true, 'true', '1', 'Y']
6
- FALSE_VALS = [false, 'false', '0', '', 'N', nil]
6
+ FALSE_VALS = [false, 'false', '0', '', 'N', nil, 'null']
7
7
  VALID_VALS = TRUE_VALS + FALSE_VALS
8
8
 
9
9
  def encode(bool)
@@ -5,12 +5,12 @@ module DatastaxRails
5
5
  REGEX = /\A[-+]?(\d+(\.\d+)?|\.\d+)\Z/
6
6
  def encode(float)
7
7
  return -10191980.0 if float.blank?
8
- raise ArgumentError.new("#{self} requires a Float. You passed #{float.to_s}") unless float.kind_of?(Float) || (float.kind_of?(String) && float.match(REGEX))
8
+ raise ArgumentError.new("#{self} requires a Float. You passed #{float.to_s}") unless float.kind_of?(Float) || (float.kind_of?(String) && float.match(REGEX)) || float.kind_of?(Fixnum)
9
9
  float.to_f
10
10
  end
11
11
 
12
12
  def decode(float)
13
- return nil if float.blank? || float == -10191980.0
13
+ return nil if float.blank? || (float.to_f < -10191979.9 && float.to_f > -10191980.1)
14
14
  float.to_f
15
15
  end
16
16
  end
@@ -0,0 +1,14 @@
1
+ module DatastaxRails
2
+ module SolrRepair
3
+ def repair_solr
4
+ my_attrs = self.attributes.symbolize_keys.reject do |k,v|
5
+ v.nil? ||
6
+ !(self.class.attribute_definitions[k].coder.options[:stored] ||
7
+ self.class.attribute_definitions[k].coder.options[:indexed])
8
+ end
9
+ encoded = self.class.encode_attributes(my_attrs).merge(:id => self.id)
10
+ xml_doc = RSolr::Xml::Generator.new.add(encoded)
11
+ self.class.solr_connection.update(:data => xml_doc, :params => {:replacefields => false})
12
+ end
13
+ end
14
+ end
@@ -1,4 +1,4 @@
1
1
  module DatastaxRails
2
2
  # The current version of the gem
3
- VERSION = "1.0.17.7"
3
+ VERSION = "1.0.18.7"
4
4
  end