large_object_store 1.2.0 → 1.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
1
1
  ---
2
2
  SHA1:
3
- metadata.gz: 4c187c263e1ccb215c60054877952d2b83d5cc6e
4
- data.tar.gz: 81506891e872accdb8cc722d578fce1f78e169ac
3
+ metadata.gz: f2345afc60e415d1e98b4522adc30f493728b84f
4
+ data.tar.gz: b994b636a09a5ec225b212b2ed5dc962d611e858
5
5
  SHA512:
6
- metadata.gz: 22c8236b2d9ae23d9fc3af9f77b2931f99c4077828773aadd6420b2e40a4d1089356efde9ef9fff367ff03e33404efce6340274cce30f285891fa6bbd7769f99
7
- data.tar.gz: d42348f49af53a3f1a45b6c1994f690f6678df1ab25eb9af061daeb3e1b936191a7d429edf135a430a5235312f823b47dc407d263b2004465c343ffb5bc75d4b
6
+ metadata.gz: cd896a3b1f8e88589e9c37d15920cdbf26839faf3bf831b051f842d8c4fabfe728a4dbd1c23af577844c0e24a3a5338f33670aa0372b8afe0b9e1e0919c83bbb
7
+ data.tar.gz: 224c388fa9bdc59020a0272d0f1af8e169e9b26e335ac36955f1e96df542cbedddf173bcc8b7456eee5b9e07df898df91a858ad557d66042000b109c70937f53
data/Readme.md CHANGED
@@ -1,7 +1,7 @@
1
1
  Store large objects in memcache or others by slicing them.
2
2
  - uses read_multi for fast access
3
3
  - returns nil if one slice is missing
4
- - only uses single read/write if data is below 1MB
4
+ - low performance overhead, only uses single read/write if data is below 1MB
5
5
 
6
6
  Install
7
7
  =======
@@ -21,6 +21,9 @@ store.write("a", "a"*10_000_000) # => true -> always!
21
21
  store.read("a").size # => 10_000_000 using multi_get
22
22
  store.read("b") # => nil
23
23
  store.fetch("a"){ "something" } # => "something" executes block on miss
24
+ store.write("a" * 10_000_000, compress: true) # compress when greater than 16k
25
+ store.write("a" * 1000, compress: true, compress_limit: 100) # compress when greater than 100
26
+ store.write("a" * 1000, raw: true) # store as string to avoid marshaling overhead
24
27
  ```
25
28
 
26
29
  Author
@@ -5,12 +5,14 @@ require "securerandom"
5
5
  module LargeObjectStore
6
6
  UUID_BYTES = 16
7
7
  UUID_SIZE = UUID_BYTES * 2
8
- CACHE_VERSION = 2
8
+ CACHE_VERSION = 3
9
9
  MAX_OBJECT_SIZE = 1024**2
10
10
  ITEM_HEADER_SIZE = 100
11
11
  DEFAULT_COMPRESS_LIMIT = 16*1024
12
- COMPRESSED = 'z'
13
- NORMAL = '0'
12
+ NORMAL = 0
13
+ COMPRESSED = 1
14
+ RAW = 2
15
+ RADIX = 32 # we can store 32 different states
14
16
 
15
17
  def self.wrap(store)
16
18
  RailsWrapper.new(store)
@@ -24,19 +26,8 @@ module LargeObjectStore
24
26
  end
25
27
 
26
28
  def write(key, value, options = {})
27
- value = Marshal.dump(value)
28
-
29
29
  options = options.dup
30
- compressed = false
31
- if options.delete(:compress)
32
- # Don't pass compression on to Rails, we're doing it ourselves.
33
- compress_limit = options.delete(:compress_limit) || DEFAULT_COMPRESS_LIMIT
34
- if value.bytesize > compress_limit
35
- value = Zlib::Deflate.deflate(value)
36
- compressed = true
37
- end
38
- end
39
- value.prepend(compressed ? COMPRESSED : NORMAL)
30
+ value = serialize(value, options)
40
31
 
41
32
  # calculate slice size; note that key length is a factor because
42
33
  # the key is stored on the same slab page as the value
@@ -46,7 +37,7 @@ module LargeObjectStore
46
37
  pages = (value.size / slice_size.to_f).ceil
47
38
 
48
39
  if pages == 1
49
- @store.write(key(key, 0), value, options)
40
+ !!@store.write(key(key, 0), value, options)
50
41
  else
51
42
  # store meta
52
43
  uuid = SecureRandom.hex(UUID_BYTES)
@@ -82,11 +73,7 @@ module LargeObjectStore
82
73
  pages
83
74
  end
84
75
 
85
- if data.slice!(0, 1) == COMPRESSED
86
- data = Zlib::Inflate.inflate(data)
87
- end
88
-
89
- Marshal.load(data)
76
+ deserialize(data)
90
77
  end
91
78
 
92
79
  def fetch(key, options={})
@@ -103,6 +90,41 @@ module LargeObjectStore
103
90
 
104
91
  private
105
92
 
93
+ # convert an object to a string
94
+ # modifies options
95
+ def serialize(value, options)
96
+ flag = NORMAL
97
+
98
+ if options.delete(:raw)
99
+ flag |= RAW
100
+ value = value.to_s
101
+ else
102
+ value = Marshal.dump(value)
103
+ end
104
+
105
+ if compress?(value, options)
106
+ flag |= COMPRESSED
107
+ value = Zlib::Deflate.deflate(value)
108
+ end
109
+
110
+ value.prepend(flag.to_s(RADIX))
111
+ end
112
+
113
+ # opposite operations and order of serialize
114
+ def deserialize(data)
115
+ flag = data.slice!(0, 1).to_i(RADIX)
116
+ data = Zlib::Inflate.inflate(data) if flag & COMPRESSED == COMPRESSED
117
+ data = Marshal.load(data) if flag & RAW != RAW
118
+ data
119
+ end
120
+
121
+ # Don't pass compression on to Rails, we're doing it ourselves.
122
+ def compress?(value, options)
123
+ return unless options.delete(:compress)
124
+ compress_limit = options.delete(:compress_limit) || DEFAULT_COMPRESS_LIMIT
125
+ value.bytesize > compress_limit
126
+ end
127
+
106
128
  def key(key, i)
107
129
  "#{key}_#{CACHE_VERSION}_#{i}"
108
130
  end
@@ -1,3 +1,3 @@
1
1
  module LargeObjectStore
2
- VERSION = "1.2.0"
2
+ VERSION = "1.3.0"
3
3
  end
metadata CHANGED
@@ -1,14 +1,14 @@
1
1
  --- !ruby/object:Gem::Specification
2
2
  name: large_object_store
3
3
  version: !ruby/object:Gem::Version
4
- version: 1.2.0
4
+ version: 1.3.0
5
5
  platform: ruby
6
6
  authors:
7
7
  - Ana Martinez
8
8
  autorequire:
9
9
  bindir: bin
10
10
  cert_chain: []
11
- date: 2015-11-18 00:00:00.000000000 Z
11
+ date: 2015-12-07 00:00:00.000000000 Z
12
12
  dependencies: []
13
13
  description:
14
14
  email: acemacu@gmail.com