teknobingo-recipes 0.1.39

Sign up to get free protection for your applications and to get access to all the features.
Files changed (41) hide show
  1. checksums.yaml +15 -0
  2. data/.gitignore +3 -0
  3. data/Gemfile +4 -0
  4. data/LICENSE +1 -0
  5. data/README.rdoc +18 -0
  6. data/Rakefile +2 -0
  7. data/lib/teknobingo_recipes.rb +1 -0
  8. data/lib/teknobingo_recipes/helpers.rb +67 -0
  9. data/lib/teknobingo_recipes/recipes/apache.rb +62 -0
  10. data/lib/teknobingo_recipes/recipes/base.rb +16 -0
  11. data/lib/teknobingo_recipes/recipes/check.rb +13 -0
  12. data/lib/teknobingo_recipes/recipes/logrotate.rb +9 -0
  13. data/lib/teknobingo_recipes/recipes/memcached.rb +24 -0
  14. data/lib/teknobingo_recipes/recipes/mogilefs.rb +146 -0
  15. data/lib/teknobingo_recipes/recipes/nfs.rb +99 -0
  16. data/lib/teknobingo_recipes/recipes/nginx.rb +75 -0
  17. data/lib/teknobingo_recipes/recipes/nodejs.rb +15 -0
  18. data/lib/teknobingo_recipes/recipes/passenger.rb +68 -0
  19. data/lib/teknobingo_recipes/recipes/postgresql.rb +44 -0
  20. data/lib/teknobingo_recipes/recipes/profile.rb +43 -0
  21. data/lib/teknobingo_recipes/recipes/rainbows.rb +83 -0
  22. data/lib/teknobingo_recipes/recipes/rbenv.rb +40 -0
  23. data/lib/teknobingo_recipes/recipes/redis.rb +31 -0
  24. data/lib/teknobingo_recipes/version.rb +3 -0
  25. data/teknobingo-recipes.gemspec +27 -0
  26. data/templates/bash/lesslog.sh +1 -0
  27. data/templates/bash/profile +1 -0
  28. data/templates/bash/prompt.sh +13 -0
  29. data/templates/bash/railsc.sh +1 -0
  30. data/templates/bash/taillog.sh +1 -0
  31. data/templates/init.d/nginx +362 -0
  32. data/templates/logrotate.erb +14 -0
  33. data/templates/memcached.erb +16 -0
  34. data/templates/mogilefsd_conf.erb +7 -0
  35. data/templates/mogstored_conf.erb +3 -0
  36. data/templates/nginx.conf.erb +23 -0
  37. data/templates/passenger_mod.erb +3 -0
  38. data/templates/postgresql.yml.erb +8 -0
  39. data/templates/redis_conf.erb +419 -0
  40. data/templates/vhosts/http_site.erb +7 -0
  41. metadata +98 -0
@@ -0,0 +1,3 @@
1
+ LoadModule passenger_module <%= passenger_root %>/buildout/apache2/mod_passenger.so
2
+ PassengerRoot <%= passenger_root %>
3
+ PassengerDefaultRuby /home/capistrano/.rbenv/versions/<%= ruby_version %>/bin/ruby
@@ -0,0 +1,8 @@
1
+ <%= branch %>:
2
+ adapter: postgresql
3
+ encoding: utf-8
4
+ database: <%= postgresql_database %>
5
+ pool: 5
6
+ username: <%= postgresql_user %>
7
+ password: <%= postgresql_password %>
8
+ host: <%= postgresql_host %>
@@ -0,0 +1,419 @@
1
+ # Redis configuration file example
2
+
3
+ # Note on units: when memory size is needed, it is possible to specify
4
+ # it in the usual form of 1k 5GB 4M and so forth:
5
+ #
6
+ # 1k => 1000 bytes
7
+ # 1kb => 1024 bytes
8
+ # 1m => 1000000 bytes
9
+ # 1mb => 1024*1024 bytes
10
+ # 1g => 1000000000 bytes
11
+ # 1gb => 1024*1024*1024 bytes
12
+ #
13
+ # units are case insensitive so 1GB 1Gb 1gB are all the same.
14
+
15
+ # By default Redis does not run as a daemon. Use 'yes' if you need it.
16
+ # Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
17
+ daemonize yes
18
+
19
+ # When running daemonized, Redis writes a pid file in /var/run/redis.pid by
20
+ # default. You can specify a custom pid file location here.
21
+ pidfile /var/run/redis/redis-server.pid
22
+
23
+ # Accept connections on the specified port, default is 6379.
24
+ # If port 0 is specified Redis will not listen on a TCP socket.
25
+ port <%= redis_port %>
26
+
27
+ # If you want you can bind a single interface, if the bind option is not
28
+ # specified all the interfaces will listen for incoming connections.
29
+ #
30
+ # bind 127.0.0.1
31
+
32
+ # Specify the path for the unix socket that will be used to listen for
33
+ # incoming connections. There is no default, so Redis will not listen
34
+ # on a unix socket when not specified.
35
+ #
36
+ # unixsocket /var/run/redis/redis.sock
37
+
38
+ # Close the connection after a client is idle for N seconds (0 to disable)
39
+ timeout 300
40
+
41
+ # Set server verbosity to 'debug'
42
+ # it can be one of:
43
+ # debug (a lot of information, useful for development/testing)
44
+ # verbose (many rarely useful info, but not a mess like the debug level)
45
+ # notice (moderately verbose, what you want in production probably)
46
+ # warning (only very important / critical messages are logged)
47
+ loglevel notice
48
+
49
+ # Specify the log file name. Also 'stdout' can be used to force
50
+ # Redis to log on the standard output. Note that if you use standard
51
+ # output for logging but daemonize, logs will be sent to /dev/null
52
+ logfile /var/log/redis/redis-server.log
53
+
54
+ # To enable logging to the system logger, just set 'syslog-enabled' to yes,
55
+ # and optionally update the other syslog parameters to suit your needs.
56
+ # syslog-enabled no
57
+
58
+ # Specify the syslog identity.
59
+ # syslog-ident redis
60
+
61
+ # Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
62
+ # syslog-facility local0
63
+
64
+ # Set the number of databases. The default database is DB 0, you can select
65
+ # a different one on a per-connection basis using SELECT <dbid> where
66
+ # dbid is a number between 0 and 'databases'-1
67
+ databases 16
68
+
69
+ ################################ SNAPSHOTTING #################################
70
+ #
71
+ # Save the DB on disk:
72
+ #
73
+ # save <seconds> <changes>
74
+ #
75
+ # Will save the DB if both the given number of seconds and the given
76
+ # number of write operations against the DB occurred.
77
+ #
78
+ # In the example below the behaviour will be to save:
79
+ # after 900 sec (15 min) if at least 1 key changed
80
+ # after 300 sec (5 min) if at least 10 keys changed
81
+ # after 60 sec if at least 10000 keys changed
82
+ #
83
+ # Note: you can disable saving at all commenting all the "save" lines.
84
+
85
+ save 900 1
86
+ save 300 10
87
+ save 60 10000
88
+
89
+ # Compress string objects using LZF when dump .rdb databases?
90
+ # For default that's set to 'yes' as it's almost always a win.
91
+ # If you want to save some CPU in the saving child set it to 'no' but
92
+ # the dataset will likely be bigger if you have compressible values or keys.
93
+ rdbcompression yes
94
+
95
+ # The filename where to dump the DB
96
+ dbfilename dump.rdb
97
+
98
+ # The working directory.
99
+ #
100
+ # The DB will be written inside this directory, with the filename specified
101
+ # above using the 'dbfilename' configuration directive.
102
+ #
103
+ # Also the Append Only File will be created inside this directory.
104
+ #
105
+ # Note that you must specify a directory here, not a file name.
106
+ dir /var/lib/redis
107
+
108
+ ################################# REPLICATION #################################
109
+
110
+ # Master-Slave replication. Use slaveof to make a Redis instance a copy of
111
+ # another Redis server. Note that the configuration is local to the slave
112
+ # so for example it is possible to configure the slave to save the DB with a
113
+ # different interval, or to listen to another port, and so on.
114
+ #
115
+ <% unless redis_is_master %>
116
+ slaveof <%= redis_master %> <%= redis_port %>
117
+ <% end %>
118
+
119
+ # If the master is password protected (using the "requirepass" configuration
120
+ # directive below) it is possible to tell the slave to authenticate before
121
+ # starting the replication synchronization process, otherwise the master will
122
+ # refuse the slave request.
123
+ #
124
+ # masterauth <master-password>
125
+
126
+ # When a slave lost the connection with the master, or when the replication
127
+ # is still in progress, the slave can act in two different ways:
128
+ #
129
+ # 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
130
+ # still reply to client requests, possibly with out of date data, or the
131
+ # data set may just be empty if this is the first synchronization.
132
+ #
133
+ # 2) if slave-serve-stale-data is set to 'no' the slave will reply with
134
+ # an error "SYNC with master in progress" to all the kind of commands
135
+ # but to INFO and SLAVEOF.
136
+ #
137
+ slave-serve-stale-data yes
138
+
139
+ ################################## SECURITY ###################################
140
+
141
+ # Require clients to issue AUTH <PASSWORD> before processing any other
142
+ # commands. This might be useful in environments in which you do not trust
143
+ # others with access to the host running redis-server.
144
+ #
145
+ # This should stay commented out for backward compatibility and because most
146
+ # people do not need auth (e.g. they run their own servers).
147
+ #
148
+ # Warning: since Redis is pretty fast an outside user can try up to
149
+ # 150k passwords per second against a good box. This means that you should
150
+ # use a very strong password otherwise it will be very easy to break.
151
+ #
152
+ # requirepass foobared
153
+
154
+ # Command renaming.
155
+ #
156
+ # It is possible to change the name of dangerous commands in a shared
157
+ # environment. For instance the CONFIG command may be renamed into something
158
+ # of hard to guess so that it will be still available for internal-use
159
+ # tools but not available for general clients.
160
+ #
161
+ # Example:
162
+ #
163
+ # rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
164
+ #
165
+ # It is also possible to completely kill a command renaming it into
166
+ # an empty string:
167
+ #
168
+ # rename-command CONFIG ""
169
+
170
+ ################################### LIMITS ####################################
171
+
172
+ # Set the max number of connected clients at the same time. By default there
173
+ # is no limit, and it's up to the number of file descriptors the Redis process
174
+ # is able to open. The special value '0' means no limits.
175
+ # Once the limit is reached Redis will close all the new connections sending
176
+ # an error 'max number of clients reached'.
177
+ #
178
+ # maxclients 128
179
+
180
+ # Don't use more memory than the specified amount of bytes.
181
+ # When the memory limit is reached Redis will try to remove keys with an
182
+ # EXPIRE set. It will try to start freeing keys that are going to expire
183
+ # in little time and preserve keys with a longer time to live.
184
+ # Redis will also try to remove objects from free lists if possible.
185
+ #
186
+ # If all this fails, Redis will start to reply with errors to commands
187
+ # that will use more memory, like SET, LPUSH, and so on, and will continue
188
+ # to reply to most read-only commands like GET.
189
+ #
190
+ # WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
191
+ # 'state' server or cache, not as a real DB. When Redis is used as a real
192
+ # database the memory usage will grow over the weeks, it will be obvious if
193
+ # it is going to use too much memory in the long run, and you'll have the time
194
+ # to upgrade. With maxmemory after the limit is reached you'll start to get
195
+ # errors for write operations, and this may even lead to DB inconsistency.
196
+ #
197
+ # maxmemory <bytes>
198
+
199
+ # MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
200
+ # is reached? You can select among five behavior:
201
+ #
202
+ # volatile-lru -> remove the key with an expire set using an LRU algorithm
203
+ # allkeys-lru -> remove any key accordingly to the LRU algorithm
204
+ # volatile-random -> remove a random key with an expire set
205
+ # allkeys-random -> remove a random key, any key
206
+ # volatile-ttl -> remove the key with the nearest expire time (minor TTL)
207
+ # noeviction -> don't expire at all, just return an error on write operations
208
+ #
209
+ # Note: with all the kind of policies, Redis will return an error on write
210
+ # operations, when there are not suitable keys for eviction.
211
+ #
212
+ # At the date of writing this commands are: set setnx setex append
213
+ # incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd
214
+ # sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby
215
+ # zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby
216
+ # getset mset msetnx exec sort
217
+ #
218
+ # The default is:
219
+ #
220
+ # maxmemory-policy volatile-lru
221
+
222
+ # LRU and minimal TTL algorithms are not precise algorithms but approximated
223
+ # algorithms (in order to save memory), so you can select as well the sample
224
+ # size to check. For instance for default Redis will check three keys and
225
+ # pick the one that was used less recently, you can change the sample size
226
+ # using the following configuration directive.
227
+ #
228
+ # maxmemory-samples 3
229
+
230
+ ############################## APPEND ONLY MODE ###############################
231
+
232
+ # By default Redis asynchronously dumps the dataset on disk. If you can live
233
+ # with the idea that the latest records will be lost if something like a crash
234
+ # happens this is the preferred way to run Redis. If instead you care a lot
235
+ # about your data and don't want to that a single record can get lost you should
236
+ # enable the append only mode: when this mode is enabled Redis will append
237
+ # every write operation received in the file appendonly.aof. This file will
238
+ # be read on startup in order to rebuild the full dataset in memory.
239
+ #
240
+ # Note that you can have both the async dumps and the append only file if you
241
+ # like (you have to comment the "save" statements above to disable the dumps).
242
+ # Still if append only mode is enabled Redis will load the data from the
243
+ # log file at startup ignoring the dump.rdb file.
244
+ #
245
+ # IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
246
+ # log file in background when it gets too big.
247
+
248
+ appendonly no
249
+
250
+ # The name of the append only file (default: "appendonly.aof")
251
+ # appendfilename appendonly.aof
252
+
253
+ # The fsync() call tells the Operating System to actually write data on disk
254
+ # instead to wait for more data in the output buffer. Some OS will really flush
255
+ # data on disk, some other OS will just try to do it ASAP.
256
+ #
257
+ # Redis supports three different modes:
258
+ #
259
+ # no: don't fsync, just let the OS flush the data when it wants. Faster.
260
+ # always: fsync after every write to the append only log . Slow, Safest.
261
+ # everysec: fsync only if one second passed since the last fsync. Compromise.
262
+ #
263
+ # The default is "everysec" that's usually the right compromise between
264
+ # speed and data safety. It's up to you to understand if you can relax this to
265
+ # "no" that will let the operating system flush the output buffer when
266
+ # it wants, for better performances (but if you can live with the idea of
267
+ # some data loss consider the default persistence mode that's snapshotting),
268
+ # or on the contrary, use "always" that's very slow but a bit safer than
269
+ # everysec.
270
+ #
271
+ # If unsure, use "everysec".
272
+
273
+ # appendfsync always
274
+ appendfsync everysec
275
+ # appendfsync no
276
+
277
+ # When the AOF fsync policy is set to always or everysec, and a background
278
+ # saving process (a background save or AOF log background rewriting) is
279
+ # performing a lot of I/O against the disk, in some Linux configurations
280
+ # Redis may block too long on the fsync() call. Note that there is no fix for
281
+ # this currently, as even performing fsync in a different thread will block
282
+ # our synchronous write(2) call.
283
+ #
284
+ # In order to mitigate this problem it's possible to use the following option
285
+ # that will prevent fsync() from being called in the main process while a
286
+ # BGSAVE or BGREWRITEAOF is in progress.
287
+ #
288
+ # This means that while another child is saving the durability of Redis is
289
+ # the same as "appendfsync none", that in practical terms means that it is
290
+ # possible to lose up to 30 seconds of log in the worst scenario (with the
291
+ # default Linux settings).
292
+ #
293
+ # If you have latency problems turn this to "yes". Otherwise leave it as
294
+ # "no" that is the safest pick from the point of view of durability.
295
+ no-appendfsync-on-rewrite no
296
+
297
+ ################################ VIRTUAL MEMORY ###############################
298
+
299
+ # Virtual Memory allows Redis to work with datasets bigger than the actual
300
+ # amount of RAM needed to hold the whole dataset in memory.
301
+ # In order to do so very used keys are taken in memory while the other keys
302
+ # are swapped into a swap file, similarly to what operating systems do
303
+ # with memory pages.
304
+ #
305
+ # To enable VM just set 'vm-enabled' to yes, and set the following three
306
+ # VM parameters accordingly to your needs.
307
+
308
+ vm-enabled no
309
+ # vm-enabled yes
310
+
311
+ # This is the path of the Redis swap file. As you can guess, swap files
312
+ # can't be shared by different Redis instances, so make sure to use a swap
313
+ # file for every redis process you are running. Redis will complain if the
314
+ # swap file is already in use.
315
+ #
316
+ # The best kind of storage for the Redis swap file (that's accessed at random)
317
+ # is a Solid State Disk (SSD).
318
+ #
319
+ # *** WARNING *** if you are using a shared hosting the default of putting
320
+ # the swap file under /tmp is not secure. Create a dir with access granted
321
+ # only to Redis user and configure Redis to create the swap file there.
322
+ vm-swap-file /var/lib/redis/redis.swap
323
+
324
+ # vm-max-memory configures the VM to use at max the specified amount of
325
+ # RAM. Everything that does not fit will be swapped on disk *if* possible, that
326
+ # is, if there is still enough contiguous space in the swap file.
327
+ #
328
+ # With vm-max-memory 0 the system will swap everything it can. Not a good
329
+ # default, just specify the max amount of RAM you can in bytes, but it's
330
+ # better to leave some margin. For instance specify an amount of RAM
331
+ # that's more or less between 60 and 80% of your free RAM.
332
+ vm-max-memory 0
333
+
334
+ # Redis swap files is split into pages. An object can be saved using multiple
335
+ # contiguous pages, but pages can't be shared between different objects.
336
+ # So if your page is too big, small objects swapped out on disk will waste
337
+ # a lot of space. If your page is too small, there is less space in the swap
338
+ # file (assuming you configured the same number of total swap file pages).
339
+ #
340
+ # If you use a lot of small objects, use a page size of 64 or 32 bytes.
341
+ # If you use a lot of big objects, use a bigger page size.
342
+ # If unsure, use the default :)
343
+ vm-page-size 32
344
+
345
+ # Number of total memory pages in the swap file.
346
+ # Given that the page table (a bitmap of free/used pages) is taken in memory,
347
+ # every 8 pages on disk will consume 1 byte of RAM.
348
+ #
349
+ # The total swap size is vm-page-size * vm-pages
350
+ #
351
+ # With the default of 32-bytes memory pages and 134217728 pages Redis will
352
+ # use a 4 GB swap file, that will use 16 MB of RAM for the page table.
353
+ #
354
+ # It's better to use the smallest acceptable value for your application,
355
+ # but the default is large in order to work in most conditions.
356
+ vm-pages 134217728
357
+
358
+ # Max number of VM I/O threads running at the same time.
359
+ # These threads are used to read/write data from/to swap file, since they
360
+ # also encode and decode objects from disk to memory or the reverse, a bigger
361
+ # number of threads can help with big objects even if they can't help with
362
+ # I/O itself as the physical device may not be able to couple with many
363
+ # reads/writes operations at the same time.
364
+ #
365
+ # The special value of 0 turn off threaded I/O and enables the blocking
366
+ # Virtual Memory implementation.
367
+ vm-max-threads 4
368
+
369
+ ############################### ADVANCED CONFIG ###############################
370
+
371
+ # Hashes are encoded in a special way (much more memory efficient) when they
372
+ # have at max a given number of elements, and the biggest element does not
373
+ # exceed a given threshold. You can configure this limits with the following
374
+ # configuration directives.
375
+ hash-max-zipmap-entries 512
376
+ hash-max-zipmap-value 64
377
+
378
+ # Similarly to hashes, small lists are also encoded in a special way in order
379
+ # to save a lot of space. The special representation is only used when
380
+ # you are under the following limits:
381
+ list-max-ziplist-entries 512
382
+ list-max-ziplist-value 64
383
+
384
+ # Sets have a special encoding in just one case: when a set is composed
385
+ # of just strings that happens to be integers in radix 10 in the range
386
+ # of 64 bit signed integers.
387
+ # The following configuration setting sets the limit in the size of the
388
+ # set in order to use this special memory saving encoding.
389
+ set-max-intset-entries 512
390
+
391
+ # Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
392
+ # order to help rehashing the main Redis hash table (the one mapping top-level
393
+ # keys to values). The hash table implementation redis uses (see dict.c)
394
+ # performs a lazy rehashing: the more operation you run into an hash table
395
+ # that is rehashing, the more rehashing "steps" are performed, so if the
396
+ # server is idle the rehashing is never complete and some more memory is used
397
+ # by the hash table.
398
+ #
399
+ # The default is to use this millisecond 10 times every second in order to
400
+ # active rehashing the main dictionaries, freeing memory when possible.
401
+ #
402
+ # If unsure:
403
+ # use "activerehashing no" if you have hard latency requirements and it is
404
+ # not a good thing in your environment that Redis can reply from time to time
405
+ # to queries with 2 milliseconds delay.
406
+ #
407
+ # use "activerehashing yes" if you don't have such hard requirements but
408
+ # want to free memory asap when possible.
409
+ activerehashing yes
410
+
411
+ ################################## INCLUDES ###################################
412
+
413
+ # Include one or more other config files here. This is useful if you
414
+ # have a standard template that goes to all redis server but also need
415
+ # to customize a few per-server settings. Include files can include
416
+ # other files, so use this wisely.
417
+ #
418
+ # include /path/to/local.conf
419
+ # include /path/to/other.conf
@@ -0,0 +1,7 @@
1
+ server {
2
+ listen 80;
3
+ server_name <%= stage == :production ? application : "#{application}-#{stage}" %>.teknobingo.net;
4
+ root /home/capistrano/sites/<%= application %>/current/public;
5
+ passenger_enabled on;
6
+ rails_env <%= rails_env %>;
7
+ }
metadata ADDED
@@ -0,0 +1,98 @@
1
+ --- !ruby/object:Gem::Specification
2
+ name: teknobingo-recipes
3
+ version: !ruby/object:Gem::Version
4
+ version: 0.1.39
5
+ platform: ruby
6
+ authors:
7
+ - Patrick Hanevold
8
+ autorequire:
9
+ bindir: bin
10
+ cert_chain: []
11
+ date: 2014-05-02 00:00:00.000000000 Z
12
+ dependencies:
13
+ - !ruby/object:Gem::Dependency
14
+ name: colorize
15
+ requirement: !ruby/object:Gem::Requirement
16
+ requirements:
17
+ - - ! '>='
18
+ - !ruby/object:Gem::Version
19
+ version: 0.5.8
20
+ type: :runtime
21
+ prerelease: false
22
+ version_requirements: !ruby/object:Gem::Requirement
23
+ requirements:
24
+ - - ! '>='
25
+ - !ruby/object:Gem::Version
26
+ version: 0.5.8
27
+ description: Whole lot of recipes
28
+ email:
29
+ - patrick@teknobingo.net
30
+ executables: []
31
+ extensions: []
32
+ extra_rdoc_files:
33
+ - LICENSE
34
+ - README.rdoc
35
+ files:
36
+ - .gitignore
37
+ - Gemfile
38
+ - LICENSE
39
+ - README.rdoc
40
+ - Rakefile
41
+ - lib/teknobingo_recipes.rb
42
+ - lib/teknobingo_recipes/helpers.rb
43
+ - lib/teknobingo_recipes/recipes/apache.rb
44
+ - lib/teknobingo_recipes/recipes/base.rb
45
+ - lib/teknobingo_recipes/recipes/check.rb
46
+ - lib/teknobingo_recipes/recipes/logrotate.rb
47
+ - lib/teknobingo_recipes/recipes/memcached.rb
48
+ - lib/teknobingo_recipes/recipes/mogilefs.rb
49
+ - lib/teknobingo_recipes/recipes/nfs.rb
50
+ - lib/teknobingo_recipes/recipes/nginx.rb
51
+ - lib/teknobingo_recipes/recipes/nodejs.rb
52
+ - lib/teknobingo_recipes/recipes/passenger.rb
53
+ - lib/teknobingo_recipes/recipes/postgresql.rb
54
+ - lib/teknobingo_recipes/recipes/profile.rb
55
+ - lib/teknobingo_recipes/recipes/rainbows.rb
56
+ - lib/teknobingo_recipes/recipes/rbenv.rb
57
+ - lib/teknobingo_recipes/recipes/redis.rb
58
+ - lib/teknobingo_recipes/version.rb
59
+ - teknobingo-recipes.gemspec
60
+ - templates/bash/lesslog.sh
61
+ - templates/bash/profile
62
+ - templates/bash/prompt.sh
63
+ - templates/bash/railsc.sh
64
+ - templates/bash/taillog.sh
65
+ - templates/init.d/nginx
66
+ - templates/logrotate.erb
67
+ - templates/memcached.erb
68
+ - templates/mogilefsd_conf.erb
69
+ - templates/mogstored_conf.erb
70
+ - templates/nginx.conf.erb
71
+ - templates/passenger_mod.erb
72
+ - templates/postgresql.yml.erb
73
+ - templates/redis_conf.erb
74
+ - templates/vhosts/http_site.erb
75
+ homepage: ''
76
+ licenses: []
77
+ metadata: {}
78
+ post_install_message:
79
+ rdoc_options: []
80
+ require_paths:
81
+ - lib
82
+ required_ruby_version: !ruby/object:Gem::Requirement
83
+ requirements:
84
+ - - ! '>='
85
+ - !ruby/object:Gem::Version
86
+ version: '0'
87
+ required_rubygems_version: !ruby/object:Gem::Requirement
88
+ requirements:
89
+ - - ! '>='
90
+ - !ruby/object:Gem::Version
91
+ version: 1.3.7
92
+ requirements: []
93
+ rubyforge_project:
94
+ rubygems_version: 2.2.2
95
+ signing_key:
96
+ specification_version: 4
97
+ summary: Teknobingo capistrano recipes
98
+ test_files: []