s3-tar-backup 1.1.0
- checksums.yaml +7 -0
- data/LICENSE.txt +21 -0
- data/README.md +250 -0
- data/bin/s3-tar-backup +7 -0
- data/lib/s3_tar_backup.rb +312 -0
- data/lib/s3_tar_backup/backup.rb +80 -0
- data/lib/s3_tar_backup/ini_parser.rb +207 -0
- data/lib/s3_tar_backup/version.rb +3 -0
- data/lib/trollop.rb +782 -0
- metadata +67 -0
checksums.yaml
ADDED
@@ -0,0 +1,7 @@
---
SHA1:
  metadata.gz: 842cea517d3276df06766424c4354d644069df99
  data.tar.gz: 1ad93ccb5dd87a00b794692643cdf9dfc4936bcd
SHA512:
  metadata.gz: bc7a8b17a71626eef16fc974d4725a2cb11482d4cdb51201093c3a51e1fe12565228666a2302f5d1498f298b8579ceb23c2e913a75682961e5425f00e842ca84
  data.tar.gz: f7b8d94f5f23c8f234c2f1003b6a48132baacba2df14db0b9754b45cdea812f783e20ffee995b20595864fbf8141280ffd6d65160b33ef097abc1d5ed3966d02
data/LICENSE.txt
ADDED
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) <year> <copyright holders>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
data/README.md
ADDED
@@ -0,0 +1,250 @@
S3-tar-backup
=============

About
-----

This tool allows you to back up a set of directories to an Amazon S3 bucket, using incremental backups.
You can then restore the files at a later date.

This tool was built as a replacement for duplicity, after duplicity started throwing errors and generally being a bit unreliable.
It uses command-line tar to create incremental backup snapshots, and the aws-sdk gem to upload these to S3.

In practice, it turns out that this tool has far lower bandwidth and CPU requirements, and can restore a backup in a fraction of the time that duplicity would take.

Installation
------------

This tool is not yet a ruby gem, and unless people ask me, it will remain that way.

Therefore, to install:

```
$ git clone git://github.com/canton7/s3-tar-backup.git
$ cd s3-tar-backup
$ sudo rake install
```

Configuration
-------------

### Introduction

Configuration is done using an ini file, which can be in the location of your choice.

The config file consists of two sections: a global `[settings]` section, followed by profiles.
With the exception of two keys, every configuration key can be specified either in `[settings]`, in a profile, or both (in which case the profile value either replaces, or is added to, the value from `[settings]`).
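To make that concrete, here's a short Ruby sketch of the merge rules (the helper names are made up for illustration; `gen_backup_config` in `lib/s3_tar_backup.rb`, included in this release, is what actually does this):

```ruby
# Sketch of the merge rules: scalar keys from a profile replace the
# [settings] value; list keys (source, exclude, pre-backup, post-backup)
# from both places are combined.
def merged_value(profile, settings, key)
  profile[key] || settings[key]
end

def merged_list(profile, settings, key)
  Array(profile[key]) + Array(settings[key])
end

settings = { 'compression' => 'gzip',  'source' => ['/etc'] }
profile  = { 'compression' => 'lzma2', 'source' => ['/srv/http'] }

merged_value(profile, settings, 'compression') # => "lzma2" (replaced)
merged_list(profile, settings, 'source')       # => ["/srv/http", "/etc"] (combined)
```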

### Global config

```ini
[settings]
; These two keys must be located in this section
; You can use environment variables instead of these -- see below
aws_access_key_id = <your aws access key>
aws_secret_access_key = <your aws secret key>

; Set this to the AWS region you want to use
; See http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region
aws_region = <your aws region>

; The rest of the keys can either be located here, or in your profiles, or both
; The value from the profile will replace the value specified here
full_if_older_than = <timespec>

; Choose one of the following two settings
remove_older_than = <timespec>
remove_all_but_n_full = <number>

backup_dir = </path/to/dir>
dest = <bucket_name>/<path>
pre-backup = <some command>
post-backup = <some command>

compression = <compression_type>

always_full = <bool>

; You can have multiple lines of the following types.
; Values from here and from your profiles will be combined
source = </path/to/source>
source = </path/to/another/source>
exclude = </some/dir>
exclude = </some/other/dir>

```

`aws_access_key_id` and `aws_secret_access_key` are fairly obvious -- you'll have been given these when you signed up for S3.
If you can't find them, [look here](http://aws.amazon.com/security-credentials).
You can use the environment variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY instead if you prefer -- the env vars will override the config options.

`full_if_older_than` tells s3-tar-backup how long it should leave between full backups.
`<timespec>` is stolen from duplicity. It is given as an interval, which is a number followed by one of the characters s, m, h, D, W, M, or Y (indicating seconds, minutes, hours, days, weeks, months, or years respectively), or a series of such pairs.
For instance, "1h45m" indicates the time that was one hour and 45 minutes ago.
The calendar here is unsophisticated: a month is always 30 days, a year is always 365 days, and a day is always 86400 seconds.
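To make the arithmetic concrete, here's a small Ruby sketch of how such an interval resolves to an absolute cutoff time; it mirrors the `parse_interval` helper in `lib/s3_tar_backup.rb` (shown later in this release), and the helper name here is just for illustration:

```ruby
# Sketch: resolve a duplicity-style timespec such as "1h45m" to the point in
# time that long ago, using the simplified calendar described above.
UNIT_SECONDS = { 's' => 1, 'm' => 60, 'h' => 3600, 'D' => 86_400,
                 'W' => 604_800, 'M' => 2_592_000, 'Y' => 31_536_000 }

def timespec_to_time(spec, now = Time.now)
  spec.scan(/(\d+)([smhDWMY])/).reduce(now) do |t, (n, unit)|
    t - n.to_i * UNIT_SECONDS[unit]
  end
end

timespec_to_time('1h45m') # => the time 1 hour 45 minutes ago
timespec_to_time('2W')    # => the time 14 days ago
```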

`remove_older_than` tells s3-tar-backup to remove old backups which are older than `<timespec>` (see above for the format of `<timespec>`).

`remove_all_but_n_full` tells s3-tar-backup to remove all backups which were made before the last `n` full backups.

`backup_dir` is the directory used (a) to store temporary data, and (b) to store a record of what files were backed up last time (tar's snar file).
You can delete this dir at any time, but that will slow down the next backup slightly.
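If you haven't met snar files before: they're what GNU tar uses to track incremental state. The exact command the gem builds lives in its `Backup` class; the snippet below is only a generic illustration of tar's `--listed-incremental` option, with made-up paths:

```ruby
# Illustration only: a GNU tar incremental snapshot driven by a snar file.
snar    = '/root/.backup/home.snar'
archive = '/root/.backup/home.tar.gz'
source  = '/home/me'

# The first run (no snar file yet) produces a full archive; later runs reuse
# the same snar file and only pick up files changed since the previous run.
system("tar czf #{archive} --listed-incremental=#{snar} #{source}")
```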

`dest` is the place where your backups are stored. It consists of the name of the S3 bucket (buckets aren't created automatically), followed by the folder to store objects in, for example `my-backups/tar/`.

`pre-backup` and `post-backup` are two hooks, which are run before and after a backup, respectively.
These lines are optional -- you can have no pre-backup or post-backup lines anywhere in your config if you wish.
Note that `post-backup` is only run after a successful backup.
These can be used to do things such as back up a mysql database.
Note that you can have multiple `pre-backup` and `post-backup` lines -- all of the listed commands will be executed.

`compression` gives the compression type.
Valid values are `gzip`, `bzip2`, `lzma`, and `lzma2`.
s3-tar-backup is capable of auto-detecting the format of a previously-backed-up archive, and so changing this value will not invalidate previous backups.
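One way this kind of detection can work is simply by archive extension. The mapping below is an assumption made for illustration, not a description of the gem's internals (that logic lives in the gem's `Backup` class):

```ruby
# Sketch only: guess the compression type of an existing archive from its
# file extension. The extension-to-type mapping is assumed for illustration.
COMPRESSION_EXTENSIONS = {
  'tar.gz'   => :gzip,
  'tar.bz2'  => :bzip2,
  'tar.lzma' => :lzma,
  'tar.xz'   => :lzma2,
}

def guess_compression(archive_name)
  match = COMPRESSION_EXTENSIONS.find { |ext, _| archive_name.end_with?(ext) }
  match && match[1]
end

guess_compression('example.tar.bz2') # => :bzip2
```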

`always_full` is an optional key which may have the value `True` or `False`.
This is used to say that incremental backups should never be used.

`source` contains the folders to be backed up.

`exclude` lines specify files/dirs to exclude from the backup.
See the `--exclude` option to tar.
The exclude lines are optional -- you can have no exclude lines anywhere in your config if you wish.

**Note:** You can have multiple profiles which use the same `dest` and `backup_dir`.

### Profile config

Next, define your profiles.

Profiles are used to specify, or override from the global config, those config keys which may be specified in either the global config or in a profile.

A profile takes the form:

```ini
[profile "profile_name"]
; You can optionally specify the following keys
backup_dir = </path/to/dir>
source = </path/to/source>
source = </path/to/another/source>
dest = <bucket_name>/<path>
exclude = </some/dir>
pre-backup = <some command>
post-backup = <some command>
```

`profile_name` is the name of the profile. You'll use this later.

### Example config file

```ini
[settings]
aws_access_key_id = ABCD
aws_secret_access_key = ABCDE
aws_region = eu-west-1
; Do a new full backup every 2 weeks
full_if_older_than = 2W
; Keep 5 sets of full backups
remove_all_but_n_full = 5
backup_dir = /root/.backup
dest = my-backups/tar
; You may prefer bzip2, as it has a much lower CPU cost
compression = lzma2

[profile "www"]
source = /srv/http

[profile "home"]
source = /home/me
source = /root
exclude = .backup
; Do full backups less frequently
full_if_older_than = 4W

[profile "mysql"]
pre-backup = mysqldump -uuser -ppassword --all-databases > /tmp/mysql_dump.sql
source = /tmp/mysql_dump.sql
post-backup = rm /tmp/mysql_dump.sql
; My MySQL dumps are so small that incremental backups actually add more overhead
always_full = True
```

Usage
-----

s3-tar-backup works in a number of different modes: backup, restore, cleanup, backup-config, and list-backups.

### Backup

```
s3-tar-backup --config <config_file> [--profile <profile>] --backup [--full] [--verbose]
```

You can use `-c` instead of `--config`, and `-p` instead of `--profile`.

`<config_file>` is the path to the file you created above, and `<profile>` is the name of a profile inside it.
You can also specify multiple profiles.

If no profile is specified, all profiles are backed up.

`--full` will force s3-tar-backup to do a full backup (instead of an incremental one), regardless of which it thinks it should do based on your config file.

`--verbose` will get tar to list the files that it is backing up.

Example:

```
s3-tar-backup -c ~/.backup/config.ini -p www home --backup
```

### Cleanup

**Note:** Cleanups are automatically done at the end of each backup.

```
s3-tar-backup --config <config_file> [--profile <profile>] --cleanup
```

s3-tar-backup will go through all old backups, and remove those specified by `remove_all_but_n_full` or `remove_older_than`.
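For example, the `remove_all_but_n_full` rule keeps everything from the n-th-newest full backup onwards and removes the rest, so incremental chains are never orphaned. A small Ruby sketch of the idea (mirroring `perform_cleanup` in `lib/s3_tar_backup.rb`; the hash layout is just for illustration):

```ruby
# Each backup is represented here as a hash with :type (:full or :incr) and :date.
def backups_to_remove(backups, keep_n_full)
  last_full_to_keep = backups.select { |b| b[:type] == :full }[-keep_n_full]
  return [] unless last_full_to_keep
  backups.select { |b| b[:date] < last_full_to_keep[:date] }
end

backups = [
  { :type => :full, :date => Time.new(2013, 1, 1) },
  { :type => :incr, :date => Time.new(2013, 1, 8) },
  { :type => :full, :date => Time.new(2013, 2, 1) },
]
backups_to_remove(backups, 1).length # => 2 (everything before the newest full)
```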

### Restore

```
s3-tar-backup --config <config_file> [--profile <profile>] --restore <restore_dir> [--restore_date <restore_date>] [--verbose]
```

This command will get s3-tar-backup to fetch all the necessary data to restore the latest version of your backup (or an older one if you use `--restore_date`), and stick it into `<restore_dir>`.

Using `--restore_date`, you can tell s3-tar-backup to restore the first backup before the specified date.
The date format to use is `YYYYMM[DD[hh[mm[ss]]]]`, for example `20110406` means `2011-04-06 00:00:00`, while `201104062143` means `2011-04-06 21:43:00`.
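Missing trailing components simply fall back to the start of that period. A sketch of the parsing, mirroring what the restore code in `lib/s3_tar_backup.rb` does (the helper name is made up):

```ruby
# Sketch: parse a YYYYMM[DD[hh[mm[ss]]]] string into a Time. Missing trailing
# components fall back to Time.new's defaults (day 1, hour 0, minute 0, second 0).
def parse_restore_date(str)
  m = str.match(/(\d{4})(\d\d)(\d\d)?(\d\d)?(\d\d)?(\d\d)?/)
  raise "Unknown date format" if m.nil?
  Time.new(*m.captures.map { |s| s.to_i if s })
end

parse_restore_date('20110406')     # => 2011-04-06 00:00:00
parse_restore_date('201104062143') # => 2011-04-06 21:43:00
```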

`--verbose` makes tar spit out the files that it restores.

Examples:

```
s3-tar-backup -c ~/.backup/config.ini -p www home --restore my_restore/
s3-tar-backup -c ~/.backup/config.ini -p mysql --restore my_restore/ --restore_date 201104062143
```

### Backup Config file

```
s3-tar-backup --config <config_file> [--profile <profile>] --backup-config [--verbose]
```

This command is used to back up the specified configuration file.
Where it is backed up to depends on your setup (see the sketch after this list):

- If you've specified `dest` under `[settings]`, this location is used.
- If you've only got one profile, and `dest` is under this profile, then this location is used.
- If you have multiple profiles, and there's no `dest` under `[settings]`, you must specify a profile, and this profile's `dest` will be used.
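A compact Ruby sketch of that resolution, mirroring the `--backup-config` branch of `Main#run` in `lib/s3_tar_backup.rb` (here `config` is treated as a plain Hash of `"section.key" => value` pairs, which is a simplification of the real ini parser):

```ruby
def config_backup_dest(config, profiles)
  dest = config['settings.dest']
  return dest if dest
  raise "Specify a single profile when dest is not in [settings]" unless profiles.length == 1
  config["profile.#{profiles.first}.dest"]
end

config = { 'profile.www.dest' => 'my-backups/tar' }
config_backup_dest(config, ['www']) # => "my-backups/tar"
```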

### List backups

```
s3-tar-backup --config <config_file> [--profile <profile>] --list-backups [--verbose]
```

This command is used to view information on the current backed-up archives for the specified profile(s) (or all profiles).
This is handy if you need to restore a backup, and want to know things such as how much data you'll have to download, or what dates are available to restore from.
data/lib/s3_tar_backup.rb
ADDED
@@ -0,0 +1,312 @@
require 'aws-sdk'
require 'trollop'
require 's3_tar_backup/ini_parser'
require 's3_tar_backup/backup'
require 's3_tar_backup/version'

module S3TarBackup
  class Main
    UPLOAD_TRIES = 5

    def run
      opts = Trollop::options do
        version VERSION
        banner "Backs up files to, and restores files from, Amazon's S3 storage, using tar incremental backups\n\n" \
          "Usage:\ns3-tar-backup -c config.ini [-p profile] --backup [--full] [-v]\n" \
          "s3-tar-backup -c config.ini [-p profile] --cleanup [-v]\n" \
          "s3-tar-backup -c config.ini [-p profile] --restore restore_dir\n\t[--restore_date date] [-v]\n" \
          "s3-tar-backup -c config.ini [-p profile] --backup-config [--verbose]\n" \
          "s3-tar-backup -c config.ini [-p profile] --list-backups\n\n" \
          "Option details:\n"
        opt :config, "Configuration file", :short => 'c', :type => :string, :required => true
        opt :backup, "Make an incremental backup"
        opt :full, "Make the backup a full backup"
        opt :profile, "The backup profile(s) to use (default all)", :short => 'p', :type => :strings
        opt :cleanup, "Clean up old backups"
        opt :restore, "Restore a backup to the specified dir", :type => :string
        opt :restore_date, "Restore a backup from the specified date. Format YYYYMM[DD[hh[mm[ss]]]]", :type => :string
        opt :backup_config, "Backs up the specified configuration file"
        opt :list_backups, "List the stored backup info for one or more profiles"
        opt :verbose, "Show verbose output", :short => 'v'
        conflicts :backup, :cleanup, :restore, :backup_config, :list_backups
      end

      Trollop::die "--full requires --backup" if opts[:full] && !opts[:backup]
      Trollop::die "--restore-date requires --restore" if opts[:restore_date_given] && !opts[:restore_given]
      unless opts[:backup] || opts[:cleanup] || opts[:restore_given] || opts[:backup_config] || opts[:list_backups]
        Trollop::die "Need one of --backup, --cleanup, --restore, --backup-config, --list-backups"
      end

      begin
        raise "Config file #{opts[:config]} not found" unless File.exists?(opts[:config])
        config = IniParser.new(opts[:config]).load
        profiles = opts[:profile] || config.find_sections(/^profile\./).keys.map{ |k| k.to_s.split('.', 2)[1] }
        @s3 = connect_s3(
          ENV['AWS_ACCESS_KEY_ID'] || config['settings.aws_access_key_id'],
          ENV['AWS_SECRET_ACCESS_KEY'] || config['settings.aws_secret_access_key'],
          config.get('settings.aws_region', false)
        )

        # Backing up the config file is a bit of a special case
        if opts[:backup_config]
          dest = config.get('settings.dest', false)
          raise "You must specify a single profile (used to determine the location to back up to) " \
            "if backing up config and dest key is not in [settings]" if !dest && profiles.count != 1
          dest ||= config["profile.#{profiles[0]}.dest"]
          puts "===== Backing up config file #{opts[:config]} ====="
          bucket, prefix = dest.split('/', 2)
          puts "Uploading #{opts[:config]} to #{bucket}/#{prefix}/#{File.basename(opts[:config])}"
          upload(opts[:config], bucket, "#{prefix}/#{File.basename(opts[:config])}")
          return
        end

        profiles.dup.each do |profile|
          raise "No such profile: #{profile}" unless config.has_section?("profile.#{profile}")
          opts[:profile] = profile
          backup_config = gen_backup_config(opts[:profile], config)
          prev_backups = get_objects(backup_config[:bucket], backup_config[:dest_prefix], opts[:profile])
          perform_backup(opts, prev_backups, backup_config) if opts[:backup]
          perform_cleanup(prev_backups, backup_config) if opts[:backup] || opts[:cleanup]
          perform_restore(opts, prev_backups, backup_config) if opts[:restore_given]
          perform_list_backups(prev_backups, backup_config) if opts[:list_backups]
        end
      rescue Exception => e
        Trollop::die e.to_s
      end
    end

    def connect_s3(access_key, secret_key, region)
      warn "No AWS region specified (config key settings.aws_region). Assuming eu-west-1" unless region
      AWS::S3.new(access_key_id: access_key, secret_access_key: secret_key, region: region || 'eu-west-1')
    end

    def gen_backup_config(profile, config)
      bucket, dest_prefix = (config.get("profile.#{profile}.dest", false) || config['settings.dest']).split('/', 2)
      backup_config = {
        :backup_dir => config.get("profile.#{profile}.backup_dir", false) || config['settings.backup_dir'],
        :name => profile,
        :sources => [*config.get("profile.#{profile}.source", [])] + [*config.get("settings.source", [])],
        :exclude => [*config.get("profile.#{profile}.exclude", [])] + [*config.get("settings.exclude", [])],
        :bucket => bucket,
        :dest_prefix => dest_prefix.chomp('/'),
        :pre_backup => [*config.get("profile.#{profile}.pre-backup", [])] + [*config.get('settings.pre-backup', [])],
        :post_backup => [*config.get("profile.#{profile}.post-backup", [])] + [*config.get('settings.post-backup', [])],
        :full_if_older_than => config.get("profile.#{profile}.full_if_older_than", false) || config['settings.full_if_older_than'],
        :remove_older_than => config.get("profile.#{profile}.remove_older_than", false) || config.get('settings.remove_older_than', false),
        :remove_all_but_n_full => config.get("profile.#{profile}.remove_all_but_n_full", false) || config.get('settings.remove_all_but_n_full', false),
        :compression => (config.get("profile.#{profile}.compression", false) || config.get('settings.compression', 'gzip')).to_sym,
        :always_full => config.get('settings.always_full', false) || config.get("profile.#{profile}.always_full", false),
      }
      backup_config
    end

    def perform_backup(opts, prev_backups, backup_config)
      puts "===== Backing up profile #{backup_config[:name]} ====="
      backup_config[:pre_backup].each_with_index do |cmd, i|
        puts "Executing pre-backup hook #{i+1}"
        system(cmd)
      end
      full_required = full_required?(backup_config[:full_if_older_than], prev_backups)
      puts "Last full backup is too old. Forcing a full backup" if full_required && !opts[:full] && !backup_config[:always_full]
      if full_required || opts[:full] || backup_config[:always_full]
        backup_full(backup_config, opts[:verbose])
      else
        backup_incr(backup_config, opts[:verbose])
      end
      backup_config[:post_backup].each_with_index do |cmd, i|
        puts "Executing post-backup hook #{i+1}"
        system(cmd)
      end
    end

    def perform_cleanup(prev_backups, backup_config)
      puts "===== Cleaning up profile #{backup_config[:name]} ====="
      remove = []
      if age_str = backup_config[:remove_older_than]
        age = parse_interval(age_str)
        remove = prev_backups.select{ |o| o[:date] < age }
        # Don't want to delete anything before the last full backup
        unless remove.empty?
          kept = remove.slice!(remove.rindex{ |o| o[:type] == :full }..-1).count
          puts "Keeping #{kept} old backups as part of a chain" if kept > 1
        end
      elsif keep_n = backup_config[:remove_all_but_n_full]
        keep_n = keep_n.to_i
        # Get the date of the last full backup to keep
        if last_full_to_keep = prev_backups.select{ |o| o[:type] == :full }[-keep_n]
          # If there is a last full one...
          remove = prev_backups.select{ |o| o[:date] < last_full_to_keep[:date] }
        end
      end

      if remove.empty?
        puts "Nothing to do"
      else
        puts "Removing #{remove.count} old backup files"
      end
      remove.each do |object|
        @s3.buckets[backup_config[:bucket]].objects["#{backup_config[:dest_prefix]}/#{object[:name]}"].delete
      end
    end

    # Config should have the keys
    # backup_dir, name, sources, exclude, bucket, dest_prefix
    def backup_incr(config, verbose=false)
      puts "Starting new incremental backup"
      backup = Backup.new(config[:backup_dir], config[:name], config[:sources], config[:exclude], config[:compression])

      # Try and get hold of the snar file
      unless backup.snar_exists?
        puts "Failed to find snar file. Attempting to download..."
        s3_snar = "#{config[:dest_prefix]}/#{backup.snar}"
        object = @s3.buckets[config[:bucket]].objects[s3_snar]
        if object.exists?
          puts "Found file on S3. Downloading"
          open(backup.snar_path, 'wb') do |f|
            object.read do |chunk|
              f.write(chunk)
            end
          end
        else
          puts "Failed to download snar file. Defaulting to full backup"
        end
      end

      backup(config, backup, verbose)
    end

    def backup_full(config, verbose=false)
      puts "Starting new full backup"
      backup = Backup.new(config[:backup_dir], config[:name], config[:sources], config[:exclude], config[:compression])
      # Nuke the snar file -- forces a full backup
      File.delete(backup.snar_path) if File.exists?(backup.snar_path)
      backup(config, backup, verbose)
    end

    def backup(config, backup, verbose=false)
      system(backup.backup_cmd(verbose))
      puts "Uploading #{config[:bucket]}/#{config[:dest_prefix]}/#{File.basename(backup.archive)} (#{bytes_to_human(File.size(backup.archive))})"
      upload(backup.archive, config[:bucket], "#{config[:dest_prefix]}/#{File.basename(backup.archive)}")
      puts "Uploading snar (#{bytes_to_human(File.size(backup.snar_path))})"
      upload(backup.snar_path, config[:bucket], "#{config[:dest_prefix]}/#{File.basename(backup.snar)}")
      File.delete(backup.archive)
    end

    def upload(source, bucket, dest_name)
      tries = 0
      begin
        @s3.buckets[bucket].objects.create(dest_name, Pathname.new(source))
      rescue Errno::ECONNRESET => e
        tries += 1
        if tries <= UPLOAD_TRIES
          puts "Upload Exception: #{e}"
          puts "Retrying #{tries}/#{UPLOAD_TRIES}..."
          retry
        else
          raise e
        end
      end
      puts "Succeeded" if tries > 0
    end

    def perform_restore(opts, prev_backups, backup_config)
      puts "===== Restoring profile #{backup_config[:name]} ====="
      # If a restore date was given, parse it
      if opts[:restore_date_given]
        m = opts[:restore_date].match(/(\d\d\d\d)(\d\d)(\d\d)?(\d\d)?(\d\d)?(\d\d)?/)
        raise "Unknown date format in --restore-date" if m.nil?
        restore_to = Time.new(*m[1..-1].map{ |s| s.to_i if s })
      else
        restore_to = Time.now
      end

      # Find the index of the first backup, incremental or full, before that date
      restore_end_index = prev_backups.rindex{ |o| o[:date] < restore_to }
      raise "Failed to find a backup for that date" unless restore_end_index

      # Find the first full backup before that one
      restore_start_index = prev_backups[0..restore_end_index].rindex{ |o| o[:type] == :full }

      restore_dir = opts[:restore].chomp('/') << '/'

      Dir.mkdir(restore_dir) unless Dir.exists?(restore_dir)
      raise "Destination dir is not a directory" unless File.directory?(restore_dir)

      prev_backups[restore_start_index..restore_end_index].each do |object|
        puts "Fetching #{backup_config[:bucket]}/#{backup_config[:dest_prefix]}/#{object[:name]} (#{bytes_to_human(object[:size])})"
        dl_file = "#{backup_config[:backup_dir]}/#{object[:name]}"
        open(dl_file, 'wb') do |f|
          @s3.buckets[backup_config[:bucket]].objects["#{backup_config[:dest_prefix]}/#{object[:name]}"].read do |chunk|
            f.write(chunk)
          end
        end
        puts "Extracting..."
        system(Backup.restore_cmd(restore_dir, dl_file, opts[:verbose]))

        File.delete(dl_file)
      end
    end

    def perform_list_backups(prev_backups, backup_config)
      # prev_backups already contains just the files for the current profile
      puts "===== Backups list for #{backup_config[:name]} ====="
      puts "Type: N: Date:#{' '*18}Size: Chain Size: Format:\n\n"
      prev_type = ''
      total_size = 0
      chain_length = 0
      chain_cum_size = 0
      prev_backups.each do |object|
        type = object[:type] == prev_type && object[:type] == :incr ? " -- " : object[:type].to_s.capitalize
        prev_type = object[:type]
        chain_length += 1
        chain_length = 0 if object[:type] == :full
        chain_cum_size = 0 if object[:type] == :full
        chain_cum_size += object[:size]

        chain_length_str = (chain_length == 0 ? '' : chain_length.to_s).ljust(3)
        chain_cum_size_str = (object[:type] == :full ? '' : bytes_to_human(chain_cum_size)).ljust(8)
        puts "#{type} #{chain_length_str} #{object[:date].strftime('%F %T')} #{bytes_to_human(object[:size]).ljust(8)} " \
          "#{chain_cum_size_str} (#{object[:compression]})"
        total_size += object[:size]
      end
      puts "\n"
      puts "Total size: #{bytes_to_human(total_size)}"
      puts "\n"
    end

    def get_objects(bucket, prefix, profile)
      objects = @s3.buckets[bucket].objects.with_prefix(prefix).map do |object|
        Backup.parse_object(object, profile)
      end
      objects.compact.sort_by{ |o| o[:date] }
    end

    # Converts a duplicity-style timespec (e.g. "1h45m", "2W") into the Time
    # that long ago from now
    def parse_interval(interval_str)
      time = Time.now
      time -= $1.to_i if interval_str =~ /(\d+)s/
      time -= $1.to_i*60 if interval_str =~ /(\d+)m/
      time -= $1.to_i*3600 if interval_str =~ /(\d+)h/
      time -= $1.to_i*86400 if interval_str =~ /(\d+)D/
      time -= $1.to_i*604800 if interval_str =~ /(\d+)W/
      time -= $1.to_i*2592000 if interval_str =~ /(\d+)M/
      time -= $1.to_i*31536000 if interval_str =~ /(\d+)Y/
      time
    end

    # A full backup is required if no full backup exists that is newer than
    # full_if_older_than allows
    def full_required?(interval_str, objects)
      time = parse_interval(interval_str)
      objects.select{ |o| o[:type] == :full && o[:date] > time }.empty?
    end

    def bytes_to_human(n)
      count = 0
      while n >= 1024 && count < 4
        n /= 1024.0
        count += 1
      end
      format("%.2f", n) << %w(B KB MB GB TB)[count]
    end
  end
end