arver 0.0.5

Files changed (42)
  1. data/CHANGELOG.textile +9 -0
  2. data/README.textile +120 -0
  3. data/bin/arver +10 -0
  4. data/lib/arver.rb +6 -0
  5. data/lib/arver/action.rb +78 -0
  6. data/lib/arver/adduser_action.rb +54 -0
  7. data/lib/arver/bootstrap.rb +35 -0
  8. data/lib/arver/cli.rb +133 -0
  9. data/lib/arver/close_action.rb +61 -0
  10. data/lib/arver/command_wrapper.rb +42 -0
  11. data/lib/arver/config.rb +54 -0
  12. data/lib/arver/create_action.rb +58 -0
  13. data/lib/arver/deluser_action.rb +34 -0
  14. data/lib/arver/gc_action.rb +12 -0
  15. data/lib/arver/gpg_key_manager.rb +80 -0
  16. data/lib/arver/host.rb +74 -0
  17. data/lib/arver/hostgroup.rb +17 -0
  18. data/lib/arver/info_action.rb +22 -0
  19. data/lib/arver/io_logger.rb +34 -0
  20. data/lib/arver/key_generator.rb +29 -0
  21. data/lib/arver/key_info_action.rb +29 -0
  22. data/lib/arver/key_saver.rb +128 -0
  23. data/lib/arver/keystore.rb +70 -0
  24. data/lib/arver/list_action.rb +7 -0
  25. data/lib/arver/local_config.rb +41 -0
  26. data/lib/arver/log.rb +39 -0
  27. data/lib/arver/log_levels.rb +9 -0
  28. data/lib/arver/luks_wrapper.rb +29 -0
  29. data/lib/arver/node_with_script_hooks.rb +25 -0
  30. data/lib/arver/open_action.rb +70 -0
  31. data/lib/arver/partition.rb +53 -0
  32. data/lib/arver/partition_hierarchy_node.rb +112 -0
  33. data/lib/arver/runtime_config.rb +22 -0
  34. data/lib/arver/ssh_command_wrapper.rb +21 -0
  35. data/lib/arver/string.rb +8 -0
  36. data/lib/arver/target_list.rb +33 -0
  37. data/lib/arver/test_config_loader.rb +21 -0
  38. data/lib/arver/test_partition.rb +9 -0
  39. data/lib/arver/tree.rb +32 -0
  40. data/lib/arver/version.rb +3 -0
  41. data/man/arver.5 +429 -0
  42. metadata +155 -0
@@ -0,0 +1,70 @@
+ module Arver
+   class OpenAction < Action
+     def initialize( target_list )
+       super( target_list )
+       self.open_keystore
+     end
+
+     def verify?( partition )
+       if( Arver::LuksWrapper.open?(partition).execute )
+         Arver::Log.error( partition.name+" already open. skipping." )
+         return false
+       end
+       return false unless load_key( partition )
+       true
+     end
+
+     def execute_partition( partition )
+       Arver::Log.info( "opening: "+partition.path )
+       caller = Arver::LuksWrapper.open( partition )
+       unless( caller.execute( key ) )
+         if( Arver::LuksWrapper.was_wrong_key?( caller ) )
+           Arver::Log.error( "Your key for "+partition.name+" is no longer valid. Maybe it was revoked. skipping." )
+         else
+           Arver::Log.error( "Aborting: Something went wrong when opening "+partition.name+":\n"+caller.output )
+           throw( :abort_action )
+         end
+       end
+     end
+
+     def pre_host( host )
+       return if host.pre_open.nil?
+       Arver::Log.info( "Running script: " + host.pre_open + " on " + host.name )
+       c = Arver::SSHCommandWrapper.create( host.pre_open, [], host, true )
+       unless c.execute
+         Arver::Log.error( "Aborting: pre_open on #{host.name} failed:\n"+c.output )
+         throw( :abort_action )
+       end
+     end
+
+     def pre_partition( partition )
+       return if partition.pre_open.nil?
+       Arver::Log.info( "Running script: " + partition.pre_open + " on " + partition.parent.name )
+       c = Arver::SSHCommandWrapper.create( partition.pre_open, [], partition.parent, true )
+       unless c.execute
+         Arver::Log.error( "Aborting: pre_open on #{partition.name} failed:\n"+c.output )
+         throw( :abort_action )
+       end
+     end
+
+     def post_partition( partition )
+       return if partition.post_open.nil?
+       Arver::Log.info( "Running script: " + partition.post_open + " on " + partition.parent.name )
+       c = Arver::SSHCommandWrapper.create( partition.post_open, [], partition.parent, true )
+       unless c.execute
+         Arver::Log.error( "Aborting: post_open on #{partition.name} failed:\n"+c.output )
+         throw( :abort_action )
+       end
+     end
+
+     def post_host( host )
+       return if host.post_open.nil?
+       Arver::Log.info( "Running script: " + host.post_open + " on " + host.name )
+       c = Arver::SSHCommandWrapper.create( host.post_open, [], host, true )
+       unless c.execute
+         Arver::Log.error( "Aborting: post_open on #{host.name} failed:\n"+c.output )
+         throw( :abort_action )
+       end
+     end
+   end
+ end
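A minimal usage sketch (not part of the gem source; the real wiring lives in arver/cli.rb, which is not shown in this hunk): combined with Arver::TargetList and Partition#run_action from the hunks below, an open action can be driven roughly like this:

    # Sketch: resolve targets, then let each partition run the
    # verify? -> pre_partition -> execute_partition -> post_partition cycle.
    targets = Arver::TargetList.get_list( "hostgroup1/host1" )
    action  = Arver::OpenAction.new( targets )
    targets.each do |node|
      node.each_partition { |partition| partition.run_action( action ) }
    end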
@@ -0,0 +1,53 @@
+ class Arver::Partition
+
+   include Arver::PartitionHierarchyNode
+   include Arver::NodeWithScriptHooks
+
+   attr_accessor :device
+
+   def initialize(name, host)
+     self.name = name
+     self.device = ''
+     self.parent = host
+   end
+
+   def each_partition(&blk)
+     yield self
+   end
+
+   def ==(other_partition)
+     return name == other_partition.name && device == other_partition.device if other_partition.is_a?(Arver::Partition)
+     false
+   end
+
+   def to_yaml
+     yaml = ""
+     yaml += "'device': '#{device}'\n"
+     yaml += script_hooks_to_yaml
+     yaml.chop
+   end
+
+   def from_hash( hash )
+     script_hooks_from_hash( hash )
+     hash.each do | name, data |
+       self.device= data if name == "device"
+     end
+   end
+
+   def pre_execute( action )
+     return action.pre_run_execute_partition(self)
+   end
+
+   def run_action( action )
+     if( action.verify?( self ) )
+       action.pre_partition(self)
+       action.execute_partition(self)
+       action.post_partition(self)
+     end
+   end
+
+   def device_path
+     return self.device if self.device =~ /^\/dev\//
+     "/dev/#{self.device}"
+   end
+ end
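How device_path behaves (a sketch using the TestPartition helper added later in this diff): the /dev/ prefix is only prepended when the configured device is not already an absolute /dev path.

    p = Arver::TestPartition.new( "disk1" )
    p.device = "sdb1"
    p.device_path          # => "/dev/sdb1"
    p.device = "/dev/mapper/secure"
    p.device_path          # => "/dev/mapper/secure"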
@@ -0,0 +1,112 @@
+ require 'active_support/core_ext'
+
+ module Arver
+   module PartitionHierarchyNode
+
+     attr_writer :name
+     attr_reader :parent
+
+     def initialize(a_name, parent_node)
+       @name = a_name
+       self.parent = parent_node
+     end
+
+     def name
+       @name
+     end
+
+     def path
+       parent.path<<"/"<<@name
+     end
+
+     def parent=(parent_node)
+       @parent = parent_node
+       parent_node.add_child(self)
+     end
+
+     def children
+       @children ||= {}
+     end
+
+     def add_child(child)
+       children[child.name] = child
+     end
+
+     def child(name)
+       children[name]
+     end
+
+     def each_node(&blk)
+       yield self
+       children.each_value do | child |
+         child.each_node(&blk)
+       end
+     end
+
+     def each_partition(&blk)
+       children.each_value do | child |
+         child.each_partition(&blk)
+       end
+     end
+
+     def target?( list )
+       list.any? do |target|
+         self.has_child?( target ) || self.has_parent?( target )
+       end
+     end
+
+     def has_child?( child )
+       return true if self.equal?( child )
+       children.each_value do | my_child |
+         return true if my_child.has_child?( child )
+       end
+       false
+     end
+
+     def has_parent?( node )
+       return true if self.equal?( node )
+       return false if parent.nil?
+       return self.parent.has_parent?( node )
+     end
+
+     def find( name )
+       found = []
+       self.each_node do | node |
+         found += [ node ] if ( node.name == name || node.path.ends_with?( name ) )
+       end
+       found
+     end
+
+     def to_ascii
+       list = ""
+       children.each do | name, child |
+         list += "+- "+name+"\n"
+         list += ( child.to_ascii.indent_once ) +"\n" unless child.to_ascii.empty?
+       end
+       list.chop
+     end
+
+     def == other_node
+       equals = true
+       children.each do | name, child |
+         equals &= child == other_node.child( name )
+       end
+       equals
+     end
+
+     def to_yaml
+       yaml = ""
+       children.each do | name, child |
+         yaml += "'"+name+"':\n"
+         yaml += ( child.to_yaml.indent_once ) +"\n"
+       end
+       yaml.chop
+     end
+
+     def run_action( action )
+       self.children.each_value do | child |
+         action.run_on( child )
+       end
+     end
+   end
+ end
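A sketch of how the hierarchy composes (mirroring the sample tree in test_config_loader.rb below; it assumes Hostgroup.new attaches itself to Arver::Config.instance.tree, as that helper suggests): each node chains its path through parent, and find matches by name or path suffix.

    hg = Arver::Hostgroup.new( "hostgroup1" )
    h  = Arver::Host.new( "host1", hg )
    p  = Arver::Partition.new( "disk1", h )
    p.path                                       # => "/hostgroup1/host1/disk1"
    Arver::Config.instance.tree.find( "disk1" )  # => [ p ]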
@@ -0,0 +1,22 @@
+ module Arver
+   class RuntimeConfig
+
+     # This config object holds the runtime configuration (i.e. command-line switches etc.)
+
+     include Singleton
+
+     attr_accessor :test_mode, :dry_run, :force, :violence, :ask_password, :trust_all
+
+     instance.test_mode= false
+     instance.dry_run= false
+     instance.force= false
+     instance.violence= false
+     instance.ask_password= false
+     instance.trust_all= false
+
+     def trust_all
+       # in test mode trust all keys since running arver in cucumber creates a fresh gpg-keyring
+       @trust_all || @test_mode
+     end
+   end
+ end
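Note the trust_all reader overriding the accessor: test mode implies trusting all keys even if the flag was never set. A quick illustration:

    cfg = Arver::RuntimeConfig.instance
    cfg.trust_all            # => false
    cfg.test_mode = true
    cfg.trust_all            # => true, implied by test mode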
@@ -0,0 +1,21 @@
+ module Arver
+   class SSHCommandWrapper < CommandWrapper
+
+     attr_accessor :host, :user, :port, :as_root
+
+     def self.create( cmd, args, host, as_root = false )
+       c = SSHCommandWrapper.new
+       c.host= host
+       c.as_root= as_root
+       c.command= cmd
+       c.arguments_array= args
+       c
+     end
+
+     def escaped_command
+       return Escape.shell_command( [ "ssh", "-p #{host.port}", "#{host.username}@#{host.address}", "sudo", super ] ) if( as_root && host.username != "root" )
+       Escape.shell_command( [ "ssh", "-p #{host.port}", "#{host.username}@#{host.address}", super ] )
+     end
+
+   end
+ end
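Roughly what escaped_command produces (a sketch; the exact quoting depends on the Escape gem): for a host configured with a non-root username and as_root set, the remote command is prefixed with sudo.

    # Assuming a host with username "foo", address "host3.example.com", ssh port 2222:
    c = Arver::SSHCommandWrapper.create( "pre_open_host_script.sh", [], host, true )
    c.escaped_command
    # => something like: ssh '-p 2222' foo@host3.example.com sudo pre_open_host_script.sh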
@@ -0,0 +1,8 @@
+ class String
+
+   # Returns a copy of the string with every line indented by one level
+   def indent_once
+     (" ") + gsub(/(\n+)/) { $1 + (" ") }
+   end
+
+ end
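This is how to_ascii and to_yaml above nest their children (the indent width is whatever the string literal above carries; whitespace may have been collapsed in this diff view):

    "disk1\ndisk2".indent_once   # => each line prefixed by one indent level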
@@ -0,0 +1,33 @@
+ module Arver
+   class TargetList
+     def self.get_list( names )
+
+       return [] if names.nil?
+
+       tree = Arver::Config.instance.tree
+
+       return [ tree ] if names.eql? "ALL"
+
+       targets = []
+
+       names.split( "," ).each do |target_name|
+         target = tree.find( target_name )
+         if( target.size == 0 )
+           Arver::Log.error( "No such target "+target_name )
+           next
+         end
+         if( target.size > 1 )
+           Arver::Log.error( "Target not unique. Found:" )
+           target.each do |t|
+             Arver::Log.error( t.path )
+           end
+           next
+         end
+         targets += [ target[0] ]
+       end
+
+       targets
+     end
+   end
+ end
+
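Resolution examples (a sketch against the sample tree built by test_config_loader.rb in the next hunk): ALL short-circuits to the whole tree, comma lists resolve each name independently, and unknown or ambiguous names are logged and skipped.

    Arver::TargetList.get_list( nil )             # => []
    Arver::TargetList.get_list( "ALL" )           # => [ the whole config tree ]
    Arver::TargetList.get_list( "testH2,testP4" ) # => [ host testH2, partition testP4 ]
    Arver::TargetList.get_list( "nope" )          # logs "No such target nope", => []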
@@ -0,0 +1,21 @@
+ module TestConfigLoader
+
+   def load_sample_tree
+     Arver::Config.instance.tree= Arver::Tree.new
+     @hg = Arver::Hostgroup.new "testG"
+     @hg2 = Arver::Hostgroup.new "testG2"
+     @h = Arver::Host.new "testH", @hg
+     @h2 = Arver::Host.new "testH2", @hg2
+     @h3 = Arver::Host.new "testH3", @hg2
+     @d = Arver::Partition.new "testP1", @h
+     @d2 = Arver::Partition.new "testP2", @h2
+     @d3 = Arver::Partition.new "testP3", @h3
+     @d4 = Arver::Partition.new "testP4", @h3
+   end
+
+   def load_test_config
+     Arver::LocalConfig.instance.config_dir= "spec/data"
+     config = Arver::Config.instance
+     config.load
+   end
+ end
@@ -0,0 +1,9 @@
+ module Arver
+   class TestPartition < Partition
+
+     def initialize(name)
+       super(name,Arver::Host.new('foo.host',Arver::Hostgroup.new('foo')))
+     end
+
+   end
+ end
@@ -0,0 +1,32 @@
+ module Arver
+   class Tree
+
+     include PartitionHierarchyNode
+
+     def initialize
+     end
+
+     def name
+       ""
+     end
+
+     def path
+       ""
+     end
+
+     def add_host_group(host_group)
+       add_child(host_group)
+     end
+
+     def from_hash hash
+       hash.each do | name, data |
+         hg = Arver::Hostgroup.new( name )
+         hg.from_hash( data )
+       end
+     end
+
+     def clear
+       @children = {}
+     end
+   end
+ end
@@ -0,0 +1,3 @@
+ module Arver
+   VERSION = '0.0.5'
+ end
@@ -0,0 +1,429 @@
+ .\" generated with Ronn/v0.7.3
+ .\" http://github.com/rtomayko/ronn/tree/0.7.3
+ .
+ .TH "ARVER" "5" "April 2012" "" ""
+ .
+ .SH "NAME"
+ \fBarver\fR \- LUKS on the loose
+ .
+ .SH "SYNOPSIS"
+ .
+ .nf
+
+ arver [\-u user] [\-c arverdata] [OPTIONS] \-t TARGET ACTION
+ arver [\-u user] [\-c arverdata] [OPTIONS] ACTION
+ .
+ .fi
+ .
+ .SH "DESCRIPTION"
+ arver is a tool that helps you manage a large number of encrypted hard disks across multiple servers and locations\. Furthermore, it helps you manage a group of people\'s access policies to those encrypted hard disks\.
+ .
+ .P
+ By default \fBarver\fR requires a specific action and (for most actions) a target to work\.
+ .
+ .SH "OPTIONS"
+ There are some generic command line options:
+ .
+ .TP
+ \fB\-u USER\fR, \fB\-\-user USER\fR
+ By default \fBarver\fR will read \fB~/\.arver\fR to get your username\. Using \fB\-u USER\fR you can specify or override it\.
+ .
+ .TP
+ \fB\-c PATH\fR, \fB\-\-config\-dir PATH\fR
+ By default \fBarver\fR will assume your data storage is in \fB~/\.arverdata\fR\. However, if you have multiple data storages or you want to put it in a different location, you can use the \fB\-c PATH\fR option\.
+ .
+ .SH "ACTIONS"
+ The following actions are supported:
+ .
+ .TP
+ \fB\-\-create TARGET\fR
+ Creates LUKS partitions for \fBarver\fR on all targeted disks\.
+ .
+ .TP
+ \fB\-o\fR, \fB\-\-open TARGET\fR
+ Opens all targeted disks\.
+ .
+ .TP
+ \fB\-\-close TARGET\fR
+ Closes all targeted disks\.
+ .
+ .TP
+ \fB\-a\fR, \fB\-\-add\-user USER TARGET\fR
+ Adds permissions for USER on all targeted disks\.
+ .
+ .TP
+ \fB\-d\fR, \fB\-\-del\-user USER TARGET\fR
+ Removes permissions for USER on all targeted disks\.
+ .
+ .TP
+ \fB\-i\fR, \fB\-\-info TARGET\fR
+ Displays the LUKS configuration of all targeted disks\.
+ .
+ .TP
+ \fB\-l\fR, \fB\-\-list\-targets\fR
+ Lists all possible targets\.
+ .
+ .TP
+ \fB\-k\fR, \fB\-\-keys TARGET\fR
+ Lists available keys for the target\.
+ .
+ .TP
+ \fB\-g\fR, \fB\-\-garbage\-collect\fR
+ Cleans old entries from your \fBarver\fR keys\.
+ .
+ .SH "TARGETS"
+ All targets are defined in the \fBdisks\fR config file\. See the section \fBDisks\fR for more details\. A complete target looks like this:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ /location_name/host_name/disk_name
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ The TARGET option also accepts partial names and lists\. E\.g\.
+ .
+ .IP "" 4
+ .
+ .nf
+
+ \-t location1,location2/host2,host4,disk3
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ targets all disks at location1, all disks on host2 at location2, all disks on host4 and disk3\. If any of the provided target names is not unique, \fBarver\fR will quit with an error message listing all matching targets\.
+ .
+ .P
+ To run an action on all possible disks use \fBALL\fR as target\.
+ .
+ .SH "Working with arver"
+ Working with arver is quite simple and straightforward\. Within the next paragraphs you\'ll find a detailed overview of the concept of arver, as well as how to set it up and what the different actions do exactly\.
+ .
+ .SH "Concept"
+ To set up arver we only need to define our (admin) users and our disks in the corresponding configuration files\.
+ .
+ .P
+ Arver\'s configuration files are contained in a single directory which holds all the necessary (non\-private) information to manage your disks\. This directory will be referred to as \fBarverdata\fR\. We recommend sharing that directory among your group of admins with a distributed version control system such as git\.
+ .
+ .P
+ Each admin will be assigned one global LUKS slot\. Arver will use this information to grant or revoke privileges to other users\. See the \fBManaging users\fR section for a detailed description of how this is done\.
+ .
+ .P
+ The \fBarverdata\fR directory contains the following files and directories:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ keys/        <\- contains the users\' `arver` keyrings
+ users        <\- yaml file containing all user configuration (see `Users`)
+ disks        <\- yaml file containing all disk configuration (see `Disks`)
+ keys/public/ <\- contains gpg public keys of the admins (managed by arver)
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .SS "Users"
+ The \fBusers\fR config file contains all your admins in the following structure:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ foo1:
+   slot: 0
+   gpg: BEAFAFFEBEAFAFFEBEAFAFFEBEAFAFFEBEAFAFFE
+ foo2:
+   slot: 1
+   gpg: AFFEBEAFAFFEBEAFAFFEBEAFAFFEBEAFAFFEBEAF
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ \fBfoo1\fR is the identifier of one of your admins\. \fBslot\fR refers to the LUKS slot which is used for this admin\. This has to be a unique number between 0 and 7\. If you\'d like to migrate existing LUKS devices, take care to avoid the currently used slot number (usually 0)\. \fBgpg\fR is the gpg fingerprint of the public key for \fBfoo1\fR\. We recommend using a dedicated gpg key just for \fBarver\fR\.
+ .
+ .SS "Disks"
+ The \fBdisks\fR file contains the following hash tree in yaml notation:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ \'hostgroup1\':
+   \'host1\':
+     \'address\' : \'host1\.example\.com\'
+     \'pre_open\': \'pre_open_host_script\.sh\'
+     \'disk1\' :
+       \'device\' : \'storage/disk1\'
+       \'post_open\': \'post_open_disk_script\.sh\'
+     \'disk2\' :
+       \'device\' : \'sdb1\'
+   \'host2\':
+     \'address\': \'host2\.example\.com\'
+     \'port\' : \'2222\'
+     \'mails\' :
+       \'device\' : \'nonraid/mails\'
+       \'pre_open\': \'pre_open_disk_script\.sh\'
+ \'hostgroup2\':
+   \'host3\':
+     \'address\' : \'host3\.example\.com\'
+     \'username\': \'foo\'
+     \'secure\' :
+       \'device\' : \'storage/secure\'
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ As you can see, this allows you to organize your disks and servers in a tree structure, which enables you to manage your disks more efficiently with the commands described later\.
+ .
+ .P
+ \fBhostgroup1\fR and \fBhostgroup2\fR are just logical containers which can hold any number of hosts\. You can name them as you like\. This is useful if you have, for example, multiple hosts in one location and you need to quickly recover that location from a power outage\.
+ .
+ .P
+ Invoking
+ .
+ .IP "" 4
+ .
+ .nf
+
+ arver \-\-list\-targets
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ will present you the tree of the various targets in your \fBdisks\fR configuration file\.
+ .
+ .P
+ \fBhost1\fR, \fBhost2\fR and \fBhost3\fR are identifiers for different hosts\. These host objects can contain multiple disks and can carry further information such as the \fBaddress\fR of the host or the ssh \fBport\fR number if the ssh daemon is not running on the standard port\.
+ .
+ .P
+ You can also add script hooks to any host or disk\. Those will be run during the \fBopen\fR and \fBclose\fR actions at the appropriate time\. The possible options are: \fBpre_open\fR, \fBpre_close\fR, \fBpost_open\fR and \fBpost_close\fR\.
+ .
+ .P
+ Any other entries within a host object are the actual disk entries of that particular host\. These disks are represented by an identifier and at least a \fBdevice\fR entry pointing to the actual disk path\. So for example the disks on \fBhost1\fR are: \fB/dev/storage/disk1\fR identified by \fBdisk1\fR and \fB/dev/sdb1\fR identified by \fBdisk2\fR\. The prefix \fB/dev/\fR is always added to the disk path\.
+ .
+ .SH "Bootstrapping a new arverdata"
+ How do you start and bootstrap a new \fBarverdata\fR, so you can use arver for your storage?
+ .
+ .P
+ First you need to create the basic structure for your \fBarverdata\fR:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ mkdir ~/\.arverdata # the location is configurable\. We use the default one\.
+ $ gpg \-\-gen\-key # create a dedicated gpg key for arver
+ $ vi users # add your user and the key\-id of your new public gpg\-key
+ $ vi disks # add your hostgroups, hosts and disks
+ $ echo "\'username\': \'<your_arver_username>\'" > ~/\.arver # set your default user
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ Then we can create the encrypted hard disk:
+ .
+ .SH "Action Create"
+ To initially create an arver\-managed LUKS device you first need to add the device to the \fBdisks\fR config\. See above for various examples\. You can then create the LUKS device by invoking the following command:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-\-create TARGET
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ What\'s happening behind the scenes?
+ .
+ .P
+ Arver creates a new LUKS device with a random password in your LUKS slot on the server\. The password is then encrypted with your public key (defined in \fBusers\fR) and stored in \fBarverdata/keys/USERNAME/xxxx\fR
+ .
+ .SH "Action Open"
+ To open a LUKS device managed by arver you can invoke the \fB\-\-open\fR action on any target:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-\-open TARGET
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ arver retrieves the password by decrypting the keys in data/keys/YOURUSERNAME and uses it to open the LUKS device on the server\.
+ .
+ .P
+ See the section \fBTARGETS\fR on how to open multiple disks at once\.
+ .
+ .P
+ You can define script hooks to be executed before and after the open command\. See \fBDisks\fR for details\. The hooks are run in the following order:
+ .
+ .IP "\(bu" 4
+ pre_open of host
+ .
+ .IP "\(bu" 4
+ pre_open of disk1
+ .
+ .IP "\(bu" 4
+ open disk1
+ .
+ .IP "\(bu" 4
+ post_open of disk1
+ .
+ .IP "\(bu" 4
+ pre_open of disk2
+ .
+ .IP "\(bu" 4
+ open disk2
+ .
+ .IP "\(bu" 4
+ post_open of disk2
+ .
+ .IP "\(bu" 4
+ post_open of host
+ .
+ .IP "" 0
+ .
+ .P
+ Those scripts have to be present on the actual host\.
+ .
+ .P
+ If you don\'t have a key for any of the disks that you wish to open, it will be skipped (along with its script hooks)\.
+ .
+ .SH "Action Close"
+ Closing LUKS devices is simply done by invoking
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-\-close TARGET
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ For this action you can define hooks as well\. See \fBDisks\fR and \fBAction Open\fR for details\.
+ .
+ .SH "Managing users"
+ To add another user to one of the disks you need to have the public key of that user\. Just import their key into your gpg keyring\.
+ .
+ .P
+ If you manage your \fBarverdata\fR in a version control system, you\'ll likely have the key already in \fBarverdata/keys/public/USERNAME\fR, from where it will be imported automatically\.
+ .
+ .P
+ Granting the user access to any disk is done by invoking the following command:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-\-add\-user USERNAME TARGET
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ For this command to work, you have to trust the gpg key of USERNAME\. See \fBman gpg\fR, section \-\-edit\-key\. You should always verify that you have the correct key, e\.g\. by comparing the fingerprint over a trusted channel\. Alternatively you can run \fBarver\fR with the \fB\-\-trust\-all\fR option\.
+ .
+ .P
+ \fBarver\fR will create a random password for the specific user and add it to the user\'s slot on the targeted disks\. Furthermore, the password is encrypted with the public key of the specific user and stored in the data storage under \fBarverdata/keys/USERNAME/\fR\.
+ .
+ .P
+ For the other user to receive those new privileges, he has to copy the new keys to his own \fBarverdata\fR\. So if you use a version control system you should now commit the new keys\.
+ .
+ .P
+ If you are migrating from an existing LUKS infrastructure and want to add an initial user to the LUKS device, you will need to use the \fB\-\-ask\-password\fR option to provide an existing password\.
+ .
+ .P
+ To remove the permissions of a certain user you can simply run
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-\-del\-user USERNAME TARGET
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ which will remove the password stored in the LUKS slot of that device\. Remember that you can also invoke this command on a whole hostgroup or even on all your managed devices (using \fB\-t ALL\fR)\. This helps you quickly and safely remove one user\'s access to encrypted devices across the whole infrastructure\.
+ .
+ .P
+ By design it is not possible to know who has access to which disks by just looking at the \fBarverdata\fR\. All arver keys, including the information on which disks they fit, are encrypted with the user\'s public key\. So without the corresponding private key it is not possible to see the privileges\.
+ .
+ .P
+ You can however display the target\'s \fBinformation\fR to see which slots are used\. But to do this you need access to the server and the \fBusers\fR config\.
+ .
+ .SH "Information about targets"
+ To gather various information about the different targets you can invoke
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-i TARGET
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ which will display the current configuration of all devices, as well as various parameters of the LUKS device and its slot usage\.
+ .
+ .SH "Garbage collection"
+ As you add and remove users or reset access to disks, the number of generated key files with random passwords grows, and not all of them may still be needed\. Furthermore, the history of those actions may make it obvious, or at least reconstructable, which devices a certain user has access to\.
+ .
+ .P
+ To address this problem \fBarver\fR provides a garbage collection process, which will rearrange all your own key files\. (Only your own, as you are not able to read the other users\' key files\.)
+ .
+ .P
+ You can do that by invoking the following command:
+ .
+ .IP "" 4
+ .
+ .nf
+
+ $ arver \-g
+ .
+ .fi
+ .
+ .IP "" 0
+ .
+ .P
+ If you use a version control system to store your \fBarverdata\fR, you should always do this before committing the \fBarverdata\fR\.
+ .
+ .SH "SEE ALSO"
+ \fBcryptsetup\fR(8)\. \fBgnupg\fR(7)\.
+ .
+ .P
+ Arver project site: \fIhttps://git\.codecoop\.org/projects/arver/\fR
+ .
+ .P
+ YAML website: \fIhttp://www\.yaml\.org/\fR