vagrant-lxc 1.0.1 → 1.1.0

Sign up to get free protection for your applications and to get access to all the features.
@@ -16,11 +16,7 @@ module Vagrant
16
16
  when String
17
17
  # Nothing to do here, move along...
18
18
  else
19
- container_name = "#{env[:root_path].basename}_#{env[:machine].name}"
20
- container_name.gsub!(/[^-a-z0-9_]/i, "")
21
- # milliseconds + random number suffix to allow for simultaneous
22
- # `vagrant up` of the same box in different dirs
23
- container_name << "_#{(Time.now.to_f * 1000.0).to_i}_#{rand(100000)}"
19
+ container_name = generate_container_name(env)
24
20
  end
25
21
 
26
22
  env[:machine].provider.driver.create(
@@ -36,6 +32,19 @@ module Vagrant
36
32
 
37
33
  @app.call env
38
34
  end
35
+
36
+ def generate_container_name(env)
37
+ container_name = "#{env[:root_path].basename}_#{env[:machine].name}"
38
+ container_name.gsub!(/[^-a-z0-9_]/i, "")
39
+
40
+ # milliseconds + random number suffix to allow for simultaneous
41
+ # `vagrant up` of the same box in different dirs
42
+ container_name << "_#{(Time.now.to_f * 1000.0).to_i}_#{rand(100000)}"
43
+
44
+ # Trim container name to 64 chars, keeping "randomness"
45
+ trim_point = container_name.size > 64 ? -64 : -(container_name.size)
46
+ container_name[trim_point..-1]
47
+ end
39
48
  end
40
49
  end
41
50
  end
@@ -0,0 +1,46 @@
1
+ module Vagrant
2
+ module LXC
3
+ module Action
4
+ class GcPrivateNetworkBridges
5
+ def initialize(app, env)
6
+ @app = app
7
+ end
8
+
9
+ def call(env)
10
+ was_running = env[:machine].provider.state.id == :running
11
+
12
+ # Continue execution, we need the container to be stopped
13
+ @app.call(env)
14
+
15
+ was_running = was_running && env[:machine].provider.state.id != :running
16
+
17
+ if was_running && private_network_configured?(env[:machine].config)
18
+ private_network_configured?(env[:machine].config)
19
+ remove_bridges_that_are_not_in_use(env)
20
+ end
21
+ end
22
+
23
+ def private_network_configured?(config)
24
+ config.vm.networks.find do |type, _|
25
+ type.to_sym == :private_network
26
+ end
27
+ end
28
+
29
+ def remove_bridges_that_are_not_in_use(env)
30
+ env[:machine].config.vm.networks.find do |type, config|
31
+ next if type.to_sym != :private_network
32
+
33
+ bridge = config.fetch(:lxc__bridge_name)
34
+ driver = env[:machine].provider.driver
35
+
36
+ if ! driver.bridge_is_in_use?(bridge)
37
+ env[:ui].info I18n.t("vagrant_lxc.messages.remove_bridge", name: bridge)
38
+ # TODO: Output that bridge is being removed
39
+ driver.remove_bridge(bridge)
40
+ end
41
+ end
42
+ end
43
+ end
44
+ end
45
+ end
46
+ end
@@ -0,0 +1,43 @@
1
+ module Vagrant
2
+ module LXC
3
+ module Action
4
+ class PrivateNetworks
5
+ def initialize(app, env)
6
+ @app = app
7
+ end
8
+
9
+ def call(env)
10
+ @app.call(env)
11
+
12
+ if private_network_configured?(env[:machine].config)
13
+ env[:ui].output(I18n.t("vagrant_lxc.messages.setup_private_network"))
14
+ configure_private_networks(env)
15
+ end
16
+ end
17
+
18
+ def private_network_configured?(config)
19
+ config.vm.networks.find do |type, _|
20
+ type.to_sym == :private_network
21
+ end
22
+ end
23
+
24
+ def configure_private_networks(env)
25
+ env[:machine].config.vm.networks.find do |type, config|
26
+ next if type.to_sym != :private_network
27
+
28
+ container_name = env[:machine].provider.driver.container_name
29
+ ip = config[:ip]
30
+ bridge_ip = config.fetch(:lxc__bridge_ip) { build_bridge_ip(ip) }
31
+ bridge = config.fetch(:lxc__bridge_name)
32
+
33
+ env[:machine].provider.driver.configure_private_network(bridge, bridge_ip, container_name, ip)
34
+ end
35
+ end
36
+
37
+ def build_bridge_ip(ip)
38
+ ip.sub(/^(\d+\.\d+\.\d+)\.\d+/, '\1.254')
39
+ end
40
+ end
41
+ end
42
+ end
43
+ end
@@ -7,16 +7,16 @@ module Vagrant
7
7
  end
8
8
 
9
9
  def call(env)
10
- if public_or_private_network_configured?(env[:machine].config)
10
+ if public_network_configured?(env[:machine].config)
11
11
  env[:ui].warn(I18n.t("vagrant_lxc.messages.warn_networks"))
12
12
  end
13
13
 
14
14
  @app.call(env)
15
15
  end
16
16
 
17
- def public_or_private_network_configured?(config)
17
+ def public_network_configured?(config)
18
18
  config.vm.networks.find do |type, _|
19
- [:private_network, :public_network].include?(type.to_sym)
19
+ type.to_sym == :public_network
20
20
  end
21
21
  end
22
22
  end
@@ -46,8 +46,9 @@ module Vagrant
46
46
  wrapper = Tempfile.new('lxc-wrapper').tap do |file|
47
47
  template = Vagrant::Util::TemplateRenderer.new(
48
48
  'sudoers.rb',
49
- :template_root => Vagrant::LXC.source_root.join('templates').to_s,
50
- :cmd_paths => build_cmd_paths_hash
49
+ :template_root => Vagrant::LXC.source_root.join('templates').to_s,
50
+ :cmd_paths => build_cmd_paths_hash,
51
+ :pipework_regex => "#{ENV['HOME']}/\.vagrant\.d/gems/gems/vagrant-lxc.+/scripts/pipework"
51
52
  )
52
53
  file.puts template.render
53
54
  end
@@ -78,7 +79,7 @@ module Vagrant
78
79
 
79
80
  def build_cmd_paths_hash
80
81
  {}.tap do |hash|
81
- %w( which cat mkdir cp chown chmod rm tar chown ).each do |cmd|
82
+ %w( which cat mkdir cp chown chmod rm tar chown ip ifconfig brctl ).each do |cmd|
82
83
  hash[cmd] = `which #{cmd}`.strip
83
84
  end
84
85
  hash['lxc_bin'] = Pathname(`which lxc-create`.strip).parent.to_s
@@ -6,6 +6,8 @@ require "vagrant-lxc/driver/cli"

 require "etc"

+require "tempfile"
+
 module Vagrant
   module LXC
     class Driver
@@ -45,7 +47,21 @@ module Vagrant
45
47
  end
46
48
 
47
49
  def rootfs_path
48
- Pathname.new(config_string.match(/^lxc\.rootfs\s+=\s+(.+)$/)[1])
50
+ config_entry = config_string.match(/^lxc\.rootfs\s+=\s+(.+)$/)[1]
51
+ case config_entry
52
+ when /^overlayfs:/
53
+ # Split on colon (:), ignoring any colon escaped by an escape character ( \ )
54
+ # Pays attention to when the escape character is itself escaped.
55
+ fs_type, master_path, overlay_path = config_entry.split(/(?<!\\)(?:\\\\)*:/)
56
+ if overlay_path
57
+ Pathname.new(overlay_path)
58
+ else
59
+ # Malformed: fall back to prior behaviour
60
+ Pathname.new(config_entry)
61
+ end
62
+ else
63
+ Pathname.new(config_entry)
64
+ end
49
65
  end
50
66
 
51
67
  def mac_address
@@ -124,6 +140,52 @@ module Vagrant
124
140
  @cli.attach(*command)
125
141
  end
126
142
 
143
+ def configure_private_network(bridge_name, bridge_ip, container_name, ip)
144
+ @logger.info "Configuring network interface for #{container_name} using #{ip} and bridge #{bridge_name}"
145
+ cmd = [
146
+ Vagrant::LXC.source_root.join('scripts/pipework').to_s,
147
+ bridge_name,
148
+ container_name,
149
+ "#{ip}/24"
150
+ ]
151
+ @sudo_wrapper.run(*cmd)
152
+
153
+ if ! bridge_has_an_ip?(bridge_name)
154
+ @logger.info "Adding #{bridge_ip} to the bridge #{bridge_name}"
155
+ cmd = [
156
+ 'ip',
157
+ 'addr',
158
+ 'add',
159
+ "#{bridge_ip}/24",
160
+ 'dev',
161
+ bridge_name
162
+ ]
163
+ @sudo_wrapper.run(*cmd)
164
+ end
165
+ end
166
+
167
+ def bridge_has_an_ip?(bridge_name)
168
+ @logger.info "Checking whether the bridge #{bridge_name} has an IP"
169
+ `ip -4 addr show scope global #{bridge_name}` =~ /^\s+inet ([0-9.]+)\/[0-9]+\s+/
170
+ end
171
+
172
+ def bridge_is_in_use?(bridge_name)
173
+ # REFACTOR: This method is **VERY** hacky
174
+ @logger.info "Checking if bridge #{bridge_name} is in use"
175
+ brctl_output = `brctl show #{bridge_name} 2>/dev/null | tail -n +2 | grep -q veth`
176
+ $?.to_i == 0
177
+ end
178
+
179
+ def remove_bridge(bridge_name)
180
+ @logger.info "Checking whether bridge #{bridge_name} exists"
181
+ brctl_output = `ifconfig -a | grep -q #{bridge_name}`
182
+ return if $?.to_i != 0
183
+
184
+ @logger.info "Removing bridge #{bridge_name}"
185
+ @sudo_wrapper.run('ifconfig', bridge_name, 'down')
186
+ @sudo_wrapper.run('brctl', 'delbr', bridge_name)
187
+ end
188
+
127
189
  def version
128
190
  @version ||= @cli.version
129
191
  end
@@ -64,9 +64,9 @@ module Vagrant

         run :create,
             '-B', backingstore,
-            *(backingstore_options.to_a.flatten),
             '--template', template,
             '--name', @name,
+            *(backingstore_options.to_a.flatten),
             *(config_opts),
             *extra
       rescue Errors::ExecuteError => e
@@ -18,6 +18,10 @@ module Vagrant
       class NamespacesNotSupported < Vagrant::Errors::VagrantError
       end

+      class LxcLinuxRequired < Vagrant::Errors::VagrantError
+        error_key(:lxc_linux_required)
+      end
+
       class LxcNotInstalled < Vagrant::Errors::VagrantError
         error_key(:lxc_not_installed)
       end
@@ -1,5 +1,4 @@
1
1
  require 'vagrant'
2
- require 'vagrant-backports/utils'
3
2
 
4
3
  module Vagrant
5
4
  module LXC
@@ -10,9 +9,7 @@ module Vagrant
10
9
  LXC-based virtual machines.
11
10
  EOF
12
11
 
13
- extra = []
14
- extra << {parallel: true} if Vagrant::Backports.vagrant_1_2_or_later?
15
- provider(:lxc, *extra) do
12
+ provider(:lxc, parallel: true, priority: 7) do
16
13
  require File.expand_path("../provider", __FILE__)
17
14
 
18
15
  I18n.load_path << File.expand_path(File.dirname(__FILE__) + '/../../locales/en.yml')
@@ -31,18 +28,14 @@ module Vagrant
31
28
  Config
32
29
  end
33
30
 
34
- if Vagrant::Backports.vagrant_1_4_or_later?
35
- synced_folder(:lxc) do
36
- require File.expand_path("../synced_folder", __FILE__)
37
- SyncedFolder
38
- end
31
+ synced_folder(:lxc) do
32
+ require File.expand_path("../synced_folder", __FILE__)
33
+ SyncedFolder
39
34
  end
40
35
 
41
- if Vagrant::Backports.vagrant_1_5_or_later?
42
- provider_capability("lxc", "public_address") do
43
- require_relative "provider/cap/public_address"
44
- Provider::Cap::PublicAddress
45
- end
36
+ provider_capability("lxc", "public_address") do
37
+ require_relative "provider/cap/public_address"
38
+ Provider::Cap::PublicAddress
46
39
  end
47
40
  end
48
41
  end
@@ -9,6 +9,14 @@ module Vagrant
9
9
  class Provider < Vagrant.plugin("2", :provider)
10
10
  attr_reader :driver
11
11
 
12
+ def self.usable?(raise_error=false)
13
+ if !Vagrant::Util::Platform.linux?
14
+ raise Errors::LxcLinuxRequired
15
+ end
16
+
17
+ true
18
+ end
19
+
12
20
  def initialize(machine)
13
21
  @logger = Log4r::Logger.new("vagrant::provider::lxc")
14
22
  @machine = machine
@@ -28,7 +36,7 @@ module Vagrant
28
36
 
29
37
  def ensure_lxc_installed!
30
38
  begin
31
- sudo_wrapper.run("which", "lxc-create")
39
+ sudo_wrapper.run("/usr/bin/which", "lxc-create")
32
40
  rescue Vagrant::LXC::Errors::ExecuteError
33
41
  raise Errors::LxcNotInstalled
34
42
  end
@@ -1,5 +1,5 @@
 module Vagrant
   module LXC
-    VERSION = "1.0.1"
+    VERSION = "1.1.0"
   end
 end
data/locales/en.yml CHANGED
@@ -13,15 +13,18 @@ en:
13
13
  force_shutdown: |-
14
14
  Forcing shutdown of container...
15
15
  warn_networks: |-
16
- Warning! The LXC provider doesn't support any of the Vagrant public / private
17
- network configurations (ex: `config.vm.network :private_network, ip: "some-ip"`).
18
- They will be silently ignored.
16
+ Warning! The LXC provider doesn't support public networks, the settings
17
+ will be silently ignored.
19
18
  warn_group: |-
20
19
  Warning! The LXC provider doesn't support the :group parameter for synced
21
20
  folders. It will be silently ignored.
22
21
  warn_owner: |-
23
22
  Warning! The LXC provider doesn't support the :owner parameter for synced
24
23
  folders. It will be silently ignored.
24
+ setup_private_network: |-
25
+ Setting up private networks...
26
+ remove_bridge: |-
27
+ Removing bridge '%{name}'...
25
28
 
26
29
  vagrant:
27
30
  commands:
@@ -56,6 +59,10 @@ en:
56
59
 
57
60
  Looked up under: %{paths}
58
61
 
62
+ lxc_linux_required: |-
63
+ The LXC provider only works on Linux. Please try to use
64
+ another provider.
65
+
59
66
  lxc_not_installed: |-
60
67
  The `lxc` package does not seem to be installed or is not accessible on the PATH.
61
68
 
data/scripts/pipework ADDED
@@ -0,0 +1,298 @@
1
+ #!/bin/bash
2
+
3
+ # Borrowed from https://github.com/jpetazzo/pipework
4
+
5
+ set -e
6
+
7
+ case "$1" in
8
+ --wait)
9
+ WAIT=1
10
+ ;;
11
+ esac
12
+
13
+ IFNAME=$1
14
+
15
+ # default value set further down if not set here
16
+ CONTAINER_IFNAME=
17
+ if [ "$2" = "-i" ]; then
18
+ CONTAINER_IFNAME=$3
19
+ shift 2
20
+ fi
21
+
22
+ GUESTNAME=$2
23
+ IPADDR=$3
24
+ MACADDR=$4
25
+
26
+ if echo $MACADDR | grep -q @
27
+ then
28
+ VLAN=$(echo $MACADDR | cut -d@ -f2)
29
+ MACADDR=$(echo $MACADDR | cut -d@ -f1)
30
+ else
31
+ VLAN=
32
+ fi
33
+
34
+ [ "$IPADDR" ] || [ "$WAIT" ] || {
35
+ echo "Syntax:"
36
+ echo "pipework <hostinterface> [-i containerinterface] <guest> <ipaddr>/<subnet>[@default_gateway] [macaddr][@vlan]"
37
+ echo "pipework <hostinterface> [-i containerinterface] <guest> dhcp [macaddr][@vlan]"
38
+ echo "pipework --wait [-i containerinterface]"
39
+ exit 1
40
+ }
41
+
42
+ # First step: determine type of first argument (bridge, physical interface...), skip if --wait set
43
+ if [ -z "$WAIT" ]; then
44
+ if [ -d /sys/class/net/$IFNAME ]
45
+ then
46
+ if [ -d /sys/class/net/$IFNAME/bridge ]
47
+ then
48
+ IFTYPE=bridge
49
+ BRTYPE=linux
50
+ elif $(which ovs-vsctl >/dev/null 2>&1) && $(ovs-vsctl list-br|grep -q ^$IFNAME$)
51
+ then
52
+ IFTYPE=bridge
53
+ BRTYPE=openvswitch
54
+ elif [ $(cat /sys/class/net/$IFNAME/type) -eq 32 ]; # Infiniband IPoIB interface type 32
55
+ then
56
+ IFTYPE=ipoib
57
+ # The IPoIB kernel module is fussy, set device name to ib0 if not overridden
58
+ CONTAINER_IFNAME=${CONTAINER_IFNAME:-ib0}
59
+ else IFTYPE=phys
60
+ fi
61
+ else
62
+ # case "$IFNAME" in
63
+ # br*)
64
+ IFTYPE=bridge
65
+ BRTYPE=linux
66
+ # ;;
67
+ # ovs*)
68
+ # if ! $(which ovs-vsctl >/dev/null)
69
+ # then
70
+ # echo "Need OVS installed on the system to create an ovs bridge"
71
+ # exit 1
72
+ # fi
73
+ # IFTYPE=bridge
74
+ # BRTYPE=openvswitch
75
+ # ;;
76
+ # *)
77
+ # echo "I do not know how to setup interface $IFNAME."
78
+ # exit 1
79
+ # ;;
80
+ # esac
81
+ fi
82
+ fi
83
+
84
+ # Set the default container interface name to eth1 if not already set
85
+ CONTAINER_IFNAME=${CONTAINER_IFNAME:-eth1}
86
+
87
+ [ "$WAIT" ] && {
88
+ while ! grep -q ^1$ /sys/class/net/$CONTAINER_IFNAME/carrier 2>/dev/null
89
+ do sleep 1
90
+ done
91
+ exit 0
92
+ }
93
+
94
+ [ $IFTYPE = bridge ] && [ $BRTYPE = linux ] && [ "$VLAN" ] && {
95
+ echo "VLAN configuration currently unsupported for Linux bridge."
96
+ exit 1
97
+ }
98
+
99
+ [ $IFTYPE = ipoib ] && [ $MACADDR ] && {
100
+ echo "MACADDR configuration unsupported for IPoIB interfaces."
101
+ exit 1
102
+ }
103
+
104
+ # Second step: find the guest (for now, we only support LXC containers)
105
+ while read dev mnt fstype options dump fsck
106
+ do
107
+ [ "$fstype" != "cgroup" ] && continue
108
+ echo $options | grep -qw devices || continue
109
+ CGROUPMNT=$mnt
110
+ done < /proc/mounts
111
+
112
+ [ "$CGROUPMNT" ] || {
113
+ echo "Could not locate cgroup mount point."
114
+ exit 1
115
+ }
116
+
117
+ # Try to find a cgroup matching exactly the provided name.
118
+ N=$(find "$CGROUPMNT" -name "$GUESTNAME" | wc -l)
119
+ case "$N" in
120
+ 0)
121
+ # If we didn't find anything, try to lookup the container with Docker.
122
+ if which docker >/dev/null
123
+ then
124
+ RETRIES=3
125
+ while [ $RETRIES -gt 0 ]; do
126
+ DOCKERPID=$(docker inspect --format='{{ .State.Pid }}' $GUESTNAME)
127
+ [ $DOCKERPID != 0 ] && break
128
+ sleep 1
129
+ RETRIES=$((RETRIES - 1))
130
+ done
131
+
132
+ [ "$DOCKERPID" = 0 ] && {
133
+ echo "Docker inspect returned invalid PID 0"
134
+ exit 1
135
+ }
136
+
137
+ [ "$DOCKERPID" = "<no value>" ] && {
138
+ echo "Container $GUESTNAME not found, and unknown to Docker."
139
+ exit 1
140
+ }
141
+ else
142
+ echo "Container $GUESTNAME not found, and Docker not installed."
143
+ exit 1
144
+ fi
145
+ ;;
146
+ 1)
147
+ true
148
+ ;;
149
+ *)
150
+ echo "Found more than one container matching $GUESTNAME."
151
+ exit 1
152
+ ;;
153
+ esac
154
+
155
+ if [ "$IPADDR" = "dhcp" ]
156
+ then
157
+ # Check for first available dhcp client
158
+ DHCP_CLIENT_LIST="udhcpc dhcpcd dhclient"
159
+ for CLIENT in $DHCP_CLIENT_LIST; do
160
+ which $CLIENT >/dev/null && {
161
+ DHCP_CLIENT=$CLIENT
162
+ break
163
+ }
164
+ done
165
+ [ -z $DHCP_CLIENT ] && {
166
+ echo "You asked for DHCP; but no DHCP client could be found."
167
+ exit 1
168
+ }
169
+ else
170
+ # Check if a subnet mask was provided.
171
+ echo $IPADDR | grep -q / || {
172
+ echo "The IP address should include a netmask."
173
+ echo "Maybe you meant $IPADDR/24 ?"
174
+ exit 1
175
+ }
176
+ # Check if a gateway address was provided.
177
+ if echo $IPADDR | grep -q @
178
+ then
179
+ GATEWAY=$(echo $IPADDR | cut -d@ -f2)
180
+ IPADDR=$(echo $IPADDR | cut -d@ -f1)
181
+ else
182
+ GATEWAY=
183
+ fi
184
+ fi
185
+
186
+ if [ $DOCKERPID ]; then
187
+ NSPID=$DOCKERPID
188
+ else
189
+ NSPID=$(head -n 1 $(find "$CGROUPMNT" -name "$GUESTNAME" | head -n 1)/tasks)
190
+ [ "$NSPID" ] || {
191
+ echo "Could not find a process inside container $GUESTNAME."
192
+ exit 1
193
+ }
194
+ fi
195
+
196
+ # Check if an incompatible VLAN device already exists
197
+ [ $IFTYPE = phys ] && [ "$VLAN" ] && [ -d /sys/class/net/$IFNAME.VLAN ] && {
198
+ [ -z "$(ip -d link show $IFNAME.$VLAN | grep "vlan.*id $VLAN")" ] && {
199
+ echo "$IFNAME.VLAN already exists but is not a VLAN device for tag $VLAN"
200
+ exit 1
201
+ }
202
+ }
203
+
204
+ [ ! -d /var/run/netns ] && mkdir -p /var/run/netns
205
+ [ -f /var/run/netns/$NSPID ] && rm -f /var/run/netns/$NSPID
206
+ ln -s /proc/$NSPID/ns/net /var/run/netns/$NSPID
207
+
208
+ # Check if we need to create a bridge.
209
+ [ $IFTYPE = bridge ] && [ ! -d /sys/class/net/$IFNAME ] && {
210
+ [ $BRTYPE = linux ] && {
211
+ (ip link add dev $IFNAME type bridge > /dev/null 2>&1) || (brctl addbr $IFNAME)
212
+ ip link set $IFNAME up
213
+ }
214
+ [ $BRTYPE = openvswitch ] && {
215
+ ovs-vsctl add-br $IFNAME
216
+ }
217
+ }
218
+
219
+ MTU=$(ip link show $IFNAME | awk '{print $5}')
220
+ # If it's a bridge, we need to create a veth pair
221
+ [ $IFTYPE = bridge ] && {
222
+ LOCAL_IFNAME="v${CONTAINER_IFNAME}pl${NSPID}"
223
+ GUEST_IFNAME="v${CONTAINER_IFNAME}pg${NSPID}"
224
+ ip link add name $LOCAL_IFNAME mtu $MTU type veth peer name $GUEST_IFNAME mtu $MTU
225
+ case "$BRTYPE" in
226
+ linux)
227
+ (ip link set $LOCAL_IFNAME master $IFNAME > /dev/null 2>&1) || (brctl addif $IFNAME $LOCAL_IFNAME)
228
+ ;;
229
+ openvswitch)
230
+ ovs-vsctl add-port $IFNAME $LOCAL_IFNAME ${VLAN:+"tag=$VLAN"}
231
+ ;;
232
+ esac
233
+ ip link set $LOCAL_IFNAME up
234
+ }
235
+
236
+ # Note: if no container interface name was specified, pipework will default to ib0
237
+ # Note: no macvlan subinterface or ethernet bridge can be created against an
238
+ # ipoib interface. Infiniband is not ethernet. ipoib is an IP layer for it.
239
+ # To provide additional ipoib interfaces to containers use SR-IOV and pipework
240
+ # to assign them.
241
+ [ $IFTYPE = ipoib ] && {
242
+ GUEST_IFNAME=$CONTAINER_IFNAME
243
+ }
244
+
245
+ # If it's a physical interface, create a macvlan subinterface
246
+ [ $IFTYPE = phys ] && {
247
+ [ "$VLAN" ] && {
248
+ [ ! -d /sys/class/net/$IFNAME.$VLAN ] && {
249
+ ip link add link $IFNAME name $IFNAME.$VLAN mtu $MTU type vlan id $VLAN
250
+ }
251
+
252
+ ip link set $IFNAME up
253
+ IFNAME=$IFNAME.$VLAN
254
+ }
255
+ GUEST_IFNAME=ph$NSPID$CONTAINER_IFNAME
256
+ ip link add link $IFNAME dev $GUEST_IFNAME mtu $MTU type macvlan mode bridge
257
+ ip link set $IFNAME up
258
+ }
259
+
260
+ ip link set $GUEST_IFNAME netns $NSPID
261
+ ip netns exec $NSPID ip link set $GUEST_IFNAME name $CONTAINER_IFNAME
262
+ [ "$MACADDR" ] && ip netns exec $NSPID ip link set dev $CONTAINER_IFNAME address $MACADDR
263
+ if [ "$IPADDR" = "dhcp" ]
264
+ then
265
+ [ $DHCP_CLIENT = "udhcpc" ] && ip netns exec $NSPID $DHCP_CLIENT -qi $CONTAINER_IFNAME -x hostname:$GUESTNAME
266
+ if [ $DHCP_CLIENT = "dhclient" ]
267
+ then
268
+ # kill dhclient after get ip address to prevent device be used after container close
269
+ ip netns exec $NSPID $DHCP_CLIENT -pf "/var/run/dhclient.$NSPID.pid" $CONTAINER_IFNAME
270
+ kill "$(cat "/var/run/dhclient.$NSPID.pid")"
271
+ rm "/var/run/dhclient.$NSPID.pid"
272
+ fi
273
+ [ $DHCP_CLIENT = "dhcpcd" ] && ip netns exec $NSPID $DHCP_CLIENT -q $CONTAINER_IFNAME -h $GUESTNAME
274
+ else
275
+ ip netns exec $NSPID ip addr add $IPADDR dev $CONTAINER_IFNAME
276
+ [ "$GATEWAY" ] && {
277
+ ip netns exec $NSPID ip route delete default >/dev/null 2>&1 && true
278
+ }
279
+ ip netns exec $NSPID ip link set $CONTAINER_IFNAME up
280
+ [ "$GATEWAY" ] && {
281
+ ip netns exec $NSPID ip route get $GATEWAY >/dev/null 2>&1 || \
282
+ ip netns exec $NSPID ip route add $GATEWAY/32 dev $CONTAINER_IFNAME
283
+ ip netns exec $NSPID ip route replace default via $GATEWAY
284
+ }
285
+ fi
286
+
287
+ # Give our ARP neighbors a nudge about the new interface
288
+ if which arping > /dev/null 2>&1
289
+ then
290
+ IPADDR=$(echo $IPADDR | cut -d/ -f1)
291
+ ip netns exec $NSPID arping -c 1 -A -I $CONTAINER_IFNAME $IPADDR > /dev/null 2>&1 || true
292
+ else
293
+ echo "Warning: arping not found; interface may not be immediately reachable"
294
+ fi
295
+
296
+ # Remove NSPID to avoid `ip netns` catch it.
297
+ [ -f /var/run/netns/$NSPID ] && rm -f /var/run/netns/$NSPID
298
+ exit 0