podman 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/lib/podman.rb +1 -0
- data/lib/vagrant/podman/action/build.rb +99 -0
- data/lib/vagrant/podman/action/compare_synced_folders.rb +65 -0
- data/lib/vagrant/podman/action/connect_networks.rb +80 -0
- data/lib/vagrant/podman/action/create.rb +165 -0
- data/lib/vagrant/podman/action/destroy.rb +34 -0
- data/lib/vagrant/podman/action/destroy_build_image.rb +51 -0
- data/lib/vagrant/podman/action/destroy_network.rb +53 -0
- data/lib/vagrant/podman/action/forwarded_ports.rb +36 -0
- data/lib/vagrant/podman/action/has_ssh.rb +21 -0
- data/lib/vagrant/podman/action/host_machine.rb +75 -0
- data/lib/vagrant/podman/action/host_machine_build_dir.rb +49 -0
- data/lib/vagrant/podman/action/host_machine_port_checker.rb +34 -0
- data/lib/vagrant/podman/action/host_machine_port_warning.rb +40 -0
- data/lib/vagrant/podman/action/host_machine_required.rb +20 -0
- data/lib/vagrant/podman/action/host_machine_sync_folders.rb +176 -0
- data/lib/vagrant/podman/action/host_machine_sync_folders_disable.rb +91 -0
- data/lib/vagrant/podman/action/init_state.rb +23 -0
- data/lib/vagrant/podman/action/is_build.rb +19 -0
- data/lib/vagrant/podman/action/is_host_machine_created.rb +32 -0
- data/lib/vagrant/podman/action/login.rb +51 -0
- data/lib/vagrant/podman/action/prepare_forwarded_port_collision_params.rb +64 -0
- data/lib/vagrant/podman/action/prepare_networks.rb +397 -0
- data/lib/vagrant/podman/action/prepare_nfs_settings.rb +60 -0
- data/lib/vagrant/podman/action/prepare_nfs_valid_ids.rb +22 -0
- data/lib/vagrant/podman/action/prepare_ssh.rb +48 -0
- data/lib/vagrant/podman/action/pull.rb +30 -0
- data/lib/vagrant/podman/action/start.rb +24 -0
- data/lib/vagrant/podman/action/stop.rb +24 -0
- data/lib/vagrant/podman/action/wait_for_running.rb +71 -0
- data/lib/vagrant/podman/action.rb +319 -0
- data/lib/vagrant/podman/cap/has_communicator.rb +14 -0
- data/lib/vagrant/podman/cap/proxy_machine.rb +15 -0
- data/lib/vagrant/podman/cap/public_address.rb +26 -0
- data/lib/vagrant/podman/command/exec.rb +112 -0
- data/lib/vagrant/podman/command/logs.rb +111 -0
- data/lib/vagrant/podman/command/run.rb +76 -0
- data/lib/vagrant/podman/communicator.rb +199 -0
- data/lib/vagrant/podman/config.rb +368 -0
- data/lib/vagrant/podman/driver/compose.rb +315 -0
- data/lib/vagrant/podman/driver.rb +417 -0
- data/lib/vagrant/podman/errors.rb +108 -0
- data/lib/vagrant/podman/executor/local.rb +48 -0
- data/lib/vagrant/podman/executor/vagrant.rb +88 -0
- data/lib/vagrant/podman/hostmachine/Vagrantfile +3 -0
- data/lib/vagrant/podman/plugin.rb +89 -0
- data/lib/vagrant/podman/provider.rb +216 -0
- data/lib/vagrant/podman/synced_folder.rb +35 -0
- data/templates/locales/providers_podman.yml +321 -0
- metadata +103 -0
data/lib/vagrant/podman/plugin.rb
@@ -0,0 +1,89 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+module VagrantPlugins
+  module PodmanProvider
+    autoload :Action, File.expand_path("../action", __FILE__)
+    autoload :Driver, File.expand_path("../driver", __FILE__)
+    autoload :Errors, File.expand_path("../errors", __FILE__)
+
+    module Executor
+      autoload :Local, File.expand_path("../executor/local", __FILE__)
+      autoload :Vagrant, File.expand_path("../executor/vagrant", __FILE__)
+    end
+
+    class Plugin < Vagrant.plugin("2")
+      name "podman-provider"
+      description <<-EOF
+      The Podman provider allows Vagrant to manage and control
+      Podman containers.
+      EOF
+
+      provider(:podman, box_optional: true, parallel: true, defaultable: false) do
+        require_relative 'provider'
+        init!
+        Provider
+      end
+
+      command("podman-exec", primary: false) do
+        require_relative "command/exec"
+        init!
+        Command::Exec
+      end
+
+      command("podman-logs", primary: false) do
+        require_relative "command/logs"
+        init!
+        Command::Logs
+      end
+
+      command("podman-run", primary: false) do
+        require_relative "command/run"
+        init!
+        Command::Run
+      end
+
+      communicator(:podman_hostvm) do
+        require_relative "communicator"
+        init!
+        Communicator
+      end
+
+      config(:podman, :provider) do
+        require_relative 'config'
+        init!
+        Config
+      end
+
+      synced_folder(:podman) do
+        require File.expand_path("../synced_folder", __FILE__)
+        SyncedFolder
+      end
+
+      provider_capability("podman", "public_address") do
+        require_relative "cap/public_address"
+        Cap::PublicAddress
+      end
+
+      provider_capability("podman", "proxy_machine") do
+        require_relative "cap/proxy_machine"
+        Cap::ProxyMachine
+      end
+
+      provider_capability("podman", "has_communicator") do
+        require_relative "cap/has_communicator"
+        Cap::HasCommunicator
+      end
+
+      protected
+
+      def self.init!
+        return if defined?(@_init)
+        I18n.load_path << File.expand_path(
+          "templates/locales/providers_podman.yml", Vagrant.source_root)
+        I18n.reload!
+        @_init = true
+      end
+    end
+  end
+end
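For orientation: this is `data/lib/vagrant/podman/plugin.rb` (the only +89 file in the manifest above). It registers everything Vagrant needs to find the provider: the `:podman` provider itself, a provider-scoped config class, the `podman-exec`/`podman-logs`/`podman-run` commands, the `podman_hostvm` communicator, the `podman` synced folder type, and three provider capabilities. A minimal Vagrantfile sketch that would exercise these registrations is below; the provider name and the `remains_running` option come from this diff (the latter appears in the locale messages further down), while the `image` value is an illustrative placeholder only.

    Vagrant.configure("2") do |config|
      # Select the provider registered above; box_optional: true means
      # no Vagrant box is required for this machine.
      config.vm.provider "podman" do |d|
        d.image = "docker.io/library/nginx"  # illustrative image reference
        d.remains_running = true             # option shown in the locale messages
      end
    end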
data/lib/vagrant/podman/provider.rb
@@ -0,0 +1,216 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+require "digest/md5"
+require "fileutils"
+require "thread"
+
+require "log4r"
+
+require "vagrant/util/silence_warnings"
+
+module VagrantPlugins
+  module PodmanProvider
+    class Provider < Vagrant.plugin("2", :provider)
+      @@host_vm_mutex = Mutex.new
+
+      def self.usable?(raise_error=false)
+        Driver.new.execute("podman", "version")
+        true
+      rescue Vagrant::Errors::CommandUnavailable, Errors::ExecuteError
+        raise if raise_error
+        return false
+      end
+
+      def initialize(machine)
+        @logger = Log4r::Logger.new("vagrant::provider::podman")
+        @machine = machine
+
+        if host_vm?
+          # We need to use a special communicator that proxies our
+          # SSH requests over our host VM to the container itself.
+          @machine.config.vm.communicator = :podman_hostvm
+        end
+      end
+
+      # @see Vagrant::Plugin::V2::Provider#action
+      def action(name)
+        action_method = "action_#{name}"
+        return Action.send(action_method) if Action.respond_to?(action_method)
+        nil
+      end
+
+      # Returns the driver instance for this provider.
+      def driver
+        if !@driver
+          if @machine.provider_config.compose
+            @driver = Driver::Compose.new(@machine)
+          else
+            @driver = Driver.new
+          end
+        end
+        if host_vm?
+          @driver.executor = Executor::Vagrant.new(host_vm)
+        end
+
+        @driver
+      end
+
+      # This returns the {Vagrant::Machine} that is our host machine.
+      # It does not perform any action on the machine or verify it is
+      # running.
+      #
+      # @return [Vagrant::Machine]
+      def host_vm
+        return @host_vm if @host_vm
+
+        vf_path = @machine.provider_config.vagrant_vagrantfile
+        host_machine_name = @machine.provider_config.vagrant_machine || :default
+        if !vf_path
+          # We don't have a Vagrantfile path set, so we're going to use
+          # the default but we need to copy it into the data dir so that
+          # we don't write into our installation dir (we can't).
+          default_path = File.expand_path("../hostmachine/Vagrantfile", __FILE__)
+          vf_path = @machine.env.data_dir.join("podman-host", "Vagrantfile")
+          begin
+            @machine.env.lock("podman-provider-hostvm") do
+              vf_path.dirname.mkpath
+              FileUtils.cp(default_path, vf_path)
+            end
+          rescue Vagrant::Errors::EnvironmentLockedError
+            # Lock contention, just retry
+            retry
+          end
+
+          # Set the machine name since we hardcode that for the default
+          host_machine_name = :default
+        end
+
+        # Expand it so that the home directories and so on get processed
+        # properly.
+        vf_path = File.expand_path(vf_path, @machine.env.root_path)
+
+        vf_file = File.basename(vf_path)
+        vf_path = File.dirname(vf_path)
+
+        # Create the env to manage this machine
+        @host_vm = Vagrant::Util::SilenceWarnings.silence! do
+          host_env = Vagrant::Environment.new(
+            cwd: vf_path,
+            home_path: @machine.env.home_path,
+            ui_class: @machine.env.ui_class,
+            vagrantfile_name: vf_file,
+          )
+
+          # If there is no root path, then the Vagrantfile wasn't found
+          # and it is an error...
+          raise Errors::VagrantfileNotFound if !host_env.root_path
+
+          host_env.machine(
+            host_machine_name,
+            host_env.default_provider(
+              exclude: [:podman],
+              force_default: false,
+            ))
+        end
+
+        @host_vm
+      end
+
+      # This acquires a lock on the host VM.
+      def host_vm_lock
+        hash = Digest::MD5.hexdigest(host_vm.data_dir.to_s)
+
+        # We take the in-process (thread-level) mutex on the outside, since
+        # we can wait on that briefly. Then we take the cross-process
+        # environment lock on the inside, which raises if already held.
+        host_vm_mutex.synchronize do
+          @machine.env.lock(hash) do
+            return yield
+          end
+        end
+      end
+
+      # This is a process-local mutex that can be used by parallel
+      # providers to lock the host VM access.
+      def host_vm_mutex
+        @@host_vm_mutex
+      end
+
+      # This says whether or not Podman will be running within a VM
+      # rather than directly on our system. Podman needs to run in a VM
+      # when we're not on Linux, or not on a Linux that supports Podman.
+      def host_vm?
+        @machine.provider_config.force_host_vm
+      end
+
+      # Returns the SSH info for accessing the Container.
+      def ssh_info
+        # If the container isn't running, we can't SSH into it
+        return nil if state.id != :running
+
+        port_name = "#{@machine.config.ssh.guest_port}/tcp"
+        network = driver.inspect_container(@machine.id)['NetworkSettings']
+
+        if network["Ports"][port_name].respond_to?(:first)
+          port_info = network["Ports"][port_name].first
+        else
+          ip = network["IPAddress"]
+          port = @machine.config.ssh.guest_port
+          if !ip.to_s.empty?
+            port_info = {
+              "HostIp" => ip,
+              "HostPort" => port
+            }
+          end
+        end
+
+        # If we were not able to identify the container's IP, we return nil
+        # here and we let Vagrant core deal with it ;)
+        return nil if port_info.nil? || port_info.empty?
+
+        {
+          host: port_info['HostIp'],
+          port: port_info['HostPort']
+        }
+      end
+
+      def state
+        state_id = nil
+        state_id = :not_created if !@machine.id
+
+        begin
+          state_id = :host_state_unknown if !state_id && \
+            host_vm? && !host_vm.communicate.ready?
+        rescue Errors::VagrantfileNotFound
+          state_id = :host_state_unknown
+        end
+
+        state_id = :not_created if !state_id && \
+          (!@machine.id || !driver.created?(@machine.id))
+        state_id = driver.state(@machine.id) if @machine.id && !state_id
+        state_id = :unknown if !state_id
+
+        # This is a special pseudo-state so that we don't set the
+        # NOT_CREATED_ID while we're setting up the machine. This avoids
+        # clearing the data dir.
+        state_id = :preparing if @machine.id == "preparing"
+
+        short = state_id.to_s.gsub("_", " ")
+        long = I18n.t("podman_provider.status.#{state_id}")
+
+        # If we're not created, then specify the special ID flag
+        if state_id == :not_created
+          state_id = Vagrant::MachineState::NOT_CREATED_ID
+        end
+
+        Vagrant::MachineState.new(state_id, short, long)
+      end
+
+      def to_s
+        id = @machine.id ? @machine.id : "new container"
+        "Podman (#{id})"
+      end
+    end
+  end
+end
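One behavior of `host_vm` above worth calling out: if `vagrant_vagrantfile` is unset, the provider copies its bundled `hostmachine/Vagrantfile` into the environment's data dir under `podman-host/` (under a lock, retrying on contention) and hardcodes the machine name to `:default`; otherwise it builds a second `Vagrant::Environment` around the file you point it at. A hedged sketch of supplying your own host VM follows — `force_host_vm`, `vagrant_vagrantfile`, and `vagrant_machine` are all read from `provider_config` in this file, but the path and machine name here are illustrative only.

    Vagrant.configure("2") do |config|
      config.vm.provider "podman" do |d|
        d.force_host_vm       = true                         # run Podman inside a host VM
        d.vagrant_vagrantfile = "./podman-host/Vagrantfile"  # illustrative path
        d.vagrant_machine     = :podmanhost                  # illustrative machine name
      end
    end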
data/lib/vagrant/podman/synced_folder.rb
@@ -0,0 +1,35 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+module VagrantPlugins
+  module PodmanProvider
+    class SyncedFolder < Vagrant.plugin("2", :synced_folder)
+      def usable?(machine, raise_error=false)
+        # These synced folders only work if the provider is Podman
+        if machine.provider_name != :podman
+          if raise_error
+            raise Errors::SyncedFolderNonPodman,
+              provider: machine.provider_name.to_s
+          end
+
+          return false
+        end
+
+        true
+      end
+
+      def prepare(machine, folders, _opts)
+        folders.each do |id, data|
+          next if data[:ignore]
+
+          host_path = data[:hostpath]
+          guest_path = data[:guestpath]
+          # Append consistency option if it exists, otherwise let it nil out
+          consistency = data[:podman_consistency]
+          consistency &&= ":" + consistency
+          machine.provider_config.volumes << "#{host_path}:#{guest_path}#{consistency}"
+        end
+      end
+    end
+  end
+end
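The `prepare` hook above is the entire synced-folder implementation for this provider: each non-ignored folder is flattened into a `host:guest[:consistency]` string and appended to the provider's `volumes` config, so the actual mount happens when the container is created. Below is a sketch of a folder definition that would flow through it, assuming Vagrant's standard `type:` selector for synced folder plugins; the paths are illustrative, and `podman_consistency` is the optional key read above.

    Vagrant.configure("2") do |config|
      # Becomes the volume string "./src:/srv/app:cached" via prepare above.
      config.vm.synced_folder "./src", "/srv/app",
        type: "podman",
        podman_consistency: "cached"  # optional; when omitted, no ":..." suffix is added
    end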
data/templates/locales/providers_podman.yml
@@ -0,0 +1,321 @@
+# Copyright (c) HashiCorp, Inc.
+# SPDX-License-Identifier: BUSL-1.1
+
+en:
+  podman_provider:
+    already_built: |-
+      Image is already built from the Podmanfile. `vagrant reload` to rebuild.
+    build_image_destroy: |-
+      Removing built image...
+    build_image_destroy_in_use: |-
+      Build image couldn't be destroyed because the image is in use. The
+      image must be destroyed manually in the future if you want to remove
+      it.
+    build_image_invalid: |-
+      Build image no longer exists. Rebuilding...
+    building: |-
+      Building the container from a Podmanfile...
+    building_git_repo: |-
+      Building the container from the git repository: %{repo}...
+    building_named_podmanfile: |-
+      Building the container from the named Podmanfile: %{file}...
+    building_git_repo_named_podmanfile: |-
+      Building the container from the named Podmanfile: %{file} in the git repository: %{repo}...
+    creating: |-
+      Creating the container...
+    created: |-
+      Container created: %{id}
+    host_machine_disabling_folders: |-
+      Removing synced folders...
+    host_machine_forwarded_ports: |-
+      Warning: When using a remote Podman host, forwarded ports will NOT be
+      immediately available on your machine. They will still be forwarded on
+      the remote machine, however, so if you have a way to access the remote
+      machine, then you should be able to access those ports there. This is
+      not an error, it is only an informational message.
+    host_machine_needed: |-
+      Podman host is required. One will be created if necessary...
+    host_machine_ready: |-
+      Podman host VM is already ready.
+    host_machine_starting: |-
+      Vagrant will now create or start a local VM to act as the Podman
+      host. You'll see the output of the `vagrant up` for this VM below.
+    host_machine_syncing_folders: |-
+      Syncing folders to the host VM...
+    logging_in: |-
+      Logging in to Podman server...
+    logs_host_state_unknown: |-
+      This container requires a host VM, and the state of that VM
+      is unknown. Run `vagrant up` to verify that the container and
+      its host VM are running, then try again.
+    network_bridge_gateway_invalid: |-
+      The provided gateway IP address is invalid (%{gateway}). Please
+      provide a valid IP address.
+    network_bridge_gateway_outofbounds: |-
+      The provided gateway IP (%{gateway}) is not within the defined
+      subnet (%{subnet}). Please provide an IP address within the
+      defined subnet.
+    network_bridge_gateway_request: |-
+      Gateway IP address for %{interface} interface [%{default_gateway}]:
+    network_bridge_iprange_info: |-
+      When an explicit address is not provided to a container attached
+      to this bridged network, podman will supply an address to the
+      container. This is independent of the local DHCP service that
+      may be available on the network.
+    network_bridge_iprange_invalid: |-
+      The provided IP address range is invalid (%{range}). Please
+      provide a valid range.
+    network_bridge_iprange_outofbounds: |-
+      The provided IP address range (%{range}) is not within the
+      defined subnet (%{subnet}). Please provide an address range
+      within the defined subnet.
+    network_bridge_iprange_request: |-
+      Available address range for assignment on %{interface} interface [%{default_range}]:
+    network_create: |-
+      Creating and configuring podman networks...
+    network_connect: |-
+      Enabling network interfaces...
+    network_destroy: |-
+      Removing network %{network_name} ...
+    not_created_skip: |-
+      Container not created. Skipping.
+    not_podman_provider: |-
+      Not backed by Podman provider. Skipping.
+    pull: |-
+      Pulling image '%{image}'...
+    run_command_required: |-
+      `vagrant podman-run` requires a command to execute. This command
+      must be specified after a `--` in the command line. This is used
+      to separate possible machine names and options from the actual
+      command to execute. An example is shown below:
+
+        vagrant podman-run web -- rails new .
+
+    running: |-
+      Container is starting. Output will stream in below...
+    running_detached: |-
+      Container started in detached mode.
+    ssh_through_host_vm: |-
+      SSH will be proxied through the Podman virtual machine since we're
+      not running Podman natively. This is just a notice, and not an error.
+    subnet_exists: |-
+      A network called '%{network_name}' using subnet '%{subnet}' is already in use.
+      Using '%{network_name}' instead of creating a new network...
+    synced_folders_changed: |-
+      Vagrant has noticed that the synced folder definitions have changed.
+      With Podman, these synced folder changes won't take effect until you
+      destroy the container and recreate it.
+    waiting_for_running: |-
+      Waiting for container to enter "running" state...
+    volume_path_not_expanded: |-
+      Host path `%{host}` exists as a `volumes` key and is a folder on disk. Vagrant
+      will not expand this key like it used to and instead leave it as is defined.
+      If this folder is intended to be used, make sure its full path is defined
+      in your `volumes` config. More information can be found on volumes in the
+      podman compose documentation.
+
+    messages:
+      destroying: |-
+        Deleting the container...
+      not_created: |-
+        The container hasn't been created yet.
+      not_created_original: |-
+        The original container hasn't been created yet. Run `vagrant up`
+        for this machine first.
+      not_running: |-
+        The container is not currently running.
+      preparing: |-
+        Preparing to start the container...
+      provision_no_ssh: |-
+        Provisioners will not be run since container doesn't support SSH.
+      will_not_destroy: |-
+        The container will not be destroyed, since the confirmation was declined.
+      starting: |-
+        Starting container...
+      stopping: |-
+        Stopping container...
+      container_ready: |-
+        Container started and ready for use!
+      not_provisioning: |-
+        The following provisioners require a communicator, though none is available (this container does not support SSH).
+        Not running the following provisioners:
+        - %{provisioners}
+
+    status:
+      host_state_unknown: |-
+        The host VM for the Podman containers appears to not be running
+        or is currently inaccessible. Because of this, we can't determine
+        the state of the containers on that host. Run `vagrant up` to
+        bring up the host VM again.
+      not_created: |-
+        The environment has not yet been created. Run `vagrant up` to
+        create the environment. If a machine is not created, only the
+        default provider will be shown. So if a provider is not listed,
+        then the machine is not created for that environment.
+      preparing: |-
+        Vagrant is preparing to start this Podman container. Run `vagrant up`
+        to continue.
+      running: |-
+        The container is created and running. You can stop it using
+        `vagrant halt`, see logs with `vagrant podman-logs`, and
+        kill/destroy it with `vagrant destroy`.
+      stopped: |-
+        The container is created but not running. You can run it again
+        with `vagrant up`. If the container always goes to "stopped"
+        right away after being started, it is because the command being
+        run exits and doesn't keep running.
+
+    errors:
+      build_error: |-
+        Vagrant received unknown output from `podman build` while building a container: %{result}
+      compose_lock_timeout: |-
+        Vagrant encountered a timeout waiting for the podman compose driver
+        to become available. Please try to run your command again. If you
+        continue to experience this error it may be resolved by disabling
+        parallel execution.
+      podman_compose_not_installed: |-
+        Vagrant has been instructed to use the Compose driver for the
+        Podman plugin but was unable to locate the `podman-compose` executable.
+        Ensure that `podman-compose` is installed and available on the PATH.
+      not_created: |-
+        The container hasn't been created yet.
+      not_running: |-
+        The container is not currently running.
+      communicator_non_podman: |-
+        The "podman_hostvm" communicator was specified on a machine that
+        is not provided by the Podman provider. This is a bug with your
+        Vagrantfile. Please contact the creator of your Vagrant environment
+        and notify them to not use this communicator for anything except the
+        "podman" provider.
+      config:
+        both_build_and_image_and_git: |-
+          Only one of "build_dir", "git_repo" or "image" can be set
+        build_dir_invalid: |-
+          "build_dir" must exist and contain a Podmanfile
+        git_repo_invalid: |-
+          "git_repo" must be a valid repository URL
+        build_dir_or_image: |-
+          One of "build_dir", "git_repo" or "image" must be set
+        compose_configuration_hash: |-
+          "compose_configuration" must be a hash
+        compose_force_vm: |-
+          Podman compose is not currently supported from within a proxy VM.
+        git_repo_invalid: |-
+          "git_repo" must be a valid git URL
+        create_args_array: |-
+          "create_args" must be an array
+        invalid_link: |-
+          Invalid link (should be 'name:alias'): "%{link}"
+        invalid_vagrantfile: |-
+          "vagrant_vagrantfile" must point to a Vagrantfile that exists.
+        podman_provider_nfs_without_privileged: |-
+          You've configured an NFS synced folder but didn't enable privileged
+          mode for the container. Please set the `privileged` option to true
+          on the provider block from your Vagrantfile, recreate the container
+          and try again.
+        podman_provider_image_not_configured: |-
+          The base Podman image has not been set for the '%{name}' VM!
+      execute_error: |-
+        A Podman command executed by Vagrant didn't complete successfully!
+        The command run along with the output from the command is shown
+        below.
+
+        Command: %{command}
+
+        Stderr: %{stderr}
+
+        Stdout: %{stdout}
+      exec_command_required: |-
+        The "podman-exec" command requires a command to execute. This command
+        must be specified after a "--" in the command line. This is used to
+        separate machine name and options from the actual command to execute.
+        An example is shown below:
+
+          $ vagrant podman-exec -t nginx -- bash
+
+      host_vm_communicator_not_ready: |-
+        The Podman provider was able to bring up the host VM successfully
+        but the host VM is still reporting that SSH is unavailable. This
+        sometimes happens with certain providers due to bugs in the
+        underlying hypervisor, and can be fixed with a `vagrant reload`.
+        The ID for the host VM is shown below for convenience.
+
+        If this does not fix it, please verify that the host VM provider
+        is functional and properly configured.
+
+        Host VM ID: %{id}
+      network_address_invalid: |-
+        The configured network address is not valid within the configured
+        subnet of the defined network. Please update the network settings
+        and try again.
+
+        Configured address: %{address}
+        Network name: %{network_name}
+      network_address_required: |-
+        An IP address is required if not using `type: "dhcp"` or not specifying a `subnet`.
+      network_invalid_option: |-
+        Invalid option given for podman network for guest "%{container}". Must specify either
+        a `subnet` or use `type: "dhcp"`.
+      network_name_missing: |-
+        The Podman provider is unable to connect the container to the
+        defined network due to a missing network name. Please validate
+        your configuration and try again.
+
+        Container: %{container}
+        Network Number: %{index}
+      network_name_undefined: |-
+        The Podman provider was unable to configure networking using the
+        provided network name `%{network_name}`. Please ensure the network
+        name is correct and exists, then try again.
+      network_no_interfaces: |-
+        The Podman provider was unable to list any available interfaces to bridge
+        the public network with.
+      network_subnet_invalid: |-
+        The configured network subnet is not valid for the defined network.
+        Please update the network settings and try again.
+
+        Configured subnet: %{subnet}
+        Network name: %{network_name}
+      package_not_supported: |-
+        The "package" command is not supported with the Podman provider.
+        If you'd like to commit or push your Podman container, please SSH
+        into the host VM (if there is one), and run `podman commit` and
+        so on manually.
+      state_not_running: |-
+        The container never entered the "running" state, or entered it
+        briefly but reverted back to another state. Please verify that
+        the configuration of the container is correct.
+
+        If you meant for this container to not remain running, please
+        set the Podman provider configuration "remains_running" to "false":
+
+          config.vm.provider "podman" do |d|
+            d.remains_running = false
+          end
+
+      state_stopped: |-
+        The container either never left the "stopped" state or very
+        quickly reverted to the "stopped" state after being started.
+        This is usually because the container didn't execute a command
+        that kept it running, and usually indicates a misconfiguration.
+
+        If you meant for this container to not remain running, please
+        set the Podman provider configuration "remains_running" to "false":
+
+          config.vm.provider "podman" do |d|
+            d.remains_running = false
+          end
+
+      suspend_not_supported: |-
+        The "suspend" command is not supported with the Podman provider.
+        Podman containers don't natively support suspend. If you're using
+        a host machine, you can suspend the host machine by finding it
+        in `vagrant global-status` and using `vagrant suspend <id>`.
+      synced_folder_non_podman: |-
+        The "podman" synced folder type can't be used because the provider
+        in use is not Podman. This synced folder type only works with the
+        Podman provider. The provider this machine is using is: %{provider}
+      vagrantfile_not_found: |-
+        The configured host VM Vagrantfile could not be found. Please fix
+        the Vagrantfile for this Podman environment to point to a valid
+        host VM.