Vagrantfile
# -*- mode: ruby -*-
# vi: set ft=ruby :
# To use this Vagrantfile you need Vagrant and VirtualBox installed:
#
# * VirtualBox
#
# * Vagrant
#
# * Vagrant plugins (an install sketch follows this list):
# + vagrant-proxyconf, plus its configuration, if a proxy is needed to reach the Internet
# + vagrant-cachier
# + vagrant-disksize
# + vagrant-share
# + vagrant-vbguest
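# A minimal sketch of installing the plugins listed above from the host shell
# (install only the ones you actually need; vagrant-proxyconf also expects proxy
# settings in your environment or in this file):
#
#   vagrant plugin install vagrant-proxyconf vagrant-cachier vagrant-disksize vagrant-share vagrant-vbguest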
VAGRANTFILE_API_VERSION = "2"
HOSTNAME = "lxd1"
DOMAIN = "infra.ballardini.com.ar"
$post_up_message = <<POST_UP_MESSAGE
------------------------------------------------------
Single-node LXD cluster
URLs:
- host only - https://lxd1.ballardini.com.ar/
------------------------------------------------------
POST_UP_MESSAGE
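# The host-only URL above assumes the name resolves on your workstation; a minimal
# sketch of the matching /etc/hosts entry (IP taken from the private_network below):
#
#   192.168.33.21   lxd1.ballardini.com.ar   lxd1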
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.vm.post_up_message = $post_up_message
# use cachier with NFS only if hostmanager manages the names in the host's /etc/hosts (see the commented sketch after this block)
if Vagrant.has_plugin?("vagrant-cachier")
config.cache.auto_detect = false
config.cache.scope = :box
end
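# A commented sketch of the NFS variant mentioned above, as shown in the
# vagrant-cachier README (assumption: vagrant-hostmanager or similar keeps the
# host's /etc/hosts in sync so the NFS export resolves):
#
# if Vagrant.has_plugin?("vagrant-cachier")
#   config.cache.scope = :box
#   config.cache.synced_folder_opts = {
#     type: :nfs,
#     mount_options: ['rw', 'vers=3', 'tcp', 'nolock']
#   }
# end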
config.vm.define HOSTNAME do |srv|
#srv.vm.box = "alpine/alpine64"
srv.vm.box = "generic/alpine312"
srv.vm.network "private_network", ip: "192.168.33.21"
#srv.vm.network "forwarded_port", guest: 22, host: 2220, host_ip: "127.0.0.1", id: 'ssh'
srv.vm.boot_timeout = 3600
srv.vm.box_check_update = true
srv.ssh.forward_agent = true
srv.ssh.forward_x11 = true
srv.vm.hostname = HOSTNAME
if Vagrant.has_plugin?("vagrant-vbguest") then
#srv.vbguest.auto_update = true
srv.vbguest.auto_update = false
srv.vbguest.no_install = false
end
srv.vm.synced_folder ".", "/vagrant", disabled: false, SharedFoldersEnableSymlinksCreate: false
srv.vm.provider :virtualbox do |vb|
vb.gui = false
vb.cpus = 2
vb.memory = "2048"
vb.customize ["modifyvm", :id, "--nested-hw-virt", "on"]
# https://www.virtualbox.org/manual/ch08.html#vboxmanage-modifyvm more parameters for customizing VirtualBox
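# A hedged sketch of further modifyvm tweaks from the manual linked above
# (commented out; these specific flags are only examples, not requirements):
#
# vb.customize ["modifyvm", :id, "--ioapic", "on"]
# vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]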
end
end
##
# Provisioning
#
config.vm.provision "ssh_pub_key", type: :shell do |s|
begin
ssh_pub_key = File.readlines("#{Dir.home}/.ssh/id_rsa.pub").first.strip
s.inline = <<-SHELL
mkdir -p /root/.ssh/
touch /root/.ssh/authorized_keys
echo "#{ssh_pub_key}" >> /home/vagrant/.ssh/authorized_keys
echo "#{ssh_pub_key}" >> /root/.ssh/authorized_keys
SHELL
rescue
puts "No hay claves publicas en el HOME de su pc"
s.inline = "echo OK sin claves publicas"
end
end
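# A hedged variant of the provisioner above (assumption: some hosts only have an
# ed25519 key); the hard-coded id_rsa.pub path could be replaced by the first key found:
#
# key_file = Dir.glob("#{Dir.home}/.ssh/id_{rsa,ed25519}.pub").first
# ssh_pub_key = File.readlines(key_file).first.strip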
config.vm.provision "actualiza", type: "shell" do |s|
s.privileged = false
s.inline = <<-SHELL
# enable edge: as of 2021-01-13 LXD is only available in edge
echo "http://dl-cdn.alpinelinux.org/alpine/edge/main" | sudo tee /etc/apk/repositories
echo "http://dl-cdn.alpinelinux.org/alpine/edge/community" | sudo tee -a /etc/apk/repositories
echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" | sudo tee -a /etc/apk/repositories
sudo apk update
sudo apk upgrade --available && sync
sudo apk add virtualbox-guest-additions virtualbox-guest-modules-virt
cat /etc/alpine-release
SHELL
end
config.vm.provision "instala_lxd", type: "shell" do |s|
s.privileged = false
s.inline = <<-SHELL
# Install LXD using apk:
sudo apk add lxd dbus jq sshpass
# If you plan to run systemd based Linux distributions (Debian, Ubuntu, etc.), add this to /etc/conf.d/lxc:
echo "systemd_container=yes" | sudo tee -a /etc/conf.d/lxc
# Enable and start the lxd service:
sudo rc-update add lxd
sudo rc-service lxd start
sudo rc-update add dbus
sudo rc-service dbus start
# We’ll need to enable and start the cgroups service
sudo rc-update add cgroups
sudo rc-service cgroups start
# Add cgfs to pam.d/system-login:
echo "session optional pam_cgfs.so -c freezer,memory,name=systemd,unified" | sudo tee -a /etc/pam.d/system-login
# Add user and group idmaps to LXC config:
echo "lxc.idmap = u 0 100000 65536" | sudo tee -a /etc/lxc/default.conf
echo "lxc.idmap = g 0 100000 65536" | sudo tee -a /etc/lxc/default.conf
# Add root (and other users as well) subuid and subgid:
echo "root:100000:65536" | sudo tee -a /etc/subuid
echo "root:100000:65536" | sudo tee -a /etc/subgid
SHELL
end
config.vm.provision "instala_rclocal", type: "shell" do |s| # https://stackoverflow.com/a/48931432
s.privileged = true
s.inline = <<-SHELL
rc-update add local default
cat <<-RCLOCAL | sed -e 's_^ __' > /etc/local.d/lxdinit.start
#!/bin/sh
# This must run AFTER the VM has been rebooted
#
# Finally, initialize LXD:
lxd init --auto
addgroup vagrant lxd
chgrp lxd /var/lib/lxd/unix.socket
if [ -n "${http_proxy}" ] || [ -n "${https_proxy}" ]
then
# so that LXD can download images
lxc config set core.proxy_http "${http_proxy}"
lxc config set core.proxy_https "${https_proxy}"
lxc config set core.proxy_ignore_hosts "${no_proxy}"
# so that containers in the default profile can reach the Internet
lxc profile set default environment.http_proxy "${http_proxy}"
lxc profile set default environment.https_proxy "${https_proxy}"
lxc profile set default environment.no_proxy "${no_proxy}"
fi
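# A hedged smoke test once LXD is initialized (the image alias is an assumption;
# any image from the default "images:" remote would do):
# lxc launch images:alpine/3.12 smoke-test && lxc list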
rm /etc/local.d/lxdinit.start
RCLOCAL
echo "======== SE APAGA LA VM === ejecute 'vagrant up' para levantarla nuevamente"
chmod o+x /etc/local.d/lxdinit.start
shutdown --poweroff now
SHELL
end
end