diff --git a/proxmox/terraform/README.md b/proxmox/terraform/README.md
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/README.md
@@ -0,0 +1,68 @@
+# What
+
+Terraform lets us declare our infrastructure as code. Through a (so far
+unofficial) provider plugin, we can provision vms on our rocq
+infrastructure (proxmox) in a uniform way.
+
+# The road so far
+
+## Prepare workstation
+
+See prepare-workstation.md
+
+## setup.sh
+
+Create a `setup.sh` file holding the PM_{USER,PASS} information:
+
+```
+export PM_USER=<username>@pam
+export PM_PASS=<password>
+```
+
+Source it in your current shell session:
+
+```
+source setup.sh
+```
+
+## Provision new vm
+
+```
+terraform init
+terraform apply
+```
+
+# Details
+
+The provisioning bootstraps the vms declared in `.tf` files. It relies on a
+base template (template-debian-9, template-debian-10) installed on the
+hypervisor. Instructions to build such a template are detailed in the
+`init-template.md` file.
+
+# Init
+
+This initializes your local copy with the necessary plugins:
+
+```
+terraform init
+```
+
+# Plan changes
+
+This reads all `*.tf` files present in the folder and computes a
+differential plan:
+
+```
+terraform plan
+```
+
+Note: It might be a good idea to first adapt the `variables.tf` file, for
+example the admin user and its associated public key (see the override
+example at the end of this file).
+
+# Apply changes
+
+Same as the previous command, except that it applies the diff to the infra
+(interactively):
+
+```
+terraform apply
+```
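+
+# Override variables
+
+Variables declared in `variables.tf` can also be overridden on the command
+line, without editing the file. A minimal sketch (the username is
+hypothetical):
+
+```
+terraform plan -var 'user_admin=jdoe'
+terraform apply -var 'user_admin=jdoe'
+```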
diff --git a/proxmox/terraform/init-template.md b/proxmox/terraform/init-template.md
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/init-template.md
@@ -0,0 +1,149 @@
+The following documentation explains the steps necessary to initialize a
+template vm.
+
+Expectations:
+
+- hypervisor: orsay (could be beaubourg, hypervisor3)
+- `/usr/bin/qm` available on the hypervisor
+
+Prepare vm template
+===================
+
+Connect to the hypervisor orsay (`ssh orsay`).
+
+Then, as root, retrieve the openstack images:
+
+```
+mkdir debian-10
+wget -O debian-10/debian-10-openstack-amd64.qcow2 \
+  https://cdimage.debian.org/cdimage/openstack/current/debian-10.0.1-20190708-openstack-amd64.qcow2
+wget -O debian-10/debian-10-openstack-amd64.qcow2.index \
+  https://cdimage.debian.org/cdimage/openstack/current/debian-10.0.1-20190708-openstack-amd64.qcow2.index
+mkdir debian-9
+wget -O debian-9/debian-9-openstack-amd64.qcow2 \
+  https://cloud.debian.org/images/cloud/OpenStack/current-9/debian-9-openstack-amd64.qcow2
+wget -O debian-9/debian-9-openstack-amd64.qcow2.index \
+  https://cloud.debian.org/images/cloud/OpenStack/current-9/debian-9-openstack-amd64.qcow2.index
+```
+
+Note:
+
+- Not shown here, but you should check the hashes of what you retrieved
+  from the internet (see the example below)
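+
+For instance, a sketch; compare the digests against the checksum file
+published alongside the images on the mirror:
+
+```
+sha512sum debian-10/debian-10-openstack-amd64.qcow2
+sha512sum debian-9/debian-9-openstack-amd64.qcow2
+```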
+
+Create vm
+---------
+
+```
+chmod +x init-template.sh
+./init-template.sh 9
+```
+
+This creates a basic vm with a basic login/password (root/test) so we can
+connect to it.
+
+Note: Implementation-wise, this uses an openstack debian image, which is
+cloud-init ready.
+
+Check image is working
+----------------------
+
+The rationale is to:
+
+- boot the vm
+- check some basic information (kernel, distribution, connection,
+  release, etc.)
+- adapt the vms slightly (dns resolver, ip, upgrade, etc.)
+
+### Start vm
+
+```
+qm start 9000
+```
+
+### Checks
+
+Log in through the console web-ui:
+
+- accessible from https://orsay.internal.softwareheritage.org:8006
+- view `datacenter`
+- unfold the hypervisor `orsay` menu
+- select the vm `9000`
+- click the `console` menu
+- log in as root with password test
+
+Checks:
+
+- linux kernel version (`uname -r`)
+- debian release (`cat /etc/os-release`)
+
+### Adaptations
+
+Update grub's timeout to 0 for a faster boot (as root):
+
+```
+sed -i 's/GRUB_TIMEOUT=5/GRUB_TIMEOUT=0/' /etc/default/grub
+update-grub
+```
+
+Then install some expected defaults (puppet agent present but disabled, a
+location fact, etc.):
+
+```
+apt update
+apt upgrade -y
+apt install -y puppet
+systemctl disable puppet.service
+mkdir -p /etc/facter/facts.d
+echo location=sesi_rocquencourt_staging > /etc/facter/facts.d/location.txt
+```
+
+### Remove cloud-init setup from vm
+
+```
+# stop vm
+qm stop 9000
+# remove cloud-init setup
+qm set 9000 --delete ciuser,cipassword,ipconfig0,nameserver
+```
+
+Template the image
+------------------
+
+When the vm is ready, we can use it as a base template for future
+clones:
+
+```
+qm template 9000
+```
+
+Clone image
+===========
+
+This is a tryout, referenced here to demonstrate a shortcoming. It is not
+necessary to do this yourself, as cloning will be taken care of by the
+proxmox provider.
+
+Sadly, only full clones work:
+
+```
+qm clone 9000 666 --name debian-10-tryout --full true
+```
+
+As in: fully clone the template 9000 into a new vm with id 666, named
+debian-10-tryout.
+
+Note (partial clone does not work):
+
+```
+root@orsay:/home/ardumont/proxmox# qm clone 9000 666 --name buster-tryout
+Linked clone feature is not supported for drive 'virtio0'
+```
+
+Note:
+
+- tested with all drive types: ide, sata, scsi, virtio
+- the only thing that worked was without a disk (but then no more os...)
+
+source
+======
diff --git a/proxmox/terraform/init-template.sh b/proxmox/terraform/init-template.sh
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/init-template.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -x
+set -e
+
+VERSION=${1:-9}
+NAME="template-debian-${VERSION}"
+IMG="debian-$VERSION/debian-$VERSION-openstack-amd64.qcow2"
+
+VM_ID="${VERSION}000"
+VM_DISK="vm-$VM_ID-disk-0"
+
+# create the vm
+qm create "$VM_ID" --memory 4096 --net0 virtio,bridge=vmbr0 --name "$NAME"
+# import the disk image to orsay-ssd-2018 (lots of space there)
+qm importdisk "$VM_ID" "$IMG" orsay-ssd-2018 --format qcow2
+# finally attach the new disk to the vm as a virtio drive
+qm set "$VM_ID" --scsihw virtio-scsi-pci --virtio0 "orsay-ssd-2018:$VM_DISK"
+# resizing the disk (+30G, the image itself is 2G) would increase the
+# clone time, so we skip it:
+# qm resize 9000 virtio0 +30G
+# configure a cdrom drive, which is used to pass the cloud-init data
+# to the vm
+qm set "$VM_ID" --ide2 orsay-ssd-2018:cloudinit
+# boot from disk only
+qm set "$VM_ID" --boot c --bootdisk virtio0
+# add a serial console (needed for cloud-init, which won't work otherwise)
+qm set "$VM_ID" --serial0 socket
+# set the number of sockets/cores
+qm set "$VM_ID" --sockets 2 --cores 1
+
+# temporary cloud-init setup (removed before templating, see
+# init-template.md)
+qm set "$VM_ID" --ciuser root
+qm set "$VM_ID" --cipassword test
+qm set "$VM_ID" --ipconfig0 "ip=192.168.100.125/24,gw=192.168.100.1"
+qm set "$VM_ID" --nameserver "192.168.100.29"
diff --git a/proxmox/terraform/prepare-workstation.md b/proxmox/terraform/prepare-workstation.md
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/prepare-workstation.md
@@ -0,0 +1,42 @@
+This describes the tooling required for the following to work.
+
+# terraform-provider-proxmox
+
+The go module to install:
+
+```
+git clone https://github.com/Telmate/terraform-provider-proxmox
+cd terraform-provider-proxmox
+
+# compile the terraform proxmox provider
+export GOPATH=`pwd`
+make setup
+make
+make install
+
+# install the plugin so that terraform actually sees it
+mkdir -p ~/.terraform.d/plugins/linux_amd64
+cp -v "$GOPATH"/bin/terraform-provi* ~/.terraform.d/plugins/linux_amd64/
+```
+
+At the end of this, `terraform init` within /proxmox/terraform/ should now
+work.
+
+Doc: https://github.com/Telmate/terraform-provider-proxmox/blob/master/README.md
+
+# Puppet provisioner
+
+The second part of the terraform setup is to delegate the machine setup to
+puppet. This needs other tools (bolt and some puppet modules).
+
+bolt:
+
+```
+wget https://apt.puppet.com/puppet-tools-release-stretch.deb
+sudo dpkg -i puppet-tools-release-stretch.deb
+sudo apt-get update
+sudo apt-get install puppet-bolt
+```
+
+(Even on buster, bolt ends up installed in /usr/local/bin/bolt)
+
+Docs: https://puppet.com/docs/bolt/latest/bolt_installing.html#concept-8499
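+
+To quickly check that both the provider plugin and bolt are in place (a
+sketch):
+
+```
+ls ~/.terraform.d/plugins/linux_amd64/
+bolt --version
+```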
diff --git a/proxmox/terraform/storage.tf b/proxmox/terraform/storage.tf
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/storage.tf
@@ -0,0 +1,74 @@
+# Keyword use:
+# - provider: define the provider(s)
+# - data: retrieve data information to be used within the file
+# - resource: define a resource and create/update it
+
+provider "proxmox" {
+  pm_tls_insecure = true
+  pm_api_url = "https://orsay.internal.softwareheritage.org:8006/api2/json"
+  # credentials (PM_USER/PM_PASS) are read from the environment;
+  # in a shell (see README): source ./setup.sh
+}
+
+resource "proxmox_vm_qemu" "storage0" {
+  name = "storage0"
+  desc = "swh storage node"
+  # hypervisor on which to create the vm
+  target_node = "orsay"
+  # see init-template.md for the bootstrap of the template vm
+  clone = "template-debian-10"
+  # linux kernel 2.6 (and newer)
+  qemu_os = "l26"
+  # generic setup
+  sockets = 1
+  cores = 2
+  memory = 8192
+  onboot = true
+
+  #### cloud-init setup
+  # to actually set some information per os_type (values: ubuntu, centos,
+  # cloud-init)
+  os_type = "cloud-init"
+  # ciuser - user name to change ssh keys and password for, instead of the
+  # image's configured default user
+  ciuser = "root"
+  # searchdomain - sets DNS search domains for a container
+  searchdomain = "internal.staging.swh.network"
+  # nameserver - sets the DNS server IP address for a container
+  nameserver = "192.168.100.29"
+  # sshkeys - public ssh keys, one per line
+  sshkeys = "${var.ssh_key_data}"
+  # FIXME: When T1872 lands, this will need to be updated
+  # ipconfig0 - [gw=<GatewayIPv4>] [,gw6=<GatewayIPv6>]
+  # [,ip=<IPv4Format/CIDR>] [,ip6=<IPv6Format/CIDR>]
+  ipconfig0 = "ip=192.168.100.125/24,gw=192.168.100.1"
+  ssh_user = "${var.user_admin}"
+
+  disk {
+    id = 0
+    type = "virtio"
+    storage = "orsay-ssd-2018"
+    storage_type = "ssd"
+    size = "32G"
+  }
+
+  network {
+    id = 0
+    model = "virtio"
+    bridge = "vmbr0"
+    macaddr = "3E:70:84:BF:9B:F5"
+  }
+
+  # # doc: https://www.terraform.io/docs/provisioners/puppet.html
+  # provisioner "puppet" {
+  #   # resource's os type
+  #   os_type = "linux"
+  #   # puppet master's fqdn
+  #   server = "pergamon.internal.softwareheritage.org"
+  #   # user bolt connects as
+  #   server_user = "${var.user_admin}"
+  #   # puppet environment (matches the swh-site repository's git branch)
+  #   environment = "new_staging"
+  #   # certificate's common name to use
+  #   certname = "storage0.internal.staging.swh.network"
+  #   # connection to use
+  #   connection = {
+  #     type = "ssh"
+  #     user = "${var.user_admin}"
+  #     password = "${var.root_password}"
+  #   }
+  # }
+}
diff --git a/proxmox/terraform/variables.tf b/proxmox/terraform/variables.tf
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/variables.tf
@@ -0,0 +1,10 @@
+# `pass search terraform-proxmox` in the credential store
+variable "ssh_key_data" {
+  type = "string"
+  default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform"
+}
+
+variable "user_admin" {
+  type = "string"
+  default = "root"
+}
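+
+# Note: a sketch of a declaration that is otherwise missing. The
+# commented-out puppet provisioner in storage.tf references
+# "${var.root_password}", which is declared nowhere; uncomment this
+# together with the provisioner (the value lives in the credential
+# store, do not commit it).
+# variable "root_password" {
+#   type = "string"
+# }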