
diff --git a/azure/terraform/admin.tf b/azure/terraform/admin.tf
new file mode 100644
index 0000000..eff6c2a
--- /dev/null
+++ b/azure/terraform/admin.tf
@@ -0,0 +1,185 @@
+# Define the resources for the admin nodes,
+# matching what we name elsewhere "euwest-${resource}"
+variable "backup_servers" {
+ default = 1
+}
+
+variable "backup_disks_per_server" {
+ default = 1
+}
+
+# Size in GB
+variable "backup_disk_size" {
+ default = 200
+}
+
+locals {
+ backup_servers = {
+ for i in range(var.backup_servers) :
+ format("backup%02d", i + 1) => {
+ backupdisks = {
+ for i in range(var.backup_disks_per_server) :
+ format("datadisk%02d", i + 1) => {
+ lun = i + 1
+ path = format("/dev/disk/azure/scsi1/lun%d", i + 1)
+ }
+ }
+ }
+ }
+}
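+# With the defaults above (one server, one disk), local.backup_servers
+# evaluates to (illustrative):
+# {
+#   backup01 = {
+#     backupdisks = {
+#       datadisk01 = { lun = 1, path = "/dev/disk/azure/scsi1/lun1" }
+#     }
+#   }
+# }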
+
+resource "azurerm_resource_group" "euwest-admin" {
+ name = "euwest-admin"
+ location = "westeurope"
+
+ tags = {
+ environment = "SWH Admin"
+ }
+}
+
+resource "azurerm_network_interface" "backup-interface" {
+ for_each = local.backup_servers
+
+ name = format("%s-interface", each.key)
+ location = "westeurope"
+ resource_group_name = azurerm_resource_group.euwest-admin.name
+
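+ # Note: this static address is shared by every for_each instance; fine for
+ # the default single backup server, but additional servers would each need
+ # a distinct private_ip_address.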
+ ip_configuration {
+ name = "backupNicConfiguration"
+ subnet_id = data.azurerm_subnet.default.id
+ public_ip_address_id = ""
+ private_ip_address = "192.168.200.50"
+ private_ip_address_allocation = "Static"
+ }
+}
+
+resource "azurerm_network_interface_security_group_association" "backup-interface-sga" {
+ for_each = local.backup_servers
+
+ network_interface_id = azurerm_network_interface.backup-interface[each.key].id
+ network_security_group_id = data.azurerm_network_security_group.worker-nsg.id
+}
+
+resource "azurerm_virtual_machine" "backup-server" {
+ for_each = local.backup_servers
+
+ name = each.key
+ location = "westeurope"
+ resource_group_name = azurerm_resource_group.euwest-admin.name
+ network_interface_ids = [azurerm_network_interface.backup-interface[each.key].id]
+ vm_size = "Standard_B2s"
+
+ delete_os_disk_on_termination = true
+ delete_data_disks_on_termination = false
+
+ boot_diagnostics {
+ enabled = true
+ storage_uri = var.boot_diagnostics_uri
+ }
+
+ storage_os_disk {
+ name = format("%s-osdisk", each.key)
+ caching = "None"
+ create_option = "FromImage"
+ disk_size_gb = 32
+ managed_disk_type = "Standard_LRS"
+ }
+
+ storage_image_reference {
+ publisher = "debian"
+ offer = "debian-11"
+ sku = "11"
+ version = "latest"
+ }
+
+ os_profile {
+ computer_name = each.key
+ admin_username = var.user_admin
+ }
+
+ os_profile_linux_config {
+ disable_password_authentication = true
+ ssh_keys {
+ path = "/home/${var.user_admin}/.ssh/authorized_keys"
+ key_data = var.ssh_key_data_ardumont
+ }
+ ssh_keys {
+ path = "/home/${var.user_admin}/.ssh/authorized_keys"
+ key_data = var.ssh_key_data_olasd
+ }
+ ssh_keys {
+ path = "/home/${var.user_admin}/.ssh/authorized_keys"
+ key_data = var.ssh_key_data_vsellier
+ }
+ }
+
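+ # Illustrative: with the default variables this dynamic block emits a
+ # single data disk named "backup01-datadisk01" attached on LUN 1.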
+ dynamic "storage_data_disk" {
+ for_each = each.value.backupdisks
+
+ content {
+ name = format("%s-%s", each.key, storage_data_disk.key)
+ caching = "None"
+ create_option = "Empty"
+ managed_disk_type = "Standard_LRS"
+ disk_size_gb = var.backup_disk_size
+ lun = storage_data_disk.value.lun
+ }
+ }
+
+ # Configure the root user
+ provisioner "remote-exec" {
+ inline = [
+ "sudo mkdir -p /root/.ssh", # just in case
+ # Remove the content populated by the Azure provisioning,
+ # which blocks connecting as root
+ "sudo rm -v /root/.ssh/authorized_keys",
+ "echo ${var.ssh_key_data_ardumont} | sudo tee -a /root/.ssh/authorized_keys",
+ "echo ${var.ssh_key_data_olasd} | sudo tee -a /root/.ssh/authorized_keys",
+ "echo ${var.ssh_key_data_vsellier} | sudo tee -a /root/.ssh/authorized_keys",
+ ]
+
+ connection {
+ type = "ssh"
+ user = var.user_admin
+ host = azurerm_network_interface.backup-interface[each.key].private_ip_address
+ }
+ }
+
+ # Copy the initial configuration script
+ provisioner "file" {
+ content = templatefile("templates/firstboot.sh.tpl", {
+ hostname = each.key
+ fqdn = format("%s.euwest.azure.internal.softwareheritage.org", each.key),
+ ip_address = azurerm_network_interface.backup-interface[each.key].private_ip_address,
+ facter_subnet = "azure_euwest"
+ facter_deployment = "admin"
+ disk_setup = {}
+ })
+ destination = var.firstboot_script
+
+ connection {
+ type = "ssh"
+ user = "root"
+ host = azurerm_network_interface.backup-interface[each.key].private_ip_address
+ }
+ }
+
+ # Remove the tmpadmin user and run the firstboot script
+ provisioner "remote-exec" {
+ inline = [
+ "userdel -f ${var.user_admin}",
+ "chmod +x ${var.firstboot_script}",
+ "cat ${var.firstboot_script}",
+ var.firstboot_script,
+ ]
+ connection {
+ type = "ssh"
+ user = "root"
+ host = azurerm_network_interface.backup-interface[each.key].private_ip_address
+ }
+ }
+
+ tags = {
+ environment = "Backup"
+ }
+}
diff --git a/azure/terraform/init.tf b/azure/terraform/init.tf
index 2292f2b..5b98dd8 100644
--- a/azure/terraform/init.tf
+++ b/azure/terraform/init.tf
@@ -1,73 +1,78 @@
# Keyword use:
# - provider: Define the provider(s)
# - data: Retrieve data information to be used within the file
# - resource: Define resource and create/update
terraform {
required_version = ">= 0.13"
backend "azurerm" {
resource_group_name = "euwest-admin"
storage_account_name = "swhterraform"
container_name = "tfstate"
key = "prod.azure.terraform.tfstate"
}
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "=2.97.0"
}
}
}
# Configure the Microsoft Azure Provider
# Empty if using the `az login` tool
provider "azurerm" {
features {}
}
# Reuse the network security group as currently defined
data "azurerm_network_security_group" "worker-nsg" {
name = "worker-nsg"
resource_group_name = "swh-resource"
}
# Same for the subnet
data "azurerm_subnet" "default" {
name = "default"
virtual_network_name = "swh-vnet"
resource_group_name = "swh-resource"
}
# Same for the resource group used by the storage servers
data "azurerm_resource_group" "euwest-servers" {
name = "euwest-servers"
}
variable "firstboot_script" {
type = string
default = "/root/firstboot.sh"
}
variable "ssh_key_data_ardumont" {
type = string
default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZarzgHrzUYspvrgSI6fszrALo92BDys7QOkJgUfZa9t9m4g7dUANNtwBiqIbqijAQPmB1zKgG6QTZC5rJkRy6KqXCW/+Qeedw/FWIbuI7jOD5WxnglbEQgvPkkB8kf1xIF7icRfWcQmK2je/3sFd9yS4/+jftNMPPXkBCxYm74onMenyllA1akA8FLyujLu6MNA1D8iLLXvz6pBDTT4GZ5/bm3vSE6Go8Xbuyu4SCtYZSHaHC2lXZ6Hhi6dbli4d3OwkUWz+YhFGaEra5Fx45Iig4UCL6kXPkvL/oSc9KGerpT//Xj9qz1K7p/IrBS8+eA4X69bHYYV0UZKDADZSn ardumont@yavin4"
}
variable "ssh_key_data_douardda" {
type = string
default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCoON7De2Bx03owpZfzbOyucZTmyQdm7F+LP4D4H9EyOFxtyMpjH2S9Ve/JvMoFIWGQQlXSkYzRv63Z0BzPLKD2NsYgomcjOLdw1Baxnv8VOH+Q01g4B3cabcP2LMVjerHt/KRkY3E6dnKLQGE5UiER/taQ7KazAwvu89nUd4BJsV43rJ3X3DtFEfH3lR4ZEIgFyPUkVemQAjBhueFmN3w8debOdr7t9cBpnYvYKzLQN+G/kQVFc+fgs+fFOtOv+Az9kTXChfLs5pKPBm+MuGxz4gS3fPiAjY9cN6vGzr7ZNkCRUSUjJ10Hlm7Gf2EN8f+k6iSR4CPeixDcZ+scbCg4dCORqTsliSQzUORIJED9fbUR6bBjF4rRwm5GvnXx5ZTToWDJu0PSHYOkomqffp30wqvAvs6gLb+bG1daYsOLp+wYru3q09J9zUAA8vNXoWYaERFxgwsmsf57t8+JevUuePJGUC45asHjQh/ON1H5PDXtULmeD1GKkjqyaS7SBNbpOWgQb21l3pwhLet3Mq3TJmxVqzGMDnYvQMUCkiPdZq2pDplzfpDpOKLaDg8q82rR5+/tAfB4P2Z9RCOqnMLRcQk9AluTyO1D472Mkp+v5VA4di0eTWZ0tuzwYJEft0OVo+QOVTslCGsyGiEUoOcHzkrdgsT5uQziyAfgTMSuiw== david.douard@sdfa3.org"
}
variable "ssh_key_data_olasd" {
type = string
default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ1TCpfzrvxLhEMhxjbxqPDCwY0nazIr1cyIbhGD2bUdAbZqVMdNtr7MeDnlLIKrIPJWuvltauvLNkYU0iLc1jMntdBCBM3hgXjmTyDtc8XvXseeBp5tDqccYNR/cnDUuweNcL5tfeu5kzaAg3DFi5Dsncs5hQK5KQ8CPKWcacPjEk4ir9gdFrtKG1rZmg/wi7YbfxrJYWzb171hdV13gSgyXdsG5UAFsNyxsKSztulcLKxvbmDgYbzytr38FK2udRk7WuqPbtEAW1zV4yrBXBSB/uw8EAMi+wwvLTwyUcEl4u0CTlhREljUx8LhYrsQUCrBcmoPAmlnLCD5Q9XrGH nicolasd@darboux id_rsa.inria.pub"
}
+variable "ssh_key_data_vsellier" {
+ type = string
+ default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL2n3ayVSz7zyG89lsdPS4EyIf29FSNX6XwFEz03xoLuHTOPoyq4z2gkuIaBuIWIPJCwhrhJJvn0KqEIJ2yIOF565zjTI/121VTjSZrwpLFBO5QQFGQB1fY4wVg8VYeVxZLeqbGQAdSAvVrpAAJdoMF0Hwv+i/dVC1SVLj3QrAMft6l5G9iz9OM3DwmoNkCPf+rxbqiiJB2ojMbzSIUfOiE5svL5+z811JOYz62ZAEmVAY22H96Ez0R5uCMQi3pdHvr16DogsXXlhA6zBg0p8sFKOLpfHDjah9pnpI+twX14//2ydw303M3W/4FcXZ1bD4kSjEBjCky6GkrM9MCW6f vsellier@swh-vs1"
+}
+
variable "user_admin" {
type = string
default = "tmpadmin"
}
variable "boot_diagnostics_uri" {
default = "https://swhresourcediag966.blob.core.windows.net"
}
diff --git a/azure/terraform/storage.tf b/azure/terraform/storage.tf
index 417ccc4..ac7c59f 100644
--- a/azure/terraform/storage.tf
+++ b/azure/terraform/storage.tf
@@ -1,136 +1,138 @@
# Server numbering starts at 1: storage01, storage02, ...
variable "storage_servers" {
default = 1
}
variable "storage_disk_size" {
default = 30720
}
locals {
storage_servers = {
for i in range(var.storage_servers) :
format("storage%02d", i + 1) => {
datadisks = {}
}
}
}
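# With the default of one server, local.storage_servers evaluates to
# { storage01 = { datadisks = {} } } (illustrative only).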
resource "azurerm_network_interface" "storage-interface" {
for_each = local.storage_servers
name = format("%s-interface", each.key)
location = "westeurope"
resource_group_name = "euwest-servers"
ip_configuration {
name = "storageNicConfiguration"
subnet_id = data.azurerm_subnet.default.id
public_ip_address_id = ""
private_ip_address_allocation = "Dynamic"
}
}
resource "azurerm_network_interface_security_group_association" "storage-interface-sga" {
for_each = local.storage_servers
network_interface_id = azurerm_network_interface.storage-interface[each.key].id
network_security_group_id = data.azurerm_network_security_group.worker-nsg.id
}
resource "azurerm_virtual_machine" "storage-server" {
for_each = local.storage_servers
name = each.key
location = "westeurope"
resource_group_name = "euwest-servers"
network_interface_ids = [azurerm_network_interface.storage-interface[each.key].id]
vm_size = "Standard_D8s_v3"
boot_diagnostics {
enabled = true
storage_uri = var.boot_diagnostics_uri
}
storage_os_disk {
name = format("%s-osdisk", each.key)
caching = "ReadWrite"
create_option = "FromImage"
managed_disk_type = "Premium_LRS"
}
storage_image_reference {
publisher = "debian"
offer = "debian-10"
sku = "10"
version = "latest"
}
os_profile {
computer_name = each.key
admin_username = var.user_admin
}
os_profile_linux_config {
disable_password_authentication = true
ssh_keys {
path = "/home/${var.user_admin}/.ssh/authorized_keys"
key_data = var.ssh_key_data_ardumont
}
ssh_keys {
path = "/home/${var.user_admin}/.ssh/authorized_keys"
key_data = var.ssh_key_data_olasd
}
}
provisioner "remote-exec" {
inline = [
"sudo mkdir /root/.ssh",
"echo ${var.ssh_key_data_ardumont} | sudo tee -a /root/.ssh/authorized_keys",
"echo ${var.ssh_key_data_olasd} | sudo tee -a /root/.ssh/authorized_keys",
]
connection {
type = "ssh"
user = var.user_admin
host = azurerm_network_interface.storage-interface[each.key].private_ip_address
}
}
provisioner "file" {
content = templatefile("templates/firstboot.sh.tpl", {
hostname = each.key
fqdn = format("%s.euwest.azure.internal.softwareheritage.org", each.key),
ip_address = azurerm_network_interface.storage-interface[each.key].private_ip_address,
- facter_location = "azure_euwest"
+ facter_subnet = "azure_euwest"
+ facter_deployment = "production"
+
disk_setup = {}
})
destination = var.firstboot_script
connection {
type = "ssh"
user = "root"
host = azurerm_network_interface.storage-interface[each.key].private_ip_address
}
}
provisioner "remote-exec" {
inline = [
"userdel -f ${var.user_admin}",
"chmod +x ${var.firstboot_script}",
"cat ${var.firstboot_script}",
var.firstboot_script,
]
connection {
type = "ssh"
user = "root"
host = azurerm_network_interface.storage-interface[each.key].private_ip_address
}
}
tags = {
environment = "Storage"
}
}
diff --git a/azure/terraform/templates/firstboot.sh.tpl b/azure/terraform/templates/firstboot.sh.tpl
index c7e2f90..100cbfc 100644
--- a/azure/terraform/templates/firstboot.sh.tpl
+++ b/azure/terraform/templates/firstboot.sh.tpl
@@ -1,129 +1,132 @@
#!/bin/bash
set -ex
cd /
export DEBIAN_FRONTEND=noninteractive
PUPPET_MASTER=pergamon.internal.softwareheritage.org
# Variables provided by terraform
HOSTNAME=${hostname}
FQDN=${fqdn}
IP=${ip_address}
-FACTER_LOCATION=${facter_location}
+FACTER_DEPLOYMENT=${facter_deployment}
+FACTER_SUBNET=${facter_subnet}
# Handle base system configuration
apt-get -y install lsb-release
debian_suite=$(lsb_release -cs)
# Enable backports
cat > /etc/apt/sources.list.d/backports.list <<EOF
deb http://deb.debian.org/debian $${debian_suite}-backports main
EOF
# Update packages
apt-get update
apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade
# Properly set hostname and FQDN
echo $HOSTNAME > /etc/hostname
hostnamectl set-hostname $HOSTNAME
echo "$IP $FQDN $HOSTNAME" >> /etc/hosts
# Handle disk configuration
%{ for disk in try(disk_setup.disks, []) }
# Make one large partition on ${disk.base_disk}
echo ';' | sudo sfdisk --label gpt ${disk.base_disk}
%{ if try(disk.filesystem, "") != "" }
mkfs.${disk.filesystem} ${disk.base_disk}1 ${try(disk.mkfs_options, "")}
mkdir -p ${disk.mountpoint}
uuid=$(blkid -o value -s UUID ${disk.base_disk}1)
echo "UUID=\"$uuid\" ${disk.mountpoint} ${disk.filesystem} ${disk.mount_options} 0 0" >> /etc/fstab
%{ endif }
%{ endfor }
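# Illustrative: a hypothetical disk_setup such as
#   { disks = [{ base_disk = "/dev/sdc", filesystem = "ext4",
#                mountpoint = "/srv/backup", mount_options = "defaults" }] }
# would render an sfdisk call on /dev/sdc, an mkfs.ext4 on /dev/sdc1,
# and an fstab entry mounting it on /srv/backup.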
%{ if length(try(disk_setup.raids, [])) != 0 }
apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --no-install-recommends install mdadm
%{ for raid in disk_setup.raids }
mdadm --create ${raid.path} \
--level=${raid.level} \
--raid-devices ${length(raid.members)} \
%{ if raid.chunk != "" }--chunk=${raid.chunk}%{ endif } \
%{~ for member in raid.members } ${member} %{ endfor ~}
%{ if try(raid.filesystem, "") != "" }
mkfs.${raid.filesystem} ${raid.path} ${try(raid.mkfs_options, "")}
mkdir -p ${raid.mountpoint}
uuid=$(blkid -o value -s UUID ${raid.path})
echo "UUID=\"$uuid\" ${raid.mountpoint} ${raid.filesystem} ${raid.mount_options} 0 0" >> /etc/fstab
%{ endif }
%{ endfor }
/usr/share/mdadm/mkconf > /etc/mdadm/mdadm.conf
update-initramfs -k all -u
%{ endif }
%{ if length(try(disk_setup.lvm_vgs, [])) != 0 }
apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --no-install-recommends install lvm2
%{ for lvm_vg in disk_setup.lvm_vgs }
vgcreate ${lvm_vg.name} ${join(" ", lvm_vg.pvs)}
%{ for lvm_lv in lvm_vg.lvs }
lvcreate ${lvm_vg.name} -n ${lvm_lv.name} -l ${lvm_lv.extents}
%{ if try(lvm_lv.filesystem, "") != "" }
mkfs.${lvm_lv.filesystem} /dev/${lvm_vg.name}/${lvm_lv.name} ${try(lvm_lv.mkfs_options, "")}
mkdir -p ${lvm_lv.mountpoint}
uuid=$(blkid -o value -s UUID /dev/${lvm_vg.name}/${lvm_lv.name})
echo "UUID=\"$uuid\" ${lvm_lv.mountpoint} ${lvm_lv.filesystem} ${lvm_lv.mount_options} 0 0" >> /etc/fstab
%{ endif }
%{ endfor }
%{ endfor }
update-initramfs -k all -u
%{ endif }
mount -a
# install puppet dependencies
apt-get -y -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" --no-install-recommends install puppet gnupg
# keep the service stopped for now, as some more setup needs to be installed first
service puppet stop
systemctl disable puppet.service
-# Install the location fact as that is needed by our puppet manifests
+# Install the deployment and subnet facts as they are needed by our puppet manifests
mkdir -p /etc/facter/facts.d
-echo location=$FACTER_LOCATION > /etc/facter/facts.d/location.txt
+echo deployment=$FACTER_DEPLOYMENT > /etc/facter/facts.d/deployment.txt
+echo subnet=$FACTER_SUBNET > /etc/facter/facts.d/subnet.txt
# first time around, this will:
# - update the node's puppet agent configuration defining the puppet master
# - generate the certificates with the appropriate fqdn
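# Note: with --detailed-exitcodes, puppet exits 2 for "success, changes
# applied", which is why only exit code 2 is accepted below.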
puppet_exit=0
puppet agent --server $PUPPET_MASTER --waitforcert 60 --test --vardir /var/lib/puppet --detailed-exitcodes || puppet_exit=$?
if [ $puppet_exit -ne 2 ]; then
exit $puppet_exit
fi
# reboot
