diff --git a/azure/terraform/admin.tf b/azure/terraform/admin.tf new file mode 100644 --- /dev/null +++ b/azure/terraform/admin.tf @@ -0,0 +1,184 @@ +# Define a new resource for the admin nodes +# matching what we name elsewhere "euwest-${resource}" +variable "backup_servers" { + default = 1 +} + +variable "backup_disks_per_server" { + default = 1 +} + +# Size in GB +variable "backup_disk_size" { + default = 200 +} + +locals { + backup_servers = { + for i in range(var.backup_servers) : + format("backup%02d", i + 1) => { + backupdisks = { + for i in range(var.backup_disks_per_server) : + format("datadisk%02d", i + 1) => { + lun = i + 1 + path = format("/dev/disk/azure/scsi1/lun%d", i + 1) + } + } + } + } +} + +resource "azurerm_resource_group" "euwest-admin" { + name = "euwest-admin" + location = "westeurope" + + tags = { + environment = "SWH Admin" + } +} + +resource "azurerm_network_interface" "backup-interface" { + for_each = local.backup_servers + + name = format("%s-interface", each.key) + location = "westeurope" + resource_group_name = azurerm_resource_group.euwest-admin.name + + ip_configuration { + name = "backupNicConfiguration" + subnet_id = data.azurerm_subnet.default.id + public_ip_address_id = "" + private_ip_address_allocation = "Dynamic" + } +} + +resource "azurerm_network_interface_security_group_association" "backup-interface-sga" { + for_each = local.backup_servers + + network_interface_id = azurerm_network_interface.backup-interface[each.key].id + network_security_group_id = data.azurerm_network_security_group.worker-nsg.id +} + +resource "azurerm_virtual_machine" "backup-server" { + for_each = local.backup_servers + + name = each.key + location = "westeurope" + resource_group_name = azurerm_resource_group.euwest-admin.name + network_interface_ids = [azurerm_network_interface.backup-interface[each.key].id] + vm_size = "Standard_B2s" + + delete_os_disk_on_termination = true + delete_data_disks_on_termination = false + + boot_diagnostics { + 
enabled = true + storage_uri = var.boot_diagnostics_uri + } + + storage_os_disk { + name = format("%s-osdisk", each.key) + caching = "None" + create_option = "FromImage" + disk_size_gb = 32 + managed_disk_type = "Standard_LRS" + } + + storage_image_reference { + publisher = "debian" + offer = "debian-11" + sku = "11" + version = "latest" + } + + os_profile { + computer_name = each.key + admin_username = var.user_admin + } + + os_profile_linux_config { + disable_password_authentication = true + ssh_keys { + path = "/home/${var.user_admin}/.ssh/authorized_keys" + key_data = var.ssh_key_data_ardumont + } + ssh_keys { + path = "/home/${var.user_admin}/.ssh/authorized_keys" + key_data = var.ssh_key_data_olasd + } + ssh_keys { + path = "/home/${var.user_admin}/.ssh/authorized_keys" + key_data = var.ssh_key_data_vsellier + } + } + + dynamic "storage_data_disk" { + for_each = each.value.backupdisks + + content { + name = format("%s-%s", each.key, storage_data_disk.key) + caching = "None" + create_option = "Empty" + managed_disk_type = "Standard_LRS" + disk_size_gb = var.backup_disk_size + lun = storage_data_disk.value.lun + } + } + + # Configuring the root user + provisioner "remote-exec" { + inline = [ + "sudo mkdir -p /root/.ssh", # just in case + # Remove the content populated by the azure provisioning + # blocking the connection as root + "sudo rm -v /root/.ssh/authorized_keys", + "echo ${var.ssh_key_data_ardumont} | sudo tee -a /root/.ssh/authorized_keys", + "echo ${var.ssh_key_data_olasd} | sudo tee -a /root/.ssh/authorized_keys", + "echo ${var.ssh_key_data_vsellier} | sudo tee -a /root/.ssh/authorized_keys", + ] + + connection { + type = "ssh" + user = var.user_admin + host = azurerm_network_interface.backup-interface[each.key].private_ip_address + } + } + + # Copy the initial configuration script + provisioner "file" { + content = templatefile("templates/firstboot.sh.tpl", { + hostname = each.key + fqdn = format("%s.euwest.azure.internal.softwareheritage.org", 
each.key), + ip_address = azurerm_network_interface.backup-interface[each.key].private_ip_address, + facter_subnet = "azure_euwest" + facter_deployment = "admin" + disk_setup = {} + }) + destination = var.firstboot_script + + connection { + type = "ssh" + user = "root" + host = azurerm_network_interface.backup-interface[each.key].private_ip_address + } + } + + # Remove the tmpadmin user + provisioner "remote-exec" { + inline = [ + "userdel -f ${var.user_admin}", + "chmod +x ${var.firstboot_script}", + "cat ${var.firstboot_script}", + var.firstboot_script, + ] + connection { + type = "ssh" + user = "root" + host = azurerm_network_interface.backup-interface[each.key].private_ip_address + } + } + + tags = { + environment = "Backup" + } +} diff --git a/azure/terraform/init.tf b/azure/terraform/init.tf --- a/azure/terraform/init.tf +++ b/azure/terraform/init.tf @@ -63,6 +63,11 @@ default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ1TCpfzrvxLhEMhxjbxqPDCwY0nazIr1cyIbhGD2bUdAbZqVMdNtr7MeDnlLIKrIPJWuvltauvLNkYU0iLc1jMntdBCBM3hgXjmTyDtc8XvXseeBp5tDqccYNR/cnDUuweNcL5tfeu5kzaAg3DFi5Dsncs5hQK5KQ8CPKWcacPjEk4ir9gdFrtKG1rZmg/wi7YbfxrJYWzb171hdV13gSgyXdsG5UAFsNyxsKSztulcLKxvbmDgYbzytr38FK2udRk7WuqPbtEAW1zV4yrBXBSB/uw8EAMi+wwvLTwyUcEl4u0CTlhREljUx8LhYrsQUCrBcmoPAmlnLCD5Q9XrGH nicolasd@darboux id_rsa.inria.pub" } +variable "ssh_key_data_vsellier" { + type = string + default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDL2n3ayVSz7zyG89lsdPS4EyIf29FSNX6XwFEz03xoLuHTOPoyq4z2gkuIaBuIWIPJCwhrhJJvn0KqEIJ2yIOF565zjTI/121VTjSZrwpLFBO5QQFGQB1fY4wVg8VYeVxZLeqbGQAdSAvVrpAAJdoMF0Hwv+i/dVC1SVLj3QrAMft6l5G9iz9OM3DwmoNkCPf+rxbqiiJB2ojMbzSIUfOiE5svL5+z811JOYz62ZAEmVAY22H96Ez0R5uCMQi3pdHvr16DogsXXlhA6zBg0p8sFKOLpfHDjah9pnpI+twX14//2ydw303M3W/4FcXZ1bD4kSjEBjCky6GkrM9MCW6f vsellier@swh-vs1" +} + variable "user_admin" { type = string default = "tmpadmin" diff --git a/azure/terraform/storage.tf b/azure/terraform/storage.tf --- a/azure/terraform/storage.tf +++ b/azure/terraform/storage.tf @@ -104,7 +104,9 @@ 
hostname = each.key fqdn = format("%s.euwest.azure.internal.softwareheritage.org", each.key), ip_address = azurerm_network_interface.storage-interface[each.key].private_ip_address, - facter_location = "azure_euwest" + facter_subnet = "azure_euwest" + facter_deployment = "production" + disk_setup = {} }) destination = var.firstboot_script diff --git a/azure/terraform/templates/firstboot.sh.tpl b/azure/terraform/templates/firstboot.sh.tpl --- a/azure/terraform/templates/firstboot.sh.tpl +++ b/azure/terraform/templates/firstboot.sh.tpl @@ -12,7 +12,8 @@ HOSTNAME=${hostname} FQDN=${fqdn} IP=${ip_address} -FACTER_LOCATION=${facter_location} +FACTER_DEPLOYMENT=${facter_deployment} +FACTER_SUBNET=${facter_subnet} # Handle base system configuration @@ -113,7 +114,9 @@ # Install the location fact as that is needed by our puppet manifests mkdir -p /etc/facter/facts.d -echo location=$FACTER_LOCATION > /etc/facter/facts.d/location.txt +echo deployment=$FACTER_DEPLOYMENT > /etc/facter/facts.d/deployment.txt +echo subnet=$FACTER_SUBNET > /etc/facter/facts.d/subnet.txt +echo # first time around, this will: # - update the node's puppet agent configuration defining the puppet master