diff --git a/proxmox/terraform/common.tf b/proxmox/terraform/common.tf
index 7fd9b77..e99f5f7 100644
--- a/proxmox/terraform/common.tf
+++ b/proxmox/terraform/common.tf
@@ -1,11 +1,12 @@
 locals {
   config = {
-    dns                       = var.dns
-    domain                    = "internal.softwareheritage.org"
-    puppet_environment        = "production"
-    puppet_master             = var.puppet_master
-    gateway_ip                = "192.168.100.1"
-    user_admin                = var.user_admin
-    user_admin_ssh_public_key = var.user_admin_ssh_public_key
+    dns                             = var.dns
+    domain                          = "internal.softwareheritage.org"
+    puppet_environment              = "production"
+    puppet_master                   = var.puppet_master
+    gateway_ip                      = "192.168.100.1"
+    user_admin                      = var.user_admin
+    user_admin_ssh_public_key       = var.user_admin_ssh_public_key
+    user_admin_ssh_private_key_path = var.user_admin_ssh_private_key_path
   }
 }
diff --git a/proxmox/terraform/modules/node/main.tf b/proxmox/terraform/modules/node/main.tf
index fe308e9..ecbf5fa 100644
--- a/proxmox/terraform/modules/node/main.tf
+++ b/proxmox/terraform/modules/node/main.tf
@@ -1,109 +1,109 @@
 resource "proxmox_vm_qemu" "node" {
   name       = var.hostname
   desc       = var.description
   vmid       = var.vmid
   balloon    = var.balloon
   full_clone = false

   # hypervisor on which to create the vm
   target_node = var.hypervisor

   # See init-template.md for the template vm bootstrap
   clone = var.template

   boot = "c"

   # linux kernel 2.6
   qemu_os = "l26"

   # generic setup
   sockets = var.sockets
   cores   = var.cores
   numa    = var.numa
   memory  = var.memory

   # boot machine when the hypervisor starts
   onboot = true

   #### cloud-init setup
   os_type = "cloud-init"

   # ciuser - user name to use when connecting
   ciuser   = var.config["user_admin"]
   ssh_user = var.config["user_admin"]

   # sshkeys - public ssh key to use when connecting
   sshkeys = var.config["user_admin_ssh_public_key"]

   # searchdomain - Sets DNS search domains for a container.
   searchdomain = var.config["domain"]

   # nameserver - Sets DNS server IP address for a container.
   nameserver = var.config["dns"]

   # ipconfig0 - [gw=] [,ip=]
   ipconfig0 = "ip=${var.networks[0]["ip"]}/24,gw=${var.networks[0]["gateway"]}"

   # Mostly, var.networks holds only one network declaration, except for
   # gateways. Try to look up such a value; if it fails (or is undefined),
   # then ipconfig1 will be empty, thus no secondary ip config.
   ipconfig1 = try(lookup(var.networks[1], "ip"), "") != "" ? "ip=${var.networks[1]["ip"]}/24" : ""

   ####
   dynamic disk {
     for_each = var.storages
     content {
       id      = disk.value["id"]
       storage = disk.value["storage"]
       size    = disk.value["size"]
       type    = "virtio"
       # storage_type: https://pve.proxmox.com/wiki/Storage
       storage_type = lookup(disk.value, "storage_type", "cephfs")
     }
   }

   dynamic network {
     for_each = var.networks
     content {
       id      = lookup(network.value, "id", 0)
       macaddr = lookup(network.value, "macaddr", "")
       bridge  = lookup(network.value, "bridge", "vmbr443")
       model   = "virtio"
     }
   }

   #### provisioning: (creation time only) connect through ssh
   # Let puppet do its install
   provisioner "remote-exec" {
     inline = concat(
       var.pre_provision_steps,
       [
         # First install facts...
         "mkdir -p /etc/facter/facts.d",
         "echo deployment=${var.facter_deployment} > /etc/facter/facts.d/deployment.txt",
         "echo subnet=${var.facter_subnet} > /etc/facter/facts.d/subnet.txt",
         "sed -i 's/127.0.1.1/${lookup(var.networks[0], "ip")}/g' /etc/hosts",
         # ... so puppet agent installs the node's role
         "puppet agent --server ${var.config["puppet_master"]} --environment=${var.config["puppet_environment"]} --waitforcert 60 --test || echo 'Node provisioned!'",
     ])

     connection {
       type        = "ssh"
       user        = "root"
       host        = lookup(var.networks[0], "ip")
-      private_key = "${file("~/.ssh/id-rsa-terraform-proxmox")}"
+      private_key = "${file(var.config["user_admin_ssh_private_key_path"])}"
     }
   }

   lifecycle {
     ignore_changes = [
       bootdisk,
       scsihw,
       target_node,
       clone
     ]
   }
 }
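An aside on the ipconfig1 expression above, since it is the least obvious part of the module: the following self-contained sketch (all addresses are hypothetical, not allocated values) reproduces the expression with locals, and can be inspected in an empty directory with terraform console. Like the module itself, it assumes a Terraform version that evaluates only the chosen branch of a conditional.

# Standalone illustration of the ipconfig1 fallback (hypothetical values).
locals {
  # regular node: a single network declaration
  single = [{ ip = "192.168.130.100", gateway = "192.168.130.1" }]
  # gateway node: a secondary network declaration is present
  dual = [
    { ip = "192.168.100.2", gateway = "192.168.100.1" },
    { ip = "192.168.130.1" },
  ]
}

# networks[1] is out of range -> try() falls back to "" -> result is ""
output "single_ipconfig1" {
  value = try(lookup(local.single[1], "ip"), "") != "" ? "ip=${local.single[1]["ip"]}/24" : ""
}

# networks[1] exists -> result is "ip=192.168.130.1/24"
output "dual_ipconfig1" {
  value = try(lookup(local.dual[1], "ip"), "") != "" ? "ip=${local.dual[1]["ip"]}/24" : ""
}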
"mkdir -p /etc/facter/facts.d", "echo deployment=${var.facter_deployment} > /etc/facter/facts.d/deployment.txt", "echo subnet=${var.facter_subnet} > /etc/facter/facts.d/subnet.txt", "sed -i 's/127.0.1.1/${lookup(var.networks[0], "ip")}/g' /etc/hosts", # so puppet agent installs the node's role "puppet agent --server ${var.config["puppet_master"]} --environment=${var.config["puppet_environment"]} --waitforcert 60 --test || echo 'Node provisionned!'", ]) connection { type = "ssh" user = "root" host = lookup(var.networks[0], "ip") - private_key = "${file("~/.ssh/id-rsa-terraform-proxmox")}" # <- something changed + private_key = "${file(var.config["user_admin_ssh_private_key_path"])}" } } lifecycle { ignore_changes = [ bootdisk, scsihw, target_node, clone ] } } diff --git a/proxmox/terraform/staging/staging.tf b/proxmox/terraform/staging/staging.tf index 77ac1dc..e4da5a9 100644 --- a/proxmox/terraform/staging/staging.tf +++ b/proxmox/terraform/staging/staging.tf @@ -1,272 +1,273 @@ # Keyword use: # - provider: Define the provider(s) # - data: Retrieve data information to be used within the file # - resource: Define resource and create/update # Default configuration passed along module calls # (There is no other way to avoid duplication) locals { config = { - dns = var.dns - domain = var.domain - puppet_environment = var.puppet_environment - puppet_master = var.puppet_master - gateway_ip = var.gateway_ip - user_admin = var.user_admin - user_admin_ssh_public_key = var.user_admin_ssh_public_key + dns = var.dns + domain = var.domain + puppet_environment = var.puppet_environment + puppet_master = var.puppet_master + gateway_ip = var.gateway_ip + user_admin = var.user_admin + user_admin_ssh_public_key = var.user_admin_ssh_public_key + user_admin_ssh_private_key_path = var.user_admin_ssh_private_key_path } } module "storage0" { source = "../modules/node" config = local.config hypervisor = "orsay" vmid = 114 hostname = "storage0" description = "swh storage services" cores = "4" memory = "8192" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.40" gateway = local.config["gateway_ip"] macaddr = "CA:73:7F:ED:F9:01" bridge = "vmbr443" }] storages = [{ id = 0 storage = "orsay-ssd-2018" size = "32G" storage_type = "ssd" }, { id = 1 storage = "orsay-ssd-2018" size = "812G" storage_type = "ssd" }] } module "db0" { source = "../modules/node" config = local.config hypervisor = "orsay" vmid = 115 hostname = "db0" description = "Node to host storage/indexer/scheduler dbs" cores = "4" memory = "16384" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.10" gateway = local.config["gateway_ip"] macaddr = "3A:65:31:7C:24:17" bridge = "vmbr443" }] storages = [{ id = 0 storage = "orsay-ssd-2018" size = "400G" storage_type = "ssd" }] } output "db0_summary" { value = module.db0.summary } module "scheduler0" { source = "../modules/node" config = local.config vmid = 116 hostname = "scheduler0" description = "Scheduler api services" hypervisor = "beaubourg" cores = "4" memory = "8192" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.50" gateway = local.config["gateway_ip"] macaddr = "92:02:7E:D0:B9:36" bridge = "vmbr443" }] } output "scheduler0_summary" { value = module.scheduler0.summary } module "worker0" { source = "../modules/node" config = local.config vmid = 117 hostname = "worker0" description = "Loader/lister service node" hypervisor = "beaubourg" cores = "4" memory = "12288" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.100" gateway = local.config["gateway_ip"] macaddr = "72:D9:03:46:B1:47" 
bridge = "vmbr443" }] } output "worker0_summary" { value = module.worker0.summary } module "worker1" { source = "../modules/node" config = local.config vmid = 118 hostname = "worker1" description = "Loader/lister service node" hypervisor = "beaubourg" cores = "4" memory = "12288" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.101" gateway = local.config["gateway_ip"] macaddr = "D6:A9:6F:02:E3:66" bridge = "vmbr443" }] } output "worker1_summary" { value = module.worker1.summary } module "worker2" { source = "../modules/node" config = local.config vmid = 112 hostname = "worker2" description = "Loader/lister service node" hypervisor = "branly" cores = "4" memory = "12288" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.102" gateway = local.config["gateway_ip"] macaddr = "AA:57:27:51:75:18" bridge = "vmbr443" }] } output "worker2_summary" { value = module.worker2.summary } module "webapp" { source = "../modules/node" config = local.config vmid = 119 hostname = "webapp" description = "Archive/Webapp service node" hypervisor = "branly" cores = "4" memory = "16384" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.30" gateway = local.config["gateway_ip"] macaddr = "1A:00:39:95:D4:5F" bridge = "vmbr443" }] } output "webapp_summary" { value = module.webapp.summary } module "deposit" { source = "../modules/node" config = local.config vmid = 120 hostname = "deposit" description = "Deposit service node" hypervisor = "beaubourg" cores = "4" memory = "8192" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.31" gateway = local.config["gateway_ip"] macaddr = "9E:81:DD:58:15:3B" bridge = "vmbr443" }] } output "deposit_summary" { value = module.deposit.summary } module "vault" { source = "../modules/node" config = local.config vmid = 121 hostname = "vault" description = "Vault services node" hypervisor = "beaubourg" cores = "4" memory = "8192" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.60" gateway = local.config["gateway_ip"] macaddr = "16:15:1C:79:CB:DB" bridge = "vmbr443" }] } output "vault_summary" { value = module.vault.summary } module "journal0" { source = "../modules/node" config = local.config vmid = 122 hostname = "journal0" description = "Journal services node" hypervisor = "beaubourg" cores = "4" memory = "12288" balloon = 1024 networks = [{ id = 0 ip = "192.168.130.70" gateway = local.config["gateway_ip"] macaddr = "1E:98:C2:66:BF:33" bridge = "vmbr443" }] } output "journal0_summary" { value = module.journal0.summary } diff --git a/proxmox/terraform/variables.tf b/proxmox/terraform/variables.tf index e86995f..0b5778f 100644 --- a/proxmox/terraform/variables.tf +++ b/proxmox/terraform/variables.tf @@ -1,42 +1,49 @@ variable "domain" { description = "DNS zone for the staging area" type = string default = "internal.staging.swh.network" } variable "puppet_environment" { description = "Puppet environment to use (swh-site's git branch)" type = string default = "staging" } variable "puppet_master" { description = "Puppet master FQDN" type = string default = "pergamon.internal.softwareheritage.org" } variable "dns" { description = "DNS server ip" type = string default = "192.168.100.29" } variable "gateway_ip" { description = "Staging network gateway ip" type = string default = "192.168.130.1" } variable "user_admin" { description = "User admin to use for managing the node" type = string default = "root" } -# define input variables for the modules +# public key part to install through cloud-init so ssh connection is possible # `pass search terraform-proxmox` in credential 
store variable "user_admin_ssh_public_key" { type = string default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform" } + +# private key path so the provisioning step can leverage the ssh connection +# `pass search terraform-proxmox` in credential store and install the key locally +variable "user_admin_ssh_private_key_path" { + type = string + default = "~/.ssh/id-rsa-terraform-proxmox-root" +}