diff --git a/proxmox/terraform/kelvingrove.tf b/proxmox/terraform/kelvingrove.tf
new file mode 100644
index 0000000..82e1941
--- /dev/null
+++ b/proxmox/terraform/kelvingrove.tf
@@ -0,0 +1,28 @@
+module "kelvingrove" {
+  source = "./modules/node"
+  config = {
+    dns                       = var.dns
+    domain                    = "internal.softwareheritage.org"
+    puppet_environment        = "production"
+    puppet_master             = var.puppet_master
+    gateway_ip                = "192.168.100.1"
+    user_admin                = var.user_admin
+    user_admin_ssh_public_key = var.user_admin_ssh_public_key
+  }
+
+  hostname    = "kelvingrove"
+  description = "Keycloak server"
+  cores       = "4"
+  memory      = "8192"
+  network = {
+    ip      = "192.168.100.106"
+    macaddr = "72:55:5E:58:01:0B"
+  }
+  hypervisor = "hypervisor3"
+  storage = {
+    location = "hypervisor3-ssd"
+    size     = "32G"
+  }
+  template = "template-debian-10"
+
+}
diff --git a/proxmox/terraform/modules/node/main.tf b/proxmox/terraform/modules/node/main.tf
index 87c55d6..f0ef925 100644
--- a/proxmox/terraform/modules/node/main.tf
+++ b/proxmox/terraform/modules/node/main.tf
@@ -1,52 +1,80 @@
 resource "proxmox_vm_qemu" "node" {
-  name = "${var.hostname}"
-  desc = "${var.description}"
-  # hypervisor onto which make the vm
-  target_node = "${var.hypervisor}"
-  # See init-template.md to see the template vm bootstrap
-  clone = "${var.template}"
-  # linux kernel 2.6
-  qemu_os = "l26"
-  # generic setup
-  sockets = "${var.sockets}"
-  cores = "${var.cores}"
-  memory = "${var.memory}"
-  # boot machine when hypervirsor starts
-  onboot = true
-  #### cloud-init setup
-  os_type = "cloud-init"
-  # ciuser - User name to change to use when connecting
-  ciuser = "${var.config["user_admin"]}"
-  ssh_user = "${var.config["user_admin"]}"
-  # sshkeys - public ssh key to use when connecting
-  sshkeys = "${var.config["user_admin_ssh_public_key"]}"
-  # searchdomain - Sets DNS search domains for a container.
-  searchdomain = "${var.config["domain"]}"
-  # nameserver - Sets DNS server IP address for a container.
-  nameserver = "${var.config["dns"]}"
-  # ipconfig0 - [gw =] [,ip=]
-  ipconfig0 = "ip=${var.network["ip"]}/24,gw=${var.config["gateway_ip"]}"
-  ####
-  disk {
-    id = 0
-    type = "virtio"
-    storage = "${var.storage["location"]}"
-    storage_type = "ssd"
-    size = "${var.storage["size"]}"
-  }
-  network {
-    id = 0
-    model = "virtio"
-    bridge = "vmbr0"
-    macaddr = "${lookup(var.network, "macaddr", "")}"
-  }
+  name = var.hostname
+  desc = var.description
+
+  # hypervisor on which to create the vm
+  target_node = var.hypervisor
+
+  # See init-template.md to see the template vm bootstrap
+  clone = var.template
+
+  boot = "c"
+
+  # linux kernel 2.6
+  qemu_os = "l26"
+
+  # generic setup
+  sockets = var.sockets
+  cores   = var.cores
+  memory  = var.memory
+
+  # boot machine when the hypervisor starts
+  onboot = true
+
+  #### cloud-init setup
+  os_type = "cloud-init"
-  #### provisioning: (creation time only) connect through ssh
-  # Let puppet do its install
-  provisioner "remote-exec" {
-    inline = [
-      "sed -i 's/127.0.1.1/${var.network["ip"]}/g' /etc/hosts",
-      "puppet agent --server ${var.config["puppet_master"]} --environment=${var.config["puppet_environment"]} --waitforcert 60 --test || echo 'Node provisionned!'",
-    ]
+  # ciuser - User name to use when connecting
+  ciuser   = var.config["user_admin"]
+  ssh_user = var.config["user_admin"]
+
+  # sshkeys - public ssh key to use when connecting
+  sshkeys = var.config["user_admin_ssh_public_key"]
+
+  # searchdomain - Sets DNS search domains for a container.
+  searchdomain = var.config["domain"]
+
+  # nameserver - Sets DNS server IP address for a container.
+  nameserver = var.config["dns"]
+
+  # ipconfig0 - [gw =] [,ip=]
+  ipconfig0 = "ip=${var.network["ip"]}/24,gw=${var.config["gateway_ip"]}"
+
+  ####
+  disk {
+    id           = 0
+    type         = "virtio"
+    storage      = var.storage["location"]
+    storage_type = "ssd"
+    size         = var.storage["size"]
+  }
+
+  network {
+    id      = 0
+    model   = "virtio"
+    bridge  = "vmbr0"
+    macaddr = lookup(var.network, "macaddr", "")
+  }
+
+  #### provisioning: (creation time only) connect through ssh
+  # Let puppet do its install
+  provisioner "remote-exec" {
+    inline = [
+      "sed -i 's/127.0.1.1/${var.network["ip"]}/g' /etc/hosts",
+      "puppet agent --server ${var.config["puppet_master"]} --environment=${var.config["puppet_environment"]} --waitforcert 60 --test || echo 'Node provisioned!'",
+    ]
+    connection {
+      type = "ssh"
+      user = "root"
+      host = var.network["ip"]
     }
+  }
+
+  lifecycle {
+    ignore_changes = [
+      bootdisk,
+      scsihw,
+    ]
+  }
 }
+
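The node module's interface is only visible in this patch through its uses: main.tf reads var.hostname, var.sockets, var.cores, var.memory, var.storage, var.network and a var.config map, and kelvingrove.tf sets most of them while omitting sockets. A sketch of what the module's variables.tf presumably declares — the names come from those references, the types and defaults are assumptions:

    # modules/node/variables.tf (sketch; types and defaults are assumptions)
    variable "hostname" {
      type = string
    }

    variable "sockets" {
      # not set by any caller in this diff, so it presumably has a default
      type    = string
      default = "1"
    }

    variable "config" {
      # keys used in main.tf: user_admin, user_admin_ssh_public_key, domain,
      # dns, gateway_ip, puppet_master, puppet_environment
      type = map(string)
    }

    variable "network" {
      # ip is required; macaddr is optional, hence lookup(var.network, "macaddr", "")
      type = map(string)
    }
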
diff --git a/proxmox/terraform/modules/node/outputs.tf b/proxmox/terraform/modules/node/outputs.tf
index 188ac17..ddb0ce2 100644
--- a/proxmox/terraform/modules/node/outputs.tf
+++ b/proxmox/terraform/modules/node/outputs.tf
@@ -1,8 +1,10 @@
-output summary {
-  value = <

[… truncated in this copy: the rest of the outputs.tf hunk and the beginning of the staging gateway diff …]

-  # ipconfig0 - [gw =] [,ip=]
-  # ip to communicate for now with the prod network through louvre
-  ipconfig0 = "ip=192.168.100.125/24,gw=192.168.100.1"
-  # vms from the staging network will use this vm as gateway
-  ipconfig1 = "ip=${var.gateway_ip}/24"
-  disk {
-    id = 0
-    type = "virtio"
-    storage = "orsay-ssd-2018"
-    storage_type = "ssd"
-    size = "20G"
-  }
-  network {
-    id = 0
-    model = "virtio"
-    bridge = "vmbr0"
-    macaddr = "6E:ED:EF:EB:3C:AA"
-  }
-  network {
-    id = 1
-    model = "virtio"
-    bridge = "vmbr0"
-    macaddr = "FE:95:CC:A5:EB:43"
-  }
-  # Delegate to puppet at the end of the provisioning the software setup
-  provisioner "remote-exec" {
-    inline = [
-      "sysctl -w net.ipv4.ip_forward=1",
-      # make it persistent
-      "sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf",
-      # add route to louvre (the persistence part is done through puppet)
-      "iptables -t nat -A POSTROUTING -s 192.168.128.0/24 -o eth0 -j MASQUERADE",
-      "sed -i 's/127.0.1.1/${var.gateway_ip}/g' /etc/hosts",
-      "puppet agent --server ${var.puppet_master} --environment=${var.puppet_environment} --waitforcert 60 --test || echo 'Node provisionned!'",
-    ]
-  }
+  name = "gateway"
+  desc = "staging gateway node"
+
+  # hypervisor on which to create the vm
+  target_node = "orsay"
+
+  # See init-template.md to see the template vm bootstrap
+  clone = "template-debian-10"
+
+  # linux kernel 2.6
+  qemu_os = "l26"
+
+  # generic setup
+  sockets = 1
+  cores   = 1
+  memory  = 1024
+
+  boot = "c"
+
+  # boot machine when the hypervisor starts
+  onboot = true
+
+  #### cloud-init setup
+  # to actually set some information per os_type (values: ubuntu, centos,
+  # cloud-init). Keep this as cloud-init
+  os_type = "cloud-init"
+
+  # ciuser - User name to change ssh keys and password for instead of the
+  # image's configured default user.
+  ciuser   = var.user_admin
+  ssh_user = var.user_admin
+
+  # searchdomain - Sets DNS search domains for a container.
+  searchdomain = var.domain
+
+  # nameserver - Sets DNS server IP address for a container.
+  nameserver = var.dns
+
+  # sshkeys - public ssh keys, one per line
+  sshkeys = var.user_admin_ssh_public_key
+
+  # FIXME: When T1872 lands, this will need to be updated
+  # ipconfig0 - [gw =] [,ip=]
+  # ip to communicate for now with the prod network through louvre
+  ipconfig0 = "ip=192.168.100.125/24,gw=192.168.100.1"
+
+  # vms from the staging network will use this vm as gateway
+  ipconfig1 = "ip=${var.gateway_ip}/24"
+  disk {
+    id           = 0
+    type         = "virtio"
+    storage      = "orsay-ssd-2018"
+    storage_type = "ssd"
+    size         = "20G"
+  }
+  network {
+    id      = 0
+    model   = "virtio"
+    bridge  = "vmbr0"
+    macaddr = "6E:ED:EF:EB:3C:AA"
+  }
+  network {
+    id      = 1
+    model   = "virtio"
+    bridge  = "vmbr0"
+    macaddr = "FE:95:CC:A5:EB:43"
+  }
+
+  # Delegate the software setup to puppet at the end of provisioning
+  provisioner "remote-exec" {
+    inline = [
+      "sysctl -w net.ipv4.ip_forward=1",
+      "sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf",
+      "iptables -t nat -A POSTROUTING -s 192.168.128.0/24 -o eth0 -j MASQUERADE",
+      "sed -i 's/127.0.1.1/${var.gateway_ip}/g' /etc/hosts",
+      "puppet agent --server ${var.puppet_master} --environment=${var.puppet_environment} --waitforcert 60 --test || echo 'Node provisioned!'",
+    ]
+  }
+
+  lifecycle {
+    ignore_changes = [
+      bootdisk,
+      scsihw,
+    ]
+  }
 }
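The outputs.tf hunk above is cut off, but the visible fragment shows the relevant 0.12 change: the output name gains quotes (output summary becomes output "summary", as the storage0_summary and later outputs below confirm), and the value = < fragment suggests a heredoc. A sketch of the new form — the real field list is not shown in the patch, so these fields are placeholders:

    output "summary" {
      value = <<EOF
    hostname: ${proxmox_vm_qemu.node.name}
    ip:       ${var.network["ip"]}
    EOF
    }
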
 module "storage0" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "storage0"
-  description = "swh storage services"
-  cores = "4"
-  memory = "8192"
-  network = {
-    ip = "192.168.128.2"
-    macaddr = "CA:73:7F:ED:F9:01"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "storage0"
+  description = "swh storage services"
+  cores       = "4"
+  memory      = "8192"
+  network = {
+    ip      = "192.168.128.2"
+    macaddr = "CA:73:7F:ED:F9:01"
+  }
 }

-output storage0_summary {
-  value = "${module.storage0.summary}"
+output "storage0_summary" {
+  value = module.storage0.summary
 }

 module "db0" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "db0"
-  description = "Node to host storage/indexer/scheduler dbs"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.3"
-    macaddr = "3A:65:31:7C:24:17"
-  }
-  storage = {
-    location = "orsay-ssd-2018"
-    size = "100G"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "db0"
+  description = "Node to host storage/indexer/scheduler dbs"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.3"
+    macaddr = "3A:65:31:7C:24:17"
+  }
+  storage = {
+    location = "orsay-ssd-2018"
+    size     = "100G"
+  }
 }

-output db0_summary {
-  value = "${module.db0.summary}"
+output "db0_summary" {
+  value = module.db0.summary
 }

 module "scheduler0" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "scheduler0"
-  description = "Scheduler api services"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.4"
-    macaddr = "92:02:7E:D0:B9:36"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "scheduler0"
+  description = "Scheduler api services"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.4"
+    macaddr = "92:02:7E:D0:B9:36"
+  }
 }

-output scheduler0_summary {
-  value = "${module.scheduler0.summary}"
+output "scheduler0_summary" {
+  value = module.scheduler0.summary
 }

 module "worker0" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "worker0"
-  description = "Loader/lister service node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.5"
-    macaddr = "72:D9:03:46:B1:47"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "worker0"
+  description = "Loader/lister service node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.5"
+    macaddr = "72:D9:03:46:B1:47"
+  }
 }

-output worker0_summary {
-  value = "${module.worker0.summary}"
+output "worker0_summary" {
+  value = module.worker0.summary
 }

 module "worker1" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "worker1"
-  description = "Loader/lister service node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.6"
-    macaddr = "D6:A9:6F:02:E3:66"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "worker1"
+  description = "Loader/lister service node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.6"
+    macaddr = "D6:A9:6F:02:E3:66"
+  }
 }

-output worker1_summary {
-  value = "${module.worker1.summary}"
+output "worker1_summary" {
+  value = module.worker1.summary
 }

 module "webapp" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "webapp"
-  description = "Archive/Webapp service node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.8"
-    macaddr = "1A:00:39:95:D4:5F"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "webapp"
+  description = "Archive/Webapp service node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.8"
+    macaddr = "1A:00:39:95:D4:5F"
+  }
 }

-output webapp_summary {
-  value = "${module.webapp.summary}"
+output "webapp_summary" {
+  value = module.webapp.summary
 }

 module "deposit" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "deposit"
-  description = "Deposit service node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.7"
-    macaddr = "9E:81:DD:58:15:3B"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "deposit"
+  description = "Deposit service node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.7"
+    macaddr = "9E:81:DD:58:15:3B"
+  }
 }

-output deposit_summary {
-  value = "${module.deposit.summary}"
+output "deposit_summary" {
+  value = module.deposit.summary
 }

 module "vault" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "vault"
-  description = "Vault services node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.9"
-    macaddr = "16:15:1C:79:CB:DB"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "vault"
+  description = "Vault services node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.9"
+    macaddr = "16:15:1C:79:CB:DB"
+  }
 }

-output vault_summary {
-  value = "${module.vault.summary}"
+output "vault_summary" {
+  value = module.vault.summary
 }

 module "journal0" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "journal0"
-  description = "Journal services node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.10"
-    macaddr = "1E:98:C2:66:BF:33"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "journal0"
+  description = "Journal services node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.10"
+    macaddr = "1E:98:C2:66:BF:33"
+  }
 }

-output journal0_summary {
-  value = "${module.journal0.summary}"
+output "journal0_summary" {
+  value = module.journal0.summary
 }

 module "worker2" {
-  source = "./modules/node"
-  config = "${local.config}"
-
-  hostname = "worker2"
-  description = "Loader/lister service node"
-  cores = "4"
-  memory = "16384"
-  network = {
-    ip = "192.168.128.11"
-    macaddr = "AA:57:27:51:75:18"
-  }
+  source = "./modules/node"
+  config = local.config
+
+  hostname    = "worker2"
+  description = "Loader/lister service node"
+  cores       = "4"
+  memory      = "16384"
+  network = {
+    ip      = "192.168.128.11"
+    macaddr = "AA:57:27:51:75:18"
+  }
 }

-output worker2_summary {
-  value = "${module.worker2.summary}"
+output "worker2_summary" {
+  value = module.worker2.summary
 }
+
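Every module call above passes the same seven-key config object through local.config. With the richer type system this upgrade enables, the module could declare config as an explicit object type so a missing or misspelled key fails at plan time rather than at apply — a sketch using the keys visible in this diff:

    # modules/node/variables.tf (sketch; an alternative to an untyped map)
    variable "config" {
      type = object({
        dns                       = string
        domain                    = string
        puppet_environment        = string
        puppet_master             = string
        gateway_ip                = string
        user_admin                = string
        user_admin_ssh_public_key = string
      })
    }
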
"192.168.128.5" - macaddr = "72:D9:03:46:B1:47" - } + source = "./modules/node" + config = local.config + + hostname = "worker0" + description = "Loader/lister service node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.5" + macaddr = "72:D9:03:46:B1:47" + } } -output worker0_summary { - value = "${module.worker0.summary}" +output "worker0_summary" { + value = module.worker0.summary } module "worker1" { - source = "./modules/node" - config = "${local.config}" - - hostname = "worker1" - description = "Loader/lister service node" - cores = "4" - memory = "16384" - network = { - ip = "192.168.128.6" - macaddr = "D6:A9:6F:02:E3:66" - } + source = "./modules/node" + config = local.config + + hostname = "worker1" + description = "Loader/lister service node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.6" + macaddr = "D6:A9:6F:02:E3:66" + } } -output worker1_summary { - value = "${module.worker1.summary}" +output "worker1_summary" { + value = module.worker1.summary } module "webapp" { - source = "./modules/node" - config = "${local.config}" - - hostname = "webapp" - description = "Archive/Webapp service node" - cores = "4" - memory = "16384" - network = { - ip = "192.168.128.8" - macaddr = "1A:00:39:95:D4:5F" - } + source = "./modules/node" + config = local.config + + hostname = "webapp" + description = "Archive/Webapp service node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.8" + macaddr = "1A:00:39:95:D4:5F" + } } -output webapp_summary { - value = "${module.webapp.summary}" +output "webapp_summary" { + value = module.webapp.summary } module "deposit" { - source = "./modules/node" - config = "${local.config}" - - hostname = "deposit" - description = "Deposit service node" - cores = "4" - memory = "16384" - network = { - ip = "192.168.128.7" - macaddr = "9E:81:DD:58:15:3B" - } + source = "./modules/node" + config = local.config + + hostname = "deposit" + description = "Deposit service node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.7" + macaddr = "9E:81:DD:58:15:3B" + } } -output deposit_summary { - value = "${module.deposit.summary}" +output "deposit_summary" { + value = module.deposit.summary } module "vault" { - source = "./modules/node" - config = "${local.config}" - - hostname = "vault" - description = "Vault services node" - cores = "4" - memory = "16384" - network = { - ip = "192.168.128.9" - macaddr = "16:15:1C:79:CB:DB" - } + source = "./modules/node" + config = local.config + + hostname = "vault" + description = "Vault services node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.9" + macaddr = "16:15:1C:79:CB:DB" + } } -output vault_summary { - value = "${module.vault.summary}" +output "vault_summary" { + value = module.vault.summary } module "journal0" { - source = "./modules/node" - config = "${local.config}" - - hostname = "journal0" - description = "Journal services node" - cores = "4" - memory = "16384" - network = { - ip = "192.168.128.10" - macaddr = "1E:98:C2:66:BF:33" - } + source = "./modules/node" + config = local.config + + hostname = "journal0" + description = "Journal services node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.10" + macaddr = "1E:98:C2:66:BF:33" + } } -output journal0_summary { - value = "${module.journal0.summary}" +output "journal0_summary" { + value = module.journal0.summary } module "worker2" { - source = "./modules/node" - config = "${local.config}" - - hostname = "worker2" - description = "Loader/lister service node" 
- cores = "4" - memory = "16384" - network = { - ip = "192.168.128.11" - macaddr = "AA:57:27:51:75:18" - } + source = "./modules/node" + config = local.config + + hostname = "worker2" + description = "Loader/lister service node" + cores = "4" + memory = "16384" + network = { + ip = "192.168.128.11" + macaddr = "AA:57:27:51:75:18" + } } -output worker2_summary { - value = "${module.worker2.summary}" +output "worker2_summary" { + value = module.worker2.summary } + diff --git a/proxmox/terraform/variables.tf b/proxmox/terraform/variables.tf index 98437e7..a3817c6 100644 --- a/proxmox/terraform/variables.tf +++ b/proxmox/terraform/variables.tf @@ -1,42 +1,43 @@ variable "domain" { - description = "DNS zone for the staging area" - type = "string" - default = "internal.staging.swh.network" + description = "DNS zone for the staging area" + type = string + default = "internal.staging.swh.network" } variable "puppet_environment" { - description = "Puppet environment to use (swh-site's git branch)" - type = "string" - default = "staging" + description = "Puppet environment to use (swh-site's git branch)" + type = string + default = "staging" } variable "puppet_master" { - description = "Puppet master FQDN" - type = "string" - default = "pergamon.internal.softwareheritage.org" + description = "Puppet master FQDN" + type = string + default = "pergamon.internal.softwareheritage.org" } variable "dns" { - description = "DNS server ip" - type = "string" - default = "192.168.100.29" + description = "DNS server ip" + type = string + default = "192.168.100.29" } variable "gateway_ip" { - description = "Staging network gateway ip" - type = "string" - default = "192.168.128.1" + description = "Staging network gateway ip" + type = string + default = "192.168.128.1" } variable "user_admin" { - description = "User admin to use for managing the node" - type = "string" - default = "root" + description = "User admin to use for managing the node" + type = string + default = "root" } # define input variables for the modules # `pass search terraform-proxmox` in credential store variable "user_admin_ssh_public_key" { - type = "string" + type = string default = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform" } + diff --git a/proxmox/terraform/versions.tf b/proxmox/terraform/versions.tf new file mode 100644 index 0000000..ac97c6a --- /dev/null +++ b/proxmox/terraform/versions.tf @@ -0,0 +1,4 @@ + +terraform { + required_version = ">= 0.12" +} diff --git a/proxmox/terraform/zookeeper.tf b/proxmox/terraform/zookeeper.tf new file mode 100644 index 0000000..b5e5e29 --- /dev/null +++ b/proxmox/terraform/zookeeper.tf @@ -0,0 +1,80 @@ +module "zookeeper1" { + source = "./modules/node" + config = { + dns = var.dns + domain = "internal.softwareheritage.org" + puppet_environment = "production" + puppet_master = var.puppet_master + gateway_ip = "192.168.100.1" + user_admin = var.user_admin + user_admin_ssh_public_key = var.user_admin_ssh_public_key + } + + hostname = "zookeeper1" + description = "Zookeeper server" + cores = "2" + memory = "4096" + network = { + ip = "192.168.100.131" + } + hypervisor = "hypervisor3" + storage = { + location = "hypervisor3-ssd" + size = "32G" + } + template 
= "template-debian-10" +} + +module "zookeeper2" { + source = "./modules/node" + config = { + dns = var.dns + domain = "internal.softwareheritage.org" + puppet_environment = "production" + puppet_master = var.puppet_master + gateway_ip = "192.168.100.1" + user_admin = var.user_admin + user_admin_ssh_public_key = var.user_admin_ssh_public_key + } + + hostname = "zookeeper2" + description = "Zookeeper server" + cores = "2" + memory = "4096" + network = { + ip = "192.168.100.132" + } + hypervisor = "hypervisor3" + storage = { + location = "hypervisor3-ssd" + size = "32G" + } + template = "template-debian-10" +} + +module "zookeeper3" { + source = "./modules/node" + config = { + dns = var.dns + domain = "internal.softwareheritage.org" + puppet_environment = "production" + puppet_master = var.puppet_master + gateway_ip = "192.168.100.1" + user_admin = var.user_admin + user_admin_ssh_public_key = var.user_admin_ssh_public_key + } + + hostname = "zookeeper3" + description = "Zookeeper server" + cores = "2" + memory = "4096" + network = { + ip = "192.168.100.133" + } + hypervisor = "hypervisor3" + storage = { + location = "hypervisor3-ssd" + size = "32G" + } + template = "template-debian-10" +}