diff --git a/proxmox/terraform/.gitignore b/proxmox/terraform/.gitignore
new file mode 100644
index 0000000..a565f01
--- /dev/null
+++ b/proxmox/terraform/.gitignore
@@ -0,0 +1 @@
+archives/
diff --git a/proxmox/terraform/kelvingrove.tf b/proxmox/terraform/kelvingrove.tf
index 82e1941..8006a75 100644
--- a/proxmox/terraform/kelvingrove.tf
+++ b/proxmox/terraform/kelvingrove.tf
@@ -1,28 +1,30 @@
 module "kelvingrove" {
   source = "./modules/node"
   config = {
     dns = var.dns
     domain = "internal.softwareheritage.org"
     puppet_environment = "production"
     puppet_master = var.puppet_master
     gateway_ip = "192.168.100.1"
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
   }
 
   hostname = "kelvingrove"
   description = "Keycloak server"
+  hypervisor = "hypervisor3"
+  vmid = 123
   cores = "4"
   memory = "8192"
+  numa = true
+  balloon = 0
   network = {
     ip = "192.168.100.106"
     macaddr = "72:55:5E:58:01:0B"
+    bridge = "vmbr0"
   }
-  hypervisor = "hypervisor3"
   storage = {
-    location = "hypervisor3-ssd"
+    location = "proxmox"
     size = "32G"
   }
-  template = "template-debian-10"
-
 }
diff --git a/proxmox/terraform/modules/node/main.tf b/proxmox/terraform/modules/node/main.tf
index f0ef925..fc84eb3 100644
--- a/proxmox/terraform/modules/node/main.tf
+++ b/proxmox/terraform/modules/node/main.tf
@@ -1,80 +1,85 @@
 resource "proxmox_vm_qemu" "node" {
   name = var.hostname
   desc = var.description
+  vmid = var.vmid
+  balloon = var.balloon
+  full_clone = false
 
   # hypervisor onto which make the vm
   target_node = var.hypervisor
 
   # See init-template.md to see the template vm bootstrap
   clone = var.template
 
   boot = "c"
 
   # linux kernel 2.6
   qemu_os = "l26"
 
   # generic setup
   sockets = var.sockets
   cores = var.cores
+  numa = var.numa
   memory = var.memory
 
   # boot machine when hypervirsor starts
   onboot = true
 
   #### cloud-init setup
   os_type = "cloud-init"
 
   # ciuser - User name to change to use when connecting
   ciuser = var.config["user_admin"]
   ssh_user = var.config["user_admin"]
 
   # sshkeys - public ssh key to use when connecting
   sshkeys = var.config["user_admin_ssh_public_key"]
 
   # searchdomain - Sets DNS search domains for a container.
   searchdomain = var.config["domain"]
 
   # nameserver - Sets DNS server IP address for a container.
   nameserver = var.config["dns"]
 
   # ipconfig0 - [gw =] [,ip=]
   ipconfig0 = "ip=${var.network["ip"]}/24,gw=${var.config["gateway_ip"]}"
 
   ####
   disk {
     id = 0
     type = "virtio"
     storage = var.storage["location"]
     storage_type = "ssd"
     size = var.storage["size"]
   }
   network {
     id = 0
     model = "virtio"
-    bridge = "vmbr0"
+    bridge = lookup(var.network, "bridge", "")
     macaddr = lookup(var.network, "macaddr", "")
   }
 
   #### provisioning: (creation time only) connect through ssh
   # Let puppet do its install
   provisioner "remote-exec" {
     inline = [
       "sed -i 's/127.0.1.1/${var.network["ip"]}/g' /etc/hosts",
       "puppet agent --server ${var.config["puppet_master"]} --environment=${var.config["puppet_environment"]} --waitforcert 60 --test || echo 'Node provisionned!'",
     ]
     connection {
       type = "ssh"
       user = "root"
       host = var.network["ip"]
     }
   }
 
   lifecycle {
     ignore_changes = [
       bootdisk,
       scsihw,
+      network,
+      disk
     ]
   }
 }
-
diff --git a/proxmox/terraform/modules/node/variables.tf b/proxmox/terraform/modules/node/variables.tf
index 4705223..91cf94b 100644
--- a/proxmox/terraform/modules/node/variables.tf
+++ b/proxmox/terraform/modules/node/variables.tf
@@ -1,59 +1,75 @@
 variable "hostname" {
   description = "Node's hostname"
   type = string
 }
 
 variable "description" {
   description = "Node's description"
   type = string
 }
 
 variable "hypervisor" {
-  description = "Hypervisor to install the vm to (choice: orsay, hypervisor3, beaubourg)"
+  description = "Hypervisor to install the vm to (choice: orsay, hypervisor3, beaubourg, branly)"
   type = string
-  default = "orsay"
 }
 
 variable "template" {
   description = "Template to use (template-debian-9, template-debian-10)"
   type = string
   default = "template-debian-10"
 }
 
 variable "sockets" {
   description = "Number of sockets"
   type = string
   default = "1"
 }
 
 variable "cores" {
   description = "Number of cores"
   type = string
   default = "1"
 }
 
 variable "memory" {
   description = "Memory in Mb"
   type = string
   default = "1024"
 }
 
 variable "network" {
-  description = "staging network's ip/macaddr"
+  description = "staging network's ip/macaddr/bridge"
   type = map(string)
 }
+
+variable "vmid" {
+  description = "virtual machine id"
+  type = number
+  default = 0
+}
+
+variable "balloon" {
+  description = "ballooning option"
+  type = number
+  default = 0
+}
+
+variable "numa" {
+  type = bool
+  default = false
+}
+
 variable "storage" {
   description = "Storage disk location and size in the hypervisor storage"
   type = map(string)
   default = {
-    location = "orsay-ssd-2018"
+    location = "proxmox"
     size = "32G"
   }
 }
 
 variable "config" {
   description = "Local config to avoid duplication from the main module"
   type = map(string)
 }
-
diff --git a/proxmox/terraform/staging.tf b/proxmox/terraform/staging.tf
index b6d0c0d..28a4df8 100644
--- a/proxmox/terraform/staging.tf
+++ b/proxmox/terraform/staging.tf
@@ -1,304 +1,423 @@
 # Keyword use:
 # - provider: Define the provider(s)
 # - data: Retrieve data information to be used within the file
 # - resource: Define resource and create/update
 
 provider "proxmox" {
   pm_tls_insecure = true
-  pm_api_url = "https://orsay.internal.softwareheritage.org:8006/api2/json"
+  pm_api_url = "https://beaubourg.internal.softwareheritage.org:8006/api2/json"
   # in a shell (see README): source ./setup.sh
 }
 
 # Default configuration passed along module calls
 # (There is no other way to avoid duplication)
 locals {
   config = {
     dns = var.dns
     domain = var.domain
     puppet_environment = var.puppet_environment
     puppet_master = var.puppet_master
     gateway_ip = var.gateway_ip
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
   }
 }
 
 # Define the staging network gateway
 # FIXME: Find a way to reuse the module "node"
 # Main difference between node in module and this:
 # - gateway define 2 network interfaces
 # - provisioning step is more complex
 resource "proxmox_vm_qemu" "gateway" {
   name = "gateway"
   desc = "staging gateway node"
 
   # hypervisor onto which make the vm
-  target_node = "orsay"
+  target_node = "beaubourg"
+  vmid = 109
+  balloon = 0
+  full_clone = false
 
   # See init-template.md to see the template vm bootstrap
   clone = "template-debian-10"
 
   # linux kernel 2.6
   qemu_os = "l26"
 
   # generic setup
   sockets = 1
   cores = 1
   memory = 1024
 
   boot = "c"
 
   # boot machine when hypervirsor starts
   onboot = true
 
   #### cloud-init setup
   # to actually set some information per os_type (values: ubuntu, centos,
   # cloud-init). Keep this as cloud-init
   os_type = "cloud-init"
 
   # ciuser - User name to change ssh keys and password for instead of the
   # image’s configured default user.
   ciuser = var.user_admin
   ssh_user = var.user_admin
 
   # searchdomain - Sets DNS search domains for a container.
   searchdomain = var.domain
 
   # nameserver - Sets DNS server IP address for a container.
   nameserver = var.dns
 
   # sshkeys - public ssh keys, one per line
   sshkeys = var.user_admin_ssh_public_key
 
   # FIXME: When T1872 lands, this will need to be updated
   # ipconfig0 - [gw =] [,ip=]
   # ip to communicate for now with the prod network through louvre
   ipconfig0 = "ip=192.168.100.125/24,gw=192.168.100.1"
 
   # vms from the staging network will use this vm as gateway
   ipconfig1 = "ip=${var.gateway_ip}/24"
 
   disk {
     id = 0
     type = "virtio"
-    storage = "orsay-ssd-2018"
+    storage = "proxmox"
     storage_type = "ssd"
     size = "20G"
   }
   network {
     id = 0
     model = "virtio"
     bridge = "vmbr0"
     macaddr = "6E:ED:EF:EB:3C:AA"
   }
   network {
     id = 1
     model = "virtio"
-    bridge = "vmbr0"
+    bridge = "vmbr443"
     macaddr = "FE:95:CC:A5:EB:43"
   }
 
   # Delegate to puppet at the end of the provisioning the software setup
   # Delegate to puppet at the end of the provisioning the software setup
   provisioner "remote-exec" {
     inline = [
       "sysctl -w net.ipv4.ip_forward=1",
       "sed -i 's/#net.ipv4.ip_forward=1/net.ipv4.ip_forward=1/g' /etc/sysctl.conf",
       "iptables -t nat -A POSTROUTING -s 192.168.128.0/24 -o eth0 -j MASQUERADE",
       "sed -i 's/127.0.1.1/${var.gateway_ip}/g' /etc/hosts",
       "puppet agent --server ${var.puppet_master} --environment=${var.puppet_environment} --waitforcert 60 --test || echo 'Node provisionned!'",
     ]
   }
 
   lifecycle {
     ignore_changes = [
       bootdisk,
       scsihw,
+      network,
+      disk
     ]
   }
 }
 
-module "storage0" {
-  source = "./modules/node"
-  config = local.config
+# Define the staging network gateway
+# FIXME: Find a way to reuse the module "node"
+# Main difference between node in module and this:
+# - storage0 define 2 disks
+resource "proxmox_vm_qemu" "storage0" {
+  name = "storage0"
+  desc = "swh storage services"
 
-  hostname = "storage0"
-  description = "swh storage services"
-  cores = "4"
-  memory = "8192"
-  network = {
-    ip = "192.168.128.2"
+  # hypervisor onto which make the vm
+  target_node = "orsay"
+  vmid = 114
+  full_clone = false
+
+  # See init-template.md to see the template vm bootstrap
+  clone = "template-debian-10"
+
+  # linux kernel 2.6
+  qemu_os = "l26"
+
+  # generic setup
+  sockets = 1
+  cores = 4
+  memory = 8192
+  balloon = 1024
+
+  boot = "c"
+
+  # boot machine when hypervirsor starts
+  onboot = true
+
+  #### cloud-init setup
+  # to actually set some information per os_type (values: ubuntu, centos,
+  # cloud-init). Keep this as cloud-init
+  os_type = "cloud-init"
+
+  # ciuser - User name to change ssh keys and password for instead of the
+  # image’s configured default user.
+  ciuser = var.user_admin
+  ssh_user = var.user_admin
+
+  # searchdomain - Sets DNS search domains for a container.
+  searchdomain = var.domain
+
+  # nameserver - Sets DNS server IP address for a container.
+  nameserver = var.dns
+
+  # sshkeys - public ssh keys, one per line
+  sshkeys = var.user_admin_ssh_public_key
+
+  # ip to communicate for now with the prod network through louvre
+  ipconfig0 = "ip=192.168.128.2/24,gw=192.168.128.1"
+
+  disk {
+    id = 0
+    type = "virtio"
+    storage = "orsay-ssd-2018"
+    storage_type = "ssd"
+    size = "32G"
+  }
+  disk {
+    id = 1
+    type = "virtio"
+    storage = "orsay-ssd-2018"
+    storage_type = "ssd"
+    size = "512G"
+  }
+
+  network {
+    id = 0
+    model = "virtio"
+    bridge = "vmbr443"
     macaddr = "CA:73:7F:ED:F9:01"
   }
-}
 
-output "storage0_summary" {
-  value = module.storage0.summary
+  # Delegate to puppet at the end of the provisioning the software setup
+  provisioner "remote-exec" {
+    inline = [
+      "sed -i 's/127.0.1.1/192.168.128.2/g' /etc/hosts",
+      "puppet agent --server ${var.puppet_master} --environment=${var.puppet_environment} --waitforcert 60 --test || echo 'Node provisionned!'",
+    ]
+    connection {
+      type = "ssh"
+      user = "root"
+      host = "192.168.128.2"
+    }
+  }
+
+  lifecycle {
+    ignore_changes = [
+      bootdisk,
+      scsihw,
+      network,
+      disk
+    ]
+  }
 }
 
 module "db0" {
   source = "./modules/node"
   config = local.config
+  hypervisor = "orsay"
+  vmid = 115
   hostname = "db0"
   description = "Node to host storage/indexer/scheduler dbs"
   cores = "4"
   memory = "16384"
+  balloon = 1024
   network = {
     ip = "192.168.128.3"
     macaddr = "3A:65:31:7C:24:17"
+    bridge = "vmbr443"
   }
   storage = {
     location = "orsay-ssd-2018"
-    size = "100G"
+    size = "400G"
   }
+
 }
 
 output "db0_summary" {
   value = module.db0.summary
 }
 
 module "scheduler0" {
   source = "./modules/node"
   config = local.config
+  vmid = 116
   hostname = "scheduler0"
   description = "Scheduler api services"
+  hypervisor = "beaubourg"
   cores = "4"
-  memory = "16384"
+  memory = "8192"
+  balloon = 1024
   network = {
     ip = "192.168.128.4"
     macaddr = "92:02:7E:D0:B9:36"
+    bridge = "vmbr443"
   }
 }
 
 output "scheduler0_summary" {
   value = module.scheduler0.summary
 }
 
 module "worker0" {
   source = "./modules/node"
   config = local.config
+  vmid = 117
   hostname = "worker0"
   description = "Loader/lister service node"
+  hypervisor = "beaubourg"
   cores = "4"
-  memory = "16384"
+  memory = "12288"
+  balloon = 1024
   network = {
     ip = "192.168.128.5"
     macaddr = "72:D9:03:46:B1:47"
   }
 }
 
 output "worker0_summary" {
   value = module.worker0.summary
 }
 
 module "worker1" {
   source = "./modules/node"
   config = local.config
+  vmid = 118
   hostname = "worker1"
   description = "Loader/lister service node"
+  hypervisor = "beaubourg"
   cores = "4"
-  memory = "16384"
+  memory = "12288"
+  balloon = 1024
   network = {
     ip = "192.168.128.6"
     macaddr = "D6:A9:6F:02:E3:66"
+    bridge = "vmbr443"
   }
 }
 
 output "worker1_summary" {
   value = module.worker1.summary
 }
 
 module "webapp" {
   source = "./modules/node"
   config = local.config
+  vmid = 119
   hostname = "webapp"
   description = "Archive/Webapp service node"
+  hypervisor = "branly"
   cores = "4"
   memory = "16384"
+  balloon = 1024
   network = {
     ip = "192.168.128.8"
     macaddr = "1A:00:39:95:D4:5F"
+    bridge = "vmbr443"
   }
 }
 
 output "webapp_summary" {
   value = module.webapp.summary
 }
 
 module "deposit" {
   source = "./modules/node"
   config = local.config
+  vmid = 120
   hostname = "deposit"
   description = "Deposit service node"
+  hypervisor = "beaubourg"
   cores = "4"
-  memory = "16384"
+  memory = "8192"
+  balloon = 1024
   network = {
     ip = "192.168.128.7"
     macaddr = "9E:81:DD:58:15:3B"
+    bridge = "vmbr443"
   }
 }
 
 output "deposit_summary" {
   value = module.deposit.summary
 }
 
 module "vault" {
   source = "./modules/node"
   config = local.config
+  vmid = 121
   hostname = "vault"
   description = "Vault services node"
+  hypervisor = "beaubourg"
   cores = "4"
-  memory = "16384"
+  memory = "8192"
+  balloon = 1024
   network = {
     ip = "192.168.128.9"
     macaddr = "16:15:1C:79:CB:DB"
+    bridge = "vmbr443"
   }
 }
 
 output "vault_summary" {
   value = module.vault.summary
 }
 
 module "journal0" {
   source = "./modules/node"
   config = local.config
+  vmid = 122
   hostname = "journal0"
   description = "Journal services node"
+  hypervisor = "beaubourg"
   cores = "4"
-  memory = "16384"
+  memory = "12288"
+  balloon = 1024
   network = {
     ip = "192.168.128.10"
     macaddr = "1E:98:C2:66:BF:33"
+    bridge = "vmbr443"
  }
 }
 
 output "journal0_summary" {
   value = module.journal0.summary
 }
 
 module "worker2" {
   source = "./modules/node"
   config = local.config
+  vmid = 112
   hostname = "worker2"
   description = "Loader/lister service node"
+  hypervisor = "branly"
   cores = "4"
-  memory = "16384"
+  memory = "12288"
+  balloon = 1024
   network = {
     ip = "192.168.128.11"
     macaddr = "AA:57:27:51:75:18"
+    bridge = "vmbr443"
   }
 }
 
 output "worker2_summary" {
   value = module.worker2.summary
 }
-
diff --git a/proxmox/terraform/zookeeper.tf b/proxmox/terraform/zookeeper.tf
index b5e5e29..5b8718a 100644
--- a/proxmox/terraform/zookeeper.tf
+++ b/proxmox/terraform/zookeeper.tf
@@ -1,80 +1,83 @@
 module "zookeeper1" {
   source = "./modules/node"
   config = {
     dns = var.dns
     domain = "internal.softwareheritage.org"
     puppet_environment = "production"
     puppet_master = var.puppet_master
     gateway_ip = "192.168.100.1"
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
   }
 
   hostname = "zookeeper1"
   description = "Zookeeper server"
+  hypervisor = "hypervisor3"
+  vmid = 125
   cores = "2"
   memory = "4096"
   network = {
     ip = "192.168.100.131"
+    bridge = "vmbr0"
   }
-  hypervisor = "hypervisor3"
   storage = {
-    location = "hypervisor3-ssd"
+    location = "proxmox"
     size = "32G"
   }
-  template = "template-debian-10"
 }
 
 module "zookeeper2" {
   source = "./modules/node"
   config = {
     dns = var.dns
     domain = "internal.softwareheritage.org"
     puppet_environment = "production"
     puppet_master = var.puppet_master
     gateway_ip = "192.168.100.1"
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
   }
 
   hostname = "zookeeper2"
   description = "Zookeeper server"
+  hypervisor = "branly"
+  vmid = 124
   cores = "2"
   memory = "4096"
   network = {
     ip = "192.168.100.132"
+    bridge = "vmbr0"
   }
-  hypervisor = "hypervisor3"
   storage = {
-    location = "hypervisor3-ssd"
+    location = "proxmox"
     size = "32G"
   }
-  template = "template-debian-10"
 }
 
 module "zookeeper3" {
   source = "./modules/node"
   config = {
     dns = var.dns
     domain = "internal.softwareheritage.org"
     puppet_environment = "production"
     puppet_master = var.puppet_master
     gateway_ip = "192.168.100.1"
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
   }
 
   hostname = "zookeeper3"
   description = "Zookeeper server"
+  hypervisor = "beaubourg"
+  vmid = 101
   cores = "2"
   memory = "4096"
   network = {
     ip = "192.168.100.133"
+    bridge = "vmbr0"
  }
-  hypervisor = "hypervisor3"
   storage = {
-    location = "hypervisor3-ssd"
+    location = "proxmox"
     size = "32G"
   }
-  template = "template-debian-10"
 }