diff --git a/proxmox/terraform/admin/admin.tf b/proxmox/terraform/admin/admin.tf
index 3b16c9a..89fe534 100644
--- a/proxmox/terraform/admin/admin.tf
+++ b/proxmox/terraform/admin/admin.tf
@@ -1,212 +1,212 @@
 locals {
   config = {
     dns = var.dns
     domain = "internal.admin.swh.network"
     puppet_environment = "production"
     facter_deployment = "admin"
     facter_subnet = "sesi_rocquencourt_admin"
     puppet_master = var.puppet_master
     gateway_ip = "192.168.50.1"
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
     user_admin_ssh_private_key_path = var.user_admin_ssh_private_key_path
     vlan = "vmbr442"
   }
 }

 module "bardo" {
   source = "../modules/node"
   config = local.config
   hostname = "bardo"
   description = "Hedgedoc instance"
   hypervisor = "branly"
   vmid = 124
   cores = "2"
   memory = "8192"
   balloon = 1024
   full_clone = true
   networks = [{
     id = 0
     ip = "192.168.50.10"
     gateway = local.config["gateway_ip"]
     macaddr = "7A:CE:A2:72:FA:E8"
     bridge = local.config["vlan"]
   }]
 }

 module "rp1" {
   source = "../modules/node"
   config = local.config
   hostname = "rp1"
   description = "reverse-proxy"
   hypervisor = "branly"
   vmid = 115
   cores = 2
   memory = 4096
   balloon = 1024
   full_clone = true
   networks = [{
     id = 0
     ip = "192.168.50.20"
     gateway = local.config["gateway_ip"]
     macaddr = "4E:42:20:E0:B6:65"
     bridge = local.config["vlan"]
   }]
 }

 module "dali" {
   source = "../modules/node"
   config = local.config
-  template = "debian-bullseye-11.2-2022-01-03"
+  template = var.templates["stable"]
   hostname = "dali"
   description = "admin databases host"
   hypervisor = "branly"
   vmid = 144
   cores = 4
   memory = 16384
   balloon = 8192
   networks = [{
     id = 0
     ip = "192.168.50.50"
     gateway = local.config["gateway_ip"]
     macaddr = "C2:7C:85:D0:E8:7C"
     bridge = local.config["vlan"]
   }]
   storages = [
     {
       id = 0
       storage = "proxmox"
       size = "32G"
     },
     {
       id = 1
       storage = "proxmox"
       size = "200G"
     }
   ]
 }

 output "dali_summary" {
   value = module.dali.summary
 }

 module "grafana0" {
   source = "../modules/node"
   config = local.config
-  template = "debian-bullseye-11.2-2022-01-03"
+  template = var.templates["stable"]
   hostname = "grafana0"
   description = "Grafana server"
   hypervisor = "branly"
   vmid = 108
   cores = 4
   memory = 4096
   balloon = 2048
   networks = [{
     id = 0
     ip = "192.168.50.30"
     gateway = local.config["gateway_ip"]
     macaddr = "B2:CB:D9:09:D3:3B"
     bridge = local.config["vlan"]
   }]
 }

 output "grafana0_summary" {
   value = module.grafana0.summary
 }

 module "bojimans" {
   source = "../modules/node"
   config = local.config
-  template = "debian-bullseye-11.2-2022-01-03"
+  template = var.templates["stable"]
   hostname = "bojimans"
   description = "Inventory server (netbox)"
   hypervisor = "branly"
   cpu = "kvm64"
   vmid = 127
   sockets = 2
   cores = 1
   memory = 4096
   balloon = 2048
   networks = [{
     id = 0
     ip = "192.168.50.60"
     gateway = "192.168.50.1"
     macaddr = "EE:ED:A6:A0:78:9F"
     bridge = local.config["vlan"]
   }]
   storages = [{
     id = 0
     storage = "proxmox"
     size = "20G"
   }]
 }

 output "bojimans_summary" {
   value = module.bojimans.summary
 }

 module "money" {
   source = "../modules/node"
   config = local.config
-  template = "debian-bullseye-11.2-2022-01-03"
+  template = var.templates["stable"]
   hostname = "money"
   description = "Azure billing reporting server"
   hypervisor = "branly"
   cpu = "kvm64"
   vmid = 140
   sockets = 2
   cores = 1
   memory = 2048
   balloon = 1024
   networks = [{
     id = 0
     ip = "192.168.50.65"
     gateway = "192.168.50.1"
     macaddr = ""
     bridge = local.config["vlan"]
   }]
   storages = [{
     id = 0
     storage = "proxmox"
     size = "20G"
   }]
 }

 output "money_summary" {
   value = module.money.summary
 }

 module "thanos" {
   source = "../modules/node"
   config = local.config
   onboot = true
-  template = "debian-bullseye-11.2-2022-01-03"
+  template = var.templates["stable"]
   hostname = "thanos"
   description = "Thanos query service"
   hypervisor = "branly"
   sockets = "1"
   cores = "4"
   memory = "4096"
   balloon = "1024"
   networks = [{
     id = 0
     ip = "192.168.50.90"
     gateway = local.config["gateway_ip"]
     macaddr = "16:3C:72:26:70:34"
     bridge = local.config["vlan"]
   }]
 }

 output "thanos_summary" {
   value = module.thanos.summary
 }
diff --git a/proxmox/terraform/modules/node/variables.tf b/proxmox/terraform/modules/node/variables.tf
index 7b97bbd..29eaa12 100644
--- a/proxmox/terraform/modules/node/variables.tf
+++ b/proxmox/terraform/modules/node/variables.tf
@@ -1,138 +1,138 @@
 variable "hostname" {
   description = "Node's hostname"
   type = string
 }

 variable "domainname" {
   description = "Domain name. If empty the config domain is used as fallback."
   type = string
   default = ""
 }

 variable "description" {
   description = "Node's description"
   type = string
 }

 variable "hypervisor" {
   description = "Hypervisor to install the vm to (choice: orsay, hypervisor3, beaubourg, branly)"
   type = string
 }

 variable "template" {
   description = "Debian image template created by packer"
   # Note: use "buster" template for node with swh services (storage, objstorage, ...).
   # You can use latest "bullseye" templates otherwise.
   type = string
   default = "debian-buster-10.10-2021-09-09"
   # other possible template values:
-  # - debian-bullseye-2022-04-21
-  # - debian-bullseye-zfs-2022-04-21 (for extra zfs dependencies)
+  # - debian-bullseye-11.4-2022-07-27
+  # - debian-bullseye-11.4-zfs-2022-07-27 (for extra zfs dependencies)
 }

 variable "sockets" {
   description = "Number of sockets"
   type = number
   default = 1
 }

 variable "cores" {
   description = "Number of cores"
   type = number
   default = 1
 }

 variable "memory" {
   description = "Memory in Mb"
   type = number
   default = 1024
 }

 variable "networks" {
   description = "Default networks configuration (id, ip, gateway, macaddr, bridge)"
   type = list(object({
     id = number
     ip = string
     gateway = string
     macaddr = optional(string)
     bridge = string
   }))
   default = []
 }

 variable "vmid" {
   description = "virtual machine id"
   type = number
   default = null
 }

 variable "balloon" {
   description = "ballooning option"
   type = number
   default = 0
 }

 variable "numa" {
   type = bool
   default = false
 }

 variable "storages" {
   description = "Default disks configuration (storage, size)"
   type = list(object({
     storage = string
     size = string
   }))
   default = [{
     storage = "proxmox"
     size = "32G"
   }]
 }

 variable "config" {
   description = "Local config to avoid duplication from the main module"
   type = map(string)
 }

 variable "args" {
   description = "args to pass to the qemu command. should not be used in most cases"
   type = string
   default = ""
 }

 variable "pre_provision_steps" {
   description = "Sequential provisioning steps to apply *before* common provision steps"
   type = list(string)
   default = []
 }

 variable "post_provision_steps" {
   description = "Sequential provisioning steps to apply *after* common provision steps"
   type = list(string)
   default = []
 }

 variable "cicustom" {
   description = "custom ci parameter"
   type = string
   default = ""
 }

 variable "full_clone" {
   description = "Full clone the template"
   type = bool
   default = false
 }

 variable "cpu" {
   description = "CPU type possible values (not exhaustive): kvm64, host, ... The default is kvm64 and must be specified to avoid issues on refresh"
   type = string
   default = "kvm64"
 }

 variable "onboot" {
   description = "Start the vm on hypervisor boot"
   type = bool
   default = true
 }
diff --git a/proxmox/terraform/production/cassandra.tf b/proxmox/terraform/production/cassandra.tf
index 1cefc5c..9cabd48 100644
--- a/proxmox/terraform/production/cassandra.tf
+++ b/proxmox/terraform/production/cassandra.tf
@@ -1,166 +1,166 @@
 # Provision the rancher cluster for the cassandra
 # nodes in production
 #
 # Provision 3 vms in proxmox to manage the etcd cluster
 # and the kubernetes control plane

 provider "rancher2" {
   api_url = "https://rancher.euwest.azure.internal.softwareheritage.org/v3"
   insecure = true
 }

 resource "rancher2_cluster" "production_cassandra" {
   name = "production-cassandra"
   description = "Production - Cassandra cluster"
   rke_config {
     kubernetes_version = "v1.22.10-rancher1-1"
     network {
       plugin = "canal"
     }
     services {
       kubelet {
         extra_binds = [
           "/srv/prometheus:/srv/prometheus", # prometheus datastore on mngmt nodes
           "/srv/cassandra_commitlog:/srv/cassandra_commitlog", # writeintensive disks
           "/srv/cassandra_data:/srv/cassandra_data", # mixeduse disks
         ]
       }
     }
   }
 }

 output "production_cassandra_cluster_summary" {
   sensitive = true
   value = rancher2_cluster.production_cassandra.kube_config
 }

 output "production_cassandra_cluster_command" {
   sensitive = true
   value = rancher2_cluster.production_cassandra.cluster_registration_token[0].node_command
 }

 module "rancher_node_cassandra1" {
   hostname = "rancher-node-cassandra1"
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   config = local.config
   description = "Kubernetes management node for cassandra cluster"
   hypervisor = "beaubourg"
   vmid = 159
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.100.178"
     gateway = local.config["gateway_ip"]
     macaddr = ""
     bridge = "vmbr0"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.production_cassandra.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "rancher_node_cassandra1_summary" {
   value = module.rancher_node_cassandra1.summary
 }

 module "rancher_node_cassandra2" {
   hostname = "rancher-node-cassandra2"
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   config = local.config
   description = "Kubernetes management node for cassandra cluster"
   hypervisor = "branly"
   vmid = 160
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.100.179"
     gateway = local.config["gateway_ip"]
     macaddr = ""
     bridge = "vmbr0"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.production_cassandra.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "rancher_node_cassandra2_summary" {
   value = module.rancher_node_cassandra2.summary
 }

 module "rancher_node_cassandra3" {
   hostname = "rancher-node-cassandra3"
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   config = local.config
   description = "Kubernetes management node for cassandra cluster"
   hypervisor = "hypervisor3"
   vmid = 161
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.100.180"
     gateway = local.config["gateway_ip"]
     macaddr = ""
     bridge = "vmbr0"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.production_cassandra.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "rancher_node_cassandra3_summary" {
   value = module.rancher_node_cassandra3.summary
 }
diff --git a/proxmox/terraform/production/production.tf b/proxmox/terraform/production/production.tf
index 2f75610..a43c47d 100644
--- a/proxmox/terraform/production/production.tf
+++ b/proxmox/terraform/production/production.tf
@@ -1,186 +1,186 @@
 locals {
   config = {
     dns = var.dns
     domain = "internal.softwareheritage.org"
     puppet_environment = "production"
     facter_deployment = "production"
     facter_subnet = "sesi_rocquencourt"
     puppet_master = var.puppet_master
     gateway_ip = "192.168.100.1"
     user_admin = var.user_admin
     user_admin_ssh_public_key = var.user_admin_ssh_public_key
     user_admin_ssh_private_key_path = var.user_admin_ssh_private_key_path
   }
 }

 module "kelvingrove" {
   source = "../modules/node"
   config = local.config
   hostname = "kelvingrove"
   description = "Keycloak server"
   hypervisor = "hypervisor3"
   cores = "4"
   memory = "8192"
   cpu = "host"
   numa = true
   balloon = 0
   networks = [{
     id = 0
     ip = "192.168.100.106"
     gateway = local.config["gateway_ip"]
     macaddr = "72:55:5E:58:01:0B"
     bridge = "vmbr0"
   }]
 }

 module "webapp1" {
   source = "../modules/node"
   config = local.config
   hostname = "webapp1"
   description = "Webapp for swh-search tests"
   hypervisor = "branly"
   cores = "2"
   memory = "8192"
   balloon = 2048
   networks = [{
     id = 0
     ip = "192.168.100.71"
     gateway = local.config["gateway_ip"]
     macaddr = "06:FF:02:95:31:CF"
     bridge = "vmbr0"
   }]
 }

 module "search1" {
   source = "../modules/node"
   config = local.config
   hostname = "search1"
   description = "swh-search node"
   hypervisor = "branly"
   cores = "4"
   memory = "6144"
   balloon = 1024
   networks = [{
     id = 0
     ip = "192.168.100.85"
     gateway = local.config["gateway_ip"]
     macaddr = "3E:46:D3:88:44:F4"
     bridge = "vmbr0"
   }]
 }

 module "counters1" {
   source = "../modules/node"
   config = local.config
   hostname = "counters1"
   description = "swh-counters node"
   hypervisor = "branly"
   cores = "4"
   memory = "2048"
   balloon = 1024
   networks = [{
     id = 0
     ip = "192.168.100.95"
     gateway = local.config["gateway_ip"]
     macaddr = "26:8E:7F:D1:F7:99"
     bridge = "vmbr0"
   }]
 }

 module "worker17" {
   source = "../modules/node"
   config = local.config
   hostname = "worker17"
   domainname = "softwareheritage.org"
   description = "swh-worker node (temporary)"
   hypervisor = "uffizi"
   cores = "5"
   sockets = "2"
   memory = "49152"
   balloon = 1024
   networks = [{
     id = 0
     ip = "192.168.100.43"
     gateway = local.config["gateway_ip"]
     macaddr = "36:E0:2D:70:7C:52"
     bridge = "vmbr0"
   }]
 }

 module "worker18" {
   source = "../modules/node"
   config = local.config
   hostname = "worker18"
   domainname = "softwareheritage.org"
   description = "swh-worker node (temporary)"
   hypervisor = "uffizi"
   cores = "5"
   sockets = "2"
   memory = "49152"
   balloon = 1024
   networks = [{
     id = 0
     ip = "192.168.100.44"
     gateway = local.config["gateway_ip"]
     macaddr = "C6:29:D9:ED:9C:6B"
     bridge = "vmbr0"
   }]
 }

 output "worker18_summary" {
   value = module.worker18.summary
 }

 module "provenance-client01" {
   source = "../modules/node"
   config = local.config
   hostname = "provenance-client01"
   description = "Provenance client"
-  template = "debian-bullseye-11.2-2022-01-03"
+  template = var.templates["stable"]
   hypervisor = "uffizi"
   cores = "4"
   sockets = "4"
   memory = "131072"
   balloon = 32768
   networks = [{
     id = 0
     ip = "192.168.100.111"
     gateway = local.config["gateway_ip"]
     macaddr = null
     bridge = "vmbr0"
   }]
 }

 module "scrubber1" {
   source = "../modules/node"
   config = local.config
   vmid = 153
   onboot = true
   hostname = "scrubber1"
   description = "Scrubber checker services"
   hypervisor = "branly"
   sockets = "1"
   cores = "4"
   memory = "4096"
   balloon = "1024"
   networks = [{
     id = 0
     ip = "192.168.100.90"
     gateway = local.config["gateway_ip"]
     macaddr = "B2:E5:3F:E2:77:13"
     bridge = "vmbr0"
   }]
 }

 output "scrubber1_summary" {
   value = module.scrubber1.summary
 }
diff --git a/proxmox/terraform/staging/cluster-elastic-workers.tf b/proxmox/terraform/staging/cluster-elastic-workers.tf
index 266e91f..a7d99b3 100644
--- a/proxmox/terraform/staging/cluster-elastic-workers.tf
+++ b/proxmox/terraform/staging/cluster-elastic-workers.tf
@@ -1,215 +1,215 @@
 # Plan:
 # - create cluster with terraform
 # - Create nodes as usual through terraform
 # - Retrieve the registration command (out of the cluster creation step) to provide new
 # node

 resource "rancher2_cluster" "staging-workers" {
   name = "staging-workers"
   description = "staging workers cluster"
   rke_config {
     network {
       plugin = "canal"
     }
   }
 }

 output "rancher2_cluster_staging_workers_summary" {
   sensitive = true
   value = rancher2_cluster.staging-workers.kube_config
 }

 output "rancher2_cluster_staging_worker_command" {
   sensitive = true
   value = rancher2_cluster.staging-workers.cluster_registration_token[0].node_command
 }

 module "elastic-worker0" {
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   vmid = 146
   config = local.config
   hostname = "elastic-worker0"
   description = "elastic worker running in rancher cluster"
   hypervisor = "uffizi"
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.130.130"
     gateway = local.config["gateway_ip"]
     macaddr = "72:CF:A9:AC:B8:EE"
     bridge = "vmbr443"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.staging-workers.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "elastic-worker0_summary" {
   value = module.elastic-worker0.summary
 }

 module "elastic-worker1" {
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   config = local.config
   hostname = "elastic-worker1"
   description = "elastic worker running in rancher cluster"
   hypervisor = "uffizi"
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.130.131"
     gateway = local.config["gateway_ip"]
     bridge = "vmbr443"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.staging-workers.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "elastic-worker1_summary" {
   value = module.elastic-worker1.summary
 }

 module "elastic-worker2" {
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   config = local.config
   hostname = "elastic-worker2"
   description = "elastic worker running in rancher cluster"
   hypervisor = "uffizi"
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.130.132"
     gateway = local.config["gateway_ip"]
     bridge = "vmbr443"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.staging-workers.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "elastic-worker2_summary" {
   value = module.elastic-worker2.summary
 }

 module "elastic-worker3" {
   source = "../modules/node"
-  template = "debian-bullseye-11.3-zfs-2022-04-21"
+  template = var.templates["stable-zfs"]
   config = local.config
   hostname = "elastic-worker3"
   description = "elastic worker running in rancher cluster"
   hypervisor = "pompidou"
   sockets = "1"
   cores = "4"
   onboot = true
   memory = "8192"
   balloon = "4096"
   networks = [{
     id = 0
     ip = "192.168.130.133"
     gateway = local.config["gateway_ip"]
     bridge = "vmbr443"
   }]
   storages = [{
     storage = "proxmox"
     size = "20G"
   }, {
     storage = "proxmox"
     size = "50G"
   }]
   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.staging-workers.cluster_registration_token[0].node_command} --worker"
   ]
 }

 output "elastic-worker3_summary" {
   value = module.elastic-worker3.summary
 }

 resource "rancher2_app_v2" "rancher-monitoring" {
   cluster_id = rancher2_cluster.staging-workers.id
   name = "rancher-monitoring"
   namespace = "cattle-monitoring-system"
   repo_name = "rancher-charts"
   chart_name = "rancher-monitoring"
   # chart_version = "9.4.200"
   chart_version = "100.1.0+up19.0.3"
   values = <