diff --git a/proxmox/terraform/admin/cluster-argo.tf b/proxmox/terraform/admin/cluster-argo.tf
index 2bde56b..4c2f49d 100644
--- a/proxmox/terraform/admin/cluster-argo.tf
+++ b/proxmox/terraform/admin/cluster-argo.tf
@@ -1,58 +1,58 @@
 resource "rancher2_cluster" "cluster-argo" {
   name        = "cluster-argo"
   description = "cluster for argo tools (cd, workflows)"
   rke_config {
     kubernetes_version = "v1.21.12-rancher1-1"
     network {
       plugin = "canal"
     }
   }
 }

-output "cluster-argo-cluster-summary" {
+output "cluster-argo-config-summary" {
   sensitive = true
   value     = rancher2_cluster.cluster-argo.kube_config
 }

-output "cluster-argo-cluster-command" {
+output "cluster-argo-register-command" {
   sensitive = true
   value     = rancher2_cluster.cluster-argo.cluster_registration_token[0].node_command
 }

 module "argo-worker01" {
   hostname    = "argo-worker01"
   vmid        = 166
   source      = "../modules/node"
   template    = var.templates["stable-zfs"]
   config      = local.config
   description = "Argo worker"
   hypervisor  = "uffizi"
   sockets     = "1"
   cores       = "4"
   onboot      = true
   memory      = "16384"
   balloon     = "8192"

   networks = [{
     id      = 0
     ip      = "192.168.50.40"
     gateway = local.config["gateway_ip"]
     bridge  = local.config["vlan"]
   }]

   storages = [{
     storage = "proxmox"
     size    = "20G"
     }, {
     storage = "proxmox"
     size    = "50G"
     }
   ]

   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.cluster-argo.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }
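The renamed outputs pair directly with kubectl; a minimal sketch of consuming
them, assuming Terraform >= 0.14 for "output -raw" (the kubeconfig file name
is illustrative, not from these manifests):

  $ terraform output -raw cluster-argo-config-summary > cluster-argo.kubeconfig
  $ kubectl --kubeconfig cluster-argo.kubeconfig get nodes
  $ terraform output -raw cluster-argo-register-command   # command run on nodes to join the cluster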
diff --git a/proxmox/terraform/staging/cluster-graphql.tf b/proxmox/terraform/staging/cluster-graphql.tf
index 3f313ef..cabfe70 100644
--- a/proxmox/terraform/staging/cluster-graphql.tf
+++ b/proxmox/terraform/staging/cluster-graphql.tf
@@ -1,79 +1,79 @@
 # This declares terraform manifests to provision vms and register containers within
 # those to a rancher (clusters management service) instance.

 # Each software has the following responsibilities:
 # - proxmox: provision vms (with docker dependency)
 # - rancher: installs kube cluster within containers (running on vms)

 # Requires (RANCHER_ACCESS_KEY and RANCHER_SECRET_KEY) in your shell environment
 # $ cat ~/.config/terraform/swh/setup.sh
 # ...
 # key_entry=operations/rancher/azure/elastic-loader-lister-keys
 # export RANCHER_ACCESS_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f1)
 # export RANCHER_SECRET_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f2)

 # Plan:
 # - create cluster with terraform
 # - Create nodes as usual through terraform
 # - Retrieve the registration command (out of the cluster creation step) to provide new
 #   node

 resource "rancher2_cluster" "cluster-graphql" {
   name        = "cluster-graphql"
   description = "graphql staging cluster"
   rke_config {
     network {
       plugin = "canal"
     }
   }
 }

-output "rancher2_cluster_graphql_summary" {
+output "cluster-graphql-config-summary" {
   sensitive = true
   value     = rancher2_cluster.cluster-graphql.kube_config
 }

-output "rancher2_cluster_graphql_command" {
+output "cluster-graphql-register-command" {
   sensitive = true
   value     = rancher2_cluster.cluster-graphql.cluster_registration_token[0].node_command
 }

 module "graphql-worker0" {
   source      = "../modules/node"
   vmid        = 162
   template    = var.templates["stable-zfs"]
   config      = local.config
   hostname    = "graphql-worker0"
   description = "elastic worker running in rancher cluster"
   hypervisor  = "uffizi"
   sockets     = "1"
   cores       = "4"
   onboot      = true
   memory      = "8192"
   balloon     = "4096"

   networks = [{
     id      = 0
     ip      = "192.168.130.150"
     gateway = local.config["gateway_ip"]
     bridge  = "vmbr443"
   }]

   storages = [{
     storage = "proxmox"
     size    = "20G"
     }, {
     storage = "proxmox"
     size    = "50G"
     }
   ]

   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.cluster-graphql.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "graphql-worker0_summary" {
   value = module.graphql-worker0.summary
 }
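For context, cluster_registration_token[0].node_command resolves to a docker
run of the rancher agent; a hedged sketch of its general shape (server URL,
agent version, and token below are placeholders, not values from this diff):

  $ sudo docker run -d --privileged --restart=unless-stopped --net=host \
      -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run \
      rancher/rancher-agent:v2.5.12 --server https://rancher.example.org \
      --token <registration-token> --etcd --controlplane --worker

The role flags appended in post_provision_steps are what give graphql-worker0
its etcd, controlplane and worker roles.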
diff --git a/proxmox/terraform/staging/cluster-graphql3.tf b/proxmox/terraform/staging/cluster-graphql3.tf
index 896f16e..35ced37 100644
--- a/proxmox/terraform/staging/cluster-graphql3.tf
+++ b/proxmox/terraform/staging/cluster-graphql3.tf
@@ -1,160 +1,160 @@
 # This declares terraform manifests to provision vms and register containers within
 # those to a rancher (clusters management service) instance.

 # Each software has the following responsibilities:
 # - proxmox: provision vms (with docker dependency)
 # - rancher: installs kube cluster within containers (running on vms)

 # Requires (RANCHER_ACCESS_KEY and RANCHER_SECRET_KEY) in your shell environment
 # $ cat ~/.config/terraform/swh/setup.sh
 # ...
 # key_entry=operations/rancher/azure/elastic-loader-lister-keys
 # export RANCHER_ACCESS_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f1)
 # export RANCHER_SECRET_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f2)

 # Plan:
 # - create cluster with terraform
 # - Create nodes as usual through terraform
 # - Retrieve the registration command (out of the cluster creation step) to provide new
 #   node

 resource "rancher2_cluster" "cluster-graphql3" {
   name        = "cluster-graphql3"
   description = "3rd (tryout) graphql staging cluster"
   rke_config {
     kubernetes_version = "v1.21.12-rancher1-1"
     network {
       plugin = "canal"
     }
   }
 }

-output "rancher2_cluster_graphql3_summary" {
+output "cluster-graphql3-config-summary" {
   sensitive = true
   value     = rancher2_cluster.cluster-graphql3.kube_config
 }

-output "rancher2_cluster_graphql3_command" {
+output "cluster-graphql3-register-command" {
   sensitive = true
   value     = rancher2_cluster.cluster-graphql3.cluster_registration_token[0].node_command
 }

 module "graphql-worker3" {
   source      = "../modules/node"
   vmid        = 165
   template    = var.templates["stable-zfs"]
   config      = local.config
   hostname    = "graphql-worker3"
   description = "graphql worker running in rancher cluster"
   hypervisor  = "uffizi"
   sockets     = "1"
   cores       = "4"
   onboot      = true
   memory      = "16384"
   balloon     = "8192"

   networks = [{
     id      = 0
     ip      = "192.168.130.153"
     gateway = local.config["gateway_ip"]
     bridge  = "vmbr443"
   }]

   storages = [{
     storage = "proxmox"
     size    = "20G"
     }, {
     storage = "proxmox"
     size    = "50G"
     }
   ]

   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.cluster-graphql3.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
   ]
 }

 output "graphql-worker3_summary" {
   value = module.graphql-worker3.summary
 }

 module "graphql-worker2" {
   source      = "../modules/node"
   vmid        = 164
   template    = var.templates["stable-zfs"]
   config      = local.config
   hostname    = "graphql-worker2"
   description = "graphql worker running in rancher cluster"
   hypervisor  = "uffizi"
   sockets     = "1"
   cores       = "4"
   onboot      = true
   memory      = "8192"
   balloon     = "4096"

   networks = [{
     id      = 0
     ip      = "192.168.130.152"
     gateway = local.config["gateway_ip"]
     bridge  = "vmbr443"
   }]

   storages = [{
     storage = "proxmox"
     size    = "20G"
     }, {
     storage = "proxmox"
     size    = "50G"
     }
   ]

   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.cluster-graphql3.cluster_registration_token[0].node_command} --worker"
   ]
 }

 output "graphql-worker2_summary" {
   value = module.graphql-worker2.summary
 }

 module "graphql-worker1" {
   source      = "../modules/node"
   vmid        = 163
   template    = var.templates["stable-zfs"]
   config      = local.config
   hostname    = "graphql-worker1"
   description = "graphql worker running in rancher cluster"
   hypervisor  = "uffizi"
   sockets     = "1"
   cores       = "4"
   onboot      = true
   memory      = "8192"
   balloon     = "4096"

   networks = [{
     id      = 0
     ip      = "192.168.130.151"
     gateway = local.config["gateway_ip"]
     bridge  = "vmbr443"
   }]

   storages = [{
     storage = "proxmox"
     size    = "20G"
     }, {
     storage = "proxmox"
     size    = "50G"
     }
   ]

   post_provision_steps = [
     "systemctl restart docker", # workaround
     "${rancher2_cluster.cluster-graphql3.cluster_registration_token[0].node_command} --worker"
   ]
 }

 output "graphql-worker1_summary" {
   value = module.graphql-worker1.summary
 }
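Taken together, graphql-worker3 registers with --etcd --controlplane --worker
while graphql-worker1 and graphql-worker2 join as plain workers, i.e. a single
control-plane node plus two workers. A minimal sketch of the overall flow,
assuming the setup.sh quoted in the file comments (command sequence
illustrative, not prescribed by these manifests):

  $ source ~/.config/terraform/swh/setup.sh    # exports RANCHER_ACCESS_KEY / RANCHER_SECRET_KEY
  $ terraform init
  $ terraform apply                            # provisions vms, nodes self-register via post_provision_steps
  $ terraform output -raw cluster-graphql3-config-summary > cluster-graphql3.kubeconfig
  $ kubectl --kubeconfig cluster-graphql3.kubeconfig get nodes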