Paste P1435: terraform destroy -target=rancher2_cluster.cluster-graphql3
Authored by ardumont on Aug 26 2022, 12:35 PM.
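This paste records a targeted destroy of the third (tryout) graphql staging cluster and its four proxmox worker VMs. For context, a minimal sketch of what the targeted resource could look like in the configuration, reconstructed only from attributes visible in the plan below (the actual declaration is not part of this paste):

    resource "rancher2_cluster" "cluster-graphql3" {
      name        = "cluster-graphql3"
      description = "3rd (tryout) graphql staging cluster"

      rke_config {
        kubernetes_version = "v1.21.12-rancher1-1"

        network {
          plugin = "canal"
        }
      }
    }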
terraform destroy -target=rancher2_cluster.cluster-graphql3
rancher2_cluster.cluster-graphql3: Refreshing state... [id=c-9mqhw]
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
  - destroy

Terraform will perform the following actions:

  # rancher2_cluster.cluster-graphql3 will be destroyed
  - resource "rancher2_cluster" "cluster-graphql3" {
      - annotations = {
          - "authz.management.cattle.io/creator-role-bindings" = jsonencode(
                {
                  - created = [
                      - "cluster-owner",
                    ]
                  - required = [
                      - "cluster-owner",
                    ]
                }
            )
          - "lifecycle.cattle.io/create.cluster-agent-controller-cleanup" = "true"
          - "lifecycle.cattle.io/create.cluster-provisioner-controller" = "true"
          - "lifecycle.cattle.io/create.cluster-scoped-gc" = "true"
          - "lifecycle.cattle.io/create.mgmt-cluster-rbac-remove" = "true"
          - "provisioner.cattle.io/encrypt-migrated" = "true"
          - "provisioner.cattle.io/ke-driver-update" = "updated"
        } -> null
      - ca_cert = (sensitive value)
      - cluster_registration_token = [
          - {
              - annotations = {}
              - cluster_id = "c-9mqhw"
              - command = "kubectl apply -f https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/29zzbsh6www7prb96bxnc2tt4jkw5dp96m6xr74stqqd58jmql7ld4_c-9mqhw.yaml"
              - id = "c-9mqhw:default-token"
              - insecure_command = "curl --insecure -sfL https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/29zzbsh6www7prb96bxnc2tt4jkw5dp96m6xr74stqqd58jmql7ld4_c-9mqhw.yaml | kubectl apply -f -"
              - insecure_node_command = ""
              - insecure_windows_node_command = ""
              - labels = {}
              - manifest_url = "https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/29zzbsh6www7prb96bxnc2tt4jkw5dp96m6xr74stqqd58jmql7ld4_c-9mqhw.yaml"
              - name = "default-token"
              - node_command = "sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.6.4 --server https://rancher.euwest.azure.internal.softwareheritage.org --token 29zzbsh6www7prb96bxnc2tt4jkw5dp96m6xr74stqqd58jmql7ld4 --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac"
              - token = "29zzbsh6www7prb96bxnc2tt4jkw5dp96m6xr74stqqd58jmql7ld4"
              - windows_node_command = "PowerShell -NoLogo -NonInteractive -Command \"& {docker run -v c:\\:c:\\host rancher/rancher-agent:v2.6.4 bootstrap --server https://rancher.euwest.azure.internal.softwareheritage.org --token 29zzbsh6www7prb96bxnc2tt4jkw5dp96m6xr74stqqd58jmql7ld4 --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac | iex}\""
            },
        ] -> null
      - default_project_id = "c-9mqhw:p-276t4" -> null
      - description = "3rd (tryout) graphql staging cluster" -> null
      - docker_root_dir = "/var/lib/docker" -> null
      - driver = "rancherKubernetesEngine" -> null
      - enable_cluster_alerting = false -> null
      - enable_cluster_monitoring = false -> null
      - enable_network_policy = false -> null
      - fleet_workspace_name = "fleet-default" -> null
      - id = "c-9mqhw" -> null
      - istio_enabled = false -> null
      - kube_config = (sensitive value)
      - labels = {
          - "cattle.io/creator" = "norman"
          - "provider.cattle.io" = "rke"
        } -> null
      - name = "cluster-graphql3" -> null
      - system_project_id = "c-9mqhw:p-cbrm8" -> null
      - windows_prefered_cluster = false -> null

      - cluster_auth_endpoint {
          - enabled = false -> null
        }

      - rke_config {
          - addon_job_timeout = 45 -> null
          - addons_include = [] -> null
          - enable_cri_dockerd = false -> null
          - ignore_docker_version = true -> null
          - kubernetes_version = "v1.21.12-rancher1-1" -> null
          - ssh_agent_auth = false -> null

          - authentication {
              - sans = [] -> null
              - strategy = "x509" -> null
            }

          - authorization {}

          - bastion_host {
              - ssh_agent_auth = false -> null
            }

          - cloud_provider {
            }

          - ingress {
              - default_backend = true -> null
              - extra_args = {} -> null
              - http_port = 0 -> null
              - https_port = 0 -> null
              - node_selector = {} -> null
              - options = {} -> null
              - provider = "nginx" -> null
            }

          - monitoring {
              - node_selector = {} -> null
              - options = {} -> null
              - provider = "metrics-server" -> null
              - replicas = 1 -> null
            }

          - network {
              - mtu = 0 -> null
              - options = {} -> null
              - plugin = "canal" -> null
            }

          - services {
              - etcd {
                  - creation = "12h" -> null
                  - external_urls = [] -> null
                  - extra_args = {
                      - "election-timeout" = "5000"
                      - "heartbeat-interval" = "500"
                    } -> null
                  - extra_binds = [] -> null
                  - extra_env = [] -> null
                  - gid = 0 -> null
                  - retention = "72h" -> null
                  - snapshot = false -> null
                  - uid = 0 -> null

                  - backup_config {
                      - enabled = true -> null
                      - interval_hours = 12 -> null
                      - retention = 6 -> null
                      - safe_timestamp = false -> null
                      - timeout = 300 -> null
                    }
                }

              - kube_api {
                  - admission_configuration = {} -> null
                  - always_pull_images = false -> null
                  - extra_args = {} -> null
                  - extra_binds = [] -> null
                  - extra_env = [] -> null
                  - pod_security_policy = false -> null
                  - service_node_port_range = "30000-32767" -> null
                }

              - kube_controller {}

              - kubelet {
                  - extra_args = {} -> null
                  - extra_binds = [] -> null
                  - extra_env = [] -> null
                  - fail_swap_on = false -> null
                  - generate_serving_certificate = false -> null
                }

              - kubeproxy {}

              - scheduler {}
            }

          - upgrade_strategy {
              - drain = false -> null
              - max_unavailable_controlplane = "1" -> null
              - max_unavailable_worker = "10%" -> null

              - drain_input {
                  - delete_local_data = false -> null
                  - force = false -> null
                  - grace_period = -1 -> null
                  - ignore_daemon_sets = true -> null
                  - timeout = 120 -> null
                }
            }
        }
    }
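The four graphql-worker modules below are swept into the destroy because they depend on the cluster. A hedged sketch of how such a module call is presumably wired, with grounded values taken from the plan and an assumed module source path and input name; a reference to the cluster's registration token would be what creates the dependency terraform follows when targeting:

    module "graphql-worker0" {
      source = "./modules/proxmox-node"  # assumed path, not shown in this paste

      name        = "graphql-worker0"
      vmid        = 162
      memory      = 8192
      target_node = "uffizi"

      # Hypothetical input: consuming the cluster's registration token is what
      # would make this module a dependent of rancher2_cluster.cluster-graphql3.
      node_command = rancher2_cluster.cluster-graphql3.cluster_registration_token[0].node_command
    }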

  # module.graphql-worker0.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 4096 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.3-zfs-2022-04-21" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "elastic worker running in rancher cluster" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/162" -> null
      - ipconfig0 = "ip=192.168.130.150/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 8192 -> null
      - name = "graphql-worker0" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null
      - vmid = 162 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10008-disk-0/vm-162-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10008-disk-0/vm-162-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-162-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-162-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "1A:C2:85:2A:B7:76" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }

  # module.graphql-worker1.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 4096 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.3-zfs-2022-04-21" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "graphql worker running in rancher cluster" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/163" -> null
      - ipconfig0 = "ip=192.168.130.151/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 8192 -> null
      - name = "graphql-worker1" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null
      - vmid = 163 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10008-disk-0/vm-163-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10008-disk-0/vm-163-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-163-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-163-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "96:9F:C9:61:45:67" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }

  # module.graphql-worker2.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 4096 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.4-zfs-2022-07-27" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "graphql worker running in rancher cluster" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/164" -> null
      - ipconfig0 = "ip=192.168.130.152/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 8192 -> null
      - name = "graphql-worker2" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null
      - vmid = 164 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10010-disk-0/vm-164-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10010-disk-0/vm-164-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-164-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-164-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "8A:17:9F:B3:5B:77" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }

  # module.graphql-worker3.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 8192 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.4-zfs-2022-07-27" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "graphql worker running in rancher cluster" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/165" -> null
      - ipconfig0 = "ip=192.168.130.153/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 16384 -> null
      - name = "graphql-worker3" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null
      - vmid = 165 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10010-disk-0/vm-165-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10010-disk-0/vm-165-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-165-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-165-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "BE:AC:7E:EB:E7:82" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }

Plan: 0 to add, 0 to change, 5 to destroy.
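Note the fan-out: a single -target was given, yet five resources are planned for destruction. When destroying, terraform extends the target set with every resource that depends on the target, which here pulls in the four worker modules. Spelling the targets out explicitly would have been equivalent:

    terraform destroy \
      -target=rancher2_cluster.cluster-graphql3 \
      -target=module.graphql-worker0 \
      -target=module.graphql-worker1 \
      -target=module.graphql-worker2 \
      -target=module.graphql-worker3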
│ Warning: Resource targeting is in effect
│ You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration.
│ The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.
│ Warning: Experimental feature "module_variable_optional_attrs" is active
│ on versions.tf line 3, in terraform:
│ 3: experiments = [module_variable_optional_attrs]
│ Experimental features are subject to breaking changes in future minor or patch releases, based on feedback.
│ If you have feedback on the design of this feature, please open a GitHub issue to discuss it.
│ (and 24 more similar warnings elsewhere)
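The experiment warning quotes versions.tf line 3, so the terraform block presumably reads roughly as follows (only the experiments line is confirmed by this paste; the surrounding block is an assumption):

    terraform {
      # line 3, as quoted by the warning:
      experiments = [module_variable_optional_attrs]
    }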
Do you really want to destroy all resources?
Terraform will destroy all your managed infrastructure, as shown above.
There is no undo. Only 'yes' will be accepted to confirm.
Enter a value: yes
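(Aside: in unattended runs this prompt can be skipped with terraform's -auto-approve flag, e.g. terraform destroy -target=rancher2_cluster.cluster-graphql3 -auto-approve; this session answered the prompt by hand.)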
module.graphql-worker2.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/164]
module.graphql-worker1.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/163]
module.graphql-worker0.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/162]
module.graphql-worker3.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/165]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 10s elapsed]
module.graphql-worker0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/162, 10s elapsed]
module.graphql-worker1.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/163, 10s elapsed]
module.graphql-worker3.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/165, 10s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 20s elapsed]
module.graphql-worker1.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/163, 20s elapsed]
module.graphql-worker0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/162, 20s elapsed]
module.graphql-worker3.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/165, 20s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 30s elapsed]
module.graphql-worker0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/162, 30s elapsed]
module.graphql-worker1.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/163, 30s elapsed]
module.graphql-worker3.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/165, 30s elapsed]
module.graphql-worker0.proxmox_vm_qemu.node: Destruction complete after 31s
module.graphql-worker3.proxmox_vm_qemu.node: Destruction complete after 31s
module.graphql-worker1.proxmox_vm_qemu.node: Destruction complete after 31s
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 40s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 50s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 1m0s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 1m10s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 1m20s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/164, 1m30s elapsed]
module.graphql-worker2.proxmox_vm_qemu.node: Destruction complete after 1m32s
rancher2_cluster.cluster-graphql3: Destroying... [id=c-9mqhw]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 10s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 20s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 30s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 40s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 50s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 1m0s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 1m10s elapsed]
rancher2_cluster.cluster-graphql3: Still destroying... [id=c-9mqhw, 1m20s elapsed]
rancher2_cluster.cluster-graphql3: Destruction complete after 1m21s
│ Warning: Applied changes may be incomplete
│ The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending:
│ terraform plan
│ Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.
Destroy complete! Resources: 5 destroyed.
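As the final warning suggests, a plain plan afterwards confirms nothing else is left pending; listing what remains in state is an optional extra check (the grep filter is only an illustration):

    terraform plan
    terraform state list | grep graphql  # the five destroyed entries should be gone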