Paste P1434

terraform destroy -target=rancher2_cluster.deployment_internship

Authored by ardumont on Aug 23 2022, 5:04 PM.
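
The session below tears down the internship Rancher cluster (`deployment-internship`, id `c-fvnrx`) together with the three Proxmox VMs backing it (`uffizi/qemu/150`–`152`). For a non-interactive preview of the same targeted destroy, Terraform's plan command accepts the same flag (not part of the captured session):

```
$ terraform plan -destroy -target=rancher2_cluster.deployment_internship
```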
```
$ terraform destroy -target=rancher2_cluster.deployment_internship
rancher2_cluster.deployment_internship: Refreshing state... [id=c-fvnrx]
Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols:
  - destroy

Terraform will perform the following actions:

  # rancher2_cluster.deployment_internship will be destroyed
  - resource "rancher2_cluster" "deployment_internship" {
      - annotations = {
          - "authz.management.cattle.io/creator-role-bindings" = jsonencode(
                {
                  - created = [
                      - "cluster-owner",
                    ]
                  - required = [
                      - "cluster-owner",
                    ]
                }
            )
          - "lifecycle.cattle.io/create.cluster-agent-controller-cleanup" = "true"
          - "lifecycle.cattle.io/create.cluster-provisioner-controller" = "true"
          - "lifecycle.cattle.io/create.cluster-scoped-gc" = "true"
          - "lifecycle.cattle.io/create.mgmt-cluster-rbac-remove" = "true"
          - "provisioner.cattle.io/encrypt-migrated" = "true"
          - "provisioner.cattle.io/ke-driver-update" = "updated"
        } -> null
      - ca_cert = (sensitive value)
      - cluster_registration_token = [
          - {
              - annotations = {}
              - cluster_id = "c-fvnrx"
              - command = "kubectl apply -f https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/47zcb6pbq6wscj9g6qs9hg22qlrpsc4x46mhr8852dfq5hgjz9f5dd_c-fvnrx.yaml"
              - id = "c-fvnrx:default-token"
              - insecure_command = "curl --insecure -sfL https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/47zcb6pbq6wscj9g6qs9hg22qlrpsc4x46mhr8852dfq5hgjz9f5dd_c-fvnrx.yaml | kubectl apply -f -"
              - insecure_node_command = ""
              - insecure_windows_node_command = ""
              - labels = {
                  - "cattle.io/creator" = "norman"
                }
              - manifest_url = "https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/47zcb6pbq6wscj9g6qs9hg22qlrpsc4x46mhr8852dfq5hgjz9f5dd_c-fvnrx.yaml"
              - name = "default-token"
              - node_command = "sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.6.4 --server https://rancher.euwest.azure.internal.softwareheritage.org --token 47zcb6pbq6wscj9g6qs9hg22qlrpsc4x46mhr8852dfq5hgjz9f5dd --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac"
              - token = "47zcb6pbq6wscj9g6qs9hg22qlrpsc4x46mhr8852dfq5hgjz9f5dd"
              - windows_node_command = "PowerShell -NoLogo -NonInteractive -Command \"& {docker run -v c:\\:c:\\host rancher/rancher-agent:v2.6.4 bootstrap --server https://rancher.euwest.azure.internal.softwareheritage.org --token 47zcb6pbq6wscj9g6qs9hg22qlrpsc4x46mhr8852dfq5hgjz9f5dd --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac | iex}\""
            },
        ] -> null
      - default_project_id = "c-fvnrx:p-q4flt" -> null
      - description = "staging cluster for deployment test" -> null
      - docker_root_dir = "/var/lib/docker" -> null
      - driver = "rancherKubernetesEngine" -> null
      - enable_cluster_alerting = false -> null
      - enable_cluster_monitoring = false -> null
      - enable_network_policy = false -> null
      - fleet_workspace_name = "fleet-default" -> null
      - id = "c-fvnrx" -> null
      - istio_enabled = false -> null
      - kube_config = (sensitive value)
      - labels = {
          - "cattle.io/creator" = "norman"
          - "provider.cattle.io" = "rke"
        } -> null
      - name = "deployment-internship" -> null
      - system_project_id = "c-fvnrx:p-hlf76" -> null
      - windows_prefered_cluster = false -> null

      - cluster_auth_endpoint {
          - enabled = false -> null
        }

      - rke_config {
          - addon_job_timeout = 45 -> null
          - addons_include = [] -> null
          - enable_cri_dockerd = false -> null
          - ignore_docker_version = true -> null
          - kubernetes_version = "v1.21.12-rancher1-1" -> null
          - ssh_agent_auth = false -> null

          - authentication {
              - sans = [] -> null
              - strategy = "x509" -> null
            }

          - authorization {}

          - bastion_host {
              - ssh_agent_auth = false -> null
            }

          - cloud_provider {
            }

          - ingress {
              - default_backend = true -> null
              - extra_args = {} -> null
              - http_port = 0 -> null
              - https_port = 0 -> null
              - node_selector = {} -> null
              - options = {} -> null
              - provider = "nginx" -> null
            }

          - monitoring {
              - node_selector = {} -> null
              - options = {} -> null
              - provider = "metrics-server" -> null
              - replicas = 1 -> null
            }

          - network {
              - mtu = 0 -> null
              - options = {} -> null
              - plugin = "canal" -> null
            }

          - services {
              - etcd {
                  - creation = "12h" -> null
                  - external_urls = [] -> null
                  - extra_args = {
                      - "election-timeout" = "5000"
                      - "heartbeat-interval" = "500"
                    } -> null
                  - extra_binds = [] -> null
                  - extra_env = [] -> null
                  - gid = 0 -> null
                  - retention = "72h" -> null
                  - snapshot = false -> null
                  - uid = 0 -> null

                  - backup_config {
                      - enabled = true -> null
                      - interval_hours = 12 -> null
                      - retention = 6 -> null
                      - safe_timestamp = false -> null
                      - timeout = 300 -> null
                    }
                }

              - kube_api {
                  - admission_configuration = {} -> null
                  - always_pull_images = false -> null
                  - extra_args = {} -> null
                  - extra_binds = [] -> null
                  - extra_env = [] -> null
                  - pod_security_policy = false -> null
                  - service_node_port_range = "30000-32767" -> null
                }

              - kube_controller {}

              - kubelet {
                  - extra_args = {} -> null
                  - extra_binds = [] -> null
                  - extra_env = [] -> null
                  - fail_swap_on = false -> null
                  - generate_serving_certificate = false -> null
                }

              - kubeproxy {}

              - scheduler {}
            }

          - upgrade_strategy {
              - drain = false -> null
              - max_unavailable_controlplane = "1" -> null
              - max_unavailable_worker = "10%" -> null

              - drain_input {
                  - delete_local_data = false -> null
                  - force = false -> null
                  - grace_period = -1 -> null
                  - ignore_daemon_sets = true -> null
                  - timeout = 120 -> null
                }
            }
        }

      - scheduled_cluster_scan {
          - enabled = false -> null
        }
    }
  # module.rancher_node_internship0.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 4096 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.3-zfs-2022-04-21" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "Rancher node for the internship" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/150" -> null
      - ipconfig0 = "ip=192.168.130.140/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 8192 -> null
      - name = "rancher-node-intership0" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10008-disk-0/vm-150-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10008-disk-0/vm-150-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-150-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-150-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "1A:B2:E6:C3:42:C7" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }
  # module.rancher_node_internship1.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 4096 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.3-zfs-2022-04-21" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "Rancher node for the internship" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/151" -> null
      - ipconfig0 = "ip=192.168.130.141/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 8192 -> null
      - name = "rancher-node-intership1" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10008-disk-0/vm-151-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10008-disk-0/vm-151-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-151-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-151-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "D6:CB:31:9D:8A:65" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }
  # module.rancher_node_internship2.proxmox_vm_qemu.node will be destroyed
  - resource "proxmox_vm_qemu" "node" {
      - additional_wait = 0 -> null
      - agent = 0 -> null
      - automatic_reboot = true -> null
      - balloon = 4096 -> null
      - bios = "seabios" -> null
      - boot = "c" -> null
      - ciuser = "root" -> null
      - clone = "debian-bullseye-11.3-zfs-2022-04-21" -> null
      - clone_wait = 0 -> null
      - cores = 4 -> null
      - cpu = "kvm64" -> null
      - define_connection_info = true -> null
      - desc = "Rancher node for the internship" -> null
      - disk_gb = 0 -> null
      - force_create = false -> null
      - full_clone = false -> null
      - guest_agent_ready_timeout = 100 -> null
      - hotplug = "network,disk,usb" -> null
      - id = "uffizi/qemu/152" -> null
      - ipconfig0 = "ip=192.168.130.142/24,gw=192.168.130.1" -> null
      - kvm = true -> null
      - memory = 8192 -> null
      - name = "rancher-node-intership2" -> null
      - nameserver = "192.168.100.29" -> null
      - numa = false -> null
      - onboot = true -> null
      - oncreate = true -> null
      - os_type = "cloud-init" -> null
      - preprovision = true -> null
      - qemu_os = "other" -> null
      - reboot_required = false -> null
      - scsihw = "virtio-scsi-pci" -> null
      - searchdomain = "internal.staging.swh.network" -> null
      - sockets = 1 -> null
      - ssh_user = "root" -> null
      - sshkeys = <<-EOT
            ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
        EOT -> null
      - tablet = true -> null
      - target_node = "uffizi" -> null
      - unused_disk = [] -> null
      - vcpus = 0 -> null
      - vlan = -1 -> null

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "base-10008-disk-0/vm-152-disk-0" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "20G" -> null
          - slot = 0 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:base-10008-disk-0/vm-152-disk-0" -> null
        }

      - disk {
          - backup = 0 -> null
          - cache = "none" -> null
          - file = "vm-152-disk-1" -> null
          - format = "raw" -> null
          - iothread = 0 -> null
          - mbps = 0 -> null
          - mbps_rd = 0 -> null
          - mbps_rd_max = 0 -> null
          - mbps_wr = 0 -> null
          - mbps_wr_max = 0 -> null
          - replicate = 0 -> null
          - size = "50G" -> null
          - slot = 1 -> null
          - ssd = 0 -> null
          - storage = "proxmox" -> null
          - storage_type = "rbd" -> null
          - type = "virtio" -> null
          - volume = "proxmox:vm-152-disk-1" -> null
        }

      - network {
          - bridge = "vmbr443" -> null
          - firewall = false -> null
          - link_down = false -> null
          - macaddr = "F2:5B:1E:DE:59:D5" -> null
          - model = "virtio" -> null
          - mtu = 0 -> null
          - queues = 0 -> null
          - rate = 0 -> null
          - tag = -1 -> null
        }
    }

Plan: 0 to add, 0 to change, 4 to destroy.
╷
│ Warning: Resource targeting is in effect
│
│ You are creating a plan with the -target option, which means that the result of this plan may not represent all of the changes requested by the current configuration.
│
│ The -target option is not for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.
╵
╷
│ Warning: Experimental feature "module_variable_optional_attrs" is active
│
│   on versions.tf line 3, in terraform:
│    3:   experiments = [module_variable_optional_attrs]
│
│ Experimental features are subject to breaking changes in future minor or patch releases, based on feedback.
│
│ If you have feedback on the design of this feature, please open a GitHub issue to discuss it.
│
│ (and 27 more similar warnings elsewhere)
╵
Do you really want to destroy all resources?
  Terraform will destroy all your managed infrastructure, as shown above.
  There is no undo. Only 'yes' will be accepted to confirm.

  Enter a value: yes
module.rancher_node_internship0.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/150]
module.rancher_node_internship1.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/151]
module.rancher_node_internship2.proxmox_vm_qemu.node: Destroying... [id=uffizi/qemu/152]
module.rancher_node_internship1.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/151, 10s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 10s elapsed]
module.rancher_node_internship2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/152, 10s elapsed]
module.rancher_node_internship1.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/151, 20s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 20s elapsed]
module.rancher_node_internship2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/152, 20s elapsed]
module.rancher_node_internship1.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/151, 30s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 30s elapsed]
module.rancher_node_internship2.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/152, 30s elapsed]
module.rancher_node_internship2.proxmox_vm_qemu.node: Destruction complete after 31s
module.rancher_node_internship1.proxmox_vm_qemu.node: Destruction complete after 31s
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 40s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 50s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 1m0s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 1m10s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 1m20s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 1m30s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Still destroying... [id=uffizi/qemu/150, 1m40s elapsed]
module.rancher_node_internship0.proxmox_vm_qemu.node: Destruction complete after 1m46s
rancher2_cluster.deployment_internship: Destroying... [id=c-fvnrx]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 10s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 20s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 30s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 40s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 50s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 1m0s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 1m10s elapsed]
rancher2_cluster.deployment_internship: Still destroying... [id=c-fvnrx, 1m20s elapsed]
rancher2_cluster.deployment_internship: Destruction complete after 1m21s
╷
│ Warning: Applied changes may be incomplete
│
│ The plan was created with the -target option in effect, so some changes requested in the configuration may have been ignored and the output values may not be fully updated. Run the following command to verify that no other changes are pending:
│     terraform plan
│
│ Note that the -target option is not suitable for routine use, and is provided only for exceptional situations such as recovering from errors or mistakes, or when Terraform specifically suggests to use it as part of an error message.
╵
Destroy complete! Resources: 4 destroyed.
```
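
For reference, the plan above implies roughly the following configuration for the destroyed cluster resource. This is a minimal sketch reconstructed from the resource addresses and attribute values recorded in the state dump, not the actual source: the provider wiring and the `rancher2_token` variable are assumptions added only to make the sketch self-contained, and the three `rancher_node_internship*` module instances are omitted because their module source is not visible in the paste.

```
terraform {
  required_providers {
    rancher2 = {
      source = "rancher/rancher2"
    }
  }
}

# Hypothetical credential plumbing; only the api_url (taken from the
# registration URLs in the plan output) comes from the paste.
variable "rancher2_token" {
  type      = string
  sensitive = true
}

provider "rancher2" {
  api_url   = "https://rancher.euwest.azure.internal.softwareheritage.org"
  token_key = var.rancher2_token
}

# Values below are copied from the destroyed state shown above.
resource "rancher2_cluster" "deployment_internship" {
  name        = "deployment-internship"
  description = "staging cluster for deployment test"

  rke_config {
    kubernetes_version = "v1.21.12-rancher1-1"

    network {
      plugin = "canal" # as recorded in the destroyed state
    }

    services {
      etcd {
        retention = "72h"

        backup_config {
          enabled        = true
          interval_hours = 12
          retention      = 6
        }
      }
    }
  }
}
```

As the closing warning notes, because the plan was created with -target, running a plain `terraform plan` afterwards is how to confirm that no other changes are still pending.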