Paste P805

D4071 (staging): terraform plan -no-color

Authored by ardumont on Oct 5 2020, 4:14 PM.
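The output below was produced against the staging Proxmox infrastructure. A command sequence along these lines reproduces it; the working directory is an assumption, the plan command itself is the one in the paste title:

  cd proxmox/terraform/staging    # assumed location of the staging manifests
  terraform init                  # ensure providers and state backend are initialised
  terraform plan -no-color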
Refreshing Terraform state in-memory prior to plan...
The refreshed state will be used to calculate this plan, but will not be
persisted to local or remote state storage.
module.worker2.proxmox_vm_qemu.node: Refreshing state... [id=branly/qemu/112]
module.worker0.proxmox_vm_qemu.node: Refreshing state... [id=beaubourg/qemu/117]
module.vault.proxmox_vm_qemu.node: Refreshing state... [id=beaubourg/qemu/121]
module.scheduler0.proxmox_vm_qemu.node: Refreshing state... [id=beaubourg/qemu/116]
module.journal0.proxmox_vm_qemu.node: Refreshing state... [id=beaubourg/qemu/122]
module.db0.proxmox_vm_qemu.node: Refreshing state... [id=orsay/qemu/115]
module.worker1.proxmox_vm_qemu.node: Refreshing state... [id=beaubourg/qemu/118]
proxmox_vm_qemu.storage0: Refreshing state... [id=orsay/qemu/114]
proxmox_vm_qemu.gateway: Refreshing state... [id=beaubourg/qemu/109]
module.webapp.proxmox_vm_qemu.node: Refreshing state... [id=branly/qemu/119]
module.deposit.proxmox_vm_qemu.node: Refreshing state... [id=beaubourg/qemu/120]
------------------------------------------------------------------------
An execution plan has been generated and is shown below.
Resource actions are indicated with the following symbols:
~ update in-place
Terraform will perform the following actions:
# proxmox_vm_qemu.gateway will be updated in-place
~ resource "proxmox_vm_qemu" "gateway" {
agent = 0
balloon = 0
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 1
cpu = "host"
desc = "staging gateway node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/109"
ipconfig0 = "ip=192.168.100.125/24,gw=192.168.100.1"
ipconfig1 = "ip=192.168.128.1/24"
kvm = true
memory = 1024
name = "gateway"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 109
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "20G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr0"
firewall = false
id = 0
link_down = false
macaddr = "6E:ED:EF:EB:3C:AA"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
network {
bridge = "vmbr443"
firewall = false
id = 1
link_down = false
macaddr = "FE:95:CC:A5:EB:43"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# proxmox_vm_qemu.storage0 will be updated in-place
~ resource "proxmox_vm_qemu" "storage0" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "swh storage services"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "orsay/qemu/114"
ipconfig0 = "ip=192.168.128.2/24,gw=192.168.128.1"
kvm = true
memory = 8192
name = "storage0"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "orsay"
vcpus = 0
vlan = -1
+ vmid = 114
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "orsay-ssd-2018"
storage_type = "ssd"
type = "virtio"
}
disk {
backup = false
cache = "none"
format = "raw"
id = 1
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "512G"
ssd = false
storage = "orsay-ssd-2018"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "CA:73:7F:ED:F9:01"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.db0.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Node to host storage/indexer/scheduler dbs"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "orsay/qemu/115"
ipconfig0 = "ip=192.168.128.3/24,gw=192.168.128.1"
kvm = true
memory = 16384
name = "db0"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "orsay"
vcpus = 0
vlan = -1
+ vmid = 115
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "400G"
ssd = false
storage = "orsay-ssd-2018"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "3A:65:31:7C:24:17"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.deposit.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Deposit service node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/120"
ipconfig0 = "ip=192.168.128.7/24,gw=192.168.128.1"
kvm = true
memory = 8192
name = "deposit"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 120
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "9E:81:DD:58:15:3B"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.journal0.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Journal services node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/122"
ipconfig0 = "ip=192.168.128.10/24,gw=192.168.128.1"
kvm = true
memory = 12288
name = "journal0"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 122
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "1E:98:C2:66:BF:33"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.scheduler0.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Scheduler api services"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/116"
ipconfig0 = "ip=192.168.128.4/24,gw=192.168.128.1"
kvm = true
memory = 8192
name = "scheduler0"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 116
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "92:02:7E:D0:B9:36"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.vault.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Vault services node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/121"
ipconfig0 = "ip=192.168.128.9/24,gw=192.168.128.1"
kvm = true
~ memory = 4096 -> 8192
name = "vault"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 121
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "16:15:1C:79:CB:DB"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
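The only value change in this plan is the vault node's memory being raised from 4096 to 8192 MiB; the "+ clone_wait" and "+ vmid" lines on every resource are attributes newly tracked by the provider, not configuration changes. In the staging manifests this would typically correspond to a one-line change on the vault module call, sketched here with hypothetical module path and argument names:

  module "vault" {
    source = "../modules/node"   # hypothetical module path
    # other arguments unchanged
    memory = 8192                # was 4096
  }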
# module.webapp.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Archive/Webapp service node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "branly/qemu/119"
ipconfig0 = "ip=192.168.128.8/24,gw=192.168.128.1"
kvm = true
memory = 16384
name = "webapp"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "branly"
vcpus = 0
vlan = -1
+ vmid = 119
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "1A:00:39:95:D4:5F"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.worker0.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Loader/lister service node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/117"
ipconfig0 = "ip=192.168.128.5/24,gw=192.168.128.1"
kvm = true
memory = 12288
name = "worker0"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 117
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "72:D9:03:46:B1:47"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.worker1.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Loader/lister service node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "beaubourg/qemu/118"
ipconfig0 = "ip=192.168.128.6/24,gw=192.168.128.1"
kvm = true
memory = 12288
name = "worker1"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "beaubourg"
vcpus = 0
vlan = -1
+ vmid = 118
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "D6:A9:6F:02:E3:66"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
# module.worker2.proxmox_vm_qemu.node will be updated in-place
~ resource "proxmox_vm_qemu" "node" {
agent = 0
balloon = 1024
bios = "seabios"
boot = "c"
bootdisk = "virtio0"
ciuser = "root"
clone = "template-debian-10"
+ clone_wait = 15
cores = 4
cpu = "host"
desc = "Loader/lister service node"
disk_gb = 0
force_create = false
full_clone = false
hotplug = "network,disk,usb"
id = "branly/qemu/112"
ipconfig0 = "ip=192.168.128.11/24,gw=192.168.128.1"
kvm = true
memory = 12288
name = "worker2"
nameserver = "192.168.100.29"
numa = false
onboot = true
os_type = "cloud-init"
preprovision = true
qemu_os = "other"
scsihw = "virtio-scsi-pci"
searchdomain = "internal.staging.swh.network"
sockets = 1
ssh_user = "root"
sshkeys = <<~EOT
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDVKCfpeIMg7GS3Pk03ZAcBWAeDZ+AvWk2k/pPY0z8MJ3YAbqZkRtSK7yaDgJV6Gro7nn/TxdJLo2jEzzWvlC8d8AEzhZPy5Z/qfVVjqBTBM4H5+e+TItAHFfaY5+0WvIahxcfsfaq70MWfpJhszAah3ThJ4mqzYaw+dkr42+a7Gx3Ygpb/m2dpnFnxvXdcuAJYStmHKU5AWGWWM+Fm50/fdMqUfNd8MbKhkJt5ihXQmZWMOt7ls4N8i5NZWnS9YSWow8X/ENOEqCRN9TyRkc+pPS0w9DNi0BCsWvSRJOkyvQ6caEnKWlNoywCmM1AlIQD3k4RUgRWe0vqg/UKPpH3Z root@terraform
EOT
target_node = "branly"
vcpus = 0
vlan = -1
+ vmid = 112
disk {
backup = false
cache = "none"
format = "raw"
id = 0
iothread = false
mbps = 0
mbps_rd = 0
mbps_rd_max = 0
mbps_wr = 0
mbps_wr_max = 0
replicate = false
size = "32G"
ssd = false
storage = "proxmox"
storage_type = "ssd"
type = "virtio"
}
network {
bridge = "vmbr443"
firewall = false
id = 0
link_down = false
macaddr = "AA:57:27:51:75:18"
model = "virtio"
queues = -1
rate = -1
tag = -1
}
}
Plan: 0 to add, 11 to change, 0 to destroy.
------------------------------------------------------------------------
Note: You didn't specify an "-out" parameter to save this plan, so Terraform
can't guarantee that exactly these actions will be performed if
"terraform apply" is subsequently run.

Event Timeline

ardumont changed the title of this paste from "D4071: terraform plan -no-color" to "D4071 (staging): terraform plan -no-color". Oct 6 2020, 11:17 AM
ardumont edited the content of this paste.