D4071.id14611.diff

diff --git a/proxmox/terraform/.gitignore b/proxmox/terraform/.gitignore
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/.gitignore
@@ -0,0 +1 @@
+archives/
diff --git a/proxmox/terraform/modules/node/main.tf b/proxmox/terraform/modules/node/main.tf
--- a/proxmox/terraform/modules/node/main.tf
+++ b/proxmox/terraform/modules/node/main.tf
@@ -1,7 +1,10 @@
resource "proxmox_vm_qemu" "node" {
name = var.hostname
desc = var.description
+ vmid = var.vmid
+ balloon = 1024
+ full_clone = false
# hypervisor onto which to create the vm
target_node = var.hypervisor
@@ -52,7 +55,7 @@
network {
id = 0
model = "virtio"
- bridge = "vmbr0"
+ bridge = "vmbr443"
macaddr = lookup(var.network, "macaddr", "")
}
@@ -74,7 +77,8 @@
ignore_changes = [
bootdisk,
scsihw,
+ network,
+ disk
]
}
}
-
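
(Context for the lifecycle hunk above: once applied, the module's lifecycle block reads as below. The rationale in the comments is an assumption, not stated in the diff.)

lifecycle {
  # assumed rationale: on cloned vms the proxmox provider recomputes
  # these attributes, so ignoring them avoids a spurious diff on
  # every terraform plan
  ignore_changes = [
    bootdisk,
    scsihw,
    network,
    disk
  ]
}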
diff --git a/proxmox/terraform/modules/node/variables.tf b/proxmox/terraform/modules/node/variables.tf
--- a/proxmox/terraform/modules/node/variables.tf
+++ b/proxmox/terraform/modules/node/variables.tf
@@ -11,7 +11,7 @@
variable "hypervisor" {
description = "Hypervisor to install the vm to (choice: orsay, hypervisor3, beaubourg)"
type = string
- default = "orsay"
+ default = "beaubourg"
}
variable "template" {
@@ -43,11 +43,18 @@
type = map(string)
}
+
+variable "vmid" {
+ description = "virtual machine id"
+ type = number
+ default = 0
+}
+
variable "storage" {
description = "Storage disk location and size in the hypervisor storage"
type = map(string)
default = {
- location = "orsay-ssd-2018"
+ location = "proxmox"
size = "32G"
}
}
@@ -56,4 +63,3 @@
description = "Local config to avoid duplication from the main module"
type = map(string)
}
-
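
(Usage note: a minimal invocation of the updated module exercising the new vmid input; with the default of 0, the provider is presumably left to pick the next free id. The hostname, ip, and macaddr values below are placeholders, not part of this change.)

module "example0" {
  source = "./modules/node"
  config = local.config

  # pin the proxmox vm id explicitly (new input from this diff)
  vmid        = 199
  hostname    = "example0"
  description = "illustrative node"
  cores       = "4"
  memory      = "8192"
  network = {
    ip      = "192.168.128.42"
    macaddr = "AA:BB:CC:DD:EE:FF"
  }
}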
diff --git a/proxmox/terraform/staging.tf b/proxmox/terraform/staging.tf
--- a/proxmox/terraform/staging.tf
+++ b/proxmox/terraform/staging.tf
@@ -5,7 +5,7 @@
provider "proxmox" {
pm_tls_insecure = true
- pm_api_url = "https://orsay.internal.softwareheritage.org:8006/api2/json"
+ pm_api_url = "https://beaubourg.internal.softwareheritage.org:8006/api2/json"
# in a shell (see README): source ./setup.sh
}
@@ -30,11 +30,15 @@
# - gateway defines 2 network interfaces
# - provisioning step is more complex
resource "proxmox_vm_qemu" "gateway" {
+ balloon = 0
+ full_clone = false
+ vmid = 109
+
name = "gateway"
desc = "staging gateway node"
# hypervisor onto which to create the vm
- target_node = "orsay"
+ target_node = "beaubourg"
# See init-template.md to see the template vm bootstrap
clone = "template-debian-10"
@@ -81,7 +85,7 @@
disk {
id = 0
type = "virtio"
- storage = "orsay-ssd-2018"
+ storage = "proxmox"
storage_type = "ssd"
size = "20G"
}
@@ -94,7 +98,7 @@
network {
id = 1
model = "virtio"
- bridge = "vmbr0"
+ bridge = "vmbr443"
macaddr = "FE:95:CC:A5:EB:43"
}
@@ -114,32 +118,112 @@
ignore_changes = [
bootdisk,
scsihw,
+ network,
+ disk
]
}
}
-module "storage0" {
- source = "./modules/node"
- config = local.config
+resource "proxmox_vm_qemu" "storage0" {
+ balloon = 0
+ full_clone = false
+ vmid = 114
- hostname = "storage0"
- description = "swh storage services"
- cores = "4"
- memory = "8192"
- network = {
- ip = "192.168.128.2"
+ name = "storage0"
+ desc = "swh storage services"
+
+ # hypervisor onto which to create the vm
+ target_node = "orsay"
+
+ # See init-template.md to see the template vm bootstrap
+ clone = "template-debian-10"
+
+ # linux kernel 2.6
+ qemu_os = "l26"
+
+ # generic setup
+ sockets = 1
+ cores = 4
+ memory = 8192
+
+ boot = "c"
+
+ # boot machine when hypervisor starts
+ onboot = true
+
+ #### cloud-init setup
+ # to actually set some information per os_type (values: ubuntu, centos,
+ # cloud-init). Keep this as cloud-init
+ os_type = "cloud-init"
+
+ # ciuser - User name to change ssh keys and password for instead of the
+ # image’s configured default user.
+ ciuser = var.user_admin
+ ssh_user = var.user_admin
+
+ # searchdomain - Sets DNS search domains for a container.
+ searchdomain = var.domain
+
+ # nameserver - Sets DNS server IP address for a container.
+ nameserver = var.dns
+
+ # sshkeys - public ssh keys, one per line
+ sshkeys = var.user_admin_ssh_public_key
+
+ # ip to communicate for now with the prod network through louvre
+ ipconfig0 = "ip=192.168.128.2/24,gw=192.168.100.1"
+
+ disk {
+ id = 0
+ type = "virtio"
+ storage = "orsay-ssd-2018"
+ storage_type = "ssd"
+ size = "32G"
+ }
+ disk {
+ id = 1
+ type = "virtio"
+ storage = "orsay-ssd-2018"
+ storage_type = "ssd"
+ size = "512G"
+ }
+
+ network {
+ id = 0
+ model = "virtio"
+ bridge = "vmbr443"
macaddr = "CA:73:7F:ED:F9:01"
}
-}
-output "storage0_summary" {
- value = module.storage0.summary
+ # Delegate to puppet at the end of the provisioning the software setup
+ provisioner "remote-exec" {
+ inline = [
+ "sed -i 's/127.0.1.1/192.168.128.2/g' /etc/hosts",
+ "puppet agent --server ${var.puppet_master} --environment=${var.puppet_environment} --waitforcert 60 --test || echo 'Node provisioned!'",
+ ]
+ connection {
+ type = "ssh"
+ user = "root"
+ host = "192.168.128.2"
+ }
+ }
+
+ lifecycle {
+ ignore_changes = [
+ bootdisk,
+ scsihw,
+ network,
+ disk
+ ]
+ }
}
module "db0" {
source = "./modules/node"
config = local.config
+ hypervisor = "orsay"
+ vmid = 115
hostname = "db0"
description = "Node to host storage/indexer/scheduler dbs"
cores = "4"
@@ -150,8 +234,9 @@
}
storage = {
location = "orsay-ssd-2018"
- size = "100G"
+ size = "400G"
}
+
}
output "db0_summary" {
@@ -162,10 +247,11 @@
source = "./modules/node"
config = local.config
+ vmid = 116
hostname = "scheduler0"
description = "Scheduler api services"
cores = "4"
- memory = "16384"
+ memory = "8192"
network = {
ip = "192.168.128.4"
macaddr = "92:02:7E:D0:B9:36"
@@ -180,10 +266,11 @@
source = "./modules/node"
config = local.config
+ vmid = 117
hostname = "worker0"
description = "Loader/lister service node"
cores = "4"
- memory = "16384"
+ memory = "12288"
network = {
ip = "192.168.128.5"
macaddr = "72:D9:03:46:B1:47"
@@ -198,10 +285,11 @@
source = "./modules/node"
config = local.config
+ vmid = 118
hostname = "worker1"
description = "Loader/lister service node"
cores = "4"
- memory = "16384"
+ memory = "12288"
network = {
ip = "192.168.128.6"
macaddr = "D6:A9:6F:02:E3:66"
@@ -215,7 +303,9 @@
module "webapp" {
source = "./modules/node"
config = local.config
+ hypervisor = "branly"
+ vmid = 119
hostname = "webapp"
description = "Archive/Webapp service node"
cores = "4"
@@ -234,10 +324,11 @@
source = "./modules/node"
config = local.config
+ vmid = 120
hostname = "deposit"
description = "Deposit service node"
cores = "4"
- memory = "16384"
+ memory = "8192"
network = {
ip = "192.168.128.7"
macaddr = "9E:81:DD:58:15:3B"
@@ -252,10 +343,11 @@
source = "./modules/node"
config = local.config
+ vmid = 121
hostname = "vault"
description = "Vault services node"
cores = "4"
- memory = "16384"
+ memory = "8192"
network = {
ip = "192.168.128.9"
macaddr = "16:15:1C:79:CB:DB"
@@ -270,10 +362,11 @@
source = "./modules/node"
config = local.config
+ vmid = 122
hostname = "journal0"
description = "Journal services node"
cores = "4"
- memory = "16384"
+ memory = "12288"
network = {
ip = "192.168.128.10"
macaddr = "1E:98:C2:66:BF:33"
@@ -287,11 +380,13 @@
module "worker2" {
source = "./modules/node"
config = local.config
+ hypervisor = "branly"
+ vmid = 112
hostname = "worker2"
description = "Loader/lister service node"
cores = "4"
- memory = "16384"
+ memory = "12288"
network = {
ip = "192.168.128.11"
macaddr = "AA:57:27:51:75:18"
@@ -301,4 +396,3 @@
output "worker2_summary" {
value = module.worker2.summary
}
-
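
(Migration note, not part of the diff: storage0 changes from a module call to a standalone proxmox_vm_qemu resource, so its existing state entry no longer matches the new address. Presumably something like terraform state mv "module.storage0.proxmox_vm_qemu.node" "proxmox_vm_qemu.storage0" is needed before applying, otherwise Terraform plans a destroy/recreate. The dropped storage0_summary output could likewise be reinstated against the resource; a hypothetical sketch:)

# hypothetical replacement for the removed storage0_summary output,
# reading from the resource instead of the removed module
output "storage0_summary" {
  value = "hostname: ${proxmox_vm_qemu.storage0.name}, ip: 192.168.128.2"
}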
