D8155: Add new staging graphql-worker nodes
D8155.diff (6 KB)
diff --git a/proxmox/terraform/staging/cluster-graphql.tf b/proxmox/terraform/staging/cluster-graphql.tf
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/staging/cluster-graphql.tf
@@ -0,0 +1,159 @@
+# This declares terraform manifests to provision vms and register containers running
+# on those vms to a rancher (cluster management service) instance.
+
+# Each tool has the following responsibilities:
+# - proxmox: provisions the vms (with the docker dependency)
+# - rancher: installs the kube cluster within containers (running on the vms)
+
+# Requires RANCHER_ACCESS_KEY and RANCHER_SECRET_KEY in your shell environment, e.g.:
+# $ cat ~/.config/terraform/swh/setup.sh
+# ...
+# key_entry=operations/rancher/azure/elastic-loader-lister-keys
+# export RANCHER_ACCESS_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f1)
+# export RANCHER_SECRET_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f2)
+
+# Plan:
+# - Create the cluster with terraform
+# - Create the nodes as usual through terraform
+# - Retrieve the registration command (out of the cluster creation step) and run it
+#   on the new nodes
+
+resource "rancher2_cluster" "cluster-graphql" {
+ name = "cluster-graphql"
+ description = "graphql staging cluster"
+ rke_config {
+ network {
+ plugin = "canal"
+ }
+ }
+}
+
+output "rancher2_cluster_graphql_summary" {
+ sensitive = true
+ value = rancher2_cluster.cluster-graphql.kube_config
+}
+
+output "rancher2_cluster_graphql_command" {
+ sensitive = true
+ value = rancher2_cluster.cluster-graphql.cluster_registration_token[0].node_command
+}
+
+module "graphql-worker0" {
+ source = "../modules/node"
+ vmid = 162
+ template = "debian-bullseye-11.3-zfs-2022-04-21"
+ config = local.config
+ hostname = "graphql-worker0"
+ description = "graphql worker running in rancher cluster"
+ hypervisor = "uffizi"
+ sockets = "1"
+ cores = "4"
+ onboot = true
+ memory = "8192"
+ balloon = "4096"
+
+ networks = [{
+ id = 0
+ ip = "192.168.130.150"
+ gateway = local.config["gateway_ip"]
+ bridge = "vmbr443"
+ }]
+
+ storages = [{
+ storage = "proxmox"
+ size = "20G"
+ }, {
+ storage = "proxmox"
+ size = "50G"
+ }
+ ]
+
+ post_provision_steps = [
+ "systemctl restart docker", # workaround
+ "${rancher2_cluster.cluster-graphql.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
+ ]
+}
+
+output "graphql-worker0_summary" {
+ value = module.graphql-worker0.summary
+}
+
+module "graphql-worker1" {
+ source = "../modules/node"
+ vmid = 163
+ template = "debian-bullseye-11.3-zfs-2022-04-21"
+ config = local.config
+ hostname = "graphql-worker1"
+ description = "graphql worker running in rancher cluster"
+ hypervisor = "uffizi"
+ sockets = "1"
+ cores = "4"
+ onboot = true
+ memory = "8192"
+ balloon = "4096"
+
+ networks = [{
+ id = 0
+ ip = "192.168.130.151"
+ gateway = local.config["gateway_ip"]
+ bridge = "vmbr443"
+ }]
+
+ storages = [{
+ storage = "proxmox"
+ size = "20G"
+ }, {
+ storage = "proxmox"
+ size = "50G"
+ }
+ ]
+
+ post_provision_steps = [
+ "systemctl restart docker", # workaround
+ "${rancher2_cluster.cluster-graphql.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
+ ]
+}
+
+output "graphql-worker1_summary" {
+ value = module.graphql-worker1.summary
+}
+
+module "graphql-worker2" {
+ source = "../modules/node"
+ vmid = 164
+ template = "debian-bullseye-11.3-zfs-2022-04-21"
+ config = local.config
+ hostname = "graphql-worker2"
+ description = "graphql worker running in rancher cluster"
+ hypervisor = "uffizi"
+ sockets = "1"
+ cores = "4"
+ onboot = true
+ memory = "8192"
+ balloon = "4096"
+
+ networks = [{
+ id = 0
+ ip = "192.168.130.152"
+ gateway = local.config["gateway_ip"]
+ bridge = "vmbr443"
+ }]
+
+ storages = [{
+ storage = "proxmox"
+ size = "20G"
+ }, {
+ storage = "proxmox"
+ size = "50G"
+ }
+ ]
+
+ post_provision_steps = [
+ "systemctl restart docker", # workaround
+ "${rancher2_cluster.cluster-graphql.cluster_registration_token[0].node_command} --etcd --controlplane --worker"
+ ]
+}
+
+output "graphql-worker2_summary" {
+ value = module.graphql-worker2.summary
+}
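
Once the manifests above are applied, the two sensitive outputs can be read back with the standard terraform CLI. A minimal usage sketch, not part of the diff, assuming terraform >= 0.15 (for "output -raw") and kubectl available locally; the kubeconfig path is illustrative:

$ terraform output -raw rancher2_cluster_graphql_summary > ~/.kube/graphql-staging.yaml  # hypothetical path
$ KUBECONFIG=~/.kube/graphql-staging.yaml kubectl get nodes  # graphql-worker0..2 should appear once registered
$ terraform output -raw rancher2_cluster_graphql_command     # the registration command the modules run
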
diff --git a/proxmox/terraform/staging/elastic-workers.tf b/proxmox/terraform/staging/elastic-workers.tf
--- a/proxmox/terraform/staging/elastic-workers.tf
+++ b/proxmox/terraform/staging/elastic-workers.tf
@@ -1,22 +1,3 @@
-# This declares terraform manifests to provision vms and register containers within
-# those to a rancher (clusters management service) instance.
-
-# Each software has the following responsibilities:
-# - proxmox: provision vms (with docker dependency)
-# - rancher: installs kube cluster within containers (running on vms)
-
-# Requires (RANCHER_ACCESS_KEY and RANCHER_SECRET_KEY) in your shell environment
-# $ cat ~/.config/terraform/swh/setup.sh
-# ...
-# key_entry=operations/rancher/azure/elastic-loader-lister-keys
-# export RANCHER_ACCESS_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f1)
-# export RANCHER_SECRET_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f2)
-provider "rancher2" {
- api_url = "https://rancher.euwest.azure.internal.softwareheritage.org/v3"
- # for now
- insecure = true
-}
-
# Plan:
# - create cluster with terraform
# - Create nodes as usual through terraform
@@ -33,12 +14,12 @@
}
}
-output "rancher2_cluster_summary" {
+output "rancher2_cluster_staging_workers_summary" {
sensitive = true
value = rancher2_cluster.staging-workers.kube_config
}
-output "rancher2_cluster_command" {
+output "rancher2_cluster_staging_worker_command" {
sensitive = true
value = rancher2_cluster.staging-workers.cluster_registration_token[0].node_command
}
diff --git a/proxmox/terraform/staging/rancher-common.tf b/proxmox/terraform/staging/rancher-common.tf
new file mode 100644
--- /dev/null
+++ b/proxmox/terraform/staging/rancher-common.tf
@@ -0,0 +1,19 @@
+# This declares terraform manifests to provision vms and register containers running
+# on those vms to a rancher (cluster management service) instance.
+
+# Each tool has the following responsibilities:
+# - proxmox: provisions the vms (with the docker dependency)
+# - rancher: installs the kube cluster within containers (running on the vms)
+
+# Requires RANCHER_ACCESS_KEY and RANCHER_SECRET_KEY in your shell environment, e.g.:
+# $ cat ~/.config/terraform/swh/setup.sh
+# ...
+# key_entry=operations/rancher/azure/elastic-loader-lister-keys
+# export RANCHER_ACCESS_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f1)
+# export RANCHER_SECRET_KEY=$(swhpass ls $key_entry | head -1 | cut -d: -f2)
+
+provider "rancher2" {
+ api_url = "https://rancher.euwest.azure.internal.softwareheritage.org/v3"
+ # for now
+ insecure = true
+}
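
For reference, the provisioning workflow these manifests assume, sketched with the standard terraform CLI; the setup.sh file is the one referenced in the header comments and must export the rancher keys before the rancher2 provider is initialized:

$ source ~/.config/terraform/swh/setup.sh  # exports RANCHER_ACCESS_KEY / RANCHER_SECRET_KEY
$ cd proxmox/terraform/staging
$ terraform init    # fetches the required providers (rancher2, proxmox)
$ terraform plan    # expect one rancher2_cluster plus the three graphql-worker node modules
$ terraform apply   # provisions the vms and registers them via post_provision_steps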