Related to T4144
Details
- Reviewers: vsellier
- Group Reviewers: System administrators
- Maniphest Tasks: T4144: Elastic worker infrastructure
- Commits: rSPRE1a611a081b74: rancher: Bootstrap rancher declarations
(It wants to destroy test-rke because it was only a test cluster; that's fine.)
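For reference, the cluster declaration being applied is roughly of this shape (a minimal sketch using the rancher2 Terraform provider, not the actual content of rSPRE1a611a081b74; only the cluster name, description and the canal network plugin are confirmed by the plan output below):

resource "rancher2_cluster" "staging-workers" {
  name        = "staging-workers"
  description = "staging workers cluster"

  rke_config {
    # The plan below shows the canal network plugin; any version pinning or
    # other rke_config tuning in the real declaration is not reproduced here.
    network {
      plugin = "canal"
    }
  }
}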
$ terraform apply rancher2_cluster.test-rke: Refreshing state... [id=c-dqlbs] module.search-esnode0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/130] module.scheduler0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/116] module.search0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/131] module.worker3.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/137] module.worker1.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/118] module.rp0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/129] module.webapp.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/119] module.counters0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/138] module.worker0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/117] module.scrubber0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/142] module.mirror-test.proxmox_vm_qemu.node: Refreshing state... [id=uffizi/qemu/132] module.objstorage0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/102] module.worker2.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/112] module.deposit.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/120] module.maven-exporter0.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/122] module.vault.proxmox_vm_qemu.node: Refreshing state... [id=pompidou/qemu/121] Terraform used the selected providers to generate the following execution plan. Resource actions are indicated with the following symbols: + create - destroy Terraform will perform the following actions: # rancher2_cluster.staging-workers will be created + resource "rancher2_cluster" "staging-workers" { + annotations = (known after apply) + ca_cert = (sensitive value) + cluster_registration_token = (known after apply) + default_pod_security_policy_template_id = (known after apply) + default_project_id = (known after apply) + description = "staging workers cluster" + desired_agent_image = (known after apply) + desired_auth_image = (known after apply) + docker_root_dir = (known after apply) + driver = (known after apply) + enable_cluster_alerting = (known after apply) + enable_cluster_istio = (known after apply) + enable_cluster_monitoring = (known after apply) + enable_network_policy = (known after apply) + fleet_workspace_name = (known after apply) + id = (known after apply) + istio_enabled = (known after apply) + kube_config = (sensitive value) + labels = (known after apply) + name = "staging-workers" + system_project_id = (known after apply) + windows_prefered_cluster = false + cluster_auth_endpoint { + ca_certs = (known after apply) + enabled = (known after apply) + fqdn = (known after apply) } + cluster_template_answers { + cluster_id = (known after apply) + project_id = (known after apply) + values = (known after apply) } + cluster_template_questions { + default = (known after apply) + required = (known after apply) + type = (known after apply) + variable = (known after apply) } + eks_config_v2 { + cloud_credential_id = (known after apply) + imported = (known after apply) + kms_key = (known after apply) + kubernetes_version = (known after apply) + logging_types = (known after apply) + name = (known after apply) + private_access = (known after apply) + public_access = (known after apply) + public_access_sources = (known after apply) + region = (known after apply) + secrets_encryption = (known after apply) + security_groups = (known after apply) + service_role = (known after apply) + subnets = (known after apply) + tags = (known after 
apply) + node_groups { + desired_size = (known after apply) + disk_size = (known after apply) + ec2_ssh_key = (known after apply) + gpu = (known after apply) + image_id = (known after apply) + instance_type = (known after apply) + labels = (known after apply) + max_size = (known after apply) + min_size = (known after apply) + name = (known after apply) + request_spot_instances = (known after apply) + resource_tags = (known after apply) + spot_instance_types = (known after apply) + subnets = (known after apply) + tags = (known after apply) + user_data = (known after apply) + version = (known after apply) + launch_template { + id = (known after apply) + name = (known after apply) + version = (known after apply) } } } + k3s_config { + version = (known after apply) + upgrade_strategy { + drain_server_nodes = (known after apply) + drain_worker_nodes = (known after apply) + server_concurrency = (known after apply) + worker_concurrency = (known after apply) } } + rke2_config { + version = (known after apply) + upgrade_strategy { + drain_server_nodes = (known after apply) + drain_worker_nodes = (known after apply) + server_concurrency = (known after apply) + worker_concurrency = (known after apply) } } + rke_config { + addon_job_timeout = (known after apply) + enable_cri_dockerd = false + ignore_docker_version = true + kubernetes_version = (known after apply) + prefix_path = (known after apply) + ssh_agent_auth = false + ssh_cert_path = (known after apply) + ssh_key_path = (known after apply) + win_prefix_path = (known after apply) + authentication { + sans = (known after apply) + strategy = (known after apply) } + authorization { + mode = (known after apply) + options = (known after apply) } + bastion_host { + address = (known after apply) + port = (known after apply) + ssh_agent_auth = (known after apply) + ssh_key = (sensitive value) + ssh_key_path = (known after apply) + user = (known after apply) } + cloud_provider { + custom_cloud_provider = (known after apply) + name = (known after apply) + aws_cloud_provider { + global { + disable_security_group_ingress = (known after apply) + disable_strict_zone_check = (known after apply) + elb_security_group = (known after apply) + kubernetes_cluster_id = (known after apply) + kubernetes_cluster_tag = (known after apply) + role_arn = (known after apply) + route_table_id = (known after apply) + subnet_id = (known after apply) + vpc = (known after apply) + zone = (known after apply) } + service_override { + region = (known after apply) + service = (known after apply) + signing_method = (known after apply) + signing_name = (known after apply) + signing_region = (known after apply) + url = (known after apply) } } + azure_cloud_provider { + aad_client_cert_password = (sensitive value) + aad_client_cert_path = (known after apply) + aad_client_id = (sensitive value) + aad_client_secret = (sensitive value) + cloud = (known after apply) + cloud_provider_backoff = (known after apply) + cloud_provider_backoff_duration = (known after apply) + cloud_provider_backoff_exponent = (known after apply) + cloud_provider_backoff_jitter = (known after apply) + cloud_provider_backoff_retries = (known after apply) + cloud_provider_rate_limit = (known after apply) + cloud_provider_rate_limit_bucket = (known after apply) + cloud_provider_rate_limit_qps = (known after apply) + load_balancer_sku = (known after apply) + location = (known after apply) + maximum_load_balancer_rule_count = (known after apply) + primary_availability_set_name = (known after apply) + 
primary_scale_set_name = (known after apply) + resource_group = (known after apply) + route_table_name = (known after apply) + security_group_name = (known after apply) + subnet_name = (known after apply) + subscription_id = (sensitive value) + tenant_id = (sensitive value) + use_instance_metadata = (known after apply) + use_managed_identity_extension = (known after apply) + vm_type = (known after apply) + vnet_name = (known after apply) + vnet_resource_group = (known after apply) } + openstack_cloud_provider { + block_storage { + bs_version = (known after apply) + ignore_volume_az = (known after apply) + trust_device_path = (known after apply) } + global { + auth_url = (known after apply) + ca_file = (known after apply) + domain_id = (sensitive value) + domain_name = (known after apply) + password = (sensitive value) + region = (known after apply) + tenant_id = (sensitive value) + tenant_name = (known after apply) + trust_id = (sensitive value) + username = (sensitive value) } + load_balancer { + create_monitor = (known after apply) + floating_network_id = (known after apply) + lb_method = (known after apply) + lb_provider = (known after apply) + lb_version = (known after apply) + manage_security_groups = (known after apply) + monitor_delay = (known after apply) + monitor_max_retries = (known after apply) + monitor_timeout = (known after apply) + subnet_id = (known after apply) + use_octavia = (known after apply) } + metadata { + request_timeout = (known after apply) + search_order = (known after apply) } + route { + router_id = (known after apply) } } + vsphere_cloud_provider { + disk { + scsi_controller_type = (known after apply) } + global { + datacenters = (known after apply) + insecure_flag = (known after apply) + password = (sensitive value) + port = (known after apply) + soap_roundtrip_count = (known after apply) + user = (sensitive value) } + network { + public_network = (known after apply) } + virtual_center { + datacenters = (known after apply) + name = (known after apply) + password = (sensitive value) + port = (known after apply) + soap_roundtrip_count = (known after apply) + user = (sensitive value) } + workspace { + datacenter = (known after apply) + default_datastore = (known after apply) + folder = (known after apply) + resourcepool_path = (known after apply) + server = (known after apply) } } } + dns { + node_selector = (known after apply) + options = (known after apply) + provider = (known after apply) + reverse_cidrs = (known after apply) + upstream_nameservers = (known after apply) + linear_autoscaler_params { + cores_per_replica = (known after apply) + max = (known after apply) + min = (known after apply) + nodes_per_replica = (known after apply) + prevent_single_point_failure = (known after apply) } + nodelocal { + ip_address = (known after apply) + node_selector = (known after apply) } + tolerations { + effect = (known after apply) + key = (known after apply) + operator = (known after apply) + seconds = (known after apply) + value = (known after apply) } + update_strategy { + strategy = (known after apply) + rolling_update { + max_surge = (known after apply) + max_unavailable = (known after apply) } } } + ingress { + default_backend = (known after apply) + dns_policy = (known after apply) + extra_args = (known after apply) + http_port = (known after apply) + https_port = (known after apply) + network_mode = (known after apply) + node_selector = (known after apply) + options = (known after apply) + provider = (known after apply) + tolerations { + effect = (known 
after apply) + key = (known after apply) + operator = (known after apply) + seconds = (known after apply) + value = (known after apply) } + update_strategy { + strategy = (known after apply) + rolling_update { + max_unavailable = (known after apply) } } } + monitoring { + node_selector = (known after apply) + options = (known after apply) + provider = (known after apply) + replicas = (known after apply) + tolerations { + effect = (known after apply) + key = (known after apply) + operator = (known after apply) + seconds = (known after apply) + value = (known after apply) } + update_strategy { + strategy = (known after apply) + rolling_update { + max_surge = (known after apply) + max_unavailable = (known after apply) } } } + network { + mtu = 0 + options = (known after apply) + plugin = "canal" } + services { + etcd { + ca_cert = (known after apply) + cert = (sensitive value) + creation = (known after apply) + external_urls = (known after apply) + extra_args = (known after apply) + extra_binds = (known after apply) + extra_env = (known after apply) + gid = (known after apply) + image = (known after apply) + key = (sensitive value) + path = (known after apply) + retention = (known after apply) + snapshot = (known after apply) + uid = (known after apply) + backup_config { + enabled = (known after apply) + interval_hours = (known after apply) + retention = (known after apply) + safe_timestamp = (known after apply) + timeout = (known after apply) + s3_backup_config { + access_key = (sensitive value) + bucket_name = (known after apply) + custom_ca = (known after apply) + endpoint = (known after apply) + folder = (known after apply) + region = (known after apply) + secret_key = (sensitive value) } } } + kube_api { + admission_configuration = (known after apply) + always_pull_images = (known after apply) + extra_args = (known after apply) + extra_binds = (known after apply) + extra_env = (known after apply) + image = (known after apply) + pod_security_policy = (known after apply) + service_cluster_ip_range = (known after apply) + service_node_port_range = (known after apply) + audit_log { + enabled = (known after apply) + configuration { + format = (known after apply) + max_age = (known after apply) + max_backup = (known after apply) + max_size = (known after apply) + path = (known after apply) + policy = (known after apply) } } + event_rate_limit { + configuration = (known after apply) + enabled = (known after apply) } + secrets_encryption_config { + custom_config = (known after apply) + enabled = (known after apply) } } + kube_controller { + cluster_cidr = (known after apply) + extra_args = (known after apply) + extra_binds = (known after apply) + extra_env = (known after apply) + image = (known after apply) + service_cluster_ip_range = (known after apply) } + kubelet { + cluster_dns_server = (known after apply) + cluster_domain = (known after apply) + extra_args = (known after apply) + extra_binds = (known after apply) + extra_env = (known after apply) + fail_swap_on = (known after apply) + generate_serving_certificate = (known after apply) + image = (known after apply) + infra_container_image = (known after apply) } + kubeproxy { + extra_args = (known after apply) + extra_binds = (known after apply) + extra_env = (known after apply) + image = (known after apply) } + scheduler { + extra_args = (known after apply) + extra_binds = (known after apply) + extra_env = (known after apply) + image = (known after apply) } } + upgrade_strategy { + drain = (known after apply) + max_unavailable_controlplane 
= (known after apply) + max_unavailable_worker = (known after apply) + drain_input { + delete_local_data = (known after apply) + force = (known after apply) + grace_period = (known after apply) + ignore_daemon_sets = (known after apply) + timeout = (known after apply) } } } + scheduled_cluster_scan { + enabled = (known after apply) + scan_config { + cis_scan_config { + debug_master = (known after apply) + debug_worker = (known after apply) + override_benchmark_version = (known after apply) + override_skip = (known after apply) + profile = (known after apply) } } + schedule_config { + cron_schedule = (known after apply) + retention = (known after apply) } } } # rancher2_cluster.test-rke will be destroyed # (because rancher2_cluster.test-rke is not in configuration) - resource "rancher2_cluster" "test-rke" { - annotations = { - "authz.management.cattle.io/creator-role-bindings" = jsonencode( { - created = [ - "cluster-owner", ] - required = [ - "cluster-owner", ] } ) - "lifecycle.cattle.io/create.cluster-agent-controller-cleanup" = "true" - "lifecycle.cattle.io/create.cluster-scoped-gc" = "true" - "lifecycle.cattle.io/create.mgmt-cluster-rbac-remove" = "true" } -> null - cluster_registration_token = [ - { - annotations = {} - cluster_id = "c-dqlbs" - command = "kubectl apply -f https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2_c-dqlbs.yaml" - id = "c-dqlbs:default-token" - insecure_command = "curl --insecure -sfL https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2_c-dqlbs.yaml | kubectl apply -f -" - insecure_node_command = "" - insecure_windows_node_command = "" - labels = {} - manifest_url = "https://rancher.euwest.azure.internal.softwareheritage.org/v3/import/z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2_c-dqlbs.yaml" - name = "default-token" - node_command = "sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.6.4 --server https://rancher.euwest.azure.internal.softwareheritage.org --token z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2 --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac" - token = "z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2" - windows_node_command = "PowerShell -NoLogo -NonInteractive -Command \"& {docker run -v c:\\:c:\\host rancher/rancher-agent:v2.6.4 bootstrap --server https://rancher.euwest.azure.internal.softwareheritage.org --token z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2 --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac | iex}\"" }, ] -> null - default_project_id = "c-dqlbs:p-shtmm" -> null - description = "Test rke" -> null - docker_root_dir = "/var/lib/docker" -> null - driver = "rancherKubernetesEngine" -> null - enable_cluster_alerting = false -> null - enable_cluster_monitoring = false -> null - enable_network_policy = false -> null - fleet_workspace_name = "fleet-default" -> null - id = "c-dqlbs" -> null - istio_enabled = false -> null - kube_config = (sensitive value) - labels = { - "cattle.io/creator" = "norman" } -> null - name = "test-rke" -> null - system_project_id = "c-dqlbs:p-tbq4f" -> null - windows_prefered_cluster = false -> null - cluster_auth_endpoint { - enabled = false -> null } - rke_config { - addon_job_timeout = 45 -> null - addons_include = [] -> null - enable_cri_dockerd = false -> null - 
ignore_docker_version = true -> null - kubernetes_version = "v1.22.7-rancher1-2" -> null - ssh_agent_auth = false -> null - authentication { - sans = [] -> null - strategy = "x509" -> null } - authorization {} - bastion_host { - ssh_agent_auth = false -> null } - cloud_provider { } - ingress { - default_backend = true -> null - extra_args = {} -> null - http_port = 0 -> null - https_port = 0 -> null - node_selector = {} -> null - options = {} -> null - provider = "nginx" -> null } - monitoring { - node_selector = {} -> null - options = {} -> null - provider = "metrics-server" -> null - replicas = 1 -> null } - network { - mtu = 0 -> null - options = {} -> null - plugin = "canal" -> null } - services { - etcd { - creation = "12h" -> null - external_urls = [] -> null - extra_args = { - "election-timeout" = "5000" - "heartbeat-interval" = "500" } -> null - extra_binds = [] -> null - extra_env = [] -> null - gid = 0 -> null - retention = "72h" -> null - snapshot = false -> null - uid = 0 -> null - backup_config { - enabled = true -> null - interval_hours = 12 -> null - retention = 6 -> null - safe_timestamp = false -> null - timeout = 300 -> null } } - kube_api { - admission_configuration = {} -> null - always_pull_images = false -> null - extra_args = {} -> null - extra_binds = [] -> null - extra_env = [] -> null - pod_security_policy = false -> null - service_node_port_range = "30000-32767" -> null } - kube_controller {} - kubelet { - extra_args = {} -> null - extra_binds = [] -> null - extra_env = [] -> null - fail_swap_on = false -> null - generate_serving_certificate = false -> null } - kubeproxy {} - scheduler {} } - upgrade_strategy { - drain = false -> null - max_unavailable_controlplane = "1" -> null - max_unavailable_worker = "10%" -> null - drain_input { - delete_local_data = false -> null - force = false -> null - grace_period = -1 -> null - ignore_daemon_sets = true -> null - timeout = 120 -> null } } } } Plan: 1 to add, 0 to change, 1 to destroy. Changes to Outputs: ~ rancher2_cluster_command = "sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.6.4 --server https://rancher.euwest.azure.internal.softwareheritage.org --token z284ctbtl4kqfbqhgm2k9g6b5chrpwkz29ljjt5xb2m2bqcp5p4st2 --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac" -> (known after apply) # Warning: this attribute value will be marked as sensitive and will not # display in UI output after applying this change. ~ rancher2_cluster_summary = (sensitive value) Do you want to perform these actions? Terraform will perform the actions described above. Only 'yes' will be accepted to approve. Enter a value: yes rancher2_cluster.test-rke: Destroying... [id=c-dqlbs] rancher2_cluster.staging-workers: Creating... rancher2_cluster.staging-workers: Creation complete after 4s [id=c-t85mz] rancher2_cluster.test-rke: Still destroying... [id=c-dqlbs, 10s elapsed] rancher2_cluster.test-rke: Still destroying... [id=c-dqlbs, 20s elapsed] rancher2_cluster.test-rke: Destruction complete after 21s Apply complete! Resources: 1 added, 0 changed, 1 destroyed. 
Outputs:

counters0_summary = <<EOT
hostname: counters0
fqdn: counters0.internal.staging.swh.network
network: ip=192.168.130.95/24,gw=192.168.130.1 macaddrs=E2:6E:12:C7:3E:A4
vmid: 138
EOT
deposit_summary = <<EOT
hostname: deposit
fqdn: deposit.internal.staging.swh.network
network: ip=192.168.130.31/24,gw=192.168.130.1 macaddrs=9E:81:DD:58:15:3B
vmid: 120
EOT
maven-exporter0_summary = <<EOT
hostname: maven-exporter0
fqdn: maven-exporter0.internal.staging.swh.network
network: ip=192.168.130.70/24,gw=192.168.130.1 macaddrs=36:86:F6:F9:2A:5D
vmid: 122
EOT
mirror-tests_summary = <<EOT
hostname: mirror-test
fqdn: mirror-test.internal.staging.swh.network
network: ip=192.168.130.160/24,gw=192.168.130.1 macaddrs=E6:3C:8A:B7:26:5D
vmid: 132
EOT
objstorage0_summary = <<EOT
hostname: objstorage0
fqdn: objstorage0.internal.staging.swh.network
network: ip=192.168.130.110/24,gw=192.168.130.1 macaddrs=5E:28:EA:7D:50:0D
vmid: 102
EOT
rancher2_cluster_command = "sudo docker run -d --privileged --restart=unless-stopped --net=host -v /etc/kubernetes:/etc/kubernetes -v /var/run:/var/run rancher/rancher-agent:v2.6.4 --server https://rancher.euwest.azure.internal.softwareheritage.org --token zbtc5cl2l6qfp6jrwmw6jqztmtq6kbdv5m8jk2w7qktq8gb9knb6zq --ca-checksum 8850dd89eb7f29a70c0d50a2b389bf1950808a9dff4062c66ab806b80b988bac"
rancher2_cluster_summary = <sensitive>
rp0_summary = <<EOT
hostname: rp0
fqdn: rp0.internal.staging.swh.network
network: ip=192.168.130.20/24,gw=192.168.130.1 macaddrs=4A:80:47:5D:DF:73
vmid: 129
EOT
scheduler0_summary = <<EOT
hostname: scheduler0
fqdn: scheduler0.internal.staging.swh.network
network: ip=192.168.130.50/24,gw=192.168.130.1 macaddrs=92:02:7E:D0:B9:36
vmid: 116
EOT
scrubber0_summary = <<EOT
hostname: scrubber0
fqdn: scrubber0.internal.staging.swh.network
network: ip=192.168.130.120/24,gw=192.168.130.1 macaddrs=86:09:0A:61:AB:C1
vmid: 142
EOT
search-esnode0_summary = <<EOT
hostname: search-esnode0
fqdn: search-esnode0.internal.staging.swh.network
network: ip=192.168.130.80/24,gw=192.168.130.1 macaddrs=96:74:49:BD:B5:08
vmid: 130
EOT
search0_summary = <<EOT
hostname: search0
fqdn: search0.internal.staging.swh.network
network: ip=192.168.130.90/24,gw=192.168.130.1 macaddrs=EE:FA:76:55:CF:99
vmid: 131
EOT
vault_summary = <<EOT
hostname: vault
fqdn: vault.internal.staging.swh.network
network: ip=192.168.130.60/24,gw=192.168.130.1 macaddrs=16:15:1C:79:CB:DB
vmid: 121
EOT
webapp_summary = <<EOT
hostname: webapp
fqdn: webapp.internal.staging.swh.network
network: ip=192.168.130.30/24,gw=192.168.130.1 macaddrs=1A:00:39:95:D4:5F
vmid: 119
EOT
worker0_summary = <<EOT
hostname: worker0
fqdn: worker0.internal.staging.swh.network
network: ip=192.168.130.100/24,gw=192.168.130.1 macaddrs=72:D9:03:46:B1:47
vmid: 117
EOT
worker1_summary = <<EOT
hostname: worker1
fqdn: worker1.internal.staging.swh.network
network: ip=192.168.130.101/24,gw=192.168.130.1 macaddrs=D6:A9:6F:02:E3:66
vmid: 118
EOT
worker2_summary = <<EOT
hostname: worker2
fqdn: worker2.internal.staging.swh.network
network: ip=192.168.130.102/24,gw=192.168.130.1 macaddrs=AA:57:27:51:75:18
vmid: 112
EOT
worker3_summary = <<EOT
hostname: worker3
fqdn: worker3.internal.staging.swh.network
network: ip=192.168.130.103/24,gw=192.168.130.1 macaddrs=1A:F8:1A:2C:12:E1
vmid: 137
EOT
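The registration command for the new cluster (the rancher2_cluster_command output above) can be read back at any time with:

terraform output rancher2_cluster_command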
And check that the cluster was indeed created in the Rancher instance.
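One way to do that check from the command line (a sketch; RANCHER_API_TOKEN is a hypothetical variable holding an API token allowed to list clusters):

curl -s -u "${RANCHER_API_TOKEN}" \
  "https://rancher.euwest.azure.internal.softwareheritage.org/v3/clusters?name=staging-workers" \
  | jq '.data[] | {id, name, state}'

The new cluster should be listed, and should eventually report state "active" once nodes have been registered with the agent command from the outputs.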
Diff Detail
- Repository: rSPRE sysadm-provisioning
- Branch: elastic-workers
- Lint: Lint Skipped
- Unit: Unit Tests Skipped
- Build Status: Buildable 28584, Build 44681: arc lint + arc unit