This module allows managing Internal HTTP/HTTPS Load Balancers (L7 ILBs). It's designed to expose the full configuration of the underlying resources, and to facilitate common usage patterns by providing sensible defaults, and optionally managing prerequisite resources like health checks, instance groups, etc.
Due to the complexity of the underlying resources, changes to the configuration that involve recreation of resources are best applied in stages, starting by disabling the configuration in the urlmap that references the resources that need recreation, then doing the same for the backend service, etc.
- Examples
- Deploying changes to load balancer configurations
- Files
- Variables
- Outputs
- Fixtures
An HTTP ILB with a backend service pointing to a GCE instance group:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = "$project_ids:myprj"
region = "$locations:ew1"
context = {
locations = { ew1 = "europe-west1" }
networks = { myvpc = "projects/myprj/global/networks/myvpc" }
project_ids = { myprj = "myprj" }
subnets = { mysubnet = "projects/myprj/regions/europe-west1/subnetworks/mysubnet" }
}
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
}
}
vpc_config = {
network = "$networks:myvpc"
subnetwork = "$subnets:mysubnet"
}
}
# tftest modules=1 resources=5

An HTTPS ILB needs a few additional fields:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
}
}
protocol = "HTTPS"
ssl_certificates = {
certificate_ids = [
"projects/myprj/regions/europe-west1/sslCertificates/my-cert"
]
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

When using Shared VPC, this module also allows configuring cross-project backend services:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = "prj-host"
region = "europe-west1"
backend_service_configs = {
default = {
project_id = "prj-svc"
backends = [{
group = "projects/prj-svc/zones/europe-west1-a/instanceGroups/my-ig"
}]
}
}
health_check_configs = {
default = {
project_id = "prj-svc"
http = {
port_specification = "USE_SERVING_PORT"
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

You can leverage externally defined health checks for backend services, or have the module create them for you. By default a simple HTTP health check is created, and used in backend services.
Health check configuration is controlled via the health_check_configs variable, which behaves in a similar way to other LB modules in this repository.
Defining different health checks from the default is very easy. You can for example replace the default HTTP health check with a TCP one and reference it in your backend service:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
health_checks = ["custom-tcp"]
}
}
health_check_configs = {
custom-tcp = {
tcp = { port = 80 }
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

To leverage existing health checks without having the module create them, simply pass their self links to backend services and set the health_check_configs variable to an empty map:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
health_checks = ["projects/myprj/global/healthChecks/custom"]
}
}
health_check_configs = {}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=4

The module can optionally create unmanaged instance groups, which can then be referred to in backends via their key:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
port_name = "http"
backends = [
{ group = "default" }
]
}
}
group_configs = {
default = {
zone = "europe-west1-b"
instances = [
"projects/myprj/zones/europe-west1-b/instances/vm-a"
]
named_ports = { http = 80 }
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=6

Network Endpoint Groups (NEGs) can be used as backends, by passing their id as the backend group in a backends service configuration:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
balancing_mode = "RATE"
group = "projects/myprj/zones/europe-west1-a/networkEndpointGroups/my-neg"
max_rate = {
per_endpoint = 1
}
}]
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

Similarly to instance groups, NEGs can also be managed by this module which supports GCE, hybrid, serverless and Private Service Connect NEGs:
resource "google_compute_address" "test" {
project = var.project_id
name = "neg-test"
subnetwork = var.subnet.self_link
address_type = "INTERNAL"
address = "10.0.0.10"
region = "europe-west1"
}
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
max_rate = {
per_endpoint = 1
}
}]
}
}
neg_configs = {
my-neg = {
gce = {
zone = "europe-west1-b"
endpoints = {
e-0 = {
instance = "test-1"
ip_address = google_compute_address.test.address
# ip_address = "10.0.0.10"
port = 80
}
}
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=8 inventory=zonal-neg.yaml

module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
balancing_mode = "RATE"
group = "my-neg"
max_rate = {
per_endpoint = 1
}
}]
}
}
neg_configs = {
my-neg = {
hybrid = {
zone = "europe-west1-b"
endpoints = {
e-0 = {
ip_address = "10.0.0.10"
port = 80
}
}
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=7

This is a simple example where both the Cloud Run service and the load balancer are in the same project.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "my-neg"
}]
health_checks = []
protocol = "HTTPS"
}
}
health_check_configs = {}
neg_configs = {
my-neg = {
cloudrun = {
region = "europe-west1"
target_service = {
name = "my-run-service"
}
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

For cross-project referencing, both the load balancer and the cloud run projects need to be service projects of the same Shared VPC host. Then specify the Cloud Run project for both the backend service and NEG.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "my-neg"
}]
health_checks = []
protocol = "HTTPS"
project_id = "cr-project-id"
}
}
health_check_configs = {}
neg_configs = {
my-neg = {
project_id = "cr-project-id"
cloudrun = {
region = "europe-west1"
target_service = {
name = "my-run-service"
}
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

For cross-project referencing, both the load balancer and the cloud run projects need to be service projects of the same Shared VPC host. Then specify the Cloud Run project for both the backend service and NEG.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "my-neg"
}]
health_checks = []
protocol = "HTTPS"
project_id = "cr-project-id"
}
}
health_check_configs = {}
neg_configs = {
my-neg = {
project_id = "cr-project-id"
cloudrun = {
region = "europe-west1"
target_service = {
name = "my-run-service"
}
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

Cloud Run NEGs can also be created via a URL mask, which allows targeting multiple services or revisions. In this case, a tag can be optionally specified to target a specific revision.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "my-neg"
}]
health_checks = []
protocol = "HTTPS"
}
}
health_check_configs = {}
neg_configs = {
my-neg = {
cloudrun = {
region = "europe-west1"
target_urlmask = "example.com/<service>"
tag = "my-tag"
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5

module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = var.region
backend_service_configs = {
default = {
backends = [{
group = "my-neg"
}]
health_checks = []
}
}
health_check_configs = {}
neg_configs = {
my-neg = {
psc = {
region = var.region
target_service = "${var.region}-cloudkms.googleapis.com"
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5 e2e

This example shows how to create the load balancer in one project prj-host while using a shared VPC deployed in the prj-svc project. Please note that the load balancer and its front-end will be created in the prj-host project and the back-end will be created in the prj-svc project. This is useful for situations where a shared VPC is being used that has been deployed in another project. Two subnetworks are needed, one for the load balancer and another one for the PSC endpoint.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = "prj-host"
region = "us-central1"
backend_service_configs = {
default = {
project_id = "prj-svc"
backends = [{
group = "neg-01"
}]
}
}
health_check_configs = {}
neg_configs = {
neg-01 = {
project_id = "prj-svc"
description = "Network Endpoint Group for service accessed using Private Service Connect"
psc = {
region = "us-central1"
target_service = "projects/producer_project/regions/us-central1/serviceAttachments/project_id"
network = var.vpc.self_link
subnetwork = "projects/prj-svc/regions/us-central1/subnetworks/psc_subnet"
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}

This example shows how to create and manage internet NEGs:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
project_id = var.project_id
name = "ilb-test"
region = var.region
backend_service_configs = {
default = {
backends = [
{ group = "neg-0" }
]
health_checks = []
}
}
# with a single internet NEG the implied default health check is not needed
health_check_configs = {}
neg_configs = {
neg-0 = {
internet = {
region = var.region
use_fqdn = true
endpoints = {
e-0 = {
destination = "www.example.org"
port = 80
}
}
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=6 inventory=internet-neg.yaml e2e

The module exposes the full URL map resource configuration, with some minor changes to the interface to decrease verbosity, and support for aliasing backend services via keys.
The default URL map configuration sets the default backend service as the default service for the load balancer as a convenience. Just override the urlmap_config variable to change the default behaviour:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
}
video = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig-2"
}]
log_config = {
enable = true
sample_rate = 0.5
}
}
audio = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig-3"
}]
log_config = {
enable = false
}
}
}
urlmap_config = {
default_service = "default"
host_rules = [{
hosts = ["*"]
path_matcher = "pathmap"
}]
path_matchers = {
pathmap = {
default_service = "default"
path_rules = [
{
paths = ["/video", "/video/*"]
service = "video"
},
{
paths = ["/audio", "/audio/*"]
service = "audio"
}
]
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=7 inventory=urlmap.yaml

Similarly to health checks, SSL certificates can also be created by the module. In this example we are using private key and certificate resources so that the example test only depends on Terraform providers, but in real use those can be replaced by external files.
resource "tls_private_key" "default" {
algorithm = "RSA"
rsa_bits = 4096
}
resource "tls_self_signed_cert" "default" {
private_key_pem = tls_private_key.default.private_key_pem
subject {
common_name = "example.com"
organization = "ACME Examples, Inc"
}
validity_period_hours = 720
allowed_uses = [
"key_encipherment",
"digital_signature",
"server_auth",
]
}
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
}
}
health_check_configs = {
default = {
https = { port = 443 }
}
}
protocol = "HTTPS"
ssl_certificates = {
create_configs = {
default = {
# certificate and key could also be read via file() from external files
certificate = tls_self_signed_cert.default.cert_pem
private_key = tls_private_key.default.private_key_pem
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=8

This example shows how to configure Backend Authenticated TLS using the tls_settings block.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = "europe-west1"
backend_service_configs = {
default = {
backends = [{
group = "projects/myprj/zones/europe-west1-a/instanceGroups/my-ig"
}]
tls_settings = {
# authentication_config = "projects/myprj/locations/europe-west1/backendTlsPolicies/my-policy"
sni = "backend.example.com"
subject_alt_names = ["backend.example.com"]
}
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
}
# tftest modules=1 resources=5 inventory=tls-settings.yaml

The optional service_attachment variable allows publishing Private Service Connect service by configuring service attachment for the forwarding rule.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test"
project_id = var.project_id
region = var.region
backend_service_configs = {
default = {
backends = [{
group = module.compute-vm-group-b.group.id
}]
}
}
vpc_config = {
network = var.vpc.self_link
subnetwork = var.subnet.self_link
}
service_attachment = {
nat_subnets = [var.subnet_psc_1.self_link]
automatic_connection = false
consumer_accept_lists = {
# map of `project_id` => `connection_limit`
(var.project_id) = 10
}
}
}
# tftest modules=3 resources=10 fixtures=fixtures/compute-vm-group-bc.tf e2e

The module supports the contexts interpolation. For example:
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-test-0"
project_id = "$project_ids:test"
region = "$locations:ew8"
vpc_config = {
network = "$networks:test"
subnetwork = "$subnets:test"
}
address = "$addresses:test"
backend_service_configs = {
default = {
backends = [
{ group = "projects/foo-test-0/zones/europe-west8-b/instanceGroups/ig-b" },
{ group = "ig-c" }
]
}
neg-cloudrun = {
backends = [{ group = "neg-cloudrun" }]
health_checks = []
}
neg-gce = {
backends = [{ group = "neg-gce" }]
balancing_mode = "RATE"
max_rate = { per_endpoint = 10 }
}
neg-hybrid = {
backends = [{ group = "neg-hybrid" }]
balancing_mode = "RATE"
max_rate = { per_endpoint = 10 }
}
neg-internet = {
backends = [{ group = "neg-internet" }]
health_checks = []
}
neg-psc = {
backends = [{ group = "neg-psc" }]
health_checks = []
}
}
group_configs = {
ig-c = {
zone = "$locations:ew8-c"
instances = [
"projects/foo-test-0/zones/europe-west8-c/instances/vm-c"
]
named_ports = { http = 80 }
}
}
health_check_configs = {
default = {
http = {
host = "hello.example.org"
port_specification = "USE_SERVING_PORT"
}
}
}
neg_configs = {
neg-cloudrun = {
cloudrun = {
region = "$locations:ew8"
target_service = {
name = "hello"
}
}
}
neg-gce = {
gce = {
network = "$networks:test"
subnetwork = "$subnets:test"
zone = "$locations:ew8-b"
endpoints = {
e-0 = {
instance = "nginx-ew8-b"
ip_address = "$addresses:test"
port = 80
}
}
}
}
neg-hybrid = {
hybrid = {
network = "$networks:test"
zone = "$locations:ew8-b"
endpoints = {
e-0 = {
ip_address = "$addresses:test-hybrid"
port = 80
}
}
}
}
neg-internet = {
internet = {
region = "$locations:ew8"
use_fqdn = true
endpoints = {
e-0 = {
destination = "hello.example.org"
port = 80
}
}
}
}
neg-psc = {
psc = {
region = "$locations:ew8"
target_service = "projects/foo-test-0/regions/europe-west8/serviceAttachments/sa"
network = "$networks:test"
subnetwork = "$subnets:test"
}
}
}
urlmap_config = {
default_service = "default"
host_rules = [{
hosts = ["*"]
path_matcher = "pathmap"
}]
path_matchers = {
pathmap = {
default_service = "default"
path_rules = [
{ paths = ["/cloudrun", "/cloudrun/*"], service = "neg-cloudrun" },
{ paths = ["/gce", "/gce/*"], service = "neg-gce" },
{ paths = ["/hybrid", "/hybrid/*"], service = "neg-hybrid" },
{ paths = ["/internet", "/internet/*"], service = "neg-internet" },
{ paths = ["/psc", "/psc/*"], service = "neg-psc" },
]
}
}
}
context = {
addresses = {
test = "10.0.0.10"
test-hybrid = "192.168.0.3"
}
locations = {
ew8 = "europe-west8"
ew8-b = "europe-west8-b"
ew8-c = "europe-west8-c"
}
networks = {
test = "projects/foo-dev-net-spoke-0/global/networks/dev-spoke-0"
}
project_ids = {
test = "foo-test-0"
}
subnets = {
test = "projects/foo-dev-net-spoke-0/regions/europe-west8/subnetworks/gce"
}
}
}
# tftest modules=1 resources=19 inventory=context.yaml

This example mixes group and NEG backends, and shows how to set HTTPS for specific backends.
module "ilb-l7" {
source = "./fabric/modules/net-lb-app-int"
name = "ilb-l7-test-0"
project_id = "prj-gce"
region = "europe-west8"
backend_service_configs = {
default = {
backends = [
{ group = "nginx-ew8-b" },
{ group = "nginx-ew8-c" },
]
}
gce-neg = {
backends = [{
balancing_mode = "RATE"
group = "neg-nginx-ew8-c"
max_rate = {
per_endpoint = 1
}
}]
}
home = {
backends = [{
balancing_mode = "RATE"
group = "neg-home-hello"
max_rate = {
per_endpoint = 1
}
}]
health_checks = ["neg"]
locality_lb_policy = "ROUND_ROBIN"
protocol = "HTTPS"
}
}
group_configs = {
nginx-ew8-b = {
zone = "europe-west8-b"
instances = [
"projects/prj-gce/zones/europe-west8-b/instances/nginx-ew8-b"
]
named_ports = { http = 80 }
}
nginx-ew8-c = {
zone = "europe-west8-c"
instances = [
"projects/prj-gce/zones/europe-west8-c/instances/nginx-ew8-c"
]
named_ports = { http = 80 }
}
}
health_check_configs = {
default = {
http = {
port = 80
}
}
neg = {
https = {
host = "hello.home.example.com"
port = 443
}
}
}
neg_configs = {
neg-nginx-ew8-c = {
gce = {
zone = "europe-west8-c"
endpoints = {
e-0 = {
instance = "nginx-ew8-c"
ip_address = "10.24.32.26"
port = 80
}
}
}
}
neg-home-hello = {
hybrid = {
zone = "europe-west8-b"
endpoints = {
e-0 = {
ip_address = "192.168.0.3"
port = 443
}
}
}
}
}
urlmap_config = {
default_service = "default"
host_rules = [
{
hosts = ["*"]
path_matcher = "gce"
},
{
hosts = ["hello.home.example.com"]
path_matcher = "home"
}
]
path_matchers = {
gce = {
default_service = "default"
path_rules = [
{
paths = ["/gce-neg", "/gce-neg/*"]
service = "gce-neg"
}
]
}
home = {
default_service = "home"
}
}
}
vpc_config = {
network = "projects/prj-host/global/networks/shared-vpc"
subnetwork = "projects/prj-host/regions/europe-west8/subnetworks/gce"
}
}
# tftest modules=1 resources=14

For deploying changes to load balancer configuration please refer to the net-lb-app-ext README.md.
| name | description | resources |
|---|---|---|
| backend-service.tf | Backend service resources. | google_compute_region_backend_service |
| groups.tf | None | google_compute_instance_group |
| health-check.tf | Health check resources. | google_compute_health_check · google_compute_region_health_check |
| main.tf | Module-level locals and resources. | google_compute_forwarding_rule · google_compute_network_endpoint · google_compute_network_endpoint_group · google_compute_region_network_endpoint · google_compute_region_network_endpoint_group · google_compute_region_ssl_certificate · google_compute_region_target_http_proxy · google_compute_region_target_https_proxy · google_compute_service_attachment |
| outputs.tf | Module outputs. | |
| urlmap.tf | URL map resources. | google_compute_region_url_map |
| variables-backend-service.tf | Backend services variables. | |
| variables-health-check.tf | Health check variable. | |
| variables-urlmap.tf | URLmap variable. | |
| variables.tf | Module variables. | |
| versions.tf | Version pins. | |
| name | description | type | required | default |
|---|---|---|---|---|
| name | Load balancer name. | string | ✓ | |
| project_id | Project id. | string | ✓ | |
| region | The region where to allocate the ILB resources. | string | ✓ | |
| vpc_config | VPC-level configuration. | object({…}) | ✓ | |
| address | Optional IP address used for the forwarding rule. | string | | null |
| backend_service_configs | Backend service level configuration. | map(object({…})) | | {} |
| context | Context-specific interpolations. | object({…}) | | {} |
| description | Optional description used for resources. | string | | "Terraform managed." |
| global_access | Allow client access from all regions. | bool | | null |
| group_configs | Optional unmanaged groups to create. Can be referenced in backends via key or outputs. | map(object({…})) | | {} |
| health_check_configs | Optional auto-created health check configurations, use the output self-link to set it in the auto healing policy. Refer to examples for usage. | map(object({…})) | | {…} |
| http_proxy_config | HTTP proxy configuration. Only used for non-classic load balancers. | object({…}) | | {} |
| https_proxy_config | HTTPS proxy configuration. | object({…}) | | {} |
| labels | Labels set on resources. | map(string) | | {} |
| neg_configs | Optional network endpoint groups to create. Can be referenced in backends via key or outputs. | map(object({…})) | | {} |
| network_tier_premium | Use premium network tier. Defaults to true. | bool | | true |
| ports | Optional ports for HTTP load balancer. | list(string) | | null |
| protocol | Protocol supported by this load balancer. | string | | "HTTP" |
| service_attachment | PSC service attachment. | object({…}) | | null |
| service_directory_registration | Service directory namespace and service used to register this load balancer. | object({…}) | | null |
| ssl_certificates | SSL target proxy certificates (only if protocol is HTTPS). | object({…}) | | {} |
| urlmap_config | The URL map configuration. | object({…}) | | {…} |
| name | description | sensitive |
|---|---|---|
| address | Forwarding rule address. | |
| backend_service_ids | Backend service resources. | |
| backend_service_names | Backend service resource names. | |
| forwarding_rule | Forwarding rule resource. | |
| group_ids | Autogenerated instance group ids. | |
| health_check_ids | Autogenerated health check ids. | |
| id | Fully qualified forwarding rule id. | |
| neg_ids | Autogenerated network endpoint group ids. | |
| psc_neg_ids | Autogenerated PSC network endpoint group ids. | |
| regional_neg_ids | Autogenerated regional network endpoint group ids. | |
| service_attachment_id | Id of the service attachment. | |
| url_map_id | Fully qualified URL map ID (resource path) for use in IAM conditions and API calls. |