Commit 5011550

refactor: use vpc for scaletest networking infrastructure
Co-authored-by: Dean Sheather <dean@deansheather.com>
1 parent 4a62cfb commit 5011550

4 files changed: +143 -13 lines changed

scaletest/terraform/action/gcp_clusters.tf

Lines changed: 4 additions & 4 deletions

@@ -78,12 +78,13 @@ resource "google_container_cluster" "cluster" {
   name       = "${var.name}-${each.key}"
   location   = each.value.zone
   project    = var.project_id
-  network    = local.vpc_name
-  subnetwork = local.subnet_name
+  network    = google_compute_network.network.name
+  subnetwork = google_compute_subnetwork.subnetwork[each.key].name
   networking_mode           = "VPC_NATIVE"
   default_max_pods_per_node = 256
   ip_allocation_policy { # Required with networking_mode=VPC_NATIVE
-
+    cluster_secondary_range_name  = local.secondary_ip_range_k8s_pods
+    services_secondary_range_name = local.secondary_ip_range_k8s_services
   }
   release_channel {
     # Setting release channel as STABLE can cause unexpected cluster upgrades.
@@ -108,7 +109,6 @@ resource "google_container_cluster" "cluster" {
     workload_pool = "${data.google_project.project.project_id}.svc.id.goog"
   }
 
-
   lifecycle {
     ignore_changes = [
       maintenance_policy,
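
With networking_mode = "VPC_NATIVE", the cluster draws pod and service IPs from the named secondary ranges on its node subnetwork. A rough sketch of how the new references resolve for one deployment, assuming var.name = "alpha" and a deployment key of "primary" (names and ranges follow from the locals added in gcp_vpc.tf below):

# Sketch only: resolved cluster networking for var.name = "alpha", key "primary".
# network    = "alpha-scaletest"   (google_compute_network.network.name)
# subnetwork = "alpha-primary"     (google_compute_subnetwork.subnetwork["primary"].name)
# ip_allocation_policy {
#   cluster_secondary_range_name  = "k8s-pods"      # 172.16.128.0/18 on the subnetwork
#   services_secondary_range_name = "k8s-services"  # 172.16.192.0/18 on the subnetwork
# }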

scaletest/terraform/action/gcp_db.tf

Lines changed: 1 addition & 1 deletion

@@ -23,7 +23,7 @@ resource "google_sql_database_instance" "db" {
 
   ip_configuration {
     ipv4_enabled    = false
-    private_network = local.vpc_id
+    private_network = google_compute_network.network.id
   }
 
   insights_config {
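
The private-IP Cloud SQL instance can only allocate an address on the new VPC once the Private Service Access connection created in gcp_vpc.tf exists. A minimal sketch of that relationship, using the resource names from this commit; the explicit depends_on is illustrative and may already be implied by the existing configuration:

resource "google_sql_database_instance" "db" {
  # ... other settings elided ...
  settings {
    ip_configuration {
      ipv4_enabled    = false
      private_network = google_compute_network.network.id
    }
  }

  # Ensure the PSA peering is in place before Cloud SQL allocates a private IP.
  depends_on = [google_service_networking_connection.private_vpc_connection]
}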

scaletest/terraform/action/gcp_vpc.tf

Lines changed: 133 additions & 8 deletions

@@ -1,9 +1,91 @@
 locals {
-  vpc_name    = "scaletest"
-  vpc_id      = "projects/${var.project_id}/global/networks/${local.vpc_name}"
-  subnet_name = "scaletest"
+  # Generate a /14 for each deployment.
+  cidr_networks = cidrsubnets(
+    "172.16.0.0/12",
+    2,
+    2,
+    2,
+  )
+
+  networks = {
+    alpha   = local.cidr_networks[0]
+    bravo   = local.cidr_networks[1]
+    charlie = local.cidr_networks[2]
+  }
+
+  # Generate a bunch of /18s within the subnet we're using from the above map.
+  cidr_subnetworks = cidrsubnets(
+    local.networks[var.name],
+    4, # PSA
+    4, # primary subnetwork
+    4, # primary k8s pod network
+    4, # primary k8s services network
+    4, # europe subnetwork
+    4, # europe k8s pod network
+    4, # europe k8s services network
+    4, # asia subnetwork
+    4, # asia k8s pod network
+    4, # asia k8s services network
+  )
+
+  psa_range_address       = split("/", local.cidr_subnetworks[0])[0]
+  psa_range_prefix_length = tonumber(split("/", local.cidr_subnetworks[0])[1])
+
+  subnetworks = {
+    primary = local.cidr_subnetworks[1]
+    europe  = local.cidr_subnetworks[4]
+    asia    = local.cidr_subnetworks[7]
+  }
+  cluster_ranges = {
+    primary = {
+      pods     = local.cidr_subnetworks[2]
+      services = local.cidr_subnetworks[3]
+    }
+    europe = {
+      pods     = local.cidr_subnetworks[5]
+      services = local.cidr_subnetworks[6]
+    }
+    asia = {
+      pods     = local.cidr_subnetworks[8]
+      services = local.cidr_subnetworks[9]
+    }
+  }
+
+  secondary_ip_range_k8s_pods     = "k8s-pods"
+  secondary_ip_range_k8s_services = "k8s-services"
+}
+
+# Create a VPC for the deployment
+resource "google_compute_network" "network" {
+  project                 = var.project_id
+  name                    = "${var.name}-scaletest"
+  description             = "scaletest network for ${var.name}"
+  auto_create_subnetworks = false
+}
+
+# Create a subnetwork with a unique range for each region
+resource "google_compute_subnetwork" "subnetwork" {
+  for_each = local.subnetworks
+  name     = "${var.name}-${each.key}"
+  # Use the deployment region
+  region                   = local.deployments[each.key].region
+  network                  = google_compute_network.network.id
+  project                  = var.project_id
+  ip_cidr_range            = each.value
+  private_ip_google_access = true
+
+  secondary_ip_range {
+    range_name    = local.secondary_ip_range_k8s_pods
+    ip_cidr_range = local.cluster_ranges[each.key].pods
+  }
+
+  secondary_ip_range {
+    range_name    = local.secondary_ip_range_k8s_services
+    ip_cidr_range = local.cluster_ranges[each.key].services
+  }
 }
 
+# Create a public IP for each region
 resource "google_compute_address" "coder" {
   for_each = local.deployments
   project  = var.project_id
@@ -13,17 +95,60 @@ resource "google_compute_address" "coder" {
   network_tier = "PREMIUM"
 }
 
-resource "google_compute_global_address" "sql_peering" {
+# Reserve an internal range for Google-managed services (PSA), used for Cloud
+# SQL
+resource "google_compute_global_address" "psa_peering" {
   project       = var.project_id
   name          = "${var.name}-sql-peering"
   purpose       = "VPC_PEERING"
   address_type  = "INTERNAL"
-  prefix_length = 16
-  network       = local.vpc_name
+  address       = local.psa_range_address
+  prefix_length = local.psa_range_prefix_length
+  network       = google_compute_network.network.self_link
 }
 
 resource "google_service_networking_connection" "private_vpc_connection" {
-  network                 = local.vpc_id
+  network                 = google_compute_network.network.id
   service                 = "servicenetworking.googleapis.com"
-  reserved_peering_ranges = [google_compute_global_address.sql_peering.name]
+  reserved_peering_ranges = [google_compute_global_address.psa_peering.name]
+}
+
+# Join the new network to the observability network so we can talk to the
+# Prometheus instance
+data "google_compute_network" "observability" {
+  project = var.project_id
+  name    = var.observability_cluster_vpc
+}
+
+resource "google_compute_network_peering" "scaletest_to_observability" {
+  name                 = "peer-${google_compute_network.network.name}-to-${data.google_compute_network.observability.name}"
+  network              = google_compute_network.network.self_link
+  peer_network         = data.google_compute_network.observability.self_link
+  import_custom_routes = true
+  export_custom_routes = true
+}
+
+resource "google_compute_network_peering" "observability_to_scaletest" {
+  name                 = "peer-${data.google_compute_network.observability.name}-to-${google_compute_network.network.name}"
+  network              = data.google_compute_network.observability.self_link
+  peer_network         = google_compute_network.network.self_link
+  import_custom_routes = true
+  export_custom_routes = true
+}
+
+# Allow traffic from the scaletest network into the observability network so we
+# can connect to Prometheus
+resource "google_compute_firewall" "observability_allow_from_scaletest" {
+  project       = var.project_id
+  name          = "allow-from-scaletest-${var.name}"
+  network       = data.google_compute_network.observability.self_link
+  direction     = "INGRESS"
+  source_ranges = [local.networks[var.name]]
+  allow {
+    protocol = "icmp"
+  }
+  allow {
+    protocol = "tcp"
+    ports    = ["0-65535"]
+  }
 }
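
For reference, cidrsubnets with equal newbits hands out consecutive blocks, so the address plan above works out as sketched below for var.name = "alpha" (bravo and charlie shift up by one /14). This also shows why the PSA reservation is now pinned to the first /18 of the deployment's block instead of the previous arbitrary /16; the same values can be checked in terraform console.

# Sketch: cidrsubnets("172.16.0.0/12", 2, 2, 2) yields three /14s:
#   alpha = 172.16.0.0/14, bravo = 172.20.0.0/14, charlie = 172.24.0.0/14
#
# For alpha, cidrsubnets("172.16.0.0/14", 4, 4, 4, 4, 4, 4, 4, 4, 4, 4) yields ten /18s:
#   [0] 172.16.0.0/18    PSA range (Cloud SQL) -> psa_range_address = "172.16.0.0", prefix_length = 18
#   [1] 172.16.64.0/18   primary subnetwork
#   [2] 172.16.128.0/18  primary k8s pods
#   [3] 172.16.192.0/18  primary k8s services
#   [4] 172.17.0.0/18    europe subnetwork
#   [5] 172.17.64.0/18   europe k8s pods
#   [6] 172.17.128.0/18  europe k8s services
#   [7] 172.17.192.0/18  asia subnetwork
#   [8] 172.18.0.0/18    asia k8s pods
#   [9] 172.18.64.0/18   asia k8s services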

scaletest/terraform/action/vars.tf

Lines changed: 5 additions & 0 deletions

@@ -85,6 +85,11 @@ variable "observability_cluster_location" {
   default = "us-east1-b"
 }
 
+variable "observability_cluster_vpc" {
+  description = "Name of the observability cluster VPC network to peer with."
+  default     = "default"
+}
+
 variable "cloudflare_api_token_secret" {
   description = "Name of the Google Secret Manager secret containing the Cloudflare API token."
   default     = "cloudflare-api-token-dns"
