Skip to content

Commit

Permalink
[DRAFT] Add load balanced tor proxy to AWS terraform config (#406)
Browse files Browse the repository at this point in the history
* [DRAFT] Add load balanced tor proxy

This is more a basis for discussion than a serious proposal to merge. I'm happy to take suggestions; I am not a Tor expert, so there may be much that can be improved. Notes:
- var.name is changed as NLBs don't support underscores in their names
- the db1000n servers will need to be restarted if the NLB is recreated. Is it possible to change the db1000n proxy config without restarting?
- there is a var.zones variable that allows you to configure the number of availability zones - defaults to 2. LBs require >1
- the tor instances have a netcat-based healthcheck. There's probably a better way
- the tor instances have a cron job that calls HUP on tor processes every min, to get a new IP
- works with the newly defaulted arm instances

* Modularised the tor proxy and changed the default behaviour back to 'no proxy'

Co-authored-by: Jim Page <[email protected]>
  • Loading branch information
SemiConscious and jim-page authored Mar 25, 2022
1 parent 014ef18 commit 01572db
Show file tree
Hide file tree
Showing 4 changed files with 314 additions and 19 deletions.
3 changes: 2 additions & 1 deletion terraform/aws/ireland.tfvars
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# Make changes in this file
region = "eu-west-1"
name = "ir_db1000n"
name = "ir-db1000n"
desired_capacity = 2
min_size = 0
max_size = 32
zones = 2
91 changes: 73 additions & 18 deletions terraform/aws/main.tf
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,8 @@ terraform {
}

# AWS provider; region and CLI profile come from input variables
# (region is set per-deployment in the *.tfvars files).
provider "aws" {
  region  = var.region
  profile = var.profile
}

data "aws_ami" "latest_amazon_linux" {
Expand Down Expand Up @@ -84,16 +85,19 @@ resource "aws_security_group" "instance_connect" {
name_prefix = "instance_connect"
description = "allow ssh"

ingress {
cidr_blocks = ["0.0.0.0/0", ]
description = ""
from_port = 22
ipv6_cidr_blocks = []
prefix_list_ids = []
protocol = "tcp"
security_groups = []
self = false
to_port = 22
dynamic "ingress" {
for_each = var.allow_ssh ? ["ssh"] : []
content {
cidr_blocks = ["0.0.0.0/0", ]
description = ""
from_port = 22
ipv6_cidr_blocks = []
prefix_list_ids = []
protocol = "tcp"
security_groups = []
self = false
to_port = 22
}
}
egress {
from_port = 0
Expand All @@ -116,10 +120,20 @@ resource "aws_route_table" "route-table-test-env" {
}

resource "aws_route_table_association" "subnet-association" {
subnet_id = aws_subnet.main.id
for_each = { for az, subnet in aws_subnet.main : az => subnet.id }
subnet_id = each.value
route_table_id = aws_route_table.route-table-test-env.id
}

locals {
  # Startup command used when the tor proxy is enabled: resolves the NLB's
  # DNS name to its IPv4 addresses at instance boot and passes them to
  # db1000n as a comma-separated list of socks5 proxies.
  # NOTE(review): the IP list is captured once at boot — if the NLB is
  # recreated the db1000n instances must be restarted (see commit notes).
  # The contains()/keys() guard keeps the expression valid at plan time
  # when the tor-proxy module is not instantiated.
  proxy_run_cmd = <<EOF
PIPS=$(host -4 ${contains(keys(module.tor-proxy), "tor-proxy") ? module.tor-proxy["tor-proxy"].lb.dns_name : ""} | egrep -o '[0-9]+(\.[0-9]+){3}$' | awk '{printf("socks5://%s:9050\n", $0)}' | paste -d',' -s -)
docker run -ti -d --restart always ghcr.io/arriven/db1000n-advanced ./db1000n -proxy $PIPS
EOF
  # Default (proxy disabled) startup command.
  no_proxy_run_cmd = "docker run -ti -d --restart always ghcr.io/arriven/db1000n-advanced"
  # Command actually spliced into the launch template's user_data.
  docker_run_cmd = var.enable_tor_proxy ? local.proxy_run_cmd : local.no_proxy_run_cmd
}

resource "aws_launch_template" "example" {
name = var.name
image_id = data.aws_ami.latest_amazon_linux.id
Expand All @@ -128,18 +142,18 @@ resource "aws_launch_template" "example" {
instance_market_options {
market_type = "spot"
}
user_data = base64encode(<<EOF
user_data = base64encode(join("\n", [<<EOF
#!/bin/bash -xe
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
yum update -y
amazon-linux-extras install docker
service docker start
usermod -a -G docker ec2-user
chkconfig docker on
docker run -ti -d --restart always ghcr.io/arriven/db1000n-advanced
EOF
)
, local.docker_run_cmd
, var.extra_startup_script
]))
iam_instance_profile {
name = aws_iam_instance_profile.instance_profile.name
}
Expand All @@ -162,6 +176,7 @@ EOF
Name = "db1000n-server"
}
}
depends_on = [module.tor-proxy]
}

resource "aws_autoscaling_group" "example" {
Expand All @@ -170,12 +185,14 @@ resource "aws_autoscaling_group" "example" {
desired_capacity = var.desired_capacity
max_size = var.max_size
min_size = var.min_size
vpc_zone_identifier = [aws_subnet.main.id]
vpc_zone_identifier = [for subnet in aws_subnet.main : subnet.id]
health_check_grace_period = 180
launch_template {
id = aws_launch_template.example.id
version = aws_launch_template.example.latest_version
}

lifecycle { create_before_destroy = true }
}

resource "aws_vpc" "main" {
Expand All @@ -184,8 +201,46 @@ resource "aws_vpc" "main" {
enable_dns_support = true
}

locals {
public_subnet_cidr = cidrsubnet(aws_vpc.main.cidr_block, 4, 0)
private_subnet_cidr = cidrsubnet(aws_vpc.main.cidr_block, 4, 1)
}

data "aws_availability_zones" "azs" {
state = "available"
}

resource "aws_subnet" "private" {
for_each = { for azid, zone in slice(data.aws_availability_zones.azs.names, 0, var.zones) : zone => azid }
availability_zone = each.key
vpc_id = aws_vpc.main.id
cidr_block = cidrsubnet(local.private_subnet_cidr, 4, each.value)
map_public_ip_on_launch = false
}

resource "aws_subnet" "main" {
for_each = { for azid, zone in slice(data.aws_availability_zones.azs.names, 0, var.zones) : zone => azid }
availability_zone = each.key
vpc_id = aws_vpc.main.id
cidr_block = "10.0.1.0/24"
cidr_block = cidrsubnet(local.public_subnet_cidr, 4, each.value)
map_public_ip_on_launch = true
}

# optional tor proxy

module "tor-proxy" {
source = "./tor-proxy"
for_each = toset(var.enable_tor_proxy ? ["tor-proxy"] : [])
name = var.name
private_subnet_ids = [for subnet in aws_subnet.private : subnet.id]
public_subnet_ids = [for subnet in aws_subnet.main : subnet.id]
vpc = aws_vpc.main
allow_ssh = var.allow_ssh
arch_ami = var.arch_ami
instance_type = var.instance_type
extra_startup_script = var.extra_startup_script
instance_profile = aws_iam_instance_profile.instance_profile
desired_capacity = var.desired_capacity
min_size = var.min_size
max_size = var.max_size
}
202 changes: 202 additions & 0 deletions terraform/aws/tor-proxy/tor-proxy.tf
Original file line number Diff line number Diff line change
@@ -0,0 +1,202 @@
# Module inputs — all required; no defaults or type constraints, so values
# are validated only by how they are used below.
variable "name" {}                 # base name for the NLB, target group, ASG and launch template
variable "private_subnet_ids" {}   # subnet ids the internal NLB is placed in
variable "public_subnet_ids" {}    # subnet ids the proxy ASG launches instances into
variable "vpc" {}                  # full aws_vpc object (.id and .cidr_block are read)
variable "allow_ssh" {}            # bool: whether to open an SSH ingress rule on the instances
variable "arch_ami" {}             # AMI architecture token used in the AMI name filter
variable "instance_type" {}        # EC2 instance type for the proxy instances
variable "extra_startup_script" {} # extra shell appended to the instance user_data
variable "instance_profile" {}     # aws_iam_instance_profile object (.name is read)
variable "desired_capacity" {}     # ASG desired size
variable "min_size" {}             # ASG minimum size
variable "max_size" {}             # ASG maximum size

# Expose the whole NLB object; the root module reads .dns_name from it when
# building the db1000n proxy list.
output "lb" {
  value = aws_lb.proxy-lb
}

# Internal network load balancer fronting the pool of tor instances.
# NLB names may not contain underscores, which is why var.name had to be
# hyphenated (see commit notes).
resource "aws_lb" "proxy-lb" {
  name               = "${var.name}-proxy"
  internal           = true
  load_balancer_type = "network"
  subnets            = var.private_subnet_ids
}

# Target group for the instances' SOCKS port (9050). Health checking is done
# over HTTP against the netcat responder each instance runs on port 8080
# (see the launch template's user_data; the commit notes call this a
# stop-gap healthcheck).
resource "aws_lb_target_group" "proxy-lb-tg" {
  name     = "${var.name}-proxy"
  port     = 9050
  protocol = "TCP"
  vpc_id   = var.vpc.id

  health_check {
    path                = "/"
    healthy_threshold   = 3
    unhealthy_threshold = 3
    interval            = 30
    protocol            = "HTTP"
    port                = 8080
  }
}

# Forward TCP 9050 on the NLB straight through to the tor target group.
resource "aws_lb_listener" "proxy-lb-listener" {
  load_balancer_arn = aws_lb.proxy-lb.arn
  port              = "9050"
  protocol          = "TCP"

  default_action {
    type             = "forward"
    target_group_arn = aws_lb_target_group.proxy-lb-tg.arn
  }
}

# Security group attached to the tor proxy instances (via the launch
# template). Allows optional SSH from anywhere, SOCKS and health-check
# traffic from inside the VPC, and all egress.
resource "aws_security_group" "proxy_instance" {
  vpc_id      = var.vpc.id
  name_prefix = "proxy_instance"
  description = "access to/from proxy instances"

  # Optional SSH access, mirroring the instance_connect group in the root
  # module.
  dynamic "ingress" {
    for_each = var.allow_ssh ? ["ssh"] : []
    content {
      cidr_blocks = ["0.0.0.0/0", ]
      description = "ssh"
      protocol    = "tcp"
      # Fix: was from_port = 0, which opened TCP ports 0-22 to the world
      # instead of just SSH.
      from_port   = 22
      to_port     = 22
    }
  }

  # SOCKS5 from anywhere inside the VPC (i.e. the db1000n instances, via
  # the NLB).
  ingress {
    cidr_blocks = [var.vpc.cidr_block]
    description = "socks5"
    protocol    = "tcp"
    # Fix: was from_port = 0, which opened TCP ports 0-9050 to the whole
    # VPC instead of just the SOCKS port.
    from_port   = 9050
    to_port     = 9050
  }

  # NLB health checks hit the netcat responder on 8080. Previously this was
  # covered incidentally by the over-broad 0-9050 range; now allowed
  # explicitly so narrowing the SOCKS rule does not break health checking.
  ingress {
    cidr_blocks = [var.vpc.cidr_block]
    description = "healthcheck"
    protocol    = "tcp"
    from_port   = 8080
    to_port     = 8080
  }

  egress {
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }
}

# NOTE(review): this security group is not attached to any resource in this
# module — network load balancers did not support security groups when this
# was written, and neither the launch template nor the ASG references it.
# It looks like dead config carried over from an ALB-style setup; confirm
# before removing.
resource "aws_security_group" "lb" {
  name        = "${var.name}-proxy-lb-security-group"
  description = "controls access to the proxy load balancer"
  vpc_id      = var.vpc.id

  ingress {
    protocol        = -1
    from_port       = 0
    to_port         = 0
    security_groups = [aws_security_group.proxy_instance.id]
  }
}

# NOTE(review): egress rule for the "lb" security group, which is not
# attached to anything in this module — dead config unless that group is
# ever attached; confirm before removing.
resource "aws_security_group_rule" "lb-egress" {
  security_group_id        = aws_security_group.lb.id
  type                     = "egress"
  protocol                 = -1
  from_port                = 0
  to_port                  = 0
  source_security_group_id = aws_security_group.proxy-target-group.id
}

# NOTE(review): not referenced by the launch template, the NLB, or the ASG
# (NLB target groups take no security group), so this appears to be dead
# config; confirm before removing.
resource "aws_security_group" "proxy-target-group" {
  name        = "${var.name}-proxy-target-group"
  description = "controls access to the proxy containers"
  vpc_id      = var.vpc.id

  ingress {
    protocol        = -1
    from_port       = 0
    to_port         = 0
    security_groups = [aws_security_group.lb.id]
  }

  egress {
    protocol        = -1
    from_port       = 0
    to_port         = 0
    security_groups = [aws_security_group.lb.id]
  }
}

# Latest Amazon Linux 2 AMI for the requested architecture; mirrors the
# lookup in the root module so this module is self-contained.
data "aws_ami" "latest_amazon_linux" {
  owners      = ["amazon"]
  most_recent = true
  filter {
    name   = "name"
    values = ["amzn2-ami-kernel-*-hvm-*-${var.arch_ami}-gp2"]
  }
}

# Spot launch template for the tor proxy instances. The user_data script:
#   * installs tor from EPEL and listens for SOCKS on 0.0.0.0:9050,
#   * runs a minimal netcat HTTP responder on 8080 for the NLB health check
#     (commit notes flag this as a stop-gap),
#   * installs a cron job that SIGHUPs all tor processes every minute,
#     intended to pick up a new exit IP per the commit notes.
#     NOTE(review): SIGHUP reloads tor's configuration — confirm it actually
#     rotates circuits; NEWNYM via the control port is the documented way.
resource "aws_launch_template" "proxy-instance-template" {
  name                                 = "${var.name}-proxy"
  image_id                             = data.aws_ami.latest_amazon_linux.id
  instance_initiated_shutdown_behavior = "terminate"
  instance_type                        = var.instance_type
  # Spot only — the proxies are disposable.
  instance_market_options {
    market_type = "spot"
  }
  # var.extra_startup_script is appended after the heredoc via join("\n", ...).
  user_data = base64encode(join("\n", [<<EOF
#!/bin/bash
yum update -y
amazon-linux-extras install epel -y
yum-config-manager --enable epel
yum install tor nc -y
echo "SOCKSPort 0.0.0.0:9050" >> /etc/tor/torrc
service tor start
chkconfig tor on
while true; do echo -e 'HTTP/1.1 200 OK\r\n' | nc -lp 8080 > /dev/null; echo Healthcheck >> /var/log/messages; done &
systemctl start crond
systemctl enable crond
cat << EOSC > /tmp/hup
#!/bin/bash
echo "Sending hup to tor processes"
for pid in \$(pgrep tor); do /bin/kill -1 \$pid ; done
EOSC
chmod +x /tmp/hup
(crontab -l 2>/dev/null || true; echo "* * * * * /tmp/hup >> /var/log/messages") | crontab -
EOF
  , var.extra_startup_script
  ]))
  iam_instance_profile {
    name = var.instance_profile.name
  }
  vpc_security_group_ids = [aws_security_group.proxy_instance.id]
  # Tag the instance, its volumes and its ENIs so proxy resources are
  # identifiable in the console.
  tag_specifications {
    resource_type = "instance"
    tags = {
      Name = "db1000n-proxy"
    }
  }
  tag_specifications {
    resource_type = "volume"
    tags = {
      Name = "db1000n-proxy"
    }
  }
  tag_specifications {
    resource_type = "network-interface"
    tags = {
      Name = "db1000n-proxy"
    }
  }
}

# Spot ASG for the tor instances, registered with the NLB target group so
# the load balancer only routes to instances whose 8080 healthcheck
# responder answers.
# NOTE(review): unlike the root-module ASG this has no
# create_before_destroy lifecycle; with a fixed `name`, adding one would
# need a naming strategy to avoid a create/destroy collision — confirm the
# desired replacement behaviour.
resource "aws_autoscaling_group" "proxy" {
  name                      = "${var.name}-proxy"
  capacity_rebalance        = true
  desired_capacity          = var.desired_capacity
  max_size                  = var.max_size
  min_size                  = var.min_size
  vpc_zone_identifier       = var.public_subnet_ids
  health_check_grace_period = 180
  target_group_arns         = [aws_lb_target_group.proxy-lb-tg.arn]
  launch_template {
    id      = aws_launch_template.proxy-instance-template.id
    version = aws_launch_template.proxy-instance-template.latest_version
  }
}
Loading

0 comments on commit 01572db

Please sign in to comment.