From 345315b80824594542d92b224f9d1f5ea5a5c2e6 Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Fri, 26 Jul 2019 19:20:45 -0400 Subject: [PATCH 01/13] Support tf v.12, tunnel-only users, egress limiting, static ssh users/keys --- README.md | 7 +++++-- main.tf | 49 ++++++++++++++++++++++--------------------------- user_data.sh | 29 ++++++++++++++++++++++++++++- variables.tf | 22 +++++++++++++++++++++- 4 files changed, 76 insertions(+), 31 deletions(-) diff --git a/README.md b/README.md index d8fc528..7aee3ed 100644 --- a/README.md +++ b/README.md @@ -59,15 +59,16 @@ module "bastion" { | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| | auto_scaling_group_subnets | List of subnet were the Auto Scalling Group will deploy the instances | list | - | yes | -| bastion_amis | | map | `` | no | +| bastion_ami_id | Machine Image ID of bastion instances | string | `` | no | | bastion_host_key_pair | Select the key pair to use to launch the bastion host | string | - | yes | | bastion_instance_count | Count of bastion instance created on VPC | string | `1` | no | +| bastion_open_egress | Allow open egress from bastion hosts | bool | true | no | | bastion_record_name | DNS record name to use for the bastion | string | `` | no | | bucket_name | Bucket name were the bastion will store the logs | string | - | yes | | bucket_force_destroy | On destroy, bucket and all objects should be destroyed when using true | string | false | no | | bucket_versioning | Enable bucket versioning or not | string | true | no | | cidrs | List of CIDRs than can access to the bastion. Default : 0.0.0.0/0 | list | `` | no | -| create_dns_record | Choose if you want to create a record name for the bastion (LB). If true 'hosted_zone_name' and 'bastion_record_name' are mandatory | integer | - | yes | +| create_dns_record | Choose if you want to create a record name for the bastion (LB). 
If true 'hosted_zone_name' and 'bastion_record_name' are mandatory | bool | - | yes | | elb_subnets | List of subnet were the ELB will be deployed | list | - | yes | | hosted_zone_name | Name of the hosted zone were we'll register the bastion DNS name | string | `` | no | | is_lb_private | If TRUE the load balancer scheme will be "internal" else "internet-facing" | string | - | yes | @@ -78,6 +79,8 @@ module "bastion" { | private_ssh_port | Set the SSH port to use between the bastion and private instance | string | `22` | no | | public_ssh_port | Set the SSH port to use from desktop to the bastion | string | `22` | no | | region | | string | - | yes | +| ssh_tunnel_only_users | comma separated list of users who can use the bastion only for port-forwarding | string | `nobody` | no | +| static_ssh_users | ssh users that we want to create statically in userdata rather than use s3 sync e.g. [ {name = "someone", public_key "id_rsa..." }] | list(map) | [] no | | tags | A mapping of tags to assign | map | `` | no | | vpc_id | VPC id were we'll deploy the bastion | string | - | yes | diff --git a/main.tf b/main.tf index 25247ab..709f459 100644 --- a/main.tf +++ b/main.tf @@ -1,12 +1,3 @@ -data "template_file" "user_data" { - template = "${file("${path.module}/user_data.sh")}" - - vars { - aws_region = "${var.region}" - bucket_name = "${var.bucket_name}" - } -} - resource "aws_s3_bucket" "bucket" { bucket = "${var.bucket_name}" acl = "bucket-owner-full-control" @@ -23,7 +14,7 @@ resource "aws_s3_bucket" "bucket" { prefix = "logs/" - tags { + tags = { "rule" = "log" "autoclean" = "${var.log_auto_clean}" } @@ -66,13 +57,14 @@ resource "aws_security_group_rule" "ingress_bastion" { from_port = "${var.public_ssh_port}" to_port = "${var.public_ssh_port}" protocol = "TCP" - cidr_blocks = ["${concat(data.aws_subnet.subnets.*.cidr_block, var.cidrs)}"] + cidr_blocks = "${concat(data.aws_subnet.subnets.*.cidr_block, var.cidrs)}" security_group_id = 
"${aws_security_group.bastion_host_security_group.id}" } resource "aws_security_group_rule" "egress_bastion" { description = "Outgoing traffic from bastion to instances" + count = "${var.bastion_open_egress == true ? 1 : 0}" type = "egress" from_port = "0" to_port = "65535" @@ -162,9 +154,9 @@ EOF resource "aws_route53_record" "bastion_record_name" { name = "${var.bastion_record_name}" - zone_id = "${var.hosted_zone_name}" + zone_id = "${var.hosted_zone_name != "" ? var.hosted_zone_name : "empty"}" type = "A" - count = "${var.create_dns_record}" + count = "${var.create_dns_record == true ? 1 : 0}" alias { evaluate_target_health = true @@ -177,9 +169,7 @@ resource "aws_lb" "bastion_lb" { internal = "${var.is_lb_private}" name = "${local.name_prefix}-lb" - subnets = [ - "${var.elb_subnets}", - ] + subnets = "${var.elb_subnets}" load_balancer_type = "network" tags = "${merge(var.tags)}" @@ -191,6 +181,7 @@ resource "aws_lb_target_group" "bastion_lb_target_group" { protocol = "TCP" vpc_id = "${var.vpc_id}" target_type = "instance" + deregistration_delay = 120 health_check { port = "traffic-port" @@ -201,7 +192,7 @@ resource "aws_lb_target_group" "bastion_lb_target_group" { } resource "aws_lb_listener" "bastion_lb_listener_22" { - "default_action" { + default_action { target_group_arn = "${aws_lb_target_group.bastion_lb_target_group.arn}" type = "forward" } @@ -217,8 +208,8 @@ resource "aws_iam_instance_profile" "bastion_host_profile" { } resource "aws_launch_configuration" "bastion_launch_configuration" { - name_prefix = "${var.bastion_launch_configuration_name}" - image_id = "${data.aws_ami.amazon-linux-2.id}" + name_prefix = "${var.bastion_launch_configuration_name}-" + image_id = var.bastion_ami_id == "" ? 
data.aws_ami.amazon-linux-2.id : var.bastion_ami_id instance_type = "t2.nano" associate_public_ip_address = "${var.associate_public_ip_address}" enable_monitoring = true @@ -229,7 +220,12 @@ resource "aws_launch_configuration" "bastion_launch_configuration" { "${aws_security_group.bastion_host_security_group.id}", ] - user_data = "${data.template_file.user_data.rendered}" + user_data = templatefile("${path.module}/user_data.sh", { + static_ssh_users = var.static_ssh_users, + aws_region = var.region + bucket_name = var.bucket_name + ssh_tunnel_only_users = var.ssh_tunnel_only_users + }) lifecycle { create_before_destroy = true @@ -243,9 +239,7 @@ resource "aws_autoscaling_group" "bastion_auto_scaling_group" { min_size = "${var.bastion_instance_count}" desired_capacity = "${var.bastion_instance_count}" - vpc_zone_identifier = [ - "${var.auto_scaling_group_subnets}", - ] + vpc_zone_identifier = "${var.auto_scaling_group_subnets}" default_cooldown = 180 health_check_grace_period = 180 @@ -259,10 +253,11 @@ resource "aws_autoscaling_group" "bastion_auto_scaling_group" { "OldestLaunchConfiguration", ] - tags = ["${concat( - list(map("key", "Name", "value", "ASG-${aws_launch_configuration.bastion_launch_configuration.name}", "propagate_at_launch", true)), - local.tags_asg_format - )}"] + tag { + key = "Name" + value = "ASG-${aws_launch_configuration.bastion_launch_configuration.name}" + propagate_at_launch = true + } lifecycle { create_before_destroy = true diff --git a/user_data.sh b/user_data.sh index 731da67..6df274e 100644 --- a/user_data.sh +++ b/user_data.sh @@ -16,6 +16,19 @@ setfacl -Rdm other:0 /var/log/bastion # Make OpenSSH execute a custom script on logins echo -e "\\nForceCommand /usr/bin/bastion/shell" >> /etc/ssh/sshd_config +sed -i 's/MaxAuthTries\ [0-9]/MaxAuthTries 5/' /etc/ssh/sshd_config + +# Deny interactive shell to some users (tunnel-only) +cat >> /etc/ssh/sshd_config << 'EOF' + +Match User ${ssh_tunnel_only_users} + AllowTcpForwarding yes + 
X11Forwarding no + AllowAgentForwarding no + ForceCommand /bin/false + +EOF + # Block some SSH features that bastion host users could use to circumvent the solution awk '!/X11Forwarding/' /etc/ssh/sshd_config > temp && mv temp /etc/ssh/sshd_config echo "X11Forwarding no" >> /etc/ssh/sshd_config @@ -160,6 +173,20 @@ EOF chmod 700 /usr/bin/bastion/sync_users +################################################ +## Support creating users/keys from user_data ## +################################################ + +%{ for user in static_ssh_users ~} + +/usr/sbin/adduser ${user.name} +mkdir -m 700 /home/${user.name}/.ssh +chown ${user.name}:${user.name} /home/${user.name}/.ssh +echo ${user.public_key} >> /home/${user.name}/.ssh/authorized_keys +passwd -d -u ${user.name} + +%{ endfor } + ########################################### ## SCHEDULE SCRIPTS AND SECURITY UPDATES ## ########################################### @@ -170,4 +197,4 @@ cat > ~/mycron << EOF 0 0 * * * yum -y update --security EOF crontab ~/mycron -rm ~/mycron +rm ~/mycron \ No newline at end of file diff --git a/variables.tf b/variables.tf index 3e855b9..fa84380 100644 --- a/variables.tf +++ b/variables.tf @@ -56,6 +56,11 @@ variable "bastion_launch_configuration_name" { default = "lc" } +variable "bastion_ami_id" { + description = "Machine Image ID of bastion instances" + default = "" +} + variable "elb_subnets" { type = "list" description = "List of subnet were the ELB will be deployed" @@ -67,7 +72,7 @@ variable "auto_scaling_group_subnets" { } variable "associate_public_ip_address" { - default = true + default = false } variable "bastion_instance_count" { @@ -107,3 +112,18 @@ variable "private_ssh_port" { description = "Set the SSH port to use between the bastion and private instance" default = 22 } + +variable "bastion_open_egress" { + default = true +} + +variable "ssh_tunnel_only_users" { + description = "Comma seperated (without spaces) ssh users that won't be allowed to have a shell" + default = 
"nobody" +} + +variable "static_ssh_users" { + description = "SSH user/public-key pairs that are created in user_data rather than s3" + type = list(map(string)) + default = [] +} \ No newline at end of file From 9b4efe781b290a6f353e37521937feabc23e8f48 Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Mon, 29 Jul 2019 11:51:42 -0400 Subject: [PATCH 02/13] fixup naming on some resources --- README.md | 1 + locals.tf | 15 --------------- main.tf | 11 ++++++----- variables.tf | 6 +++--- 4 files changed, 10 insertions(+), 23 deletions(-) delete mode 100644 locals.tf diff --git a/README.md b/README.md index 7aee3ed..4f7ef5e 100644 --- a/README.md +++ b/README.md @@ -79,6 +79,7 @@ module "bastion" { | private_ssh_port | Set the SSH port to use between the bastion and private instance | string | `22` | no | | public_ssh_port | Set the SSH port to use from desktop to the bastion | string | `22` | no | | region | | string | - | yes | +| resource_name_prefix | Prefix for AWS resource names including LC/ASG/SGs | string | `bastion-` | no | | ssh_tunnel_only_users | comma separated list of users who can use the bastion only for port-forwarding | string | `nobody` | no | | static_ssh_users | ssh users that we want to create statically in userdata rather than use s3 sync e.g. [ {name = "someone", public_key "id_rsa..." 
}] | list(map) | [] no | | tags | A mapping of tags to assign | map | `` | no | diff --git a/locals.tf b/locals.tf deleted file mode 100644 index c74b3f5..0000000 --- a/locals.tf +++ /dev/null @@ -1,15 +0,0 @@ -locals { - tags_asg_format = ["${null_resource.tags_as_list_of_maps.*.triggers}"] - - name_prefix = "${var.bastion_launch_configuration_name}" -} - -resource "null_resource" "tags_as_list_of_maps" { - count = "${length(keys(var.tags))}" - - triggers = "${map( - "key", "${element(keys(var.tags), count.index)}", - "value", "${element(values(var.tags), count.index)}", - "propagate_at_launch", "true" - )}" -} diff --git a/main.tf b/main.tf index 709f459..32570a5 100644 --- a/main.tf +++ b/main.tf @@ -45,7 +45,7 @@ resource "aws_s3_bucket_object" "bucket_public_keys_readme" { resource "aws_security_group" "bastion_host_security_group" { description = "Enable SSH access to the bastion host from external via SSH port" - name = "${local.name_prefix}-host" + name_prefix = var.resource_name_prefix vpc_id = "${var.vpc_id}" tags = "${merge(var.tags)}" @@ -76,7 +76,7 @@ resource "aws_security_group_rule" "egress_bastion" { resource "aws_security_group" "private_instances_security_group" { description = "Enable SSH access to the Private instances from the bastion via SSH port" - name = "${local.name_prefix}-priv-instances" + name_prefix = "${var.resource_name_prefix}-private-instances" vpc_id = "${var.vpc_id}" tags = "${merge(var.tags)}" @@ -96,6 +96,7 @@ resource "aws_security_group_rule" "ingress_instances" { resource "aws_iam_role" "bastion_host_role" { path = "/" + name_prefix = var.resource_name_prefix assume_role_policy = < Date: Tue, 30 Jul 2019 12:07:05 -0400 Subject: [PATCH 03/13] remove unnecesary interpolations --- main.tf | 100 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 50 insertions(+), 50 deletions(-) diff --git a/main.tf b/main.tf index 32570a5..3e36af0 100644 --- a/main.tf +++ b/main.tf @@ -1,44 +1,44 @@ resource 
"aws_s3_bucket" "bucket" { - bucket = "${var.bucket_name}" + bucket = var.bucket_name acl = "bucket-owner-full-control" - force_destroy = "${var.bucket_force_destroy}" + force_destroy = var.bucket_force_destroy versioning { - enabled = "${var.bucket_versioning}" + enabled = var.bucket_versioning } lifecycle_rule { id = "log" - enabled = "${var.log_auto_clean}" + enabled = var.log_auto_clean prefix = "logs/" tags = { "rule" = "log" - "autoclean" = "${var.log_auto_clean}" + "autoclean" = var.log_auto_clean } transition { - days = "${var.log_standard_ia_days}" + days = var.log_standard_ia_days storage_class = "STANDARD_IA" } transition { - days = "${var.log_glacier_days}" + days = var.log_glacier_days storage_class = "GLACIER" } expiration { - days = "${var.log_expiry_days}" + days = var.log_expiry_days } } - tags = "${merge(var.tags)}" + tags = merge(var.tags) } resource "aws_s3_bucket_object" "bucket_public_keys_readme" { - bucket = "${aws_s3_bucket.bucket.id}" + bucket = aws_s3_bucket.bucket.id key = "public-keys/README.txt" content = "Drop here the ssh public keys of the instances you want to control" } @@ -46,52 +46,52 @@ resource "aws_s3_bucket_object" "bucket_public_keys_readme" { resource "aws_security_group" "bastion_host_security_group" { description = "Enable SSH access to the bastion host from external via SSH port" name_prefix = var.resource_name_prefix - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id - tags = "${merge(var.tags)}" + tags = merge(var.tags) } resource "aws_security_group_rule" "ingress_bastion" { description = "Incoming traffic to bastion" type = "ingress" - from_port = "${var.public_ssh_port}" - to_port = "${var.public_ssh_port}" + from_port = var.public_ssh_port + to_port = var.public_ssh_port protocol = "TCP" - cidr_blocks = "${concat(data.aws_subnet.subnets.*.cidr_block, var.cidrs)}" + cidr_blocks = concat(data.aws_subnet.subnets.*.cidr_block, var.cidrs) - security_group_id = "${aws_security_group.bastion_host_security_group.id}" + 
security_group_id = aws_security_group.bastion_host_security_group.id } resource "aws_security_group_rule" "egress_bastion" { description = "Outgoing traffic from bastion to instances" - count = "${var.bastion_open_egress == true ? 1 : 0}" + count = var.bastion_open_egress == true ? 1 : 0 type = "egress" from_port = "0" to_port = "65535" protocol = "-1" cidr_blocks = ["0.0.0.0/0"] - security_group_id = "${aws_security_group.bastion_host_security_group.id}" + security_group_id = aws_security_group.bastion_host_security_group.id } resource "aws_security_group" "private_instances_security_group" { description = "Enable SSH access to the Private instances from the bastion via SSH port" name_prefix = "${var.resource_name_prefix}-private-instances" - vpc_id = "${var.vpc_id}" + vpc_id = var.vpc_id - tags = "${merge(var.tags)}" + tags = merge(var.tags) } resource "aws_security_group_rule" "ingress_instances" { description = "Incoming traffic from bastion" type = "ingress" - from_port = "${var.public_ssh_port}" - to_port = "${var.public_ssh_port}" + from_port = var.public_ssh_port + to_port = var.public_ssh_port protocol = "TCP" - source_security_group_id = "${aws_security_group.bastion_host_security_group.id}" + source_security_group_id = aws_security_group.bastion_host_security_group.id - security_group_id = "${aws_security_group.private_instances_security_group.id}" + security_group_id = aws_security_group.private_instances_security_group.id } resource "aws_iam_role" "bastion_host_role" { @@ -119,7 +119,7 @@ EOF } resource "aws_iam_role_policy" "bastion_host_role_policy" { - role = "${aws_iam_role.bastion_host_role.id}" + role = aws_iam_role.bastion_host_role.id policy = < Date: Wed, 31 Jul 2019 09:49:11 -0400 Subject: [PATCH 04/13] fix double hyphenation in sec group name --- main.tf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.tf b/main.tf index 3e36af0..04b9bbf 100644 --- a/main.tf +++ b/main.tf @@ -76,7 +76,7 @@ resource 
"aws_security_group_rule" "egress_bastion" { resource "aws_security_group" "private_instances_security_group" { description = "Enable SSH access to the Private instances from the bastion via SSH port" - name_prefix = "${var.resource_name_prefix}-private-instances" + name_prefix = "${var.resource_name_prefix}private-instances" vpc_id = var.vpc_id tags = merge(var.tags) From abb86e016344238e0bbcbc93a7bfbaea2441d27f Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Mon, 5 Aug 2019 19:05:33 -0400 Subject: [PATCH 05/13] add onelogin user/ssh-key sync --- README.md | 8 +++ main.tf | 37 ++++++++++++++ onelogin_sync/onelogin_sync.py | 93 ++++++++++++++++++++++++++++++++++ onelogin_sync/requirements.txt | 4 ++ user_data.sh | 24 ++++++++- variables.tf | 5 ++ 6 files changed, 170 insertions(+), 1 deletion(-) create mode 100644 onelogin_sync/onelogin_sync.py create mode 100644 onelogin_sync/requirements.txt diff --git a/README.md b/README.md index 4f7ef5e..3d2ddc1 100644 --- a/README.md +++ b/README.md @@ -76,6 +76,7 @@ module "bastion" { | log_expiry_days | Number of days before logs expiration | string | `90` | no | | log_glacier_days | Number of days before moving logs to Glacier | string | `60` | no | | log_standard_ia_days | Number of days before moving logs to IA Storage | string | `30` | no | +| onelogin_sync | Enable syncing of ssh keys from OneLogin | bool | false | no | | private_ssh_port | Set the SSH port to use between the bastion and private instance | string | `22` | no | | public_ssh_port | Set the SSH port to use from desktop to the bastion | string | `22` | no | | region | | string | - | yes | @@ -92,6 +93,13 @@ module "bastion" { | bucket_name | | | elb_ip | | +## OneLogin Sync + +Syncing users from OneLogin supported with onelogin_sync=true with the following requirements: +1. SSH Keys stored in a user custom attribute called 'sshPublickey'. +2. 
OneLogin credentials stored in SSM Parameter Store parameters /bastion/onelogin_id and /bastion/onelogin_secret. + + Known issues ------------ diff --git a/main.tf b/main.tf index 04b9bbf..3ebc1bb 100644 --- a/main.tf +++ b/main.tf @@ -8,6 +8,14 @@ resource "aws_s3_bucket" "bucket" { enabled = var.bucket_versioning } + server_side_encryption_configuration { + rule { + apply_server_side_encryption_by_default { + sse_algorithm = "AES256" + } + } + } + lifecycle_rule { id = "log" enabled = var.log_auto_clean @@ -153,6 +161,32 @@ resource "aws_iam_role_policy" "bastion_host_role_policy" { EOF } +resource "aws_iam_role_policy" "read_onelogin_keys" { + role = aws_iam_role.bastion_host_role.id + + policy = < /dev/null 2>&1 || /usr/sbin/useradd {u.username}') + os.system(f'[ -d "/home/{u.username}/.ssh" ] || mkdir "/home/{u.username}/.ssh"') + os.system(f'echo {ssh_public_key} > "/home/{u.username}/.ssh/authorized_keys"') + os.system(f'/usr/bin/chown -R {u.username}:{u.username} "/home/{u.username}/.ssh"') + else: + os.system(f'/usr/bin/id -u {u.username} > /dev/null 2>&1 && /usr/sbin/userdel {u.username}') + + +if __name__ == '__main__': + (opts, log) = setup() + (onelogin_id, onelogin_secret) = get_onelogin_credentials() + user_list = get_user_list(key_id=onelogin_id, key_secret=onelogin_secret, log=log) + create_delete_users(log=log, users=user_list) diff --git a/onelogin_sync/requirements.txt b/onelogin_sync/requirements.txt new file mode 100644 index 0000000..f729ebc --- /dev/null +++ b/onelogin_sync/requirements.txt @@ -0,0 +1,4 @@ +ConfigArgParse==0.13.0 +docutils==0.14 +onelogin==1.8.1 +boto3==1.9.201 \ No newline at end of file diff --git a/user_data.sh b/user_data.sh index 6df274e..997c1a9 100644 --- a/user_data.sh +++ b/user_data.sh @@ -197,4 +197,26 @@ cat > ~/mycron << EOF 0 0 * * * yum -y update --security EOF crontab ~/mycron -rm ~/mycron \ No newline at end of file +rm ~/mycron + +%{ if onelogin_sync } + +cat > /usr/bin/bastion/onelogin_sync.py << 'EOF' 
+${onelogin_sync_script} +EOF + +cat > /usr/bin/bastion/onelogin_sync.requirements << 'EOF' +${onelogin_sync_requirements} +EOF + +chmod 755 /usr/bin/bastion/onelogin_sync.py +yum -y install python3 +apt-get -yq install python3 +pip3 install -r /usr/bin/bastion/onelogin_sync.requirements + +crontab -l > ~/mycron +cat >> ~/mycron << EOF +*/5 * * * * AWS_DEFAULT_REGION=${aws_region} /usr/bin/bastion/onelogin_sync.py +EOF +crontab ~/mycron +%{ endif ~} \ No newline at end of file diff --git a/variables.tf b/variables.tf index 4c8ad98..90b8409 100644 --- a/variables.tf +++ b/variables.tf @@ -126,4 +126,9 @@ variable "static_ssh_users" { description = "SSH user/public-key pairs that are created in user_data rather than s3" type = list(map(string)) default = [] +} + +variable "onelogin_sync" { + description = "Support syncing users/keys from OneLogin" + default = false } \ No newline at end of file From 19660c196edda0beea6f4bf5f48fc3d9c8acd425 Mon Sep 17 00:00:00 2001 From: eyablonowitz Date: Mon, 5 Aug 2019 19:08:56 -0400 Subject: [PATCH 06/13] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 3d2ddc1..7c9699b 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ module "bastion" { Syncing users from OneLogin supported with onelogin_sync=true with the following requirements: 1. SSH Keys stored in a user custom attribute called 'sshPublickey'. -2. OneLogin credentials stored in SSM Parameter Store parameters /bastion/onelogin_id and /bastion/onelogin_secret. +2. OneLogin credentials with Read perms stored in SSM Parameter Store parameters /bastion/onelogin_id and /bastion/onelogin_secret. 
Known issues From 378810aa51b90820541ea8abe55ad4da4cde4b64 Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Tue, 6 Aug 2019 15:47:19 -0400 Subject: [PATCH 07/13] fix compatibility with debian-derived distros --- onelogin_sync/onelogin_sync.py | 7 ++++--- user_data.sh | 5 ++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/onelogin_sync/onelogin_sync.py b/onelogin_sync/onelogin_sync.py index 05b45a5..a7462b6 100644 --- a/onelogin_sync/onelogin_sync.py +++ b/onelogin_sync/onelogin_sync.py @@ -69,6 +69,7 @@ def get_user_list(key_id, key_secret, log): def create_delete_users(log, users=None): + os.environ['PATH'] += os.pathsep + '/usr/sbin' for u in users: if not re.match('^[a-z][-a-z0-9]*$', u.username): @@ -78,12 +79,12 @@ def create_delete_users(log, users=None): if u.status in [1, 3, 4] and u.state == 1: if 'sshPublickey' in u.custom_attributes and str(u.custom_attributes['sshPublickey'])[0:3] == 'ssh': ssh_public_key = u.custom_attributes['sshPublickey'] - os.system(f'/usr/bin/id -u {u.username} > /dev/null 2>&1 || /usr/sbin/useradd {u.username}') + os.system(f'id -u {u.username} > /dev/null 2>&1 || useradd -m {u.username}') os.system(f'[ -d "/home/{u.username}/.ssh" ] || mkdir "/home/{u.username}/.ssh"') os.system(f'echo {ssh_public_key} > "/home/{u.username}/.ssh/authorized_keys"') - os.system(f'/usr/bin/chown -R {u.username}:{u.username} "/home/{u.username}/.ssh"') + os.system(f'chown -R {u.username}:{u.username} "/home/{u.username}/.ssh"') else: - os.system(f'/usr/bin/id -u {u.username} > /dev/null 2>&1 && /usr/sbin/userdel {u.username}') + os.system(f'id -u {u.username} > /dev/null 2>&1 && userdel {u.username}') if __name__ == '__main__': diff --git a/user_data.sh b/user_data.sh index 997c1a9..be1aff6 100644 --- a/user_data.sh +++ b/user_data.sh @@ -38,7 +38,7 @@ mkdir /usr/bin/bastion cat > /usr/bin/bastion/shell << 'EOF' # Check that the SSH client did not supply a command -if [[ -z $SSH_ORIGINAL_COMMAND ]]; then +if [ -z 
$SSH_ORIGINAL_COMMAND ]; then # The format of log files is /var/log/bastion/YYYY-MM-DD_HH-MM-SS_user LOG_FILE="`date --date="today" "+%Y-%m-%d_%H-%M-%S"`_`whoami`" @@ -210,8 +210,7 @@ ${onelogin_sync_requirements} EOF chmod 755 /usr/bin/bastion/onelogin_sync.py -yum -y install python3 -apt-get -yq install python3 +yum -yq install python3 || (apt-get -q update && apt-get -yq install python3-pip) pip3 install -r /usr/bin/bastion/onelogin_sync.requirements crontab -l > ~/mycron From fbb62363b9b7f9095e5180c58aa8b97bc7c9af3c Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Wed, 7 Aug 2019 08:24:07 -0400 Subject: [PATCH 08/13] ensure onelogin ssh key deletions are synced --- onelogin_sync/onelogin_sync.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/onelogin_sync/onelogin_sync.py b/onelogin_sync/onelogin_sync.py index a7462b6..a2dea51 100644 --- a/onelogin_sync/onelogin_sync.py +++ b/onelogin_sync/onelogin_sync.py @@ -82,7 +82,10 @@ def create_delete_users(log, users=None): os.system(f'id -u {u.username} > /dev/null 2>&1 || useradd -m {u.username}') os.system(f'[ -d "/home/{u.username}/.ssh" ] || mkdir "/home/{u.username}/.ssh"') os.system(f'echo {ssh_public_key} > "/home/{u.username}/.ssh/authorized_keys"') - os.system(f'chown -R {u.username}:{u.username} "/home/{u.username}/.ssh"') + os.system(f'chown {u.username}:{u.username} "/home/{u.username}/.ssh"') + else: + os.system( + f'[ -f "/home/{u.username}/.ssh/authorized_keys" ] && rm "/home/{u.username}/.ssh/authorized_keys"') else: os.system(f'id -u {u.username} > /dev/null 2>&1 && userdel {u.username}') From 0f1751e0484f42951da7bb652a9a7493ceeebde1 Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Thu, 8 Aug 2019 15:34:42 -0400 Subject: [PATCH 09/13] support filtering onelogins sync by role_id --- README.md | 2 ++ main.tf | 1 + onelogin_sync/onelogin_sync.py | 38 +++++++++++++++++++++++++--------- user_data.sh | 8 ++++++- variables.tf | 4 ++++ 5 files changed, 42 insertions(+), 11 
deletions(-) diff --git a/README.md b/README.md index 7c9699b..f71aa01 100644 --- a/README.md +++ b/README.md @@ -77,6 +77,7 @@ module "bastion" { | log_glacier_days | Number of days before moving logs to Glacier | string | `60` | no | | log_standard_ia_days | Number of days before moving logs to IA Storage | string | `30` | no | | onelogin_sync | Enable syncing of ssh keys from OneLogin | bool | false | no | +| onelogin_sync_role_ids | When using OneLogin sync, optionally limit to a list of role IDs. If empty, all active users will be synced. | list(int) | [] | no | | private_ssh_port | Set the SSH port to use between the bastion and private instance | string | `22` | no | | public_ssh_port | Set the SSH port to use from desktop to the bastion | string | `22` | no | | region | | string | - | yes | @@ -99,6 +100,7 @@ Syncing users from OneLogin supported with onelogin_sync=true with the following 1. SSH Keys stored in a user custom attribute called 'sshPublickey'. 2. OneLogin credentials with Read perms stored in SSM Parameter Store parameters /bastion/onelogin_id and /bastion/onelogin_secret. +You can optionally limit syncing to users that have a role matching one or more role IDs. 
Known issues ------------ diff --git a/main.tf b/main.tf index 3ebc1bb..6c375b4 100644 --- a/main.tf +++ b/main.tf @@ -261,6 +261,7 @@ resource "aws_launch_configuration" "bastion_launch_configuration" { bucket_name = var.bucket_name ssh_tunnel_only_users = var.ssh_tunnel_only_users onelogin_sync = var.onelogin_sync + onelogin_sync_role_ids = var.onelogin_sync_role_ids onelogin_sync_script = file("${path.module}/onelogin_sync/onelogin_sync.py") onelogin_sync_requirements = file("${path.module}/onelogin_sync/requirements.txt") }) diff --git a/onelogin_sync/onelogin_sync.py b/onelogin_sync/onelogin_sync.py index a2dea51..105ec6b 100644 --- a/onelogin_sync/onelogin_sync.py +++ b/onelogin_sync/onelogin_sync.py @@ -12,8 +12,13 @@ def get_opts(log): parser = configargparse.ArgParser(default_config_files=['onelogin_lookup.ini']) - parser.add('-v', '--verbosity', help='Logging level. Default ERROR', default='error') - parser.add('-c', '--config', help='Config file') + parser.add_argument('-v', '--verbosity', help='Logging level. Default ERROR', default='error') + parser.add_argument('-c', '--config', help='Config file') + parser.add_argument('-r', '--role_id', action='append', type=int, + help=""" + Allowed OneLogin role IDs. Use multiple --role_id [role_id] if needed. + If no role IDs are specified, all active users will be synced. 
+ """) opts = parser.parse_args() @@ -40,7 +45,11 @@ def get_logger(): def setup(): log = get_logger() opts = get_opts(log) - return opts, log + if opts.role_id: + allowed_roles_set = set(opts.role_id) + else: + allowed_roles_set = set([]) + return opts, log, allowed_roles_set def get_onelogin_credentials(): @@ -68,15 +77,24 @@ def get_user_list(key_id, key_secret, log): return onelogin_users -def create_delete_users(log, users=None): +def create_delete_users(log, users=None, allowed_role_set=None): os.environ['PATH'] += os.pathsep + '/usr/sbin' + for u in users: - if not re.match('^[a-z][-a-z0-9]*$', u.username): - log.warning('Skipping invalid username %s' % u.username) - continue + if allowed_role_set: + if u.role_ids: + user_roles_set = set(u.role_ids) + else: + user_roles_set = set([]) + has_allowed_role = user_roles_set.intersection(allowed_role_set) + else: + has_allowed_role = True - if u.status in [1, 3, 4] and u.state == 1: + if (u.status in [1, 3, 4] + and re.match('^[a-z][-a-z0-9]*$', u.username) + and u.state == 1 + and has_allowed_role): if 'sshPublickey' in u.custom_attributes and str(u.custom_attributes['sshPublickey'])[0:3] == 'ssh': ssh_public_key = u.custom_attributes['sshPublickey'] os.system(f'id -u {u.username} > /dev/null 2>&1 || useradd -m {u.username}') @@ -91,7 +109,7 @@ def create_delete_users(log, users=None): if __name__ == '__main__': - (opts, log) = setup() + (opts, log, allowed_roles_set) = setup() (onelogin_id, onelogin_secret) = get_onelogin_credentials() user_list = get_user_list(key_id=onelogin_id, key_secret=onelogin_secret, log=log) - create_delete_users(log=log, users=user_list) + create_delete_users(log=log, users=user_list, allowed_role_set=allowed_roles_set) diff --git a/user_data.sh b/user_data.sh index be1aff6..729ff36 100644 --- a/user_data.sh +++ b/user_data.sh @@ -199,6 +199,11 @@ EOF crontab ~/mycron rm ~/mycron + +########################################### +## ONELOGIN SYNC ## 
+########################################### + %{ if onelogin_sync } cat > /usr/bin/bastion/onelogin_sync.py << 'EOF' @@ -215,7 +220,8 @@ pip3 install -r /usr/bin/bastion/onelogin_sync.requirements crontab -l > ~/mycron cat >> ~/mycron << EOF -*/5 * * * * AWS_DEFAULT_REGION=${aws_region} /usr/bin/bastion/onelogin_sync.py +*/5 * * * * AWS_DEFAULT_REGION=${aws_region} /usr/bin/bastion/onelogin_sync.py %{ for role in onelogin_sync_role_ids ~} --role_id ${role} %{ endfor } EOF crontab ~/mycron +rm ~/mycron %{ endif ~} \ No newline at end of file diff --git a/variables.tf b/variables.tf index 90b8409..c7101ce 100644 --- a/variables.tf +++ b/variables.tf @@ -131,4 +131,8 @@ variable "static_ssh_users" { variable "onelogin_sync" { description = "Support syncing users/keys from OneLogin" default = false +} + +variable "onelogin_sync_role_ids" { + default = [] } \ No newline at end of file From 2b1dcde09e6caad2bec802230ac667ae789c395b Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Sat, 10 Aug 2019 07:35:37 -0400 Subject: [PATCH 10/13] block public s3 access --- main.tf | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/main.tf b/main.tf index 6c375b4..8e34ac1 100644 --- a/main.tf +++ b/main.tf @@ -45,6 +45,15 @@ resource "aws_s3_bucket" "bucket" { tags = merge(var.tags) } +resource "aws_s3_bucket_public_access_block" "block" { + bucket = aws_s3_bucket.bucket.id + + block_public_acls = true + block_public_policy = true + ignore_public_acls = true + restrict_public_buckets = true +} + resource "aws_s3_bucket_object" "bucket_public_keys_readme" { bucket = aws_s3_bucket.bucket.id key = "public-keys/README.txt" From c1020412d01be2e2016f564dc5bcbdf09239ae45 Mon Sep 17 00:00:00 2001 From: Eric Yablonowitz Date: Wed, 14 Aug 2019 15:37:15 -0400 Subject: [PATCH 11/13] share ssh public keys via https --- README.md | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++++ main.tf | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++ outputs.tf | 4 ++++ user_data.sh | 
46 ++++++++++++++++++++++++++++++++++++++++++ variables.tf | 21 +++++++++++++++++++ 5 files changed, 183 insertions(+) diff --git a/README.md b/README.md index f71aa01..d932d0c 100644 --- a/README.md +++ b/README.md @@ -82,6 +82,10 @@ module "bastion" { | public_ssh_port | Set the SSH port to use from desktop to the bastion | string | `22` | no | | region | | string | - | yes | | resource_name_prefix | Prefix for AWS resource names including LC/ASG/SGs | string | `bastion-` | no | +| share_keys_web_server | make public keys available through a web server on the bastion | bool | false | no | +| share_keys_elb_subnets | ELB subnet IDs for sharing keys | list(string) | [] | no | +| share_keys_allowed_cidrs | CIDRs allowed to get shared public keys | list(string) | [] | no | +| share_keys_allowed_sec_groups | SecGroups allowed to get shared public keys over https | list(string) | [] | no | | ssh_tunnel_only_users | comma separated list of users who can use the bastion only for port-forwarding | string | `nobody` | no | | static_ssh_users | ssh users that we want to create statically in userdata rather than use s3 sync e.g. [ {name = "someone", public_key "id_rsa..." }] | list(map) | [] no | | tags | A mapping of tags to assign | map | `` | no | @@ -102,6 +106,59 @@ Syncing users from OneLogin supported with onelogin_sync=true with the following You can optionally limit syncing to users that have a role matching one or more role IDs. +## Sharing SSH Public Keys + +Once a user is SSHed to the bastion, they may want to ssh to a next-hop/target instance. This option allows those +other instances to use the same keys that are in use on the bastion by sharing those keys via HTTPS. + +Notes: +1. Key sharing currently only supports HTTPS using a self-signed key. +2. All keys for all bastion users are shared in one concatenated response. So this is appropriate for next-hop/targets +with a shared user account (e.g. 
"ec2-user" or "ubuntu") rather than instances with multiple users with distinct access +control policies. + +Example bastion Terraform configuration: +``` +module "bastion" { +... + share_keys_web_server = true + share_keys_elb_subnets = module.vpc.private_subnets + share_keys_allowed_cidrs = [ "10.0.0.0/8" ] +} +``` + +Example target instance Terraform configuration: +``` +data "aws_lb" "authorized_keys" { + name = "ssh-bastion-authorized-keys" +} + +resource "aws_instance" "test" { + ... + user_data = <> /etc/ssh/sshd_config +echo AuthorizedKeysCommandUser nobody >> /etc/ssh/sshd_config +systemctl restart sshd.service +EOF +} +``` + +Note - You may want to consider supplying an emergency backup public key for use if the web server times-out. +You could do this by adding ```|| echo ``` after the curl command above. + + +To make use of key sharing, use ssh agent forwarding: +``` +workstation $ ssh -A @bastion-lb.example.com +... +@bastion $ ssh @target.example.com +... +@target $ + +``` + + Known issues ------------ diff --git a/main.tf b/main.tf index 8e34ac1..066ba55 100644 --- a/main.tf +++ b/main.tf @@ -245,6 +245,59 @@ resource "aws_lb_listener" "bastion_lb_listener_22" { protocol = "TCP" } +resource "aws_lb" "share_keys_web_server_lb" { + count = var.share_keys_web_server == true ? 1 : 0 + internal = true + name = "${var.resource_name_prefix}authorized-keys" + subnets = var.share_keys_elb_subnets + load_balancer_type = "network" +} + +resource "aws_lb_target_group" "share_keys_web_server_lb_target_group" { + port = 443 + protocol = "TCP" + vpc_id = var.vpc_id + deregistration_delay = 120 + tags = merge(var.tags) + stickiness { + type = "lb_cookie" + enabled = false + } +} + +resource "aws_lb_listener" "share_keys_web_server_lb_listener" { + count = var.share_keys_web_server == true ? 
1 : 0 + default_action { + target_group_arn = aws_lb_target_group.share_keys_web_server_lb_target_group.arn + type = "forward" + } + + load_balancer_arn = aws_lb.share_keys_web_server_lb[0].arn + port = "443" + protocol = "TCP" +} + +resource "aws_security_group_rule" "ingress_share_keys_web_server_cidrs" { + description = "Incoming traffic to share keys" + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "TCP" + cidr_blocks = var.share_keys_allowed_cidrs + security_group_id = aws_security_group.bastion_host_security_group.id +} + +resource "aws_security_group_rule" "ingress_share_keys_web_server_sec_groups" { + count = length(var.share_keys_allowed_sec_groups) + description = "Incoming traffic to share keys" + type = "ingress" + from_port = 443 + to_port = 443 + protocol = "TCP" + source_security_group_id = var.share_keys_allowed_sec_groups[count.index] + security_group_id = aws_security_group.bastion_host_security_group +} + resource "aws_iam_instance_profile" "bastion_host_profile" { role = aws_iam_role.bastion_host_role.name path = "/" @@ -273,6 +326,7 @@ resource "aws_launch_configuration" "bastion_launch_configuration" { onelogin_sync_role_ids = var.onelogin_sync_role_ids onelogin_sync_script = file("${path.module}/onelogin_sync/onelogin_sync.py") onelogin_sync_requirements = file("${path.module}/onelogin_sync/requirements.txt") + share_keys_web_server = var.share_keys_web_server }) lifecycle { @@ -295,6 +349,7 @@ resource "aws_autoscaling_group" "bastion_auto_scaling_group" { target_group_arns = [ aws_lb_target_group.bastion_lb_target_group.arn, + aws_lb_target_group.share_keys_web_server_lb_target_group.arn ] termination_policies = [ diff --git a/outputs.tf b/outputs.tf index efa8f4a..712e391 100644 --- a/outputs.tf +++ b/outputs.tf @@ -13,3 +13,7 @@ output "bastion_host_security_group" { output "private_instances_security_group" { value = "${aws_security_group.private_instances_security_group.id}" } + +output "aws_share_keys_web_server_lb" { 
+ value = var.share_keys_web_server ? aws_lb.share_keys_web_server_lb[0].dns_name : "" +} \ No newline at end of file diff --git a/user_data.sh b/user_data.sh index 729ff36..b7ce9bd 100644 --- a/user_data.sh +++ b/user_data.sh @@ -224,4 +224,50 @@ cat >> ~/mycron << EOF EOF crontab ~/mycron rm ~/mycron +%{ endif ~} + + +########################################### +## SHARE AUTHORIZED KEYS VIA WEB SERVER ## +########################################### + +%{ if share_keys_web_server } + +amazon-linux-extras install nginx1.12 -y || apt-get install -yq nginx +rm /usr/share/nginx/html/index.html +openssl req -new -newkey rsa:4096 -days 3650 -nodes -x509 \ + -subj "/C=US/ST=foo/L=foo/O=foo/CN=self-signed" \ + -keyout /etc/nginx/self-signed.key -out /etc/nginx/self-signed.cert +chmod 600 /etc/nginx/self-signed.key + +cat > /etc/nginx/tls.conf << 'EOF' +server { + listen 443 ssl http2; + root /usr/share/nginx/html; + ssl_certificate "/etc/nginx/self-signed.cert"; + ssl_certificate_key "/etc/nginx/self-signed.key"; +} +EOF + +mv /etc/nginx/tls.conf /etc/nginx/conf.d/tls.conf || mv /etc/nginx/tls.conf /etc/nginx/sites-enabled/tls.conf + +systemctl restart nginx + +cat > /usr/bin/bastion/share_keys_web_server << 'EOF' +#!/bin/bash +[ -f /tmp/authorized_keys ] && rm /tmp/authorized_keys +for home in $(getent passwd | grep -oP "/home/[^:]+"); do + [ -s "$home/.ssh/authorized_keys" ] && cat $home/.ssh/authorized_keys >> /tmp/authorized_keys +done +[ -f /tmp/authorized_keys ] && mv /tmp/authorized_keys /usr/share/nginx/html/authorized_keys +EOF +chmod 700 /usr/bin/bastion/share_keys_web_server + +crontab -l > ~/mycron +cat >> ~/mycron << EOF +*/5 * * * * /usr/bin/bastion/share_keys_web_server +EOF +crontab ~/mycron +rm ~/mycron + %{ endif ~} \ No newline at end of file diff --git a/variables.tf b/variables.tf index c7101ce..ae84563 100644 --- a/variables.tf +++ b/variables.tf @@ -134,5 +134,26 @@ variable "onelogin_sync" { } variable "onelogin_sync_role_ids" { + description = 
"Numeric OneLogin role IDs to include in sync. If none specified sync all active users with keys."
+  default = []
+}
+
+variable "share_keys_web_server" {
+  description = "Share public keys via a web server"
+  default = false
+}
+
+variable "share_keys_elb_subnets" {
+  description = "ELB subnet IDs for sharing keys"
+  default = []
+}
+
+variable "share_keys_allowed_cidrs" {
+  description = "CIDRs allowed to web get authorized keys"
+  default = []
+}
+
+variable "share_keys_allowed_sec_groups" {
+  description = "Security groups allowed to web get authorized keys"
   default = []
 }
\ No newline at end of file

From 4a45253276e0adab7019a8d844da4dff2d399b84 Mon Sep 17 00:00:00 2001
From: Eric Yablonowitz
Date: Wed, 4 Sep 2019 09:10:23 -0400
Subject: [PATCH 12/13] fix error when var.share_keys_allowed_cidrs is empty

---
 main.tf | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/main.tf b/main.tf
index 066ba55..b909266 100644
--- a/main.tf
+++ b/main.tf
@@ -278,12 +278,13 @@ resource "aws_lb_listener" "share_keys_web_server_lb_listener" {
 }

 resource "aws_security_group_rule" "ingress_share_keys_web_server_cidrs" {
+  count = length(var.share_keys_allowed_cidrs)
   description = "Incoming traffic to share keys"
   type = "ingress"
   from_port = 443
   to_port = 443
   protocol = "TCP"
-  cidr_blocks = var.share_keys_allowed_cidrs
+  cidr_blocks = var.share_keys_allowed_cidrs[count.index]
   security_group_id = aws_security_group.bastion_host_security_group.id
 }

From b99855949ca0dc53f0d279621ad80cc465737c62 Mon Sep 17 00:00:00 2001
From: Tim Robinson
Date: Tue, 25 Aug 2020 17:53:15 -0400
Subject: [PATCH 13/13] security_group_rule resource cidr_blocks argument must be a list (#7)

---
 main.tf | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/main.tf b/main.tf
index b909266..9f4ba64 100644
--- a/main.tf
+++ b/main.tf
@@ -284,7 +284,7 @@ resource "aws_security_group_rule" "ingress_share_keys_web_server_cidrs" {
   from_port = 443
   to_port = 443
   protocol = "TCP"
-
cidr_blocks = var.share_keys_allowed_cidrs[count.index] + cidr_blocks = [var.share_keys_allowed_cidrs[count.index]] security_group_id = aws_security_group.bastion_host_security_group.id }