Merge branch 'feature/KASM-1761_aws_multi_region' into 'develop'

KASM-1761 New AWS Multi-Region project

Closes KASM-1761

See merge request kasm-technologies/internal/terraform!1
Justin Travis 2021-08-07 15:53:16 +00:00
commit da6d27d63b
32 changed files with 1107 additions and 53 deletions

View file

@ -5,4 +5,5 @@ Administrators should review the projects and add additional customizations and s
# AWS
- [Multi-Serrver Single Region](aws/standard/README.md)
- [Multi-Server Single Region](aws/standard/README.md)
- [Multi-Region](aws/multi_region/README.md)

View file

@ -0,0 +1,62 @@
# AWS Multi-Region Deploy
This project deploys Kasm Workspaces within multiple AWS regions of your choice. Multiple [Deployment Zones](https://kasmweb.com/docs/latest/guide/zones/deployment_zones.html) are configured for the
deployment, one corresponding to each desired AWS region.
All webapp roles are deployed in a single **"Primary"** region, with Agent roles deployed in any additional region(s).
Route53 latency policies automatically connect users to the webapp servers of their closest
Zone/Region, so sessions are created in the user's closest region by default.
It is expected that administrators will configure the
[Direct to Agent](https://kasmweb.com/docs/latest/how_to/direct_to_agent.html) workflow post-deployment so that session
traffic does not always traverse the **Primary** region and instead flows directly to the Agent in whichever region it
is deployed.
![Diagram][Image_Diagram]
[Image_Diagram]: https://f.hubspotusercontent30.net/hubfs/5856039/terraform/diagrams/aws-multi-region-int-gw.png "Diagram"
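Once deployed, the latency-based routing behavior can be spot-checked from different network vantage points. A minimal sketch, using a placeholder domain name:

```bash
# Resolve the deployment domain; from each location this should return the
# addresses of the nearest regional load balancer.
dig +short kasm.contoso.com
```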
# Pre-Configuration
Consider creating a dedicated sub-account for the Kasm deployment.
### DNS Zone
In your AWS account, create a public DNS hosted zone that matches the desired domain name for the deployment, e.g. `kasm.contoso.com`.
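If the hosted zone does not already exist, it can be created with the AWS CLI. A minimal sketch, assuming the `aws` CLI is configured and using a placeholder domain name:

```bash
# Create a public hosted zone for the deployment domain (placeholder name).
aws route53 create-hosted-zone \
  --name kasm.contoso.com \
  --caller-reference "kasm-$(date +%s)"
```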
### SSH Key Pair
In the desired AWS region, create an AWS key pair. It will be configured as the SSH key for the deployed EC2 instances. (EC2 key pairs are region-scoped, so create one with the same name in each region you plan to use.)
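For example, a key pair can be created and the private key saved locally with the AWS CLI. A sketch with a placeholder key name and region:

```bash
# Create a key pair and store the private key with restrictive permissions.
aws ec2 create-key-pair \
  --key-name kasm-deploy \
  --region us-east-1 \
  --query 'KeyMaterial' \
  --output text > kasm-deploy.pem
chmod 400 kasm-deploy.pem
```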
### AWS API Keys
Create a user via the IAM console that will be used for the Terraform deployment. Give the user **Programmatic Access**
and attach the existing **AdministratorAccess** policy. Save the access key ID and secret access key.
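The same user can also be created from the CLI. A sketch with a placeholder user name:

```bash
# Create the deployment user, grant admin rights, and issue an access key pair.
aws iam create-user --user-name kasm-terraform
aws iam attach-user-policy \
  --user-name kasm-terraform \
  --policy-arn arn:aws:iam::aws:policy/AdministratorAccess
aws iam create-access-key --user-name kasm-terraform
```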
# Terraform Configuration
1. Initialize the project
       terraform init
2. Open `variables.tf` and update the global variables. The variable definitions and descriptions
can be found in `<module-name>/variables.tf`. Sensitive values can also be passed on the command line instead (see the sketch after this list).
3. Open `deployment.tf` and update the module-level variables as desired.
4. Verify the configuration
       terraform plan
5. Deploy
       terraform apply
6. Log in to the deployment as an admin via the configured domain, e.g. `https://kasm.contoso.com`
7. Navigate to the Agents tab and enable each Agent after it checks in. (This may take a few minutes.)
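Instead of hard-coding secrets in `variables.tf`, the same values can be supplied at apply time with `-var` flags. A minimal sketch, assuming the variable names defined in this project's `variables.tf` (all values shown are placeholders):

```bash
terraform apply \
  -var 'aws_access_key=ACCESS_KEY_PLACEHOLDER' \
  -var 'aws_secret_key=SECRET_KEY_PLACEHOLDER' \
  -var 'database_password=changeme' \
  -var 'redis_password=changeme' \
  -var 'manager_token=changeme'
```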

View file

@ -0,0 +1,29 @@
resource "aws_instance" "kasm-agent" {
count = "${var.num_agents}"
ami = "${var.ec2_ami}"
instance_type = "${var.agent_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-agent-sg.id}"]
subnet_id = "${aws_subnet.kasm-agent-subnet.id}"
key_name = "${var.aws_key_pair}"
root_block_device {
volume_size = "50"
}
user_data = <<-EOF
#!/bin/bash
fallocate -l 4g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz
PUBLIC_DNS=(`curl -s http://169.254.169.254/latest/meta-data/public-ipv4`)
bash kasm_release/install.sh -S agent -e -p $PUBLIC_DNS -m ${var.zone_name}-lb.${var.aws_domain_name} -M ${var.manager_token}
EOF
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-agent"
}
}

View file

@ -0,0 +1,8 @@
provider "aws" {
region = "${var.aws_region}"
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
}
data "aws_availability_zones" "available" {
state = "available"
}

View file

@ -0,0 +1,26 @@
resource "aws_security_group" "kasm-agent-sg" {
name = "${var.project_name}-${var.zone_name}-kasm-agent-access"
description = "Allow access to agents"
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${var.ssh_access_cidr}"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

View file

@ -0,0 +1,9 @@
resource "aws_subnet" "kasm-agent-subnet" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
cidr_block = "10.0.40.0/24"
availability_zone = data.aws_availability_zones.available.names[0]
map_public_ip_on_launch = true
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-agent-subnet"
}
}

View file

@ -0,0 +1,51 @@
variable "aws_access_key" {
description = "The AWS access key used for deployment"
}
variable "aws_secret_key" {
description = "The AWS secret key used for deployment"
}
variable "project_name" {
description = "The name of the deployment (e.g dev, staging). A short single word"
}
variable "aws_domain_name" {
description = "The Route53 Zone used for the dns entries. This must already exist in the AWS account. (e.g dev.kasm.contoso.com). The deployment will be accessed via this zone name via https"
}
variable "num_agents" {
description = "The number of Agent Role Servers to create in the deployment"
}
variable "agent_instance_type" {
description = "the instance type for the agents"
}
variable "aws_region" {
description = "The AWS region for the deployment. (e.g us-east-1)"
}
variable "kasm_build" {
description = "The URL for the Kasm Workspaces build"
}
variable "zone_name" {
description = "A name given to the Kasm deployment Zone"
}
variable "aws_key_pair" {
description = "The name of an aws keypair to use."
}
variable "ec2_ami" {
description = "The AMI used for the EC2 nodes. Recommended Ubuntu 18.04 LTS."
}
variable "manager_token" {
description = "The password for the database. No special characters"
}
variable "ssh_access_cidr" {
description = "CIDR notation of the bastion host allowed to SSH in to the machines"
}

View file

@ -0,0 +1,19 @@
resource "aws_vpc" "kasm-default-vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-vpc"
}
}
resource "aws_internet_gateway" "kasm-default-ig" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-ig"
}
}
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.kasm-default-vpc.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.kasm-default-ig.id}"
}

View file

@ -0,0 +1,90 @@
###########################################################
# Define a primary region.
# This will house the Kasm Workspaces DB, and a set of
# agents/webapps that map to this region.
###########################################################
module "primary_region" {
source = "./primary"
aws_region = "us-east-1"
zone_name = "us-east-1"
ec2_ami = "ami-013f17f36f8b1fefb"
db_instance_type = "t3.small"
num_agents = 2
agent_instance_type = "t3.medium"
num_webapps = 2
webapp_instance_type = "t3.small"
aws_access_key = "${var.aws_access_key}"
aws_secret_key = "${var.aws_secret_key}"
project_name = "${var.project_name}"
kasm_build = "${var.kasm_build}"
database_password = "${var.database_password}"
redis_password = "${var.redis_password}"
user_password = "${var.user_password}"
admin_password = "${var.admin_password}"
manager_token = "${var.manager_token}"
aws_key_pair = "${var.aws_key_pair}"
aws_domain_name = "${var.aws_domain_name}"
ssh_access_cidr = "${var.ssh_access_cidr}"
}
###########################################################
# Add a pair of webapp and agent modules
# for each additional region desired.
###########################################################
module "us-west-1-webapps" {
source = "./webapps"
faux_aws_region = "us-west-1"
zone_name = "us-west-1"
num_webapps = 2
webapp_instance_type = "t3.small"
ec2_ami = "ami-013f17f36f8b1fefb"
primary_aws_region = "${module.primary_region.primary_aws_region}"
webapp_subnet_id_1 = "${module.primary_region.webapp_subnet_1_id}"
webapp_subnet_id_2 = "${module.primary_region.webapp_subnet_2_id}"
agent_subnet_id = "${module.primary_region.agent_subnet_id}"
aws_access_key = "${var.aws_access_key}"
aws_secret_key = "${var.aws_secret_key}"
aws_domain_name = "${var.aws_domain_name}"
project_name = "${var.project_name}"
kasm_build = "${var.kasm_build}"
database_password = "${var.database_password}"
redis_password = "${var.redis_password}"
manager_token = "${var.manager_token}"
aws_key_pair = "${var.aws_key_pair}"
kasm_db_ip = "${module.primary_region.kasm_db_ip}"
primary_vpc_id = "${module.primary_region.primary_vpc_id}"
certificate_arn = "${module.primary_region.certificate_arn}"
ssh_access_cidr = "${var.ssh_access_cidr}"
}
module "us-west-1-agents" {
source = "./agents"
aws_region = "us-west-1"
zone_name = "us-west-1"
num_agents = 2
agent_instance_type = "t3.medium"
ec2_ami = "ami-08d0eee5e00da8a9b"
aws_access_key = "${var.aws_access_key}"
aws_secret_key = "${var.aws_secret_key}"
aws_domain_name = "${var.aws_domain_name}"
project_name = "${var.project_name}"
kasm_build = "${var.kasm_build}"
manager_token = "${var.manager_token}"
aws_key_pair = "${var.aws_key_pair}"
ssh_access_cidr = "${var.ssh_access_cidr}"
}

View file

@ -0,0 +1,42 @@
data "aws_route53_zone" "kasm-route53-zone" {
name = "${var.aws_domain_name}"
private_zone = false
}
resource "aws_acm_certificate" "kasm-alb-cert" {
domain_name = "${var.aws_domain_name}"
subject_alternative_names = ["*.${var.aws_domain_name}"]
validation_method = "DNS"
lifecycle {
create_before_destroy = true
}
}
resource "aws_route53_record" "kasm-route53-cert-validation-record" {
for_each = {
for dvo in aws_acm_certificate.kasm-alb-cert.domain_validation_options: dvo.domain_name => {
name = dvo.resource_record_name
record = dvo.resource_record_value
type = dvo.resource_record_type
}
}
name = each.value.name
type = each.value.type
records = [each.value.record]
zone_id = data.aws_route53_zone.kasm-route53-zone.id
ttl = 30
allow_overwrite = true
}
resource "aws_acm_certificate_validation" "kasm-elb-certificate-validation" {
certificate_arn = aws_acm_certificate.kasm-alb-cert.arn
validation_record_fqdns = [for record in aws_route53_record.kasm-route53-cert-validation-record: record.fqdn]
}
output "certificate_arn" {
value = "${aws_acm_certificate_validation.kasm-elb-certificate-validation.certificate_arn}"
}

View file

@ -0,0 +1,33 @@
resource "aws_instance" "kasm-db" {
ami = "${var.ec2_ami}"
instance_type = "${var.db_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-default-sg.id}"]
subnet_id = "${aws_subnet.kasm-database-subnet.id}"
key_name = "${var.aws_key_pair}"
root_block_device {
volume_size = "40"
}
user_data = <<-EOF
#!/bin/bash
fallocate -l 4g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz
bash kasm_release/install.sh -S db -e -Q ${var.database_password} -R ${var.redis_password} -U ${var.user_password} -P ${var.admin_password} -M ${var.manager_token}
EOF
tags = {
Name = "${var.project_name}-kasm-db"
}
}
output "kasm_db_ip" {
value = "${aws_instance.kasm-db.private_ip}"
}

View file

@ -0,0 +1,14 @@
provider "aws" {
region = "${var.aws_region}"
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
}
data "aws_availability_zones" "available" {
state = "available"
}
output "primary_aws_region" {
value = "${var.aws_region}"
}

View file

@ -0,0 +1,37 @@
resource "aws_security_group" "kasm-default-sg" {
name = "${var.project_name}-kasm-allow-db-access"
description = "Allow access to db"
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${var.ssh_access_cidr}"]
}
ingress {
from_port = 5432
to_port = 5432
protocol = "tcp"
cidr_blocks = [aws_subnet.kasm-webapp-subnet-1.cidr_block, aws_subnet.kasm-webapp-subnet-2.cidr_block]
}
ingress {
from_port = 6379
to_port = 6379
protocol = "tcp"
cidr_blocks = [aws_subnet.kasm-webapp-subnet-1.cidr_block, aws_subnet.kasm-webapp-subnet-2.cidr_block]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.project_name}-kasm-allow-db-access"
}
}

View file

@ -0,0 +1,54 @@
resource "aws_subnet" "kasm-database-subnet" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
cidr_block = "10.0.0.0/24"
map_public_ip_on_launch = true
tags = {
Name = "${var.project_name}-kasm-subnet"
}
}
resource "aws_subnet" "kasm-webapp-subnet-1" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
cidr_block = "10.0.20.0/24"
availability_zone = data.aws_availability_zones.available.names[2]
map_public_ip_on_launch = true
tags = {
Name = "${var.project_name}-kasm-webapp-subnet-1"
}
}
resource "aws_subnet" "kasm-webapp-subnet-2" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
cidr_block = "10.0.30.0/24"
availability_zone = data.aws_availability_zones.available.names[1]
map_public_ip_on_launch = true
tags = {
Name = "${var.project_name}-kasm-webapp-subnet-2"
}
}
resource "aws_subnet" "kasm-agent-subnet" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
cidr_block = "10.0.40.0/24"
availability_zone = data.aws_availability_zones.available.names[1]
map_public_ip_on_launch = true
tags = {
Name = "${var.project_name}-kasm-webapp-subnet-2"
}
}
output "webapp_subnet_1_id" {
value = "${aws_subnet.kasm-webapp-subnet-1.id}"
}
output "webapp_subnet_2_id" {
value = "${aws_subnet.kasm-webapp-subnet-2.id}"
}
output "agent_subnet_id" {
value = "${aws_subnet.kasm-agent-subnet.id}"
}

View file

@ -0,0 +1,82 @@
variable "aws_region" {
description = "The AWS region for the deployment. (e.g us-east-1)"
}
variable "aws_access_key" {
description = "The AWS access key used for deployment"
}
variable "aws_secret_key" {
description = "The AWS secret key used for deployment"
}
variable "project_name" {
description = "The name of the deployment (e.g dev, staging). A short single word"
}
variable "aws_domain_name" {
description = "The Route53 Zone used for the dns entries. This must already exist in the AWS account. (e.g dev.kasm.contoso.com). The deployment will be accessed via this zone name via https"
}
variable "kasm_build" {
description = "The URL for the Kasm Workspaces build"
}
variable "database_password" {
description = "The password for the database. No special characters"
}
variable "redis_password" {
description = "The password for the database. No special characters"
}
variable "user_password" {
description = "The password for the database. No special characters"
}
variable "admin_password" {
description = "The password for the database. No special characters"
}
variable "manager_token" {
description = "The password for the database. No special characters"
}
variable "zone_name" {
default = "default"
description="A name given to the kasm deployment Zone"
}
variable "aws_key_pair" {
description = "The name of an aws keypair to use."
}
variable "db_instance_type" {
default = "t3.small"
description = "The instance type for the Database"
}
variable "ec2_ami" {
description = "The AMI used for the EC2 nodes. Recommended Ubuntu 18.04 LTS."
}
variable "ssh_access_cidr" {
description = "CIDR notation of the bastion host allowed to SSH in to the machines"
}
variable "num_agents" {
default = "2"
description = "The number of Agent Role Servers to create in the deployment"
}
variable "num_webapps" {
description = "The number of WebApp role servers to create in the deployment"
}
variable "agent_instance_type" {
default = "t3.medium"
}
variable "webapp_instance_type" {
default = "t3.small"
description = "The instance type for the webapps"
}

View file

@ -0,0 +1,25 @@
resource "aws_vpc" "kasm-default-vpc" {
cidr_block = "10.0.0.0/16"
tags = {
Name = "${var.project_name}-kasm-db-vpc"
}
}
resource "aws_internet_gateway" "kasm-default-ig" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
tags = {
Name = "${var.project_name}-kasm-ig"
}
}
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.kasm-default-vpc.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"
gateway_id = "${aws_internet_gateway.kasm-default-ig.id}"
}
output "primary_vpc_id" {
value = "${aws_vpc.kasm-default-vpc.id}"
}

View file

@ -0,0 +1,27 @@
module "primary-webapps" {
source = "../webapps"
primary_aws_region = "${var.aws_region}"
faux_aws_region = "${var.aws_region}"
zone_name = "${var.zone_name}"
num_agents = "${var.num_agents}"
agent_instance_type = "${var.agent_instance_type}"
num_webapps = "${var.num_webapps}"
webapp_instance_type = "${var.webapp_instance_type}"
ec2_ami = "${var.ec2_ami}"
webapp_subnet_id_1 = "${aws_subnet.kasm-webapp-subnet-1.id}"
webapp_subnet_id_2 = "${aws_subnet.kasm-webapp-subnet-2.id}"
agent_subnet_id = "${aws_subnet.kasm-agent-subnet.id}"
aws_access_key = "${var.aws_access_key}"
aws_secret_key = "${var.aws_secret_key}"
aws_domain_name = "${var.aws_domain_name}"
project_name = "${var.project_name}"
kasm_build = "${var.kasm_build}"
database_password = "${var.database_password}"
redis_password = "${var.redis_password}"
manager_token = "${var.manager_token}"
aws_key_pair = "${var.aws_key_pair}"
kasm_db_ip = "${aws_instance.kasm-db.private_ip}"
primary_vpc_id = "${aws_vpc.kasm-default-vpc.id}"
certificate_arn = "${aws_acm_certificate_validation.kasm-elb-certificate-validation.certificate_arn}"
ssh_access_cidr = "${var.ssh_access_cidr}"
}

View file

@ -0,0 +1,47 @@
variable "aws_domain_name" {
default = "kasm.contoso.com"
}
variable "project_name" {
default = "contoso"
}
variable "aws_key_pair" {
default = ""
}
variable "aws_access_key" {
default = ""
}
variable "aws_secret_key" {
default = ""
}
variable "database_password" {
default = "changeme"
}
variable "redis_password" {
default = "changeme"
}
variable "user_password" {
default = "changeme"
}
variable "admin_password" {
default = "changeme"
}
variable "manager_token" {
default = "changeme"
}
variable "kasm_build" {
default = "https://kasm-static-content.s3.amazonaws.com/kasm_release_1.9.0.077388.tar.gz"
}
variable "ssh_access_cidr" {
default = "0.0.0.0/0"
}

View file

@ -0,0 +1,29 @@
resource "aws_instance" "kasm-agent" {
count = "${var.num_agents}"
ami = "${var.ec2_ami}"
instance_type = "${var.agent_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-agent-sg.id}"]
subnet_id = "${var.webapp_subnet_id_1}"
key_name = "${var.aws_key_pair}"
root_block_device {
volume_size = "50"
}
user_data = <<-EOF
#!/bin/bash
fallocate -l 4g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz
PUBLIC_DNS=(`curl -s http://169.254.169.254/latest/meta-data/public-ipv4`)
bash kasm_release/install.sh -S agent -e -p $PUBLIC_DNS -m ${var.zone_name}-lb.${var.aws_domain_name} -M ${var.manager_token}
EOF
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-agent"
}
}

View file

@ -0,0 +1,162 @@
resource "aws_security_group" "kasm-default-elb-sg" {
name = "${var.project_name}-${var.zone_name}-kasm-allow-elb-access"
description = "Security Group for ELB"
vpc_id = "${var.primary_vpc_id}"
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-allow-access"
}
}
resource "aws_s3_bucket" "kasm-s3-logs" {
bucket_prefix = "${var.project_name}-${var.zone_name}-"
acl = "private"
force_destroy = true
}
resource "aws_s3_bucket_policy" "kasm-s3-logs-policy" {
bucket = aws_s3_bucket.kasm-s3-logs.id
policy = <<POLICY
{
"Id": "Policy",
"Version": "2012-10-17",
"Statement": [
{
"Action": [
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "${aws_s3_bucket.kasm-s3-logs.arn}/AWSLogs/*",
"Principal": {
"AWS": [
"${data.aws_elb_service_account.main.arn}"
]
}
}
]
}
POLICY
}
data "aws_elb_service_account" "main" {}
resource "aws_lb" "kasm-alb" {
name = "${var.project_name}-${var.zone_name}-kasm-lb"
internal = false
load_balancer_type = "application"
security_groups = ["${aws_security_group.kasm-default-elb-sg.id}"]
subnets = ["${var.webapp_subnet_id_1}", "${var.webapp_subnet_id_2}"]
access_logs {
bucket = "${aws_s3_bucket.kasm-s3-logs.bucket}"
enabled = true
}
}
resource "aws_lb_target_group" "kasm-target-group" {
name = "${var.project_name}-${var.zone_name}-tg"
port = 443
protocol = "HTTPS"
vpc_id = "${var.primary_vpc_id}"
health_check {
path = "/api/__healthcheck"
matcher = 200
protocol = "HTTPS"
}
}
data "aws_route53_zone" "kasm-route53-zone" {
name = "${var.aws_domain_name}"
private_zone = false
}
resource "aws_lb_listener" "kasm-alb-listener" {
load_balancer_arn = aws_lb.kasm-alb.arn
port = "443"
protocol = "HTTPS"
certificate_arn = "${var.certificate_arn}"
default_action {
type = "forward"
target_group_arn = aws_lb_target_group.kasm-target-group.arn
}
}
resource "aws_lb_target_group_attachment" "kasm-target-group-attachment" {
count = "${var.num_webapps}"
target_group_arn = aws_lb_target_group.kasm-target-group.arn
target_id = aws_instance.kasm-web-app[count.index].id
port = 443
}
resource "aws_route53_record" "kasm-route53-elb-record" {
zone_id = data.aws_route53_zone.kasm-route53-zone.zone_id
name = "${var.zone_name}-lb.${var.aws_domain_name}"
type = "A"
alias {
name = aws_lb.kasm-alb.dns_name
zone_id = aws_lb.kasm-alb.zone_id
evaluate_target_health = true
}
}
resource "aws_route53_record" "kasm-app-url" {
zone_id = data.aws_route53_zone.kasm-route53-zone.zone_id
name = "${var.aws_domain_name}"
type = "A"
set_identifier = "${var.project_name}-${var.zone_name}-set-id"
alias {
name = aws_lb.kasm-alb.dns_name
zone_id = aws_lb.kasm-alb.zone_id
evaluate_target_health = true
}
latency_routing_policy {
region = "${var.faux_aws_region}"
}
}
resource "aws_route53_health_check" "kasm-elb-hc" {
fqdn = "${var.zone_name}-lb.${var.aws_domain_name}"
port = 443
type = "HTTPS"
resource_path = "/api/__healthcheck"
failure_threshold = "5"
request_interval = "30"
tags = {
Name = "hc-${var.zone_name}-lb.${var.aws_domain_name}"
}
}

View file

@ -0,0 +1,9 @@
provider "aws" {
region = "${var.primary_aws_region}"
access_key = "${var.aws_access_key}"
secret_key = "${var.aws_secret_key}"
}
data "aws_availability_zones" "available" {
state = "available"
}

View file

@ -0,0 +1,63 @@
resource "aws_security_group" "kasm-webapp-sg" {
name = "${var.project_name}-${var.zone_name}-kasm-webapp-access"
description = "Allow access to webapps"
vpc_id = "${var.primary_vpc_id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${var.ssh_access_cidr}"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
security_groups = ["${aws_security_group.kasm-default-elb-sg.id}"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
security_groups = ["${aws_security_group.kasm-agent-sg.id}"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}
resource "aws_security_group" "kasm-agent-sg" {
name = "${var.project_name}-${var.zone_name}-kasm-agent-access"
description = "Allow access to agents"
vpc_id = "${var.primary_vpc_id}"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = ["${var.ssh_access_cidr}"]
}
ingress {
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
egress {
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = ["0.0.0.0/0"]
}
}

View file

@ -0,0 +1,98 @@
variable "aws_access_key" {
description = "The AWS access key used for deployment"
}
variable "aws_secret_key" {
description = "The AWS secret key used for deployment"
}
variable "project_name" {
description = "The name of the deployment (e.g dev, staging). A short single word"
}
variable "aws_domain_name" {
description = "The Route53 Zone used for the dns entries. This must already exist in the AWS account. (e.g dev.kasm.contoso.com). The deployment will be accessed via this zone name via https"
}
variable "database_password" {
description = "The password for the database. No special characters"
}
variable "redis_password" {
description = "The password for the database. No special characters"
}
variable "num_webapps" {
description = "The number of WebApp role servers to create in the deployment"
}
variable "webapp_instance_type" {
default = "t3.small"
description = "The instance type for the webapps"
}
variable "num_agents" {
default = 0
description = "The number of Agent Role Servers to create in the deployment"
}
variable "agent_instance_type" {
default = "t3.medium"
description = "the instance type for the agents"
}
variable "primary_aws_region" {
description = "The AWS region for primary region of the deployment. (e.g us-east-1)"
}
variable "faux_aws_region" {
description = "The AWS region for this region is supposed to represent even though it will be created in the primary region of the deployment. (e.g us-east-1)"
}
variable "kasm_build" {
description = "The URL for the Kasm Workspaces build"
}
variable "kasm_db_ip" {
description = "The IP/DINS name of the Kasm database"
}
variable "zone_name" {
description = "A name given to the Kasm deployment Zone"
}
variable "aws_key_pair" {
description = "The name of an aws keypair to use."
}
variable "ec2_ami" {
description = "The AMI used for the EC2 nodes. Recommended Ubuntu 18.04 LTS."
}
variable "manager_token" {
description = "The password for the database. No special characters"
}
variable "certificate_arn" {
description = "The certificate ARN created in the primary region for use with all load balancers in the deployment."
}
variable "ssh_access_cidr" {
description = "CIDR notation of the bastion host allowed to SSH in to the machines"
}
variable "webapp_subnet_id_1" {
description = "One of two subnet IDs created to host webapps in the primary region"
}
variable "webapp_subnet_id_2" {
description = "One of two subnet IDs created to host webapps in the primary region"
}
variable "agent_subnet_id" {
description = "Subnet ID created for agents"
}
variable "primary_vpc_id" {
description = "The VPC ID of the primary region"
}

View file

@ -0,0 +1,36 @@
resource "aws_instance" "kasm-web-app" {
count = "${var.num_webapps}"
ami = "${var.ec2_ami}"
instance_type = "${var.webapp_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-webapp-sg.id}"]
subnet_id = "${var.webapp_subnet_id_1}"
key_name = "${var.aws_key_pair}"
root_block_device {
volume_size = "40"
}
user_data = <<-EOF
#!/bin/bash
set -x
fallocate -l 4g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz
echo "Checking for Kasm DB..."
while ! nc -w 1 -z ${var.kasm_db_ip} 5432; do
echo "Not Ready..."
sleep 5
done
echo "DB is alive"
bash kasm_release/install.sh -S app -e -z ${var.zone_name} -q ${var.kasm_db_ip} -Q ${var.database_password} -R ${var.redis_password}
EOF
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-webapp"
}
}

View file

@ -17,7 +17,6 @@ module "standard" {
ec2_ami = "ami-0747bdcabd34c712a"
s3_unique_id = "f3g2dc"
ssh_access_cidr = "0.0.0.0/0"
database_password = "changeme"
redis_password = "changeme"

View file

@ -1,10 +1,10 @@
resource "aws_instance" "kasm-agent" {
count = "${var.num_agents}"
ami = "${var.ec2_ami}"
instance_type = "${var.agent_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-agent-sg.id}"]
subnet_id = "${aws_subnet.kasm-use-natgw-subnet.id}"
key_name = "${var.aws_key_pair}"
count = "${var.num_agents}"
ami = "${var.ec2_ami}"
instance_type = "${var.agent_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-agent-sg.id}"]
subnet_id = "${aws_subnet.kasm-use-natgw-subnet.id}"
key_name = "${var.aws_key_pair}"
associate_public_ip_address = false
root_block_device {
@ -13,14 +13,14 @@ resource "aws_instance" "kasm-agent" {
user_data = <<-EOF
#!/bin/bash
fallocate -l 5g /mnt/1GiB.swap
chmod 600 /mnt/1GiB.swap
mkswap /mnt/1GiB.swap
swapon /mnt/1GiB.swap
fallocate -l 5g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz
PUBLIC_DNS=(`curl -s http://169.254.169.254/latest/meta-data/public-ipv4`)
PRIVATE_IP=(`curl -s http://169.254.169.254/latest/meta-data/local-ipv4`)
bash kasm_release/install.sh -S agent -e -p $PRIVATE_IP -m ${var.zone_name}-lb.${var.aws_domain_name} -M ${var.manager_token}
EOF

View file

@ -12,10 +12,11 @@ resource "aws_instance" "kasm-db" {
user_data = <<-EOF
#!/bin/bash
fallocate -l 4g /mnt/1GiB.swap
chmod 600 /mnt/1GiB.swap
mkswap /mnt/1GiB.swap
swapon /mnt/1GiB.swap
fallocate -l 4g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz

View file

@ -3,9 +3,14 @@ data "aws_route53_zone" "kasm-route53-zone" {
}
resource "aws_s3_bucket" "kasm-s3-logs" {
bucket = "${var.project_name}-${var.s3_unique_id}-kasm-bucket"
bucket_prefix = "${var.project_name}-${var.zone_name}-"
acl = "private"
force_destroy = true
}
resource "aws_s3_bucket_policy" "kasm-s3-logs-policy" {
bucket = aws_s3_bucket.kasm-s3-logs.id
policy = <<POLICY
{
@ -17,7 +22,7 @@ resource "aws_s3_bucket" "kasm-s3-logs" {
"s3:PutObject"
],
"Effect": "Allow",
"Resource": "arn:aws:s3:::${var.project_name}-${var.s3_unique_id}-kasm-bucket/AWSLogs/*",
"Resource": "${aws_s3_bucket.kasm-s3-logs.arn}/AWSLogs/*",
"Principal": {
"AWS": [
"${data.aws_elb_service_account.main.arn}"
@ -27,7 +32,9 @@ resource "aws_s3_bucket" "kasm-s3-logs" {
]
}
POLICY
}
data "aws_elb_service_account" "main" {}
@ -38,8 +45,6 @@ resource "aws_lb" "kasm-alb" {
security_groups = ["${aws_security_group.kasm-default-elb-sg.id}"]
subnets = ["${aws_subnet.kasm-webapp-subnet.id}", "${aws_subnet.kasm-webapp-subnet-2.id}"]
#enable_deletion_protection = true
access_logs {
bucket = "${aws_s3_bucket.kasm-s3-logs.bucket}"
enabled = true
@ -51,6 +56,12 @@ resource "aws_lb_target_group" "kasm-target-group" {
port = 443
protocol = "HTTPS"
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
health_check {
path = "/api/__healthcheck"
matcher = 200
protocol = "HTTPS"
}
}
@ -87,8 +98,6 @@ resource "aws_route53_record" "kasm-route53-elb-record" {
}
}
# The main domain URL will use latency routing among all the load balancers
resource "aws_route53_record" "kasm-app-url" {
zone_id = data.aws_route53_zone.kasm-route53-zone.zone_id
name = "${var.aws_domain_name}"

View file

@ -30,7 +30,6 @@ resource "aws_security_group" "kasm-webapp-sg" {
description = "Allow access to webapps"
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
# SSH access from bastion
ingress {
from_port = 22
to_port = 22
@ -38,7 +37,6 @@ resource "aws_security_group" "kasm-webapp-sg" {
cidr_blocks = ["${var.ssh_access_cidr}"]
}
# Allow HTTPS only from the load balancer
ingress {
from_port = 443
to_port = 443
@ -46,7 +44,6 @@ resource "aws_security_group" "kasm-webapp-sg" {
security_groups = ["${aws_security_group.kasm-default-elb-sg.id}"]
}
# Allow direct HTTP connections via Agents
ingress {
from_port = 443
to_port = 443
@ -54,7 +51,6 @@ resource "aws_security_group" "kasm-webapp-sg" {
cidr_blocks = ["${aws_subnet.kasm-agent-subnet.cidr_block}"]
}
# outbound internet access
egress {
from_port = 0
to_port = 0
@ -73,7 +69,6 @@ resource "aws_security_group" "kasm-agent-sg" {
description = "Allow access to agents"
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
# SSH access from bastion
ingress {
from_port = 22
to_port = 22
@ -88,7 +83,6 @@ resource "aws_security_group" "kasm-agent-sg" {
cidr_blocks = ["${aws_subnet.kasm-webapp-subnet.cidr_block}", "${aws_subnet.kasm-webapp-subnet-2.cidr_block}" ]
}
# outbound internet access
egress {
from_port = 0
to_port = 0
@ -104,7 +98,6 @@ resource "aws_security_group" "kasm-db-sg" {
description = "Allow access to webapps"
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
# SSH access from bastion
ingress {
from_port = 22
to_port = 22
@ -112,7 +105,6 @@ resource "aws_security_group" "kasm-db-sg" {
cidr_blocks = ["${var.ssh_access_cidr}"]
}
# Allow HTTPS only from the load balancer
ingress {
from_port = 5432
to_port = 5432
@ -120,7 +112,6 @@ resource "aws_security_group" "kasm-db-sg" {
cidr_blocks = ["${aws_subnet.kasm-webapp-subnet.cidr_block}"]
}
# Allow direct HTTP connections via Agents
ingress {
from_port = 6379
to_port = 6379
@ -128,7 +119,6 @@ resource "aws_security_group" "kasm-db-sg" {
cidr_blocks = ["${aws_subnet.kasm-webapp-subnet.cidr_block}"]
}
# outbound internet access
egress {
from_port = 0
to_port = 0

View file

@ -36,7 +36,7 @@ variable "num_webapps" {
variable "webapp_instance_type" {
default = "t3.small"
description = "The instance type for the Agents"
description = "The instance type for the webapps"
}
@ -63,11 +63,6 @@ variable "kasm_build" {
description = "The URL for the Kasm Workspaces build"
}
variable "s3_unique_id" {
default = "4id0"
description = "A unique id to give to the S3 buckets so they are globally unique"
}
variable "master_subnet_id" {
default = "0"
description = "The 2nd octect of VPC subnet"

View file

@ -1,4 +1,3 @@
# Create a VPC to launch our instances into
resource "aws_vpc" "kasm-default-vpc" {
cidr_block = "10.${var.master_subnet_id}.0.0/16"
tags = {
@ -6,7 +5,6 @@ resource "aws_vpc" "kasm-default-vpc" {
}
}
# Create an internet gateway to give our subnet access to the outside world
resource "aws_internet_gateway" "kasm-default-ig" {
vpc_id = "${aws_vpc.kasm-default-vpc.id}"
tags = {
@ -14,7 +12,6 @@ resource "aws_internet_gateway" "kasm-default-ig" {
}
}
# Grant the VPC internet access on its main route table
resource "aws_route" "internet_access" {
route_table_id = "${aws_vpc.kasm-default-vpc.main_route_table_id}"
destination_cidr_block = "0.0.0.0/0"

View file

@ -1,10 +1,10 @@
resource "aws_instance" "kasm-web-app" {
count = "${var.num_webapps}"
ami = "${var.ec2_ami}"
instance_type = "${var.webapp_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-webapp-sg.id}"]
subnet_id = "${aws_subnet.kasm-webapp-subnet.id}"
key_name = "${var.aws_key_pair}"
count = "${var.num_webapps}"
ami = "${var.ec2_ami}"
instance_type = "${var.webapp_instance_type}"
vpc_security_group_ids = ["${aws_security_group.kasm-webapp-sg.id}"]
subnet_id = "${aws_subnet.kasm-webapp-subnet.id}"
key_name = "${var.aws_key_pair}"
associate_public_ip_address = true
root_block_device {
@ -13,16 +13,26 @@ resource "aws_instance" "kasm-web-app" {
user_data = <<-EOF
#!/bin/bash
fallocate -l 4g /mnt/1GiB.swap
chmod 600 /mnt/1GiB.swap
mkswap /mnt/1GiB.swap
swapon /mnt/1GiB.swap
set -x
fallocate -l 4g /mnt/kasm.swap
chmod 600 /mnt/kasm.swap
mkswap /mnt/kasm.swap
swapon /mnt/kasm.swap
echo '/mnt/kasm.swap swap swap defaults 0 0' | tee -a /etc/fstab
cd /tmp
wget ${var.kasm_build}
tar xvf kasm_*.tar.gz
echo "Checking for Kasm DB..."
while ! nc -w 1 -z ${aws_instance.kasm-db.private_ip} 5432; do
echo "Not Ready..."
sleep 5
done
echo "DB is alive"
bash kasm_release/install.sh -S app -e -z ${var.zone_name} -q "${aws_instance.kasm-db.private_ip}" -Q ${var.database_password} -R ${var.redis_password}
EOF
tags = {
Name = "${var.project_name}-${var.zone_name}-kasm-web-app"
Name = "${var.project_name}-${var.zone_name}-kasm-webapp"
}
}