Refactor to cache AWS resources

EC2 Default User 2024-08-14 01:05:06 +00:00
parent 4854f11021
commit fb94b40c23
28 changed files with 2676 additions and 2474 deletions
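The change pattern is the same across every service module below: module-level boto3 clients and repeated list/describe calls are folded into a per-service RuleChecker subclass, the expensive lookups become functools.cached_property attributes, and main.py dispatches rules through RuleChecker.check_rule() while timing each service. A minimal sketch of the pattern, for orientation only (the class name and rule below are illustrative, not part of this commit):

from functools import cached_property

import boto3


class ExampleRuleChecker:
    # Illustrative stand-in for the per-service RuleChecker subclasses added in this commit.
    def __init__(self):
        self.client = boto3.client("lambda")

    @cached_property
    def functions(self):
        # The API call runs once per checker instance; every rule afterwards reuses the cached list.
        return self.client.list_functions()["Functions"]

    def check_rule(self, rule_name):
        # Dispatch to the method named after the rule, e.g. "lambda_dlq_check".
        return getattr(self, rule_name)()

    def lambda_dlq_check(self):
        # Reads self.functions from the cache instead of re-calling the Lambda API.
        return [f["FunctionArn"] for f in self.functions if "DeadLetterConfig" in f]

Each service module then exposes a module-level rule_checker = <ServiceRuleChecker> so the caller can instantiate one checker per service and run all of its enabled rules against the same cached API responses.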

View File

@@ -124,16 +124,16 @@
       "autoscaling-multiple-az": {
         "enabled": true,
         "level": 2
+      },
+      "autoscaling-launch-template": {
+        "enabled": true,
+        "level": 2
       }
     }
   },
   "EC2": {
     "enabled": true,
     "rules": {
-      "autoscaling-launch-template": {
-        "enabled": true,
-        "level": 2
-      },
       "ec2-ebs-encryption-by-default": {
         "enabled": true,
         "level": 2
@@ -432,15 +432,6 @@
       }
     }
   },
-  "Tags": {
-    "enabled": true,
-    "rules": {
-      "required-tags": {
-        "enabled": true,
-        "level": 2
-      }
-    }
-  },
   "S3": {
     "enabled": true,
     "rules": {

main.py (12 lines changed)
View File

@@ -2,6 +2,8 @@ from InquirerLib import prompt
 from InquirerLib.InquirerPy.utils import InquirerPyKeybindings
 from InquirerLib.InquirerPy.base import Choice
 from colorama import Style, Fore
+from datetime import datetime
+from importlib import import_module
 from utils import *
 import services
@@ -40,12 +42,18 @@ def perform_bp_rules_check(bp):
         if service_name == "Lambda":
             service_name = "_lambda"

-        module = getattr(services, convert_snake_case(service_name))
+        now = datetime.now()
+        rule_checker = getattr(
+            services, convert_snake_case(service_name)
+        ).rule_checker()

         for rule_name, rule in service["rules"].items():
             if not rule["enabled"]:
                 continue

-            rule["result"] = getattr(module, convert_snake_case(rule_name))()
+            rule["result"] = rule_checker.check_rule(convert_snake_case(rule_name))
+
+        elapsed_time = datetime.now() - now
+        print(convert_snake_case(service_name), elapsed_time.total_seconds())

     return bp

View File

@@ -1,4 +1,5 @@
 from pydantic import BaseModel
+from utils import convert_snake_case
 from typing import List
@@ -6,3 +7,12 @@ class RuleCheckResult(BaseModel):
     passed: bool
     compliant_resources: List[str]
     non_compliant_resources: List[str]
+
+
+class RuleChecker:
+    def __init__(self):
+        pass
+
+    def check_rule(self, rule_name) -> RuleCheckResult:
+        check_func = getattr(self, convert_snake_case(rule_name))
+        return check_func()

View File

@@ -16,7 +16,6 @@ from . import (
     elasticache,
     iam,
     _lambda,
-    tags,
     s3,
     secrets_manager,
     security_hub,

View File

@@ -1,18 +1,24 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3
 import json

-client = boto3.client("lambda")
-iam_client = boto3.client("iam")
+class LambdaRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("lambda")
+        self.iam_client = boto3.client("iam")
+
+    @cached_property
+    def functions(self):
+        return self.client.list_functions()["Functions"]

-def lambda_dlq_check():
+    def lambda_dlq_check(self):
         compliant_resource = []
         non_compliant_resources = []
-    functions = client.list_functions()["Functions"]

-    for function in functions:
+        for function in self.functions:
             if "DeadLetterConfig" in function:
                 compliant_resource.append(function["FunctionArn"])
             else:
@@ -24,17 +30,24 @@ def lambda_dlq_check():
             non_compliant_resources=non_compliant_resources,
         )

-def lambda_function_public_access_prohibited():
+    def lambda_function_public_access_prohibited(self):
         compliant_resource = []
         non_compliant_resources = []
-    functions = client.list_functions()["Functions"]

-    for function in functions:
+        for function in self.functions:
             try:
-            policy = json.loads(client.get_policy(FunctionName=function["FunctionName"])["Policy"])
+                policy = json.loads(
+                    self.client.get_policy(FunctionName=function["FunctionName"])[
+                        "Policy"
+                    ]
+                )
                 for statement in policy["Statement"]:
-                if statement["Principal"] in ["*", "", '{"AWS": ""}', '{"AWS": "*"}']:
+                    if statement["Principal"] in [
+                        "*",
+                        "",
+                        '{"AWS": ""}',
+                        '{"AWS": "*"}',
+                    ]:
                         non_compliant_resources.append(function["FunctionArn"])
                         break
                 else:
@@ -51,17 +64,18 @@ def lambda_function_public_access_prohibited():
             non_compliant_resources=non_compliant_resources,
         )

-def lambda_function_settings_check():
+    def lambda_function_settings_check(self):
         compliant_resource = []
         non_compliant_resources = []
-    functions = client.list_functions()["Functions"]
         default_timeout = 3
         default_memory_size = 128

-    for function in functions:
-        if function["Timeout"] == default_timeout or function["MemorySize"] == default_memory_size:
+        for function in self.functions:
+            if (
+                function["Timeout"] == default_timeout
+                or function["MemorySize"] == default_memory_size
+            ):
                 non_compliant_resources.append(function["FunctionArn"])
             else:
                 compliant_resource.append(function["FunctionArn"])
@@ -72,13 +86,11 @@ def lambda_function_settings_check():
             non_compliant_resources=non_compliant_resources,
         )

-def lambda_inside_vpc():
+    def lambda_inside_vpc(self):
         compliant_resource = []
         non_compliant_resources = []
-    functions = client.list_functions()["Functions"]

-    for function in functions:
+        for function in self.functions:
             if "VpcConfig" in function:
                 compliant_resource.append(function["FunctionArn"])
             else:
@@ -89,3 +101,6 @@ def lambda_inside_vpc():
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = LambdaRuleChecker

View File

@@ -1,123 +1,150 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-client = boto3.client("elbv2")
-wafv2_client = boto3.client("wafv2")
+class ALBRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("elbv2")
+        self.wafv2_client = boto3.client("wafv2")

-def alb_http_drop_invalid_header_enabled():
-    load_balancers = client.describe_load_balancers()
+    @cached_property
+    def load_balancers(self):
+        return self.client.describe_load_balancers()["LoadBalancers"]
+
+    @cached_property
+    def load_balancer_attributes(self):
+        responses = [
+            self.client.describe_load_balancer_attributes(
+                LoadBalancerArn=load_balancer["LoadBalancerArn"]
+            )
+            for load_balancer in self.load_balancers
+        ]
+        return {
+            load_balancer["LoadBalancerArn"]: response
+            for load_balancer, response in zip(self.load_balancers, responses)
+        }
+
+    def alb_http_drop_invalid_header_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for load_balancer in load_balancers['LoadBalancers']:
-        response = client.describe_load_balancer_attributes(
-            LoadBalancerArn=load_balancer['LoadBalancerArn']
-        )
+        for load_balancer in self.load_balancers:
+            response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
             result = [
                 attribute
                 for attribute in filter(
-                lambda x: x['Key'] == "routing.http.drop_invalid_header_fields.enabled"
-                and x['Value'] == "true",
-                response['Attributes'],
+                    lambda x: x["Key"]
+                    == "routing.http.drop_invalid_header_fields.enabled"
+                    and x["Value"] == "true",
+                    response["Attributes"],
                 )
             ]

-        if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
-        else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
+            if result:
+                compliant_resource.append(load_balancer["LoadBalancerArn"])
+            else:
+                non_compliant_resources.append(load_balancer["LoadBalancerArn"])

         return RuleCheckResult(
             passed=not non_compliant_resources,
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )

-def alb_waf_enabled():
-    load_balancers = client.describe_load_balancers()
+    def alb_waf_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for load_balancer in load_balancers['LoadBalancers']:
-        response = wafv2_client.get_web_acl_for_resource(
-            ResourceArn=load_balancer['LoadBalancerArn']
+        for load_balancer in self.load_balancers:
+            response = self.wafv2_client.get_web_acl_for_resource(
+                ResourceArn=load_balancer["LoadBalancerArn"]
             )

-        if 'WebACL' in response: compliant_resource.append(load_balancer['LoadBalancerArn'])
-        else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
+            if "WebACL" in response:
+                compliant_resource.append(load_balancer["LoadBalancerArn"])
+            else:
+                non_compliant_resources.append(load_balancer["LoadBalancerArn"])

         return RuleCheckResult(
             passed=not non_compliant_resources,
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )

-def elb_cross_zone_load_balancing_enabled():
-    load_balancers = client.describe_load_balancers()
+    def elb_cross_zone_load_balancing_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for load_balancer in load_balancers['LoadBalancers']:
-        response = client.describe_load_balancer_attributes(
-            LoadBalancerArn=load_balancer['LoadBalancerArn']
-        )
+        for load_balancer in self.load_balancers:
+            response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
             result = [
                 attribute
                 for attribute in filter(
-                lambda x: x['Key'] == "load_balancing.cross_zone.enabled"
-                and x['Value'] == "true",
-                response['Attributes'],
+                    lambda x: x["Key"] == "load_balancing.cross_zone.enabled"
+                    and x["Value"] == "true",
+                    response["Attributes"],
                 )
             ]

-        if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
-        else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
+            if result:
+                compliant_resource.append(load_balancer["LoadBalancerArn"])
+            else:
+                non_compliant_resources.append(load_balancer["LoadBalancerArn"])

         return RuleCheckResult(
             passed=not non_compliant_resources,
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )

-def elb_deletion_protection_enabled():
-    load_balancers = client.describe_load_balancers()
+    def elb_deletion_protection_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for load_balancer in load_balancers['LoadBalancers']:
-        response = client.describe_load_balancer_attributes(
-            LoadBalancerArn=load_balancer['LoadBalancerArn']
-        )
+        for load_balancer in self.load_balancers:
+            response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
             result = [
                 attribute
                 for attribute in filter(
-                lambda x: x['Key'] == "deletion_protection.enabled"
-                and x['Value'] == "true",
-                response['Attributes'],
+                    lambda x: x["Key"] == "deletion_protection.enabled"
+                    and x["Value"] == "true",
+                    response["Attributes"],
                 )
             ]

-        if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
-        else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
+            if result:
+                compliant_resource.append(load_balancer["LoadBalancerArn"])
+            else:
+                non_compliant_resources.append(load_balancer["LoadBalancerArn"])

         return RuleCheckResult(
             passed=not non_compliant_resources,
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )

-def elb_logging_enabled():
-    load_balancers = client.describe_load_balancers()
+    def elb_logging_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for load_balancer in load_balancers['LoadBalancers']:
-        response = client.describe_load_balancer_attributes(
-            LoadBalancerArn=load_balancer['LoadBalancerArn']
-        )
+        for load_balancer in self.load_balancers:
+            response = self.load_balancer_attributes[load_balancer["LoadBalancerArn"]]
             result = [
                 attribute
                 for attribute in filter(
-                lambda x: x['Key'] == "access_logs.s3.enabled"
-                and x['Value'] == "true",
-                response['Attributes'],
+                    lambda x: x["Key"] == "access_logs.s3.enabled"
+                    and x["Value"] == "true",
+                    response["Attributes"],
                 )
             ]

-        if result: compliant_resource.append(load_balancer['LoadBalancerArn'])
-        else: non_compliant_resources.append(load_balancer['LoadBalancerArn'])
+            if result:
+                compliant_resource.append(load_balancer["LoadBalancerArn"])
+            else:
+                non_compliant_resources.append(load_balancer["LoadBalancerArn"])

         return RuleCheckResult(
             passed=not non_compliant_resources,
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = ALBRuleChecker

View File

@@ -1,18 +1,37 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-v1_client = boto3.client("apigateway")
-v2_client = boto3.client("apigatewayv2")
+class APIGatewayRuleChecker(RuleChecker):
+    def __init__(self):
+        self.v1_client = boto3.client("apigateway")
+        self.v2_client = boto3.client("apigatewayv2")
+
+    @cached_property
+    def http_apis(self):
+        return self.v2_client.get_apis()["Items"]

-def api_gwv2_access_logs_enabled():
-    apis = v2_client.get_apis()
+    @cached_property
+    def rest_apis(self):
+        return self.v1_client.get_rest_apis()["items"]
+
+    @cached_property
+    def rest_api_stages(self):
+        responses = [
+            self.v1_client.get_stages(
+                restApiId=api["id"],
+            )
+            for api in self.rest_apis
+        ]
+        return {api["id"]: response for api, response in zip(self.rest_apis, responses)}
+
+    def api_gwv2_access_logs_enabled(self):
         compliant_resources = []
         non_compliant_resources = []

-    for api in apis["Items"]:
-        stages = v2_client.get_stages(
+        for api in self.http_apis:
+            stages = self.v2_client.get_stages(
                 ApiId=api["ApiId"],
             )
@@ -23,7 +42,12 @@ def api_gwv2_access_logs_enabled():
             ]
             compliant_resources += list(
-            set([f"{api['Name']} / {stage['StageName']}" for stage in stages["Items"]])
+                set(
+                    [
+                        f"{api['Name']} / {stage['StageName']}"
+                        for stage in stages["Items"]
+                    ]
+                )
                 - set(non_compliant_resources)
             )
@@ -33,14 +57,12 @@ def api_gwv2_access_logs_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def api_gwv2_authorization_type_configured():
-    apis = v2_client.get_apis()
+    def api_gwv2_authorization_type_configured(self):
         compliant_resources = []
         non_compliant_resources = []

-    for api in apis["Items"]:
-        response = v2_client.get_routes(
+        for api in self.http_apis:
+            response = self.v2_client.get_routes(
                 ApiId=api["ApiId"],
             )
@@ -51,7 +73,12 @@ def api_gwv2_authorization_type_configured():
             ]
             compliant_resources += list(
-            set([f"{api['Name']} / {route['RouteKey']}" for route in response["Items"]])
+                set(
+                    [
+                        f"{api['Name']} / {route['RouteKey']}"
+                        for route in response["Items"]
+                    ]
+                )
                 - set(non_compliant_resources)
             )
@@ -61,19 +88,15 @@ def api_gwv2_authorization_type_configured():
             non_compliant_resources=non_compliant_resources,
         )

-def api_gw_associated_with_waf():
-    apis = v1_client.get_rest_apis()
+    def api_gw_associated_with_waf(self):
         compliant_resources = []
         non_compliant_resources = []

-    for api in apis["items"]:
-        stages = v1_client.get_stages(
-            restApiId=api["id"],
-        )
+        for api in self.rest_apis:
+            stages = self.rest_api_stages[api["id"]]

             for stage in stages["item"]:
-            stage_arn = f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                stage_arn = f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                 if "webAclArn" in stage:
                     compliant_resources.append(stage_arn)
@@ -86,19 +109,15 @@ def api_gw_associated_with_waf():
             non_compliant_resources=non_compliant_resources,
         )

-def api_gw_cache_enabled_and_encrypted():
-    apis = v1_client.get_rest_apis()
+    def api_gw_cache_enabled_and_encrypted(self):
         compliant_resources = []
         non_compliant_resources = []

-    for api in apis["items"]:
-        stages = v1_client.get_stages(
-            restApiId=api["id"],
-        )
+        for api in self.rest_apis:
+            stages = self.rest_api_stages[api["id"]]

             non_compliant_resources += [
-            f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                 for stage in stages["item"]
                 if not "*/*" in stage["methodSettings"]
                 or (
@@ -109,7 +128,7 @@ def api_gw_cache_enabled_and_encrypted():
             compliant_resources += list(
                 set(
                     [
-                    f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                        f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                         for stage in stages["item"]
                     ]
                 )
@@ -122,18 +141,14 @@ def api_gw_cache_enabled_and_encrypted():
             non_compliant_resources=non_compliant_resources,
         )

-def api_gw_execution_logging_enabled():
-    apis = v1_client.get_rest_apis()
+    def api_gw_execution_logging_enabled(self):
         compliant_resources = []
         non_compliant_resources = []

-    for api in apis["items"]:
-        stages = v1_client.get_stages(
-            restApiId=api["id"],
-        )
+        for api in self.rest_apis:
+            stages = self.rest_api_stages[api["id"]]

             non_compliant_resources += [
-            f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                 for stage in stages["item"]
                 if not "*/*" in stage["methodSettings"]
                 or (
@@ -144,7 +159,7 @@ def api_gw_execution_logging_enabled():
             compliant_resources += list(
                 set(
                     [
-                    f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                        f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                         for stage in stages["item"]
                     ]
                 )
@@ -157,25 +172,21 @@ def api_gw_execution_logging_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def api_gw_xray_enabled():
-    apis = v1_client.get_rest_apis()
+    def api_gw_xray_enabled(self):
         compliant_resources = []
         non_compliant_resources = []

-    for api in apis["items"]:
-        stages = v1_client.get_stages(
-            restApiId=api["id"],
-        )
+        for api in self.rest_apis:
+            stages = self.rest_api_stages[api["id"]]

             non_compliant_resources += [
-            f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                 for stage in stages["item"]
                 if not stage["tracingEnabled"]
             ]

             compliant_resources += list(
                 set(
                     [
-                    f"arn:aws:apigateway:{v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
+                        f"arn:aws:apigateway:{self.v1_client.meta.region_name}::/restapis/{api['id']}/stages/{stage['stageName']}"
                         for stage in stages["item"]
                     ]
                 )
@@ -187,3 +198,6 @@ def api_gw_xray_enabled():
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = APIGatewayRuleChecker

View File

@@ -1,17 +1,26 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-client = boto3.client("autoscaling")
+class ASGRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("autoscaling")
+
+    @cached_property
+    def asgs(self):
+        return self.client.describe_auto_scaling_groups()["AutoScalingGroups"]

-def autoscaling_group_elb_healthcheck_required():
+    def autoscaling_group_elb_healthcheck_required(self):
         compliant_resources = []
         non_compliant_resources = []
-    asgs = client.describe_auto_scaling_groups()["AutoScalingGroups"]

-    for asg in asgs:
-        if asg["LoadBalancerNames"] or asg["TargetGroupARNs"] and asg["HealthCheckType"] != "ELB":
+        for asg in self.asgs:
+            if (
+                asg["LoadBalancerNames"]
+                or asg["TargetGroupARNs"]
+                and asg["HealthCheckType"] != "ELB"
+            ):
                 non_compliant_resources.append(asg["AutoScalingGroupARN"])
             else:
                 compliant_resources.append(asg["AutoScalingGroupARN"])
@@ -22,13 +31,11 @@ def autoscaling_group_elb_healthcheck_required():
             non_compliant_resources=non_compliant_resources,
         )

-def autoscaling_multiple_az():
+    def autoscaling_multiple_az(self):
         compliant_resources = []
         non_compliant_resources = []
-    asgs = client.describe_auto_scaling_groups()["AutoScalingGroups"]

-    for asg in asgs:
+        for asg in self.asgs:
             if len(asg["AvailabilityZones"]) > 1:
                 compliant_resources.append(asg["AutoScalingGroupARN"])
             else:
@@ -39,3 +46,22 @@ def autoscaling_multiple_az():
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+    def autoscaling_launch_template(self):
+        compliant_resources = []
+        non_compliant_resources = []
+
+        for asg in self.asgs:
+            if "LaunchConfigurationName" in asg:
+                non_compliant_resources.append(asg["AutoScalingGroupARN"])
+            else:
+                compliant_resources.append(asg["AutoScalingGroupARN"])
+
+        return RuleCheckResult(
+            passed=not non_compliant_resources,
+            compliant_resources=compliant_resources,
+            non_compliant_resources=non_compliant_resources,
+        )
+
+
+rule_checker = ASGRuleChecker

View File

@@ -1,17 +1,33 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-client = boto3.client("cloudfront")
+class CloudFrontRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("cloudfront")
+
+    @cached_property
+    def distributions(self):
+        return self.client.list_distributions()["DistributionList"]["Items"]

-def cloudfront_accesslogs_enabled():
+    @cached_property
+    def distribution_details(self):
+        responses = [
+            self.client.get_distribution(Id=distribution["Id"])["Distribution"]
+            for distribution in self.distributions
+        ]
+        return {
+            distribution["Id"]: response
+            for distribution, response in zip(self.distributions, responses)
+        }
+
+    def cloudfront_accesslogs_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    distributions = client.list_distributions()["DistributionList"]["Items"]

-    for distribution in distributions:
-        distribution = client.get_distribution(Id=distribution["Id"])["Distribution"]
+        for distribution in self.distributions:
+            distribution = self.distribution_details[distribution["Id"]]
             if (
                 "Logging" in distribution["DistributionConfig"]
                 and distribution["DistributionConfig"]["Logging"]["Enabled"] == True
@@ -26,13 +42,11 @@ def cloudfront_accesslogs_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def cloudfront_associated_with_waf():
+    def cloudfront_associated_with_waf(self):
         compliant_resources = []
         non_compliant_resources = []
-    distributions = client.list_distributions()["DistributionList"]["Items"]

-    for distribution in distributions:
+        for distribution in self.distributions:
             if "WebACLId" in distribution and distribution["WebACLId"] != "":
                 compliant_resources.append(distribution["ARN"])
             else:
@@ -44,14 +58,12 @@ def cloudfront_associated_with_waf():
             non_compliant_resources=non_compliant_resources,
         )

-def cloudfront_default_root_object_configured():
+    def cloudfront_default_root_object_configured(self):
         compliant_resources = []
         non_compliant_resources = []
-    distributions = client.list_distributions()["DistributionList"]["Items"]

-    for distribution in distributions:
-        distribution = client.get_distribution(Id=distribution["Id"])["Distribution"]
+        for distribution in self.distributions:
+            distribution = self.distribution_details[distribution["Id"]]

             if distribution["DistributionConfig"]["DefaultRootObject"] != "":
                 compliant_resources.append(distribution["ARN"])
@@ -64,18 +76,18 @@ def cloudfront_default_root_object_configured():
             non_compliant_resources=non_compliant_resources,
         )

-def cloudfront_no_deprecated_ssl_protocols():
+    def cloudfront_no_deprecated_ssl_protocols(self):
         compliant_resources = []
         non_compliant_resources = []
-    distributions = client.list_distributions()["DistributionList"]["Items"]

-    for distribution in distributions:
+        for distribution in self.distributions:
             for origin in distribution["Origins"]["Items"]:
                 if (
                     "CustomOriginConfig" in origin
-                and origin["CustomOriginConfig"]["OriginProtocolPolicy"] in ["https-only", "match-viewer"]
-                and "SSLv3" in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
+                    and origin["CustomOriginConfig"]["OriginProtocolPolicy"]
+                    in ["https-only", "match-viewer"]
+                    and "SSLv3"
+                    in origin["CustomOriginConfig"]["OriginSslProtocols"]["Items"]
                 ):
                     non_compliant_resources.append(distribution["ARN"])
@@ -89,13 +101,11 @@ def cloudfront_no_deprecated_ssl_protocols():
             non_compliant_resources=non_compliant_resources,
         )

-def cloudfront_s3_origin_access_control_enabled():
+    def cloudfront_s3_origin_access_control_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    distributions = client.list_distributions()["DistributionList"]

-    for distribution in distributions["Items"]:
+        for distribution in self.distributions:
             for origin in distribution["Origins"]["Items"]:
                 if "S3OriginConfig" in origin and origin["OriginAccessControlId"] == "":
                     non_compliant_resources.append(distribution["ARN"])
@@ -109,14 +119,15 @@ def cloudfront_s3_origin_access_control_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def cloudfront_viewer_policy_https():
+    def cloudfront_viewer_policy_https(self):
         compliant_resources = []
         non_compliant_resources = []
-    distributions = client.list_distributions()["DistributionList"]["Items"]

-    for distribution in distributions:
-        if distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"] == "allow-all":
+        for distribution in self.distributions:
+            if (
+                distribution["DefaultCacheBehavior"]["ViewerProtocolPolicy"]
+                == "allow-all"
+            ):
                 non_compliant_resources.append(distribution["ARN"])
                 continue
@@ -136,3 +147,6 @@ def cloudfront_viewer_policy_https():
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = CloudFrontRuleChecker

View File

@@ -1,15 +1,16 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
 import boto3

-client = boto3.client("cloudwatch")
-logs_client = boto3.client("logs")
+class CloudWatchRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("cloudwatch")
+        self.logs_client = boto3.client("logs")

-def cw_loggroup_retention_period_check():
+    def cw_loggroup_retention_period_check(self):
         compliant_resources = []
         non_compliant_resources = []
-    log_groups = logs_client.describe_log_groups()["logGroups"]
+        log_groups = self.logs_client.describe_log_groups()["logGroups"]

         # This rule should check if `retentionInDays` is less than n days.
         # But, instead of that, this will check if the retention setting is set to "Never expire" or not
@@ -25,11 +26,10 @@ def cw_loggroup_retention_period_check():
             non_compliant_resources=non_compliant_resources,
         )

-def cloudwatch_alarm_settings_check():
+    def cloudwatch_alarm_settings_check(self):
         compliant_resources = []
         non_compliant_resources = []
-    alarms = client.describe_alarms()["MetricAlarms"]
+        alarms = self.client.describe_alarms()["MetricAlarms"]
         parameters = {
             "MetricName": "",  # required
             "Threshold": None,
@@ -55,3 +55,6 @@ def cloudwatch_alarm_settings_check():
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = CloudWatchRuleChecker

View File

@@ -1,20 +1,23 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-build_client = boto3.client("codebuild")
-deploy_client = boto3.client("codedeploy")
+class CodeSeriesChecker(RuleChecker):
+    def __init__(self):
+        self.build_client = boto3.client("codebuild")
+        self.deploy_client = boto3.client("codedeploy")

+    @cached_property
+    def projects(self):
+        project_names = self.build_client.list_projects()["projects"]
+        return self.build_client.batch_get_projects(names=project_names)["projects"]

-def codebuild_project_environment_privileged_check():
+    def codebuild_project_environment_privileged_check(self):
         compliant_resources = []
         non_compliant_resources = []
-    projects = build_client.list_projects()["projects"]

-    for project in projects:
-        project = build_client.batch_get_projects(names=[project])["projects"][0]
+        for project in self.projects:
             if not project["environment"]["privilegedMode"]:
                 compliant_resources.append(project["arn"])
             else:
@@ -26,17 +29,17 @@ def codebuild_project_environment_privileged_check():
             non_compliant_resources=non_compliant_resources,
         )

-def codebuild_project_logging_enabled():
+    def codebuild_project_logging_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    projects = build_client.list_projects()["projects"]

-    for project in projects:
-        project = build_client.batch_get_projects(names=[project])["projects"][0]
+        for project in self.projects:
             logs_config = project["logsConfig"]
-        if logs_config["cloudWatchLogs"]["status"] == "ENABLED" or logs_config["s3Logs"]["status"] == "ENABLED":
+            if (
+                logs_config["cloudWatchLogs"]["status"] == "ENABLED"
+                or logs_config["s3Logs"]["status"] == "ENABLED"
+            ):
                 compliant_resources.append(project["arn"])
             else:
                 non_compliant_resources.append(project["arn"])
@@ -47,18 +50,20 @@ def codebuild_project_logging_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def codedeploy_auto_rollback_monitor_enabled():
+    def codedeploy_auto_rollback_monitor_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    applications = deploy_client.list_applications()["applications"]
+        applications = self.deploy_client.list_applications()["applications"]

         for application in applications:
-        deployment_groups = deploy_client.list_deployment_groups(applicationName=application)["deploymentGroups"]
+            deployment_group_names = self.deploy_client.list_deployment_groups(
+                applicationName=application
+            )["deploymentGroups"]
+            deployment_groups = self.deploy_client.batch_get_deployment_groups(
+                applicationName=application, deploymentGroupNames=deployment_group_names
+            )["deploymentGroupsInfo"]

             for deployment_group in deployment_groups:
-            deployment_group = deploy_client.get_deployment_group(
-                applicationName=application, deploymentGroupName=deployment_group
-            )["deploymentGroupInfo"]

                 if (
                     deployment_group["alarmConfiguration"]["enabled"]
@@ -66,10 +71,15 @@ def codedeploy_auto_rollback_monitor_enabled():
                 ):
                     compliant_resources.append(deployment_group["deploymentGroupId"])
                 else:
-                non_compliant_resources.append(deployment_group["deploymentGroupId"])
+                    non_compliant_resources.append(
+                        deployment_group["deploymentGroupId"]
+                    )

         return RuleCheckResult(
             passed=not non_compliant_resources,
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = CodeSeriesChecker

View File

@@ -1,30 +1,43 @@
-from models import RuleCheckResult
-import datetime
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
+from datetime import datetime, timedelta
 from dateutil.tz import tzlocal
 import boto3

-client = boto3.client("dynamodb")
-backup_client = boto3.client("backup")
-autoscaling_client = boto3.client("application-autoscaling")
+class DynamoDBRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("dynamodb")
+        self.backup_client = boto3.client("backup")
+        self.autoscaling_client = boto3.client("application-autoscaling")
+
+    @cached_property
+    def tables(self):
+        table_names = self.client.list_tables()["TableNames"]
+        return [
+            self.client.describe_table(TableName=table_name)["Table"]
+            for table_name in table_names
+        ]

-def dynamodb_autoscaling_enabled():
+    def dynamodb_autoscaling_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    table_names = client.list_tables()["TableNames"]

-    for table_name in table_names:
-        table = client.describe_table(TableName=table_name)["Table"]
-
-        if table.get("BillingModeSummary", {}).get("BillingMode") == "PAY_PER_REQUEST":
+        for table in self.tables:
+            if (
+                table.get("BillingModeSummary", {}).get("BillingMode")
+                == "PAY_PER_REQUEST"
+            ):
                 compliant_resources.append(table["TableArn"])
                 continue

-        scaling_policies = autoscaling_client.describe_scaling_policies(
-            ServiceNamespace="dynamodb", ResourceId=f"table/{table_name}"
+            scaling_policies = self.autoscaling_client.describe_scaling_policies(
+                ServiceNamespace="dynamodb", ResourceId=f"table/{table['TableName']}"
             )["ScalingPolicies"]
-        scaling_policy_dimensions = [i["ScalableDimension"] for i in scaling_policies]
+            scaling_policy_dimensions = [
+                policy["ScalableDimension"] for policy in scaling_policies
+            ]

             if (
                 "dynamodb:table:ReadCapacityUnits" in scaling_policy_dimensions
                 and "dynamodb:table:WriteCapacityUnits" in scaling_policy_dimensions
@@ -39,24 +52,46 @@ def dynamodb_autoscaling_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def dynamodb_last_backup_recovery_point_created():
+    def dynamodb_last_backup_recovery_point_created(self):
         compliant_resources = []
         non_compliant_resources = []
-    table_names = client.list_tables()["TableNames"]

-    for table_name in table_names:
-        table = client.describe_table(TableName=table_name)["Table"]
-        recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=table["TableArn"])[
-            "RecoveryPoints"
-        ]
-        recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points])
-        if len(recovery_point_creation_dates) == 0:
+        for table in self.tables:
+            recovery_points = self.backup_client.list_recovery_points_by_resource(
+                ResourceArn=table["TableArn"]
+            )["RecoveryPoints"]
+            if not recovery_points:
                 non_compliant_resources.append(table["TableArn"])
                 continue

-        if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1):
+            latest_recovery_point = sorted(
+                [recovery_point["CreationDate"] for recovery_point in recovery_points]
+            )[-1]
+            if datetime.now(tz=tzlocal()) - latest_recovery_point > timedelta(days=1):
+                non_compliant_resources.append(table["TableArn"])
+            else:
+                compliant_resources.append(table["TableArn"])
+
+        return RuleCheckResult(
+            passed=not non_compliant_resources,
+            compliant_resources=compliant_resources,
+            non_compliant_resources=non_compliant_resources,
+        )
+
+    def dynamodb_pitr_enabled(self):
+        compliant_resources = []
+        non_compliant_resources = []
+
+        for table in self.tables:
+            backup = self.client.describe_continuous_backups(
+                TableName=table["TableName"]
+            )["ContinuousBackupsDescription"]
+            if (
+                backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"]
+                == "ENABLED"
+            ):
                 compliant_resources.append(table["TableArn"])
             else:
                 non_compliant_resources.append(table["TableArn"])
@@ -67,36 +102,11 @@ def dynamodb_last_backup_recovery_point_created():
             non_compliant_resources=non_compliant_resources,
         )

-def dynamodb_pitr_enabled():
+    def dynamodb_table_deletion_protection_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    table_names = client.list_tables()["TableNames"]
-
-    for table_name in table_names:
-        backup = client.describe_continuous_backups(TableName=table_name)["ContinuousBackupsDescription"]
-        table = client.describe_table(TableName=table_name)["Table"]
-        if backup["PointInTimeRecoveryDescription"]["PointInTimeRecoveryStatus"] == "ENABLED":
-            compliant_resources.append(table["TableArn"])
-        else:
-            non_compliant_resources.append(table["TableArn"])
-
-    return RuleCheckResult(
-        passed=not non_compliant_resources,
-        compliant_resources=compliant_resources,
-        non_compliant_resources=non_compliant_resources,
-    )
-
-def dynamodb_table_deletion_protection_enabled():
-    compliant_resources = []
-    non_compliant_resources = []
-    table_names = client.list_tables()["TableNames"]

-    for table_name in table_names:
-        table = client.describe_table(TableName=table_name)["Table"]
+        for table in self.tables:
             if table["DeletionProtectionEnabled"] == True:
                 compliant_resources.append(table["TableArn"])
             else:
@@ -108,15 +118,11 @@ def dynamodb_table_deletion_protection_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def dynamodb_table_encrypted_kms():
+    def dynamodb_table_encrypted_kms(self):
         compliant_resources = []
         non_compliant_resources = []
-    table_names = client.list_tables()["TableNames"]

-    for table_name in table_names:
-        table = client.describe_table(TableName=table_name)["Table"]
+        for table in self.tables:
             if (
                 "SSEDescription" in table
                 and table["SSEDescription"]["Status"] == "ENABLED"
@@ -132,16 +138,15 @@ def dynamodb_table_encrypted_kms():
             non_compliant_resources=non_compliant_resources,
         )

-def dynamodb_table_encryption_enabled():
+    def dynamodb_table_encryption_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    table_names = client.list_tables()["TableNames"]

-    for table_name in table_names:
-        table = client.describe_table(TableName=table_name)["Table"]
-
-        if "SSEDescription" in table and table["SSEDescription"]["Status"] == "ENABLED":
+        for table in self.tables:
+            if (
+                "SSEDescription" in table
+                and table["SSEDescription"]["Status"] == "ENABLED"
+            ):
                 compliant_resources.append(table["TableArn"])
             else:
                 non_compliant_resources.append(table["TableArn"])
@@ -151,3 +156,6 @@ def dynamodb_table_encryption_enabled():
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = DynamoDBRuleChecker

View File

@@ -1,22 +1,33 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-client = boto3.client("ec2")
-autoscaling_client = boto3.client("autoscaling")
-ssm_client = boto3.client("ssm")
+class EC2RuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("ec2")
+        self.ssm_client = boto3.client("ssm")
+
+    @cached_property
+    def instances(self):
+        valid_instances = [
+            instance
+            for reservation in self.client.describe_instances()["Reservations"]
+            for instance in reservation["Instances"]
+            if instance["State"]["Name"] != "terminated"
+        ]
+        return valid_instances

-def autoscaling_launch_template():
+    def ec2_ebs_encryption_by_default(self):
         compliant_resources = []
         non_compliant_resources = []
-    asgs = autoscaling_client.describe_auto_scaling_groups()["AutoScalingGroups"]

-    for asg in asgs:
-        if "LaunchConfigurationName" in asg:
-            non_compliant_resources.append(asg["AutoScalingGroupARN"])
+        volumes = self.client.describe_volumes()["Volumes"]
+        for volume in volumes:
+            if volume["Encrypted"]:
+                compliant_resources.append(volume["VolumeId"])
             else:
-            compliant_resources.append(asg["AutoScalingGroupARN"])
+                non_compliant_resources.append(volume["VolumeId"])

         return RuleCheckResult(
             passed=not non_compliant_resources,
@@ -24,34 +35,11 @@ def autoscaling_launch_template():
             non_compliant_resources=non_compliant_resources,
         )

-def ec2_ebs_encryption_by_default():
+    def ec2_imdsv2_check(self):
         compliant_resources = []
         non_compliant_resources = []
-    ebses = client.describe_volumes()["Volumes"]

-    for ebs in ebses:
-        if ebs["Encrypted"] == True:
-            compliant_resources.append(ebs["VolumeId"])
-        else:
-            non_compliant_resources.append(ebs["VolumeId"])
-
-    return RuleCheckResult(
-        passed=not non_compliant_resources,
-        compliant_resources=compliant_resources,
-        non_compliant_resources=non_compliant_resources,
-    )
-
-def ec2_imdsv2_check():
-    compliant_resources = []
-    non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        for instance in self.instances:
             if instance["MetadataOptions"]["HttpTokens"] == "required":
                 compliant_resources.append(instance["InstanceId"])
             else:
@@ -63,16 +51,11 @@ def ec2_imdsv2_check():
             non_compliant_resources=non_compliant_resources,
         )

-def ec2_instance_detailed_monitoring_enabled():
+    def ec2_instance_detailed_monitoring_enabled(self):
         compliant_resources = []
         non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        for instance in self.instances:
             if instance["Monitoring"]["State"] == "enabled":
                 compliant_resources.append(instance["InstanceId"])
             else:
@@ -84,18 +67,18 @@ def ec2_instance_detailed_monitoring_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def ec2_instance_managed_by_systems_manager():
+    def ec2_instance_managed_by_systems_manager(self):
         compliant_resources = []
         non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]
-    informations = ssm_client.describe_instance_information()["InstanceInformationList"]
-    managed_instance_ids = [i["InstanceId"] for i in informations if i["PingStatus"]]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        informations = self.ssm_client.describe_instance_information()[
+            "InstanceInformationList"
+        ]
+        managed_instance_ids = [
+            info["InstanceId"] for info in informations if info["PingStatus"]
+        ]
+
+        for instance in self.instances:
             if instance["InstanceId"] in managed_instance_ids:
                 compliant_resources.append(instance["InstanceId"])
             else:
@@ -107,16 +90,11 @@ def ec2_instance_managed_by_systems_manager():
             non_compliant_resources=non_compliant_resources,
         )

-def ec2_instance_profile_attached():
+    def ec2_instance_profile_attached(self):
         compliant_resources = []
         non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        for instance in self.instances:
             if "IamInstanceProfile" in instance:
                 compliant_resources.append(instance["InstanceId"])
             else:
@@ -128,16 +106,11 @@ def ec2_instance_profile_attached():
             non_compliant_resources=non_compliant_resources,
        )

-def ec2_no_amazon_key_pair():
+    def ec2_no_amazon_key_pair(self):
         compliant_resources = []
         non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        for instance in self.instances:
             if "KeyName" in instance:
                 non_compliant_resources.append(instance["InstanceId"])
             else:
@@ -149,16 +122,11 @@ def ec2_no_amazon_key_pair():
             non_compliant_resources=non_compliant_resources,
        )

-def ec2_stopped_instance():
+    def ec2_stopped_instance(self):
         compliant_resources = []
         non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        for instance in self.instances:
             if instance["State"]["Name"] != "stopped":
                 compliant_resources.append(instance["InstanceId"])
             else:
@@ -170,16 +138,11 @@ def ec2_stopped_instance():
             non_compliant_resources=non_compliant_resources,
        )

-def ec2_token_hop_limit_check():
+    def ec2_token_hop_limit_check(self):
         compliant_resources = []
         non_compliant_resources = []
-    reservations = client.describe_instances()["Reservations"]

-    for reservation in reservations:
-        for instance in reservation["Instances"]:
-            if instance["State"]["Name"] == "terminated":
-                continue
+        for instance in self.instances:
             if instance["MetadataOptions"]["HttpPutResponseHopLimit"] < 2:
                 compliant_resources.append(instance["InstanceId"])
             else:
@@ -190,3 +153,6 @@ def ec2_token_hop_limit_check():
             compliant_resources=compliant_resources,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = EC2RuleChecker

View File

@@ -1,17 +1,21 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3
+import botocore

-client = boto3.client("ecr")
+class ECRRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("ecr")
+
+    @cached_property
+    def repositories(self):
+        return self.client.describe_repositories()["repositories"]

-def ecr_private_image_scanning_enabled():
-    repositories = client.describe_repositories()
+    def ecr_private_image_scanning_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for repository in repositories["repositories"]:
+        for repository in self.repositories:
             if repository["imageScanningConfiguration"]["scanOnPush"] == True:
                 compliant_resource.append(repository["repositoryArn"])
             else:
@@ -23,15 +27,13 @@ def ecr_private_image_scanning_enabled():
             non_compliant_resources=non_compliant_resources,
         )

-def ecr_private_lifecycle_policy_configured():
-    repositories = client.describe_repositories()
+    def ecr_private_lifecycle_policy_configured(self):
         compliant_resource = []
         non_compliant_resources = []

-    for repository in repositories["repositories"]:
+        for repository in self.repositories:
             try:
-            response = client.get_lifecycle_policy(
+                response = self.client.get_lifecycle_policy(
                     registryId=repository["registryId"],
                     repositoryName=repository["repositoryName"],
                 )
@@ -48,13 +50,11 @@ def ecr_private_lifecycle_policy_configured():
             non_compliant_resources=non_compliant_resources,
         )

-def ecr_private_tag_immutability_enabled():
-    repositories = client.describe_repositories()
+    def ecr_private_tag_immutability_enabled(self):
         compliant_resource = []
         non_compliant_resources = []

-    for repository in repositories["repositories"]:
+        for repository in self.repositories:
             if repository["imageTagMutability"] == "IMMUTABLE":
                 compliant_resource.append(repository["repositoryArn"])
             else:
@@ -66,13 +66,11 @@ def ecr_private_tag_immutability_enabled():
             non_compliant_resources=non_compliant_resources,
        )

-def ecr_kms_encryption_1():
-    repositories = client.describe_repositories()
+    def ecr_kms_encryption_1(self):
         compliant_resource = []
         non_compliant_resources = []

-    for repository in repositories["repositories"]:
+        for repository in self.repositories:
             if repository["encryptionConfiguration"]["encryptionType"] == "KMS":
                 compliant_resource.append(repository["repositoryArn"])
             else:
@@ -83,3 +81,6 @@ def ecr_kms_encryption_1():
             compliant_resources=compliant_resource,
             non_compliant_resources=non_compliant_resources,
         )
+
+
+rule_checker = ECRRuleChecker

View File

@@ -1,24 +1,57 @@
-from models import RuleCheckResult
+from models import RuleCheckResult, RuleChecker
+from functools import cached_property
 import boto3

-client = boto3.client("ecs")
+class ECSRuleChecker(RuleChecker):
+    def __init__(self):
+        self.client = boto3.client("ecs")

-def ecs_awsvpc_networking_enabled():
-    compliant_resources = []
-    non_compliant_resources = []
-    task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
-    latest_task_definitions = {}
+    @cached_property
+    def task_definitions(self):
+        task_definition_arns = self.client.list_task_definitions(status="ACTIVE")[
+            "taskDefinitionArns"
+        ]
+        latest_task_definitions = {}

-    for task_definition in task_definitions:
-        family, revision = task_definition.rsplit(":", 1)
-        latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
+        # Filter latest task definition arns
+        for task_definition_arn in task_definition_arns:
+            family, revision = task_definition_arn.rsplit(":", 1)
+            latest_task_definitions[family] = max(
+                latest_task_definitions.get(family, 0), int(revision)
+            )

-    for family, revision in latest_task_definitions.items():
-        task_definition_arn = f"{family}:{revision}"
-        task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
+        # Fetch latest task definition details
+        task_definitions = [
+            self.client.describe_task_definition(taskDefinition=f"{family}:{revision}")[
+                "taskDefinition"
+            ]
+            for family, revision in latest_task_definitions.items()
+        ]
+        return task_definitions
+
+    @cached_property
+    def clusters(self):
+        return self.client.describe_clusters(include=["SETTINGS"])["clusters"]
+
+    @cached_property
+    def services(self):
+        services = []
+        for cluster in self.clusters:
+            service_arns = self.client.list_services(
+                cluster=cluster["clusterArn"], launchType="FARGATE"
+            )["serviceArns"]
+            services += self.client.describe_services(
+                cluster=cluster["clusterArn"], services=service_arns
+            )["services"]
+        return services
+
+    def ecs_awsvpc_networking_enabled(self):
+        compliant_resources = []
+        non_compliant_resources = []
+
+        for task_definition in self.task_definitions:
             if task_definition.get("networkMode") == "awsvpc":
                 compliant_resources.append(task_definition["taskDefinitionArn"])
             else:
@@ -30,26 +63,18 @@ def ecs_awsvpc_networking_enabled():
             non_compliant_resources=non_compliant_resources,
        )

-def ecs_containers_nonprivileged():
+    def ecs_containers_nonprivileged(self):
         compliant_resources = []
         non_compliant_resources = []
-    task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
-    latest_task_definitions = {}

-    for task_definition in task_definitions:
-        family, revision = task_definition.rsplit(":", 1)
-        latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
-
-    for family, revision in latest_task_definitions.items():
-        task_definition_arn = f"{family}:{revision}"
-        task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
+        for task_definition in self.task_definitions:
             containers = task_definition["containerDefinitions"]
+            privileged_containers = [
+                container for container in containers if container.get("privileged")
+            ]

-        for container in containers:
-            if container.get("privileged"):
+            if privileged_containers:
                 non_compliant_resources.append(task_definition["taskDefinitionArn"])
-                break
             else:
                 compliant_resources.append(task_definition["taskDefinitionArn"])
@@ -59,26 +84,20 @@ def ecs_containers_nonprivileged():
             non_compliant_resources=non_compliant_resources,
        )

-def ecs_containers_readonly_access():
+    def ecs_containers_readonly_access(self):
         compliant_resources = []
         non_compliant_resources = []
-    task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
-    latest_task_definitions = {}

-    for task_definition in task_definitions:
-        family, revision = task_definition.rsplit(":", 1)
-        latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
-
-    for family, revision in latest_task_definitions.items():
-        task_definition_arn = f"{family}:{revision}"
-        task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
+        for task_definition in self.task_definitions:
             containers = task_definition["containerDefinitions"]
+            not_readonly_containers = [
+                container
+                for container in containers
+                if not container.get("readonlyRootFilesystem")
+            ]

-        for container in containers:
-            if not container.get("readonlyRootFilesystem"):
+            if not_readonly_containers:
                 non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else: else:
compliant_resources.append(task_definition["taskDefinitionArn"]) compliant_resources.append(task_definition["taskDefinitionArn"])
@ -88,17 +107,21 @@ def ecs_containers_readonly_access():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_container_insights_enabled(self):
def ecs_container_insights_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_clusters(include=["SETTINGS"])["clusters"] for cluster in self.clusters:
container_insights_setting = [
setting
for setting in cluster["settings"]
if setting["name"] == "containerInsights"
]
for cluster in clusters: if (
container_insights_setting = [setting for setting in cluster["settings"] if setting["name"] == "containerInsights"] container_insights_setting
and container_insights_setting[0]["value"] == "enabled"
if container_insights_setting and container_insights_setting[0]["value"] == "enabled": ):
compliant_resources.append(cluster["clusterArn"]) compliant_resources.append(cluster["clusterArn"])
else: else:
non_compliant_resources.append(cluster["clusterArn"]) non_compliant_resources.append(cluster["clusterArn"])
@ -109,17 +132,11 @@ def ecs_container_insights_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_fargate_latest_platform_version(self):
def ecs_fargate_latest_platform_version():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
cluster_arns = client.list_clusters()["clusterArns"]
for cluster_arn in cluster_arns: for service in self.services:
service_arns = client.list_services(cluster=cluster_arn, launchType="FARGATE")["serviceArns"]
services = client.describe_services(cluster=cluster_arn, services=service_arns)["services"]
for service in services:
if service["platformVersion"] == "LATEST": if service["platformVersion"] == "LATEST":
compliant_resources.append(service["serviceArn"]) compliant_resources.append(service["serviceArn"])
else: else:
@ -131,26 +148,67 @@ def ecs_fargate_latest_platform_version():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def ecs_task_definition_log_configuration(self):
def ecs_task_definition_log_configuration():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions: for task_definition in self.task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"] containers = task_definition["containerDefinitions"]
for container in containers: log_disabled_containers = [
if "logConfiguration" not in container: container
for container in containers
if "logConfiguration" not in container
]
if log_disabled_containers:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_memory_hard_limit(self):
compliant_resources = []
non_compliant_resources = []
for task_definition in self.task_definitions:
containers = task_definition["containerDefinitions"]
containers_without_memory_limit = [
container for container in containers if "memory" not in container
]
if containers_without_memory_limit:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_nonroot_user(self):
compliant_resources = []
non_compliant_resources = []
for task_definition in self.task_definitions:
containers = task_definition["containerDefinitions"]
privileged_containers = [
container
for container in containers
if container.get("user") in [None, "root"]
]
if privileged_containers:
non_compliant_resources.append(task_definition["taskDefinitionArn"]) non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else: else:
compliant_resources.append(task_definition["taskDefinitionArn"]) compliant_resources.append(task_definition["taskDefinitionArn"])
@ -161,59 +219,4 @@ def ecs_task_definition_log_configuration():
) )
def ecs_task_definition_memory_hard_limit(): rule_checker = ECSRuleChecker
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"]
for container in containers:
if "memory" not in container:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
def ecs_task_definition_nonroot_user():
compliant_resources = []
non_compliant_resources = []
task_definitions = client.list_task_definitions(status="ACTIVE")["taskDefinitionArns"]
latest_task_definitions = {}
for task_definition in task_definitions:
family, revision = task_definition.rsplit(":", 1)
latest_task_definitions[family] = max(latest_task_definitions.get(family, 0), int(revision))
for family, revision in latest_task_definitions.items():
task_definition_arn = f"{family}:{revision}"
task_definition = client.describe_task_definition(taskDefinition=task_definition_arn)["taskDefinition"]
containers = task_definition["containerDefinitions"]
for container in containers:
if container.get("user") in [None, "root"]:
non_compliant_resources.append(task_definition["taskDefinitionArn"])
break
else:
compliant_resources.append(task_definition["taskDefinitionArn"])
return RuleCheckResult(
passed=not non_compliant_resources,
compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources,
)
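The repeated family/revision bookkeeping that every ECS rule used to carry is now done once inside the cached task_definitions property. A short, self-contained illustration of that reduction with invented ARNs (no AWS call involved):

# Reduce task definition ARNs to the newest revision per family, as the
# cached task_definitions property does before describing them.
arns = [
    "arn:aws:ecs:us-east-1:123456789012:task-definition/web:3",
    "arn:aws:ecs:us-east-1:123456789012:task-definition/web:7",
    "arn:aws:ecs:us-east-1:123456789012:task-definition/worker:2",
]

latest = {}
for arn in arns:
    family, revision = arn.rsplit(":", 1)
    latest[family] = max(latest.get(family, 0), int(revision))

# Only the newest revision of each family is described afterwards.
assert latest == {
    "arn:aws:ecs:us-east-1:123456789012:task-definition/web": 7,
    "arn:aws:ecs:us-east-1:123456789012:task-definition/worker": 2,
}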


@ -1,17 +1,26 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
client = boto3.client("efs") class EFSRuleChecker(RuleChecker):
ec2_client = boto3.client("ec2") def __init__(self):
self.client = boto3.client("efs")
self.ec2_client = boto3.client("ec2")
@cached_property
def access_points(self):
return self.client.describe_access_points()["AccessPoints"]
def efs_access_point_enforce_root_directory(): @cached_property
access_points = client.describe_access_points()["AccessPoints"] def file_systems(self):
return self.client.describe_file_systems()["FileSystems"]
def efs_access_point_enforce_root_directory(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for access_point in access_points: for access_point in self.access_points:
if access_point["RootDirectory"]["Path"] != "/": if access_point["RootDirectory"]["Path"] != "/":
compliant_resource.append(access_point["AccessPointArn"]) compliant_resource.append(access_point["AccessPointArn"])
else: else:
@ -23,13 +32,11 @@ def efs_access_point_enforce_root_directory():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_access_point_enforce_user_identity(self):
def efs_access_point_enforce_user_identity():
access_points = client.describe_access_points()["AccessPoints"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for access_point in access_points: for access_point in self.access_points:
if "PosixUser" in access_point: if "PosixUser" in access_point:
compliant_resource.append(access_point["AccessPointArn"]) compliant_resource.append(access_point["AccessPointArn"])
else: else:
@ -41,16 +48,15 @@ def efs_access_point_enforce_user_identity():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_automatic_backups_enabled(self):
def efs_automatic_backups_enabled():
file_systems = client.describe_file_systems()["FileSystems"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for file_system in file_systems: for file_system in self.file_systems:
response = client.describe_backup_policy( response = self.client.describe_backup_policy(
FileSystemId=file_system["FileSystemId"] FileSystemId=file_system["FileSystemId"]
) )
if response["BackupPolicy"]["Status"] == "ENABLED": if response["BackupPolicy"]["Status"] == "ENABLED":
compliant_resource.append(file_system["FileSystemArn"]) compliant_resource.append(file_system["FileSystemArn"])
else: else:
@ -62,14 +68,12 @@ def efs_automatic_backups_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_encrypted_check(self):
def efs_encrypted_check():
file_systems = client.describe_file_systems()["FileSystems"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for file_system in file_systems: for file_system in self.file_systems:
if file_system["Encrypted"] == True: if file_system["Encrypted"]:
compliant_resource.append(file_system["FileSystemArn"]) compliant_resource.append(file_system["FileSystemArn"])
else: else:
non_compliant_resources.append(file_system["FileSystemArn"]) non_compliant_resources.append(file_system["FileSystemArn"])
@ -80,19 +84,18 @@ def efs_encrypted_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def efs_mount_target_public_accessible(self):
def efs_mount_target_public_accessible():
file_systems = client.describe_file_systems()["FileSystems"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for file_system in file_systems: for file_system in self.file_systems:
mount_targets = client.describe_mount_targets( mount_targets = self.client.describe_mount_targets(
FileSystemId=file_system["FileSystemId"] FileSystemId=file_system["FileSystemId"]
)["MountTargets"] )["MountTargets"]
for mount_target in mount_targets: for mount_target in mount_targets:
subnet_id = mount_target["SubnetId"] subnet_id = mount_target["SubnetId"]
routes = ec2_client.describe_route_tables( routes = self.ec2_client.describe_route_tables(
Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}] Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]
)["RouteTables"][0]["Routes"] )["RouteTables"][0]["Routes"]
@ -105,14 +108,17 @@ def efs_mount_target_public_accessible():
): ):
non_compliant_resources.append(file_system["FileSystemArn"]) non_compliant_resources.append(file_system["FileSystemArn"])
break break
else:
compliant_resource.append(file_system["FileSystemArn"])
compliant_resource = list(set(compliant_resource))
non_compliant_resources = list(set(non_compliant_resources)) non_compliant_resources = list(set(non_compliant_resources))
compliant_resource = list(
set(compliant_resource) - set(non_compliant_resources)
)
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = EFSRuleChecker
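The reworked mount-target check collects results per mount target and then normalizes them, so a file system with both a public and a private mount target is only reported as non-compliant. A sketch of that final set arithmetic with invented file system IDs:

# Example data only: "fs-mixed" was appended on both sides because one of its
# mount targets has an internet-facing route and another does not.
compliant = ["fs-private", "fs-mixed"]
non_compliant = ["fs-mixed"]

non_compliant = list(set(non_compliant))
compliant = list(set(compliant) - set(non_compliant))

assert compliant == ["fs-private"]
assert non_compliant == ["fs-mixed"]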


@ -1,24 +1,32 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
client = boto3.client("eks") class EKSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("eks")
@cached_property
def clusters(self):
cluster_names = self.client.list_clusters()["clusters"]
return [
self.client.describe_cluster(name=cluster_name)["cluster"]
for cluster_name in cluster_names
]
def eks_cluster_logging_enabled(): def eks_cluster_logging_enabled(self):
clusters = client.list_clusters()["clusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in clusters: for cluster in self.clusters:
response = client.describe_cluster(name=cluster)["cluster"]
if ( if (
len(response["logging"]["clusterLogging"][0]["types"]) == 5 cluster["logging"]["clusterLogging"][0]["enabled"]
and response["logging"]["clusterLogging"][0]["enabled"] == True and len(cluster["logging"]["clusterLogging"][0]["types"]) == 5
): ):
compliant_resource.append(response["arn"]) compliant_resource.append(cluster["arn"])
else: else:
non_compliant_resources.append(response["arn"]) non_compliant_resources.append(cluster["arn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
@ -26,21 +34,18 @@ def eks_cluster_logging_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def eks_cluster_secrets_encrypted(self):
def eks_cluster_secrets_encrypted():
clusters = client.list_clusters()["clusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in clusters: for cluster in self.clusters:
response = client.describe_cluster(name=cluster)["cluster"]
if ( if (
"encryptionConfig" in response "encryptionConfig" in cluster
and "secrets" in response["encryptionConfig"][0]["resources"] and "secrets" in cluster["encryptionConfig"][0]["resources"]
): ):
compliant_resource.append(response["arn"]) compliant_resource.append(cluster["arn"])
else: else:
non_compliant_resources.append(response["arn"]) non_compliant_resources.append(cluster["arn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
@ -48,21 +53,21 @@ def eks_cluster_secrets_encrypted():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def eks_endpoint_no_public_access(self):
def eks_endpoint_no_public_access():
clusters = client.list_clusters()["clusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in clusters: for cluster in self.clusters:
response = client.describe_cluster(name=cluster)["cluster"] if cluster["resourcesVpcConfig"]["endpointPublicAccess"]:
if response["resourcesVpcConfig"]["endpointPublicAccess"] == False: non_compliant_resources.append(cluster["arn"])
compliant_resource.append(response["arn"])
else: else:
non_compliant_resources.append(response["arn"]) compliant_resource.append(cluster["arn"])
return RuleCheckResult( return RuleCheckResult(
passed=not non_compliant_resources, passed=not non_compliant_resources,
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = EKSRuleChecker


@ -1,17 +1,26 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
client = boto3.client("elasticache") class ElastiCacheRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("elasticache")
@cached_property
def clusters(self):
return self.client.describe_cache_clusters()["CacheClusters"]
def elasticache_auto_minor_version_upgrade_check(): @cached_property
clusters = client.describe_cache_clusters()["CacheClusters"] def replication_groups(self):
return self.client.describe_replication_groups()["ReplicationGroups"]
def elasticache_auto_minor_version_upgrade_check(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in clusters: for cluster in self.clusters:
if cluster["AutoMinorVersionUpgrade"] == True: if cluster["AutoMinorVersionUpgrade"]:
compliant_resource.append(cluster["ARN"]) compliant_resource.append(cluster["ARN"])
else: else:
non_compliant_resources.append(cluster["ARN"]) non_compliant_resources.append(cluster["ARN"])
@ -22,13 +31,11 @@ def elasticache_auto_minor_version_upgrade_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_redis_cluster_automatic_backup_check(self):
def elasticache_redis_cluster_automatic_backup_check():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in replication_groups: for replication_group in self.replication_groups:
if "SnapshottingClusterId" in replication_group: if "SnapshottingClusterId" in replication_group:
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -40,13 +47,11 @@ def elasticache_redis_cluster_automatic_backup_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_repl_grp_auto_failover_enabled(self):
def elasticache_repl_grp_auto_failover_enabled():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in replication_groups: for replication_group in self.replication_groups:
if replication_group["AutomaticFailover"] == "enabled": if replication_group["AutomaticFailover"] == "enabled":
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -58,13 +63,11 @@ def elasticache_repl_grp_auto_failover_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_repl_grp_encrypted_at_rest(self):
def elasticache_repl_grp_encrypted_at_rest():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in replication_groups: for replication_group in self.replication_groups:
if replication_group["AtRestEncryptionEnabled"] == True: if replication_group["AtRestEncryptionEnabled"] == True:
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -76,13 +79,11 @@ def elasticache_repl_grp_encrypted_at_rest():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_repl_grp_encrypted_in_transit(self):
def elasticache_repl_grp_encrypted_in_transit():
replication_groups = client.describe_replication_groups()["ReplicationGroups"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for replication_group in replication_groups: for replication_group in self.replication_groups:
if replication_group["TransitEncryptionEnabled"] == True: if replication_group["TransitEncryptionEnabled"] == True:
compliant_resource.append(replication_group["ARN"]) compliant_resource.append(replication_group["ARN"])
else: else:
@ -94,13 +95,11 @@ def elasticache_repl_grp_encrypted_in_transit():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def elasticache_subnet_group_check(self):
def elasticache_subnet_group_check():
clusters = client.describe_cache_clusters()["CacheClusters"]
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
for cluster in clusters: for cluster in self.clusters:
if cluster["CacheSubnetGroupName"] != "default": if cluster["CacheSubnetGroupName"] != "default":
compliant_resource.append(cluster["ARN"]) compliant_resource.append(cluster["ARN"])
else: else:
@ -111,3 +110,6 @@ def elasticache_subnet_group_check():
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = ElastiCacheRuleChecker


@ -1,19 +1,36 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
client = boto3.client("iam") class IAMRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("iam")
@cached_property
def policies(self):
return self.client.list_policies(Scope="Local")["Policies"]
def iam_policy_no_statements_with_admin_access(): @cached_property
def policy_default_versions(self):
responses = [
self.client.get_policy_version(
PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"]
)["PolicyVersion"]
for policy in self.policies
]
return {
policy["Arn"]: response
for policy, response in zip(self.policies, responses)
}
def iam_policy_no_statements_with_admin_access(self):
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
policies = client.list_policies(Scope="Local")["Policies"]
for policy in policies: for policy in self.policies:
policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[ policy_version = self.policy_default_versions[policy["Arn"]]
"PolicyVersion"
]
for statement in policy_version["Document"]["Statement"]: for statement in policy_version["Document"]["Statement"]:
if ( if (
@ -32,16 +49,12 @@ def iam_policy_no_statements_with_admin_access():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def iam_policy_no_statements_with_full_access(self):
def iam_policy_no_statements_with_full_access():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
policies = client.list_policies(Scope="Local")["Policies"]
for policy in policies: for policy in self.policies:
policy_version = client.get_policy_version(PolicyArn=policy["Arn"], VersionId=policy["DefaultVersionId"])[ policy_version = self.policy_default_versions[policy["Arn"]]
"PolicyVersion"
]
for statement in policy_version["Document"]["Statement"]: for statement in policy_version["Document"]["Statement"]:
if statement["Effect"] == "Deny": if statement["Effect"] == "Deny":
@ -50,7 +63,9 @@ def iam_policy_no_statements_with_full_access():
if type(statement["Action"]) == str: if type(statement["Action"]) == str:
statement["Action"] = [statement["Action"]] statement["Action"] = [statement["Action"]]
full_access_actions = [action for action in statement["Action"] if action.endswith(":*")] full_access_actions = [
action for action in statement["Action"] if action.endswith(":*")
]
if full_access_actions: if full_access_actions:
non_compliant_resources.append(policy["Arn"]) non_compliant_resources.append(policy["Arn"])
break break
@ -63,15 +78,18 @@ def iam_policy_no_statements_with_full_access():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def iam_role_managed_policy_check(self):
def iam_role_managed_policy_check():
compliant_resource = [] compliant_resource = []
non_compliant_resources = [] non_compliant_resources = []
policy_arns = []  # list of managed policy ARNs to check policy_arns = []  # list of managed policy ARNs to check
for policy in policy_arns: for policy in policy_arns:
response = client.list_entities_for_policy(PolicyArn=policy) response = self.client.list_entities_for_policy(PolicyArn=policy)
if response["PolicyGroups"] == [] and response["PolicyUsers"] == [] and response["PolicyRoles"] == []: if (
response["PolicyGroups"] == []
and response["PolicyUsers"] == []
and response["PolicyRoles"] == []
):
non_compliant_resources.append(policy) non_compliant_resources.append(policy)
else: else:
compliant_resource.append(policy) compliant_resource.append(policy)
@ -81,3 +99,6 @@ def iam_role_managed_policy_check():
compliant_resources=compliant_resource, compliant_resources=compliant_resource,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = IAMRuleChecker
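Both IAM policy rules now read from policy_default_versions, which pairs every customer-managed policy with its fetched default version document, keyed by ARN. An illustration of that zip-based mapping with fabricated data:

# Fabricated policies and version documents; no AWS call is made.
policies = [{"Arn": "arn:aws:iam::123456789012:policy/a"},
            {"Arn": "arn:aws:iam::123456789012:policy/b"}]
versions = [{"Document": {"Statement": []}},
            {"Document": {"Statement": [{"Effect": "Allow"}]}}]

by_arn = {policy["Arn"]: version for policy, version in zip(policies, versions)}

assert by_arn["arn:aws:iam::123456789012:policy/b"]["Document"]["Statement"][0]["Effect"] == "Allow"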


@ -1,17 +1,18 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
import boto3 import boto3
client = boto3.client("kms") class KMSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("kms")
def cmk_backing_key_rotation_enabled(self):
def cmk_backing_key_rotation_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
keys = client.list_keys()["Keys"] keys = self.client.list_keys()["Keys"]
for key in keys: for key in keys:
response = client.get_key_rotation_status(KeyId=key["KeyId"]) response = self.client.get_key_rotation_status(KeyId=key["KeyId"])
if response["KeyRotationEnabled"] == True: if response["KeyRotationEnabled"] == True:
compliant_resources.append(response["KeyId"]) compliant_resources.append(response["KeyId"])
@ -23,3 +24,6 @@ def cmk_backing_key_rotation_enabled():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = KMSRuleChecker


@ -1,24 +1,39 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import datetime import datetime
from dateutil.tz import tzlocal from dateutil.tz import tzlocal
import boto3 import boto3
client = boto3.client("rds")
backup_client = boto3.client("backup")
ec2_client = boto3.client("ec2")
class RDSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("rds")
self.backup_client = boto3.client("backup")
self.ec2_client = boto3.client("ec2")
def aurora_last_backup_recovery_point_created(): @cached_property
def db_clusters(self):
return self.client.describe_db_clusters()["DBClusters"]
@cached_property
def db_instances(self):
return self.client.describe_db_instances()["DBInstances"]
def aurora_last_backup_recovery_point_created(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
recovery_points = backup_client.list_recovery_points_by_resource(ResourceArn=cluster["DBClusterArn"])[ recovery_points = self.backup_client.list_recovery_points_by_resource(
"RecoveryPoints" ResourceArn=cluster["DBClusterArn"]
] )["RecoveryPoints"]
recovery_point_creation_dates = sorted([i["CreationDate"] for i in recovery_points]) recovery_point_creation_dates = sorted(
if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[-1] < datetime.timedelta(days=1): [i["CreationDate"] for i in recovery_points]
)
if datetime.datetime.now(tz=tzlocal()) - recovery_point_creation_dates[
-1
] < datetime.timedelta(days=1):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
else: else:
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
@ -29,14 +44,16 @@ def aurora_last_backup_recovery_point_created():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def aurora_mysql_backtracking_enabled(self):
def aurora_mysql_backtracking_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["Engine"] == "aurora-mysql" and cluster.get("EarliestBacktrackTime", None) == None: if (
cluster["Engine"] == "aurora-mysql"
and cluster.get("EarliestBacktrackTime", None) == None
):
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
else: else:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -47,12 +64,11 @@ def aurora_mysql_backtracking_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def db_instance_backup_enabled(self):
def db_instance_backup_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if "BackupRetentionPeriod" in cluster: if "BackupRetentionPeriod" in cluster:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -65,12 +81,11 @@ def db_instance_backup_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_auto_minor_version_upgrade_enable(self):
def rds_cluster_auto_minor_version_upgrade_enable():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"): if cluster["Engine"] == "docdb" or cluster.get("AutoMinorVersionUpgrade"):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -83,12 +98,11 @@ def rds_cluster_auto_minor_version_upgrade_enable():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_default_admin_check(self):
def rds_cluster_default_admin_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["MasterUsername"] not in ["admin", "postgres"]: if cluster["MasterUsername"] not in ["admin", "postgres"]:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -101,12 +115,11 @@ def rds_cluster_default_admin_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_deletion_protection_enabled(self):
def rds_cluster_deletion_protection_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["DeletionProtection"]: if cluster["DeletionProtection"]:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -119,12 +132,11 @@ def rds_cluster_deletion_protection_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_encrypted_at_rest(self):
def rds_cluster_encrypted_at_rest():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["StorageEncrypted"]: if cluster["StorageEncrypted"]:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -137,14 +149,15 @@ def rds_cluster_encrypted_at_rest():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_iam_authentication_enabled(self):
def rds_cluster_iam_authentication_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if cluster["Engine"] == "docdb" or cluster.get("IAMDatabaseAuthenticationEnabled"): if cluster["Engine"] == "docdb" or cluster.get(
"IAMDatabaseAuthenticationEnabled"
):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
else: else:
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
@ -155,12 +168,11 @@ def rds_cluster_iam_authentication_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_cluster_multi_az_enabled(self):
def rds_cluster_multi_az_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
for cluster in clusters: for cluster in clusters:
if len(cluster.get("AvailabilityZones", [])) > 1: if len(cluster.get("AvailabilityZones", [])) > 1:
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
@ -173,17 +185,22 @@ def rds_cluster_multi_az_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_db_security_group_not_allowed(self):
def rds_db_security_group_not_allowed():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
security_groups = ec2_client.describe_security_groups()["SecurityGroups"] clusters = self.db_clusters
default_security_group_ids = [i["GroupId"] for i in security_groups if i["GroupName"] == "default"] security_groups = self.ec2_client.describe_security_groups()["SecurityGroups"]
default_security_group_ids = [
i["GroupId"] for i in security_groups if i["GroupName"] == "default"
]
for cluster in clusters: for cluster in clusters:
db_security_groups = [i["VpcSecurityGroupId"] for i in cluster["VpcSecurityGroups"] if i["Status"] == "active"] db_security_groups = [
i["VpcSecurityGroupId"]
for i in cluster["VpcSecurityGroups"]
if i["Status"] == "active"
]
for default_security_group_id in default_security_group_ids: for default_security_group_id in default_security_group_ids:
if default_security_group_id in db_security_groups: if default_security_group_id in db_security_groups:
@ -198,12 +215,11 @@ def rds_db_security_group_not_allowed():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_enhanced_monitoring_enabled(self):
def rds_enhanced_monitoring_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
instances = client.describe_db_instances()["DBInstances"]
instances = self.db_instances
for instance in instances: for instance in instances:
if instance.get("MonitoringInterval", 0): if instance.get("MonitoringInterval", 0):
compliant_resources.append(instance["DBInstanceArn"]) compliant_resources.append(instance["DBInstanceArn"])
@ -216,12 +232,11 @@ def rds_enhanced_monitoring_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_instance_public_access_check(self):
def rds_instance_public_access_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
instances = client.describe_db_instances()["DBInstances"]
instances = self.db_instances
for instance in instances: for instance in instances:
if instance["PubliclyAccessible"]: if instance["PubliclyAccessible"]:
non_compliant_resources.append(instance["DBInstanceArn"]) non_compliant_resources.append(instance["DBInstanceArn"])
@ -234,20 +249,21 @@ def rds_instance_public_access_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_logging_enabled(self):
def rds_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
clusters = client.describe_db_clusters()["DBClusters"]
clusters = self.db_clusters
logs_for_engine = { logs_for_engine = {
"aurora-mysql": ["audit", "error", "general", "slowquery"], "aurora-mysql": ["audit", "error", "general", "slowquery"],
"aurora-postgresql": ["postgresql"], "aurora-postgresql": ["postgresql"],
"docdb": ["audit", "profiler"] "docdb": ["audit", "profiler"],
} }
for cluster in clusters: for cluster in clusters:
if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(cluster["Engine"]): if sorted(cluster["EnabledCloudwatchLogsExports"]) == logs_for_engine.get(
cluster["Engine"]
):
compliant_resources.append(cluster["DBClusterArn"]) compliant_resources.append(cluster["DBClusterArn"])
else: else:
non_compliant_resources.append(cluster["DBClusterArn"]) non_compliant_resources.append(cluster["DBClusterArn"])
@ -258,12 +274,13 @@ def rds_logging_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def rds_snapshot_encrypted(self):
def rds_snapshot_encrypted():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
cluster_snapshots = client.describe_db_cluster_snapshots()["DBClusterSnapshots"] cluster_snapshots = self.client.describe_db_cluster_snapshots()[
"DBClusterSnapshots"
]
for snapshot in cluster_snapshots: for snapshot in cluster_snapshots:
if snapshot.get("StorageEncrypted") == True: if snapshot.get("StorageEncrypted") == True:
@ -276,3 +293,6 @@ def rds_snapshot_encrypted():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = RDSRuleChecker
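The Aurora backup rule keeps the same recency test as before: the newest recovery point must be less than one day old. A standalone example with fabricated timestamps (dateutil is already used above):

# Two fake recovery points, 30 hours and 3 hours old.
import datetime
from dateutil.tz import tzlocal

now = datetime.datetime.now(tz=tzlocal())
creation_dates = sorted([now - datetime.timedelta(hours=30),
                         now - datetime.timedelta(hours=3)])

# Compliant because the most recent point is only 3 hours old.
assert now - creation_dates[-1] < datetime.timedelta(days=1)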


@ -1,20 +1,31 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
import botocore.exceptions import botocore.exceptions
client = boto3.client("s3") class S3RuleChecker(RuleChecker):
sts_client = boto3.client("sts") def __init__(self):
s3control_client = boto3.client("s3control") self.client = boto3.client("s3")
backup_client = boto3.client("backup") self.sts_client = boto3.client("sts")
self.s3control_client = boto3.client("s3control")
self.backup_client = boto3.client("backup")
@cached_property
def account_id(self):
return self.sts_client.get_caller_identity().get("Account")
def s3_access_point_in_vpc_only(): @cached_property
def buckets(self):
return self.client.list_buckets()["Buckets"]
def s3_access_point_in_vpc_only(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
account_id = sts_client.get_caller_identity().get("Account")
access_points = s3control_client.list_access_points(AccountId=account_id)["AccessPointList"]
access_points = self.s3control_client.list_access_points(
AccountId=self.account_id
)["AccessPointList"]
for access_point in access_points: for access_point in access_points:
if access_point["NetworkOrigin"] == "VPC": if access_point["NetworkOrigin"] == "VPC":
compliant_resources.append(access_point["AccessPointArn"]) compliant_resources.append(access_point["AccessPointArn"])
@ -27,18 +38,21 @@ def s3_access_point_in_vpc_only():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_default_lock_enabled(self):
def s3_bucket_default_lock_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
try: try:
response = client.get_object_lock_configuration(Bucket=bucket["Name"]) response = self.client.get_object_lock_configuration(
Bucket=bucket["Name"]
)
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
except botocore.exceptions.ClientError as e: except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "ObjectLockConfigurationNotFoundError": if (
e.response["Error"]["Code"]
== "ObjectLockConfigurationNotFoundError"
):
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
raise e raise e
@ -49,14 +63,12 @@ def s3_bucket_default_lock_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_level_public_access_prohibited(self):
def s3_bucket_level_public_access_prohibited():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
response = client.get_public_access_block(Bucket=bucket["Name"]) response = self.client.get_public_access_block(Bucket=bucket["Name"])
if False not in response["PublicAccessBlockConfiguration"].values(): if False not in response["PublicAccessBlockConfiguration"].values():
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -68,14 +80,12 @@ def s3_bucket_level_public_access_prohibited():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_logging_enabled(self):
def s3_bucket_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
response = client.get_bucket_logging(Bucket=bucket["Name"]) response = self.client.get_bucket_logging(Bucket=bucket["Name"])
if "LoggingEnabled" in response: if "LoggingEnabled" in response:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -87,14 +97,12 @@ def s3_bucket_logging_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_ssl_requests_only(self):
def s3_bucket_ssl_requests_only():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
policy = client.get_bucket_policy(Bucket=bucket["Name"])["Policy"] policy = self.client.get_bucket_policy(Bucket=bucket["Name"])["Policy"]
if "aws:SecureTransport" in policy: if "aws:SecureTransport" in policy:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -106,14 +114,12 @@ def s3_bucket_ssl_requests_only():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_bucket_versioning_enabled(self):
def s3_bucket_versioning_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
response = client.get_bucket_versioning(Bucket=bucket["Name"]) response = self.client.get_bucket_versioning(Bucket=bucket["Name"])
if "Status" in response and response["Status"] == "Enabled": if "Status" in response and response["Status"] == "Enabled":
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
@ -125,16 +131,21 @@ def s3_bucket_versioning_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_default_encryption_kms(self):
def s3_default_encryption_kms():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
configuration = client.get_bucket_encryption(Bucket=bucket["Name"])["ServerSideEncryptionConfiguration"] configuration = self.client.get_bucket_encryption(Bucket=bucket["Name"])[
"ServerSideEncryptionConfiguration"
]
if configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"]["SSEAlgorithm"] == "aws:kms": if (
configuration["Rules"][0]["ApplyServerSideEncryptionByDefault"][
"SSEAlgorithm"
]
== "aws:kms"
):
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
@ -145,14 +156,14 @@ def s3_default_encryption_kms():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_event_notifications_enabled(self):
def s3_event_notifications_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
configuration = client.get_bucket_notification_configuration(Bucket=bucket["Name"]) configuration = self.client.get_bucket_notification_configuration(
Bucket=bucket["Name"]
)
if ( if (
"LambdaFunctionConfigurations" in configuration "LambdaFunctionConfigurations" in configuration
or "QueueConfigurations" in configuration or "QueueConfigurations" in configuration
@ -168,14 +179,14 @@ def s3_event_notifications_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_last_backup_recovery_point_created(self):
def s3_last_backup_recovery_point_created():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
backups = backup_client.list_recovery_points_by_resource(ResourceArn=f"arn:aws:s3:::{bucket['Name']}") backups = self.backup_client.list_recovery_points_by_resource(
ResourceArn=f"arn:aws:s3:::{bucket['Name']}"
)
if backups["RecoveryPoints"] != []: if backups["RecoveryPoints"] != []:
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
@ -188,18 +199,18 @@ def s3_last_backup_recovery_point_created():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def s3_lifecycle_policy_check(self):
def s3_lifecycle_policy_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
buckets = client.list_buckets()["Buckets"]
for bucket in buckets: for bucket in self.buckets:
try: try:
configuration = client.get_bucket_lifecycle_configuration(Bucket=bucket["Name"]) configuration = self.client.get_bucket_lifecycle_configuration(
Bucket=bucket["Name"]
)
compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
except botocore.exceptions.ClientError as e: except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "NoSuchLifecycleConfiguration": if e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration":
non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}") non_compliant_resources.append(f"arn:aws:s3:::{bucket['Name']}")
else: else:
raise e raise e
@ -209,3 +220,6 @@ def s3_lifecycle_policy_check():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = S3RuleChecker
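Several S3 rules treat one specific ClientError code as "not configured" and re-raise everything else. A sketch of that branch with a fabricated error (assumes botocore is installed; no bucket is touched):

# Build a ClientError by hand to exercise the error-code check.
import botocore.exceptions

err = botocore.exceptions.ClientError(
    {"Error": {"Code": "NoSuchLifecycleConfiguration", "Message": "not set"}},
    "GetBucketLifecycleConfiguration",
)

try:
    raise err
except botocore.exceptions.ClientError as e:
    # Only this code marks the bucket non-compliant; anything else is re-raised.
    assert e.response["Error"]["Code"] == "NoSuchLifecycleConfiguration"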


@ -1,19 +1,24 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
import datetime from datetime import datetime, timedelta
from dateutil.tz import tzlocal from dateutil.tz import tzlocal
client = boto3.client("secretsmanager") class SecretsManagerRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("secretsmanager")
@cached_property
def secrets(self):
return self.client.list_secrets()["SecretList"]
def secretsmanager_rotation_enabled_check(): def secretsmanager_rotation_enabled_check(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in secrets: for secret in self.secrets:
if secret.get("RotationEnabled") == True: if secret.get("RotationEnabled", False):
compliant_resources.append(secret["ARN"]) compliant_resources.append(secret["ARN"])
else: else:
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
@ -24,20 +29,18 @@ def secretsmanager_rotation_enabled_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def secretsmanager_scheduled_rotation_success_check(self):
def secretsmanager_scheduled_rotation_success_check():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in secrets: for secret in self.secrets:
if secret.get("RotationEnabled") == True: if secret.get("RotationEnabled", False):
if 'LastRotatedDate' not in secret: if "LastRotatedDate" not in secret:
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
continue continue
now = datetime.datetime.now(tz=tzlocal()) now = datetime.now(tz=tzlocal())
rotation_period = datetime.timedelta( rotation_period = timedelta(
days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2 days=secret["RotationRules"]["AutomaticallyAfterDays"] + 2
) # AWS allows up to 2 days of delay ) # AWS allows up to 2 days of delay
elapsed_time_after_rotation = now - secret["LastRotatedDate"] elapsed_time_after_rotation = now - secret["LastRotatedDate"]
@ -53,22 +56,20 @@ def secretsmanager_scheduled_rotation_success_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def secretsmanager_secret_periodic_rotation(self):
def secretsmanager_secret_periodic_rotation():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
secrets = client.list_secrets()["SecretList"]
for secret in secrets: for secret in self.secrets:
if secret.get("RotationEnabled") == True: if secret.get("RotationEnabled") == True:
if 'LastRotatedDate' not in secret: if "LastRotatedDate" not in secret:
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
continue continue
now = datetime.datetime.now(tz=tzlocal()) now = datetime.now(tz=tzlocal())
elapsed_time_after_rotation = now - secret["LastRotatedDate"] elapsed_time_after_rotation = now - secret["LastRotatedDate"]
if elapsed_time_after_rotation > datetime.timedelta(days=90): if elapsed_time_after_rotation > timedelta(days=90):
non_compliant_resources.append(secret["ARN"]) non_compliant_resources.append(secret["ARN"])
else: else:
compliant_resources.append(secret["ARN"]) compliant_resources.append(secret["ARN"])
@ -78,3 +79,6 @@ def secretsmanager_secret_periodic_rotation():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = SecretsManagerRuleChecker
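The scheduled-rotation check above allows the two extra days AWS may take before a rotation actually runs. With hypothetical numbers, a secret on a 30-day schedule that last rotated 31 days ago still passes:

# Fabricated rotation dates; mirrors the grace-window arithmetic above.
from datetime import datetime, timedelta
from dateutil.tz import tzlocal

now = datetime.now(tz=tzlocal())
last_rotated = now - timedelta(days=31)
rotation_period = timedelta(days=30 + 2)  # schedule plus the 2-day grace window

assert now - last_rotated < rotation_period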


@ -1,19 +1,19 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
import boto3 import boto3
client = boto3.client("securityhub") class SecurityHubRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("securityhub")
self.sts_client = boto3.client("sts")
sts_client = boto3.client("sts") def securityhub_enabled(self):
def securityhub_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
aws_account_id = sts_client.get_caller_identity()["Account"] aws_account_id = self.sts_client.get_caller_identity()["Account"]
try: try:
hub = client.describe_hub() hub = self.client.describe_hub()
compliant_resources.append(aws_account_id) compliant_resources.append(aws_account_id)
except Exception as e: except Exception as e:
if e.__class__.__name__ == "InvalidAccessException": if e.__class__.__name__ == "InvalidAccessException":
@ -26,3 +26,6 @@ def securityhub_enabled():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = SecurityHubRuleChecker
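The refactored securityhub_enabled still matches the exception by class name. A hedged sketch of an equivalent variant that catches the boto3-modeled exception directly (assuming the securityhub client exposes InvalidAccessException; passed is derived here as the absence of non-compliant accounts, which may differ slightly from the original):

    def securityhub_enabled(self):
        aws_account_id = self.sts_client.get_caller_identity()["Account"]
        try:
            # DescribeHub raises InvalidAccessException when Security Hub is not enabled
            self.client.describe_hub()
            compliant, non_compliant = [aws_account_id], []
        except self.client.exceptions.InvalidAccessException:
            compliant, non_compliant = [], [aws_account_id]

        return RuleCheckResult(
            passed=not non_compliant,
            compliant_resources=compliant,
            non_compliant_resources=non_compliant,
        )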

View File

@ -1,17 +1,25 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
client = boto3.client("sns") class SNSRuleChecker(RuleChecker):
def __init__(self):
self.client = boto3.client("sns")
@cached_property
def topics(self):
topics = self.client.list_topics()["Topics"]
return [
self.client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
for topic in topics
]
def sns_encrypted_kms(): def sns_encrypted_kms(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
topics = client.list_topics()["Topics"]
for topic in topics: for topic in self.topics:
topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"]
if "KmsMasterKeyId" in topic: if "KmsMasterKeyId" in topic:
compliant_resources.append(topic["TopicArn"]) compliant_resources.append(topic["TopicArn"])
else: else:
@ -23,19 +31,19 @@ def sns_encrypted_kms():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def sns_topic_message_delivery_notification_enabled(self):
def sns_topic_message_delivery_notification_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
topics = client.list_topics()["Topics"]
for topic in topics: for topic in self.topics:
topic = client.get_topic_attributes(TopicArn=topic["TopicArn"])["Attributes"] notification_roles = [
attribute
for attribute in topic.keys()
if attribute.endswith("FeedbackRoleArn")
]
for key in topic.keys(): if notification_roles:
if key.endswith("FeedbackRoleArn") == True:
compliant_resources.append(topic["TopicArn"]) compliant_resources.append(topic["TopicArn"])
break
else: else:
non_compliant_resources.append(topic["TopicArn"]) non_compliant_resources.append(topic["TopicArn"])
@ -44,3 +52,6 @@ def sns_topic_message_delivery_notification_enabled():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = SNSRuleChecker
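Because topics is a cached property, the ListTopics/GetTopicAttributes round trips happen once per checker instance and are shared by both SNS rules. A hedged usage sketch (module path assumed; the rule key is spelled as it would appear in the best-practice config and is snake-cased by the dispatcher):

    from services import sns

    checker = sns.rule_checker()                      # SNSRuleChecker instance
    result = checker.check_rule("sns-encrypted-kms")  # dispatched via getattr
    print(result.passed)
    print(result.non_compliant_resources)             # topic ARNs with no KmsMasterKeyId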

View File

@ -1,11 +0,0 @@
from models import RuleCheckResult
import boto3
# client = boto3.client("")
def required_tags():
return RuleCheckResult(
passed=False, compliant_resources=[], non_compliant_resources=[]
)

View File

@ -1,13 +1,18 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from pprint import pprint from functools import cached_property
import boto3 import boto3
ec2 = boto3.client("ec2") class VPCRuleChecker(RuleChecker):
def __init__(self):
self.ec2 = boto3.client("ec2")
@cached_property
def security_group_rules(self):
return self.ec2.describe_security_group_rules()["SecurityGroupRules"]
def ec2_transit_gateway_auto_vpc_attach_disabled(): def ec2_transit_gateway_auto_vpc_attach_disabled(self):
response = ec2.describe_transit_gateways() response = self.ec2.describe_transit_gateways()
non_compliant_resources = [ non_compliant_resources = [
resource["TransitGatewayArn"] resource["TransitGatewayArn"]
@ -18,7 +23,12 @@ def ec2_transit_gateway_auto_vpc_attach_disabled():
] ]
compliant_resources = list( compliant_resources = list(
set([resource["TransitGatewayArn"] for resource in response["TransitGateways"]]) set(
[
resource["TransitGatewayArn"]
for resource in response["TransitGateways"]
]
)
- set(non_compliant_resources) - set(non_compliant_resources)
) )
@ -28,10 +38,7 @@ def ec2_transit_gateway_auto_vpc_attach_disabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def restricted_ssh(self):
def restricted_ssh():
response = ec2.describe_security_group_rules()
non_compliant_resources = [ non_compliant_resources = [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in filter( for resource in filter(
@ -39,7 +46,7 @@ def restricted_ssh():
and x["FromPort"] <= 22 and x["FromPort"] <= 22
and x["ToPort"] >= 22 and x["ToPort"] >= 22
and x.get("CidrIpv4") == "0.0.0.0/0", and x.get("CidrIpv4") == "0.0.0.0/0",
response["SecurityGroupRules"], self.security_group_rules,
) )
] ]
@ -47,7 +54,7 @@ def restricted_ssh():
set( set(
[ [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in response["SecurityGroupRules"] for resource in self.security_group_rules
] ]
) )
- set(non_compliant_resources) - set(non_compliant_resources)
@ -58,8 +65,7 @@ def restricted_ssh():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def restricted_common_ports(self):
def restricted_common_ports():
common_ports = [ common_ports = [
22, # SSH 22, # SSH
80, # HTTP 80, # HTTP
@ -69,7 +75,6 @@ def restricted_common_ports():
6379, # Redis 6379, # Redis
11211, # Memcached 11211, # Memcached
] ]
response = ec2.describe_security_group_rules()
non_compliant_resources = [ non_compliant_resources = [
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
@ -78,14 +83,14 @@ def restricted_common_ports():
and x["FromPort"] in common_ports and x["FromPort"] in common_ports
and x["ToPort"] in common_ports and x["ToPort"] in common_ports
and x.get("PrefixListId") is None, and x.get("PrefixListId") is None,
response["SecurityGroupRules"], self.security_group_rules,
) )
] ]
compliant_resources = list( compliant_resources = list(
set( set(
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in response["SecurityGroupRules"] for resource in self.security_group_rules
) )
- set(non_compliant_resources) - set(non_compliant_resources)
) )
@ -96,13 +101,14 @@ def restricted_common_ports():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def subnet_auto_assign_public_ip_disabled(self):
def subnet_auto_assign_public_ip_disabled(): response = self.ec2.describe_subnets()
response = ec2.describe_subnets()
non_compliant_resources = [ non_compliant_resources = [
resource["SubnetId"] resource["SubnetId"]
for resource in filter(lambda x: x["MapPublicIpOnLaunch"], response["Subnets"]) for resource in filter(
lambda x: x["MapPublicIpOnLaunch"], response["Subnets"]
)
] ]
compliant_resources = list( compliant_resources = list(
@ -116,9 +122,8 @@ def subnet_auto_assign_public_ip_disabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_default_security_group_closed(self):
def vpc_default_security_group_closed(): response = self.ec2.describe_security_groups(
response = ec2.describe_security_groups(
Filters=[{"Name": "group-name", "Values": ["default"]}] Filters=[{"Name": "group-name", "Values": ["default"]}]
) )
@ -141,14 +146,13 @@ def vpc_default_security_group_closed():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_flow_logs_enabled(self):
def vpc_flow_logs_enabled(): response = self.ec2.describe_flow_logs()
response = ec2.describe_flow_logs()
flow_log_enabled_vpcs = [ flow_log_enabled_vpcs = [
resource["ResourceId"] for resource in response["FlowLogs"] resource["ResourceId"] for resource in response["FlowLogs"]
] ]
response = ec2.describe_vpcs() response = self.ec2.describe_vpcs()
non_compliant_resources = [ non_compliant_resources = [
resource["VpcId"] resource["VpcId"]
@ -168,13 +172,14 @@ def vpc_flow_logs_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_network_acl_unused_check(self):
def vpc_network_acl_unused_check(): response = self.ec2.describe_network_acls()
response = ec2.describe_network_acls()
non_compliant_resources = [ non_compliant_resources = [
resource["NetworkAclId"] resource["NetworkAclId"]
for resource in filter(lambda x: not x["Associations"], response["NetworkAcls"]) for resource in filter(
lambda x: not x["Associations"], response["NetworkAcls"]
)
] ]
compliant_resources = list( compliant_resources = list(
@ -188,9 +193,8 @@ def vpc_network_acl_unused_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_peering_dns_resolution_check(self):
def vpc_peering_dns_resolution_check(): response = self.ec2.describe_vpc_peering_connections()
response = ec2.describe_vpc_peering_connections()
non_compliant_resources = [ non_compliant_resources = [
resource["VpcPeeringConnectionId"] resource["VpcPeeringConnectionId"]
@ -223,10 +227,7 @@ def vpc_peering_dns_resolution_check():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def vpc_sg_open_only_to_authorized_ports(self):
def vpc_sg_open_only_to_authorized_ports():
response = ec2.describe_security_group_rules()
authorized_port = [ authorized_port = [
# e.g. 80 # e.g. 80
] ]
@ -238,14 +239,14 @@ def vpc_sg_open_only_to_authorized_ports():
and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0") and (x.get("CidrIpv4") == "0.0.0.0/0" or x.get("CidrIpv6") == "::/0")
and x["FromPort"] not in authorized_port and x["FromPort"] not in authorized_port
and x["ToPort"] not in authorized_port, and x["ToPort"] not in authorized_port,
response["SecurityGroupRules"], self.security_group_rules,
) )
] ]
compliant_resources = list( compliant_resources = list(
set( set(
f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}' f'{resource["GroupId"]} / {resource["SecurityGroupRuleId"]}'
for resource in response["SecurityGroupRules"] for resource in self.security_group_rules
) )
- set(non_compliant_resources) - set(non_compliant_resources)
) )
@ -255,3 +256,6 @@ def vpc_sg_open_only_to_authorized_ports():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = VPCRuleChecker
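With security_group_rules as a cached property, DescribeSecurityGroupRules is issued once per VPCRuleChecker instance and shared by restricted_ssh, restricted_common_ports, and vpc_sg_open_only_to_authorized_ports. A minimal standalone illustration of that caching behavior (names here are illustrative, not taken from the diff):

    from functools import cached_property

    class CachingDemo:
        api_calls = 0  # stand-in for the number of AWS API requests made

        @cached_property
        def rules(self):
            # Runs on first access only; later reads return the cached value.
            CachingDemo.api_calls += 1
            return ["sgr-0001", "sgr-0002"]

    demo = CachingDemo()
    demo.rules
    demo.rules
    assert CachingDemo.api_calls == 1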

View File

@ -1,20 +1,48 @@
from models import RuleCheckResult from models import RuleCheckResult, RuleChecker
from functools import cached_property
import boto3 import boto3
client = boto3.client("wafv2") class WAFv2RuleChecker(RuleChecker):
global_client = boto3.client("wafv2", region_name="us-east-1") def __init__(self):
self.client = boto3.client("wafv2")
self.global_client = boto3.client("wafv2", region_name="us-east-1")
@cached_property
def regional_web_acls(self):
return self.client.list_web_acls(Scope="REGIONAL")["WebACLs"]
def wafv2_logging_enabled(): @cached_property
def cloudfront_web_acls(self):
return self.global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
@cached_property
def regional_rule_groups(self):
rule_groups = self.client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
return [
self.client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
for rule_group in rule_groups
]
@cached_property
def cloudfront_rule_groups(self):
rule_groups = self.global_client.list_rule_groups(Scope="CLOUDFRONT")[
"RuleGroups"
]
return [
self.global_client.get_rule_group(ARN=rule_group["ARN"])["RuleGroup"]
for rule_group in rule_groups
]
def wafv2_logging_enabled(self):
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"]
cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
for web_acl in regional_web_acls: for web_acl in self.regional_web_acls:
try: try:
configuration = client.get_logging_configuration(ResourceArn=web_acl["ARN"]) configuration = self.client.get_logging_configuration(
ResourceArn=web_acl["ARN"]
)
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
except Exception as e: except Exception as e:
if e.__class__.__name__ == "WAFNonexistentItemException": if e.__class__.__name__ == "WAFNonexistentItemException":
@ -22,9 +50,11 @@ def wafv2_logging_enabled():
else: else:
raise e raise e
for web_acl in cloudfront_web_acls: for web_acl in self.cloudfront_web_acls:
try: try:
configuration = global_client.get_logging_configuration(ResourceArn=web_acl["ARN"]) configuration = self.global_client.get_logging_configuration(
ResourceArn=web_acl["ARN"]
)
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
except Exception as e: except Exception as e:
if e.__class__.__name__ == "WAFNonexistentItemException": if e.__class__.__name__ == "WAFNonexistentItemException":
@ -38,24 +68,18 @@ def wafv2_logging_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def wafv2_rulegroup_logging_enabled(self):
def wafv2_rulegroup_logging_enabled():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"]
for rule_group in self.regional_rule_groups:
for rule_group in regional_rule_groups: if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"]:
configuration = client.get_rule_group(ARN=rule_group["ARN"])
if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
for rule_group in cloudfront_rule_groups: for rule_group in self.cloudfront_rule_groups:
configuration = global_client.get_rule_group(ARN=rule_group["ARN"]) if rule_group["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
if configuration["RuleGroup"]["VisibilityConfig"]["CloudWatchMetricsEnabled"] == True:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
@ -66,23 +90,18 @@ def wafv2_rulegroup_logging_enabled():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def wafv2_rulegroup_not_empty(self):
def wafv2_rulegroup_not_empty():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_rule_groups = client.list_rule_groups(Scope="REGIONAL")["RuleGroups"]
cloudfront_rule_groups = global_client.list_rule_groups(Scope="CLOUDFRONT")["RuleGroups"]
for rule_group in regional_rule_groups: for rule_group in self.regional_rule_groups:
configuration = client.get_rule_group(ARN=rule_group["ARN"]) if len(rule_group["Rules"]) > 0:
if len(configuration["RuleGroup"]["Rules"]) > 0:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
for rule_group in cloudfront_rule_groups: for rule_group in self.cloudfront_rule_groups:
configuration = global_client.get_rule_group(ARN=rule_group["ARN"]) if len(rule_group["Rules"]) > 0:
if len(configuration["RuleGroup"]["Rules"]) > 0:
compliant_resources.append(rule_group["ARN"]) compliant_resources.append(rule_group["ARN"])
else: else:
non_compliant_resources.append(rule_group["ARN"]) non_compliant_resources.append(rule_group["ARN"])
@ -93,21 +112,23 @@ def wafv2_rulegroup_not_empty():
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
def wafv2_webacl_not_empty(self):
def wafv2_webacl_not_empty():
compliant_resources = [] compliant_resources = []
non_compliant_resources = [] non_compliant_resources = []
regional_web_acls = client.list_web_acls(Scope="REGIONAL")["WebACLs"]
cloudfront_web_acls = global_client.list_web_acls(Scope="CLOUDFRONT")["WebACLs"]
for web_acl in regional_web_acls: for web_acl in self.regional_web_acls:
response = client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL") response = self.client.get_web_acl(
Id=web_acl["Id"], Name=web_acl["Name"], Scope="REGIONAL"
)
if len(response["WebACL"]["Rules"]) > 0: if len(response["WebACL"]["Rules"]) > 0:
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
else: else:
non_compliant_resources.append(web_acl["ARN"]) non_compliant_resources.append(web_acl["ARN"])
for web_acl in cloudfront_web_acls:
response = global_client.get_web_acl(Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT") for web_acl in self.cloudfront_web_acls:
response = self.global_client.get_web_acl(
Id=web_acl["Id"], Name=web_acl["Name"], Scope="CLOUDFRONT"
)
if len(response["WebACL"]["Rules"]) > 0: if len(response["WebACL"]["Rules"]) > 0:
compliant_resources.append(web_acl["ARN"]) compliant_resources.append(web_acl["ARN"])
else: else:
@ -118,3 +139,6 @@ def wafv2_webacl_not_empty():
compliant_resources=compliant_resources, compliant_resources=compliant_resources,
non_compliant_resources=non_compliant_resources, non_compliant_resources=non_compliant_resources,
) )
rule_checker = WAFv2RuleChecker
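As in the other service modules, the file ends by exposing the checker class as rule_checker so the dispatcher can instantiate it by name. A hedged end-to-end sketch (module name assumed; rule keys spelled as they would appear in the config):

    from services import wafv2

    checker = wafv2.rule_checker()  # WAFv2RuleChecker()
    for rule in ("wafv2-logging-enabled", "wafv2-rulegroup-not-empty", "wafv2-webacl-not-empty"):
        result = checker.check_rule(rule)
        print(rule, "passed" if result.passed else "failed")

    # ListWebACLs, ListRuleGroups, and the per-group GetRuleGroup calls run at most
    # once per scope, because their results are cached on the checker instance.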